user_home = details[:home] || account["home"] || "#{node[:accounts][:home]}/#{name}"
manage_home = details[:manage_home] || account["manage_home"] || node[:accounts][:manage_home]
- group_members = group_members.collect { |m| m.to_s }.sort
+ group_members = group_members.collect(&:to_s).sort
case details[:status]
when "role"
end
end
-node[:accounts][:groups].each do |name,details|
+node[:accounts][:groups].each do |name, details|
group name do
action :modify
members details[:members]
-default[:bind] = { }
+default[:bind] = {}
validation_key '/etc/chef/validation.pem'
chef_server_url 'https://chef.openstreetmap.org'
cache_type 'BasicFile'
-cache_options( :path => '.chef/checksums' )
+cache_options :path => '.chef/checksums'
cookbook_path [ 'cookbooks' ]
cookbook_copyright 'OpenStreetMap Administrators'
cookbook_email 'admins@openstreetmap.org'
class Util
def self.compare_versions(a, b)
if a.kind_of?(String)
- a = a.split(".").map { |c| c.to_i }
+ a = a.split(".").map(&:to_i)
end
if b.kind_of?(String)
- b = b.split(".").map { |c| c.to_i }
+ b = b.split(".").map(&:to_i)
end
a <=> b
command = scm(:info)
status, svn_info, error_message = output_of_command(command, run_options(:cwd => cwd))
- unless [0,1].include?(status.exitstatus)
+ unless [0, 1].include?(status.exitstatus)
handle_command_failures(status, "STDOUT: #{svn_info}\nSTDERR: #{error_message}")
end
details = node[:accounts][:users][name] || {}
port = 7000 + account["uid"].to_i
- if ["user","administrator"].include?(details[:status])
+ if ["user", "administrator"].include?(details[:status])
user_home = details[:home] || account["home"] || "#{node[:accounts][:home]}/#{name}"
if File.directory?("#{user_home}/public_html")
cluster "9.3/main"
end
- node[:dev][:rails].each do |name,details|
+ node[:dev][:rails].each do |name, details|
database_name = details[:database] || "apis_#{name}"
site_name = "#{name}.apis.dev.openstreetmap.org"
rails_directory = "/srv/#{name}.apis.dev.openstreetmap.org"
default[:exim][:trusted_users] = [ ]
default[:exim][:smarthost_name] = nil
default[:exim][:smarthost_via] = "mail.openstreetmap.org:26"
-default[:exim][:routes] = { }
+default[:exim][:routes] = {}
default[:exim][:aliases][:root] = "tomh"
default[:exim][:rewrites] = [ ]
attribute "exim/routes",
:display_name => "Custom Routes",
:description => "Custom routes for handling local mail",
- :default => { }
+ :default => {}
attribute "exim/aliases",
:display_name => "Mail Aliases",
:description => "Mail aliases",
- :default => { }
+ :default => {}
if node[:exim][:private_aliases]
aliases = data_bag_item("exim", "aliases")
- aliases[node[:exim][:private_aliases]].each do |name,address|
+ aliases[node[:exim][:private_aliases]].each do |name, address|
node.default[:exim][:aliases][name] = address
end
end
sensors ||= {}
results = []
- sensors.sort.each do |sensor,attributes|
+ sensors.sort.each do |sensor, attributes|
if attributes[:ignore]
results << "ignore #{sensor}"
else
end
end
-node[:block_device].each do |name,attributes|
+node[:block_device].each do |name, attributes|
if attributes[:vendor] == "HP" and attributes[:model] == "LOGICAL VOLUME"
if name =~ /^cciss!(c[0-9]+)d[0-9]+$/
status_packages["cciss-vol-status"] |= [ "cciss/#{$1}d0" ]
incrontabs[user].push("#{path} #{mask} #{command}")
end
-incrontabs.each do |user,lines|
+incrontabs.each do |user, lines|
file "/var/spool/incron/#{user}" do
owner user
group "incron"
:private_accounts => params[:private_accounts] || FALSE,
:private => params[:private] || FALSE,
:recaptcha_public => params[:recaptcha_public_key],
- :recaptcha_private => params[:recaptcha_private_key],
+ :recaptcha_private => params[:recaptcha_private_key]
}
#----------------
ruby_block "rename-installer-localsettings" do
action :nothing
block do
- ::File.rename("#{mediawiki[:directory]}/LocalSettings.php","#{mediawiki[:directory]}/LocalSettings-install.php")
+ ::File.rename("#{mediawiki[:directory]}/LocalSettings.php", "#{mediawiki[:directory]}/LocalSettings-install.php")
end
end
doc = Hpricot.parse(file)
tables = doc / 'table'
rows = (tables[0] / 'tr')[1..-1]
- data = rows.collect {|r| (r / 'td').collect {|x| x.inner_html} }
+ data = rows.collect { |r| (r / 'td').collect(&:inner_html) }
# filter where the PID is numeric, status is 'W' and host matches the server
- matching_data = data.select {|r| (r[1].to_i > 0) && r[3].match(/W/) && r[11].match(server)}
+ matching_data = data.select { |r| (r[1].to_i > 0) && r[3].match(/W/) && r[11].match(server) }
# return only the URI part
- matching_data.collect {|r| r[12]}
+ matching_data.collect { |r| r[12] }
end
CALL_TYPES = {
else
counts = uris_from_status(server).
- collect {|x| categorise_uri(x)}.
+ collect { |x| categorise_uri(x) }.
inject(Hash.new) do |h, e|
if h.has_key? e
h[e] += 1
min_time, max_time, lines = uris_from_logs
delta_t = (max_time - min_time).to_f * 24 * 60
counts = lines.
- collect {|x| categorise_uri(x)}.
+ collect { |x| categorise_uri(x) }.
inject(Hash.new) do |h, e|
if h.has_key? e
h[e] += 1
doc = Hpricot.parse(file)
tables = doc / 'table'
rows = (tables[0] / 'tr')[1..-1]
- data = rows.collect {|r| (r / 'td').collect {|x| x.inner_html} }
+ data = rows.collect { |r| (r / 'td').collect(&:inner_html) }
# filter where the PID is numeric, status is 'W' and host matches the server
- matching_data = data.select {|r| (r[1].to_i > 0) && r[3].match(/W/) && r[11].match(server)}
+ matching_data = data.select { |r| (r[1].to_i > 0) && r[3].match(/W/) && r[11].match(server) }
# return URI and number of seconds processing for each request
- matching_data.collect {|r| [r[12], r[5].to_i]}
+ matching_data.collect { |r| [r[12], r[5].to_i] }
end
CALL_TYPES = {
else
counts = uri_and_times_from_status(server).
- collect {|x,y| [categorise_uri(x), y]}.
+ collect { |x, y| [categorise_uri(x), y] }.
inject(Hash.new) do |h, e|
category, time = e
if h.has_key? category
CALL_TYPES.keys.each do |type|
count = counts[type] || [0]
- avg = count.inject(0){|x,y|x+y} / (1.0 * count.length)
+ avg = count.inject(0) { |x, y| x + y } / (1.0 * count.length)
puts "#{type}.value #{avg}"
end
end
end
end
-disks = node[:block_device].select do |_,attributes|
+disks = node[:block_device].select do |_, attributes|
[ "ATA", "FUJITSU", "SEAGATE", "DELL", "COMPAQ", "IBM-ESXS" ].include?(attributes[:vendor])
end
action :delete
end
-node[:network][:interfaces].each do |ifname,ifattr|
+node[:network][:interfaces].each do |ifname, ifattr|
if ifattr[:encapsulation] == "Ethernet" and ifattr[:state] == "up"
munin_plugin "if_err_#{ifname}" do
target "if_err_"
end
end
-node[:block_device].each do |name,attributes|
+node[:block_device].each do |name, attributes|
if attributes[:vendor] == "ATA"
munin_plugin "smart_#{name}" do
target "smart_"
end
def users
- @users ||= query("SELECT * FROM user").inject({}) do |users,user|
+ @users ||= query("SELECT * FROM user").inject({}) do |users, user|
name = "'#{user[:user]}'@'#{user[:host]}'"
- users[name] = USER_PRIVILEGES.inject({}) do |privileges,privilege|
+ users[name] = USER_PRIVILEGES.inject({}) do |privileges, privilege|
privileges[privilege] = user["#{privilege}_priv".to_sym] == "Y"
privileges
end
end
def databases
- @databases ||= query("SHOW databases").inject({}) do |databases,database|
+ @databases ||= query("SHOW databases").inject({}) do |databases, database|
databases[database[:database]] = {
:permissions => {}
}
if database = @databases[record[:db]]
user = "'#{record[:user]}'@'#{record[:host]}'"
- database[:permissions][user] = DATABASE_PRIVILEGES.inject([]) do |privileges,privilege|
+ database[:permissions][user] = DATABASE_PRIVILEGES.inject([]) do |privileges, privilege|
privileges << privilege if record["#{privilege}_priv".to_sym] == "Y"
privileges
end
end
end
- new_permissions = Hash[new_resource.permissions.collect do |user,privileges|
+ new_permissions = Hash[new_resource.permissions.collect do |user, privileges|
[@mysql.canonicalise_user(user), privileges]
end]
end
end
- new_permissions.each do |user,new_privileges|
+ new_permissions.each do |user, new_privileges|
current_privileges = @current_resource.permissions[user] || {}
new_privileges = Array(new_privileges)
-default[:networking][:interfaces] = { }
+default[:networking][:interfaces] = {}
default[:networking][:nameservers] = [ ]
default[:networking][:search] = [ ]
end
def internal_ipaddress
- return ipaddresses(:role => :internal).first
+ ipaddresses(:role => :internal).first
end
def external_ipaddress
- return ipaddresses(:role => :external).first
+ ipaddresses(:role => :external).first
end
end
end
require "ipaddr"
-node[:networking][:interfaces].each do |name,interface|
+node[:networking][:interfaces].each do |name, interface|
if interface[:role] and role = node[:networking][:roles][interface[:role]]
if role[interface[:family]]
node.default[:networking][:interfaces][name][:prefix] = role[interface[:family]][:prefix]
package "nfs-common"
-node[:nfs].each do |mountpoint,details|
+node[:nfs].each do |mountpoint, details|
if details[:readonly]
mount_options = "ro,bg,soft,udp,rsize=8192,wsize=8192,nfsvers=3"
else
action [ :disable ]
end
-node[:nominatim][:fpm_pools].each do |name,data|
+node[:nominatim][:fpm_pools].each do |name, data|
template "/etc/php5/fpm/pool.d/#{name}.conf" do
source "fpm.conf.erb"
owner "root"
case platform
-when "ubuntu","debian"
+when "ubuntu", "debian"
default[:ntp][:service] = "ntp"
-when "redhat","centos","fedora"
+when "redhat", "centos", "fedora"
default[:ntp][:service] = "ntpd"
end
ignore_failure true
end
-node[:openvpn][:tunnels].each do |name,details|
+node[:openvpn][:tunnels].each do |name, details|
if peer = search(:node, "fqdn:#{details[:peer][:host]}").first
if peer[:openvpn] and not details[:peer][:address]
node.default[:openvpn][:tunnels][name][:peer][:address] = peer[:openvpn][:address]
require 'zlib'
# after this many changes, a changeset will be closed
-CHANGES_LIMIT=50000
+CHANGES_LIMIT = 50000
# this is the scale factor for lat/lon values stored as integers in the database
-GEO_SCALE=10000000
+GEO_SCALE = 10000000
##
# changeset class keeps some information about changesets downloaded from the
# there.
@conn.
exec("select id, created_at, closed_at, num_changes from changesets where closed_at > ((now() at time zone 'utc') - '1 hour'::interval)").
- map {|row| Changeset.new(row) }.
- select {|cs| cs.activity_between?(last_run, @now) }
+ map { |row| Changeset.new(row) }.
+ select { |cs| cs.activity_between?(last_run, @now) }
end
# creates an XML file containing the changeset information from the
'copyright' => "OpenStreetMap and contributors",
'attribution' => "http://www.openstreetmap.org/copyright",
'license' => "http://opendatacommons.org/licenses/odbl/1-0/" }.
- each { |k,v| doc.root[k] = v }
+ each { |k, v| doc.root[k] = v }
changesets.each do |cs|
xml = XML::Node.new("changeset")
fl.flock(File::LOCK_EX)
sequence = (@state.has_key?('sequence') ? @state['sequence'] + 1 : 0)
- data_file = @config['data_dir'] + sprintf("/%03d/%03d/%03d.osm.gz", sequence / 1000000, (sequence / 1000) % 1000, (sequence % 1000));
+ data_file = @config['data_dir'] + sprintf("/%03d/%03d/%03d.osm.gz", sequence / 1000000, (sequence / 1000) % 1000, (sequence % 1000))
tmp_state = @config['state_file'] + ".tmp"
tmp_data = "/tmp/changeset_data.osm.tmp"
# try and write the files to tmp locations and then
# Extract the record data
lines.collect do |line|
record = {}
- fields.zip(line.split("|")) { |name,value| record[name.to_sym] = value }
+ fields.zip(line.split("|")) { |name, value| record[name.to_sym] = value }
record
end
end
def users
- @users ||= query("SELECT * FROM pg_user").inject({}) do |users,user|
+ @users ||= query("SELECT * FROM pg_user").inject({}) do |users, user|
users[user[:usename]] = {
:superuser => user[:usesuper] == "t",
:createdb => user[:usercreatedb] == "t",
end
def databases
- @databases ||= query("SELECT d.datname, u.usename, d.encoding, d.datcollate, d.datctype FROM pg_database AS d INNER JOIN pg_user AS u ON d.datdba = u.usesysid").inject({}) do |databases,database|
+ @databases ||= query("SELECT d.datname, u.usename, d.encoding, d.datcollate, d.datctype FROM pg_database AS d INNER JOIN pg_user AS u ON d.datdba = u.usesysid").inject({}) do |databases, database|
databases[database[:datname]] = {
:owner => database[:usename],
:encoding => database[:encoding],
def extensions(database)
@extensions ||= {}
- @extensions[database] ||= query("SELECT extname, extversion FROM pg_extension", :database => database).inject({}) do |extensions,extension|
+ @extensions[database] ||= query("SELECT extname, extversion FROM pg_extension", :database => database).inject({}) do |extensions, extension|
extensions[extension[:extname]] = {
:version => extension[:extversion]
}
def tables(database)
@tables ||= {}
- @tables[database] ||= query("SELECT n.nspname, c.relname, u.usename, c.relacl FROM pg_class AS c INNER JOIN pg_user AS u ON c.relowner = u.usesysid INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid", :database => database).inject({}) do |tables,table|
+ @tables[database] ||= query("SELECT n.nspname, c.relname, u.usename, c.relacl FROM pg_class AS c INNER JOIN pg_user AS u ON c.relowner = u.usesysid INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid", :database => database).inject({}) do |tables, table|
name = "#{table[:nspname]}.#{table[:relname]}"
tables[name] = {
end
end
- new_resource.permissions.each do |user,new_privileges|
+ new_resource.permissions.each do |user, new_privileges|
current_privileges = @current_resource.permissions[user] || {}
new_privileges = Array(new_privileges)
clusters = node[:postgresql][:clusters] || []
-clusters.each do |name,details|
+clusters.each do |name, details|
suffix = name.tr("/", ":")
munin_plugin "postgres_bgwriter_#{suffix}" do
hosts_allow = Hash.new
hosts_deny = Hash.new
-node[:rsyncd][:modules].each do |name,details|
+node[:rsyncd][:modules].each do |name, details|
hosts_allow[name] = details[:hosts_allow] || []
if details[:nodes_allow]
end
node[:sysctl].each_value do |group|
- group[:parameters].each do |key,value|
+ group[:parameters].each do |key, value|
sysctl_file = "/proc/sys/#{key.gsub('.', '/')}"
file sysctl_file do
end
node[:sysfs].each_value do |group|
- group[:parameters].each do |key,value|
+ group[:parameters].each do |key, value|
sysfs_file = "/sys/#{key}"
file sysfs_file do
"+nadgrids=@null", "+no_defs +over"])
# width/height of the spherical mercator projection
- SIZE=40075016.6855784
+ SIZE = 40075016.6855784
# the size of the meta tile blocks
METATILE = 8
# the directory root for meta tiles
HASH_ROOT = "/tiles/default/"
# node cache file
- NODE_CACHE_FILE="/store/database/nodes"
+ NODE_CACHE_FILE = "/store/database/nodes"
# turns a spherical mercator coord into a tile coord
def Expire.tile_from_merc(point, zoom)
point.x = 0.5 + point.x / SIZE
point.y = 0.5 - point.y / SIZE
# transform into tile space
- point.x = point.x * 2 ** zoom
- point.y = point.y * 2 ** zoom
+ point.x = point.x * 2**zoom
+ point.y = point.y * 2**zoom
# chop of the fractional parts
[point.x.to_int, point.y.to_int, zoom]
end
y &= ~(METATILE - 1)
# generate the path
hash_path = (0..4).collect { |i|
- (((x >> 4*i) & 0xf) << 4) | ((y >> 4*i) & 0xf)
+ (((x >> 4 * i) & 0xf) << 4) | ((y >> 4 * i) & 0xf)
}.reverse.join('/')
z.to_s + '/' + hash_path + ".meta"
end
if id <= @max_id
offset = 16 + id * 8
- lon, lat = @cache[offset .. offset+7].unpack("ll")
+ lon, lat = @cache[offset..offset + 7].unpack("ll")
if lon != -2147483648 && lat != -2147483648
node = Node.new(lon, lat)
mode 0755
end
-node[:tile][:styles].each do |name,details|
+node[:tile][:styles].each do |name, details|
style_directory = "/srv/tile.openstreetmap.org/styles/#{name}"
tile_directory = "/srv/tile.openstreetmap.org/tiles/#{name}"
mode 0755
end
-tile_directories = node[:tile][:styles].collect do |_,style|
+tile_directories = node[:tile][:styles].collect do |_, style|
style[:tile_directories].collect { |directory| directory[:name] }
end.flatten.sort.uniq
rails_directory = "#{node[:web][:base_directory]}/rails"
-piwik_configuration = data_bag_item("web", "piwik").to_hash.reject do |k,_|
+piwik_configuration = data_bag_item("web", "piwik").to_hash.reject do |k, _|
["chef_type", "data_bag", "id"].include?(k)
end
}
}
}
-);
+)
run_list(
"role[ucl-wolfson]",
:start_servers => 20,
:min_spare_servers => 20,
:max_spare_servers => 50,
- :max_clients => 256,
+ :max_clients => 256
}
}
)
}
}
}
-);
+)
run_list(
"role[ic]",
}
}
}
-);
+)
run_list(
"role[ic]",
:nominatim => {
:status => :role,
:members => [ :lonvia, :tomh, :twain ]
- },
+ }
}
},
:apache => {
:owl => {
:status => :role,
:members => [ :yellowbkpk, :ppawel ]
- },
+ }
},
:groups => {
:adm => {
default_attributes(
:apache => {
- :mpm => "prefork",
+ :mpm => "prefork"
}
)
"Search_Data" => "ssd1",
"Search_Index" => "ssd1",
"Aux_Data" => "aux",
- "Aux_Index" => "aux",
+ "Aux_Index" => "aux"
}
}
)
"Search_Data" => "ssd",
"Search_Index" => "ssd",
"Aux_Data" => "data",
- "Aux_Index" => "ssd",
+ "Aux_Index" => "ssd"
}
}
)
}
}
}
-);
+)
run_list(
"role[ic]",
description "Master role applied to ridley"
default_attributes(
- :dhcpd =>{
+ :dhcpd => {
:first_address => "10.0.15.1",
:last_address => "10.0.15.254"
},
:role => :internal,
:family => :inet,
:address => "10.0.0.3"
- },
+ }
}
},
:openvpn => {
:bretth => {
:status => :user,
:shell => "/usr/bin/git-shell"
- },
+ }
}
},
:apache => {
:hardware => {
:modules => [
"it87"
- ],
+ ]
},
:networking => {
:interfaces => {
:address => "2001:41c9:1:400::32",
:prefix => "64",
:gateway => "fe80::1"
- },
+ }
}
},
:openvpn => {
"kernel.shmmax" => 17 * 1024 * 1024 * 1024,
"kernel.shmall" => 17 * 1024 * 1024 * 1024 / 4096
}
- },
+ }
},
:sysfs => {
:hdd_tune => {
}
}
}
-);
+)
run_list(
"role[ic]",
}
},
:apache => {
- :mpm => "prefork",
+ :mpm => "prefork"
}
)
:tile => {
:status => :role,
:members => [ :jburgess, :tomh ]
- },
- },
+ }
+ }
},
:apache => {
:mpm => "event",
"temp6" => { :max => 78, :max_hyst => 73 }
}
}
- },
+ }
},
:munin => {
:plugins => {
:start_servers => 20,
:min_spare_servers => 20,
:max_spare_servers => 50,
- :max_clients => 256,
+ :max_clients => 256
}
}
)