def action_create
super
Chef::Log.debug("Doing a remote recursive directory transfer for #{@new_resource}")

files_transferred = Set.new
files_to_transfer.each do |cookbook_file_relative_path|
create_cookbook_file(cookbook_file_relative_path)
variables :zones => zones
end
apache_site "dns.openstreetmap.org" do
template "apache.erb"
directory "/srv/dns.openstreetmap.org"
end
mode 0644
not_if do
File.exist?("/etc/ssl/certs/exim.pem") and File.exist?("/etc/ssl/private/exim.key")
end
end
execute "/etc/ssl/certs/exim.pem" do
default[:mediawiki][:user] = "wiki"
default[:mediawiki][:group] = "wiki"
default[:mediawiki][:sites] = {}
mode 0664
content "<?php require_once('#{extension_directory}/#{name}.php');\n"
only_if do File.exist?("#{extension_directory}/#{name}.php") end
notifies :create, resources(:template => "#{mediawiki_directory}/LocalSettings.php")
end
end
:logo => params[:logo] || "$wgStylePath/common/images/wiki.png",
:email_contact => params[:email_contact] || "",
:email_sender => params[:email_sender] || "",
:email_sender_name => params[:email_sender_name] || "MediaWiki Mail",
:commons => params[:commons] || TRUE,
:skin => params[:skin] || "vector",
:site_notice => params[:site_notice] || "",
mediawiki_extension "Cite" do
site name
template "mw-ext-Cite.inc.php.erb"
end
mediawiki_extension "ConfirmEdit" do
require 'hpricot'
require 'open-uri'
def uris_from_status(server)
file = open("http://#{server}/server-status").read
doc = Hpricot.parse(file)
tables = doc / 'table'
end
CALL_TYPES = {
:map => "Map API calls",
:upload => "Changeset diff uploads",
:amf => "AMF API calls",
:history => "Element history fetches",
:full => "Full element fetches",
:trkpts => "GPX trackpoints calls",
:web => "Web site traffic",
:other => "Other API calls"
}
def categorise_uri(line)
uri = line.split(" ")[1]

case uri
when /api\/0\.6\/map/ then :map
when /api\/0\.6\/changeset\/[0-9]*\/upload/ then :upload
end
h
end

CALL_TYPES.keys.each do |type|
count = counts[type] || 0
puts "#{type}.value #{count}"
end
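
# A usage sketch for categorise_uri (hypothetical log line): the second
# whitespace-separated field of an Apache server-status request line is
# the URI, so for example
#   categorise_uri("GET /api/0.6/map?bbox=0,0,1,1 HTTP/1.1")  # => :map
# and, presumably via an else clause elided above, anything unmatched
# falls through to :other.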
CALL_TYPES = {
:map => "Map API calls",
:upload => "Changeset diff uploads",
:amf => "AMF API calls",
:history => "Element history fetches",
:full => "Full element fetches",
:trkpts => "GPX trackpoints calls",
:web => "Web site traffic",
:other => "Other API calls"
}
def categorise_uri(line)
uri = line.split(" ")[1]

case uri
when /api\/0\.6\/map/ then :map
when /api\/0\.6\/changeset\/[0-9]*\/upload/ then :upload
end
h
end

CALL_TYPES.keys.each do |type|
count = counts[type] || 0
puts "#{type}.value #{count / delta_t}"
require 'hpricot'
require 'open-uri'
def uri_and_times_from_status(server)
file = open("http://#{server}/server-status").read
doc = Hpricot.parse(file)
tables = doc / 'table'
end
CALL_TYPES = {
:map => "Map API calls",
:upload => "Changeset diff uploads",
:amf => "AMF API calls",
:history => "Element history fetches",
:full => "Full element fetches",
:trkpts => "GPX trackpoints calls",
:web => "Web site traffic",
:other => "Other API calls"
}
def categorise_uri(line)
uri = line.split(" ")[1]

case uri
when /api\/0\.6\/map/ then :map
when /api\/0\.6\/changeset\/[0-9]*\/upload/ then :upload
end
h
end

CALL_TYPES.keys.each do |type|
count = counts[type] || [0]
avg = count.inject(0){|x,y|x+y} / (1.0 * count.length)
#!/usr/bin/env ruby
# put in /etc/munin/plugins and restart munin-node
# by Dan Manges, http://www.dcmanges.com/blog/rails-application-visualization-with-munin
# NOTE: you might need to allow the munin user passwordless sudo for passenger-memory-stats

def output_config
puts <<-END
graph_args --base 1024 -l 0 --vertical-label bytes --upper-limit 4056231936
END
exit 0
end

def output_values
status = `/usr/sbin/passenger-memory-stats | tail -1`
unless $?.success?
$stderr.puts "failed executing passenger-memory-stats"
exit 1
end
status =~ /(\d+\.\d+)/
puts "memory.value #{($1.to_f * 1024 * 1024).round}"
end

if ARGV[0] == "config"
output_config
else
output_values
end
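
# Sample run in values mode, assuming passenger-memory-stats reports the
# total in MB on its last line (e.g. "840.0 MB"):
#   $ ./passenger_memory
#   memory.value 880803840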
class Chef
class Munin
def self.expand(template, nodes)
nodes.map do |node|
if node.kind_of?(Hash)
template.gsub(/%%([^%]+)%%/) { node[$1.to_sym] }
else
end
end.join(" ")
end
end
end
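
# A usage sketch with hypothetical values: each %%key%% in the template
# is looked up in the node hash and the expansions are space-joined, so
#   Chef::Munin.expand("render.%%name%%", [{ :name => "orm" }, { :name => "yevaud" }])
#   # => "render.orm render.yevaud"
# (non-Hash nodes take the else branch elided above).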
:description => "List of nameservers to use",
:type => "array",
:default => [""]
#default[:nginx][:mpm] = "worker"
supports :status => true, :restart => true, :reload => true
subscribes :restart, "template[/etc/nginx/nginx.conf]"
end
mode 0755
recursive true
end
case platform
when "ubuntu","debian"
default[:ntp][:service] = "ntp"
when "redhat","centos","fedora"
:description => "Array of servers we should talk to",
:type => "array",
:default => ["0.us.pool.ntp.org", "1.us.pool.ntp.org"]
group "root"
mode "0644"
variables :passwords => passwords,
:directory => "/opt/piwik-#{version}/piwik",
:plugins => node[:piwik][:plugins]
end
last_run = (@now - 60) if last_run.nil?
@state['last_run'] = @now
# pretty much all operations on a changeset will modify its closed_at
# time (see rails_port's changeset model). so it is probably enough
# for us to look at anything that was closed recently, and filter from
# there.
@conn.
select {|cs| cs.activity_between?(last_run, @now) }
end
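
# A minimal sketch of the predicate used above — the real
# Changeset#activity_between? is defined elsewhere in this script, so
# this is an assumption for illustration: a changeset counts as active
# if its closed_at fell inside the replication window.
#   def activity_between?(t1, t2)
#     closed_at >= t1 && closed_at < t2
#   end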
# creates an XML file containing the changeset information from the
# list of changesets output by open_changesets.
def changeset_dump(changesets)
doc = XML::Document.new
doc.root << xml
end

doc.to_s
end
rep = Replicator.new(ARGV[0])
rep.save!
new_resource.permissions.each do |user,new_privileges|
current_privileges = @current_resource.permissions[user] || {}
new_privileges = Array(new_privileges)

if new_privileges.include?(:all)
new_privileges |= Chef::PostgreSQL::TABLE_PRIVILEGES
end
# limitations under the License.
#
include_recipe "apache"
default[:squid][:cache_mem] = "256 MB"
default[:squid][:cache_dir] = "ufs /var/spool/squid 256 16 256"
default[:squid][:access_log] = "/var/log/squid/access.log squid"
module Expire
# projection object to go from latlon -> spherical mercator
PROJ = Proj4::Projection.new(["+proj=merc", "+a=6378137", "+b=6378137",
"+lat_ts=0.0", "+lon_0=0.0", "+x_0=0.0",
"+y_0=0", "+k=1.0", "+units=m",
"+nadgrids=@null", "+no_defs +over"])

# width/height of the spherical mercator projection
SIZE=40075016.6855784
# the size of the meta tile blocks
HASH_ROOT = "/tiles/default/"
# node cache file
NODE_CACHE_FILE="/store/database/nodes"

# turns a spherical mercator coord into a tile coord
def Expire.tile_from_merc(point, zoom)
# renormalise into unit space [0,1]
# chop off the fractional parts
[point.x.to_int, point.y.to_int, zoom]
end
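
# (The hunk above elides the renormalisation step; a sketch of the usual
# transform, assuming the SIZE constant defined earlier:
#   point.x = (0.5 + point.x / SIZE) * 2**zoom
#   point.y = (0.5 - point.y / SIZE) * 2**zoom
# i.e. shift the mercator coordinate into unit space [0,1], flip y, and
# scale by the number of tiles at this zoom before truncating.)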

# turns a latlon -> tile x,y given a zoom level
def Expire.tile_from_latlon(latlon, zoom)
# first convert to spherical mercator
point = PROJ.forward(latlon)
tile_from_merc(point, zoom)
end

# this must match the definition of xyz_to_meta in mod_tile
def Expire.xyz_to_meta(x, y, z)
# mask off the final few bits
x &= ~(METATILE - 1)
y &= ~(METATILE - 1)
# generate the path
hash_path = (0..4).collect { |i|
(((x >> 4*i) & 0xf) << 4) | ((y >> 4*i) & 0xf)
}.reverse.join('/')
z.to_s + '/' + hash_path + ".meta"
end
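
# A worked example, assuming mod_tile's METATILE = 8: xyz_to_meta(1000, 500, 12)
# masks the coordinates down to the metatile corner (1000, 496), then packs
# 4 bits of x and 4 bits of y into each path component, most significant
# nibbles first, giving
#   "12/0/0/49/239/128.meta"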

# time to reset to, some very stupidly early time, before OSM started
EXPIRY_TIME = Time.parse("2000-01-01 00:00:00")
# expire the meta tile by setting the modified time back
def Expire.expire_meta(meta)
puts "Expiring #{meta}"
File.utime(EXPIRY_TIME, EXPIRY_TIME, meta)
end

def Expire.expire(change_file, min_zoom, max_zoom, tile_dirs)
do_expire(change_file, min_zoom, max_zoom) do |set|
new_set = Set.new
tile_dirs.each do |tile_dir|
meta_set.add(tile_dir + "/" + meta) if File.exist?(tile_dir + "/" + meta)
end

# add the parent into the set for the next round
new_set.add([xy[0] / 2, xy[1] / 2, xy[2] - 1])
end

# expire all meta tiles
meta_set.each do |meta|
expire_meta(meta)
def Expire.do_expire(change_file, min_zoom, max_zoom, &_)
# read in the osm change file
doc = XML::Document.file(change_file)

# hash map to contain all the nodes
nodes = Hash.new

# we put all the nodes into the hash, as it doesn't matter whether the node was
# added, deleted or modified - the tile will need updating anyway.
doc.find('//node').each do |node|
if lat > 85
lat = 85
end
point = Proj4::Point.new(Math::PI * node['lon'].to_f / 180,
Math::PI * lat / 180)
nodes[node['id'].to_i] = tile_from_latlon(point, max_zoom)
end

# now we look for all the ways that have changed and put all of their nodes into
# the hash too. this will add too many nodes, as it is possible a long way will be
# changed at only a portion of its length. however, due to the non-local way that
# mapnik does text placement, it may still not be enough.
#
# also, we miss cases where nodes are deleted from ways where that node is not
# itself deleted and the coverage of the point set isn't enough to encompass the
# change.
node_cache = NodeCache.new(NODE_CACHE_FILE)
end
end
end

# create a set of all the tiles at the maximum zoom level which are touched by
# any of the nodes we've collected. we'll create the tiles at other zoom levels
# by a simple recursion.
set = Set.new nodes.values

# expire tiles and shrink to the set of parents
(max_zoom).downto(min_zoom) do |_|
# allow the block to work on the set, returning the set at the next
action :create
use_conditional_get true
else
action :create_if_missing
end
source url
"planet_osm_nodes",
"planet_osm_rels",
"planet_osm_ways",
"raster_columns",
"raster_overviews",
"spatial_ref_sys" ].each do |table|
postgresql_table table do
cluster node[:tile][:database][:cluster]
end
end
[ "geometry_columns",
"planet_osm_line",
"planet_osm_point",
"planet_osm_polygon",
"planet_osm_roads" ].each do |table|
postgresql_table table do
cluster node[:tile][:database][:cluster]
default[:tilecache][:net_bucket_size] = "134217728"
default[:tilecache][:ssl][:certificate] = "tile.openstreetmap"
package "whoopsie" do
action :purge
end
directory "/srv/wiki.openstreetmap.org"
enable_ssl TRUE

database_name "wiki"
database_username "wiki-user"
database_password passwords["database"]
recaptcha_public_key "6LdFIQATAAAAAMwtHeI8KDgPqvRbXeNYSq1gujKz"
recaptcha_private_key passwords["recaptcha"]

#site_readonly "MAINTENANCE UPDATE: WIKI READ-ONLY. ETA: Tuesday 8:00am UTC/GMT."
end
mode 0644
source "cron_wiki_dump.erb"
end
package "libicu52"
apache_module "php5"
},
:network_buffers => {
:comment => "Tune network buffers",
:parameters => {
"net.core.rmem_max" => "16777216",
"net.core.wmem_max" => "16777216",
"net.ipv4.tcp_rmem" => "4096\t87380\t16777216",
},
:network_conntrack_established => {
:comment => "Only track established connections for four hours",
:parameters => {
"net.netfilter.nf_conntrack_tcp_timeout_established" => "14400"
}
},
},
:chefrepo => {
:status => :role,
:members => [
:tomh, :grant, :matt, :jburgess, :lonvia, :yellowbkpk, :bretth
]
}
:munin => {
:plugins => {
:postgres_connections_openstreetmap => {
:waiting => {
:warning => 10,
:critical => 20
}
},
:postgres_locks_openstreetmap => {
:accesssharelock => {
:warning => 900,
:critical => 1000
},
:rowexclusivelock => {
:warning => 250,
:critical => 300
}
:zverik => { :status => :user },
:dodobas => { :status => :user },
:mhohmann => { :status => :user },
:ooc => {
:status => :role,
:members => [ :tomh, :blackadder, :timsc, :ollie ]
},
:apis => {
:status => :role,
:members => [ :tomh ]
},
:os => {
:status => :role,
:members => [ :tomh, :grant, :ollie ]
},
:gpsmid => {
:status => :role,
:members => [ :apmon, :maba ]
}
}
},
:sysctl => {
:postgres => {
:comment => "Increase shared memory for postgres",
:parameters => {
"kernel.shmall" => "4194304",
"kernel.shmmax" => "17179869184"
}
:sysctl => {
:tune_cpu_scheduler => {
:comment => "Tune CPU scheduler for server scheduling",
:parameters => {
"kernel.sched_migration_cost" => 50000000,
"kernel.sched_autogroup_enabled" => 0
}
:accounts => {
:users => {
:lambertus => {
:status => :administrator
},
:forum => {
:status => :role,
:uid => "tomh",
:gid => "tomh",
:transfer_logging => false,
:hosts_allow => [
"212.110.172.32", # shenron
"2001:41c9:1:400::32", # shenron
"212.159.112.221" # grant
:sysctl => {
:kvm => {
:comment => "Tuning for KVM guest",
:parameters => {
"kernel.sched_min_granularity_ns" => 10000000,
"kernel.sched_wakeup_granularity_ns" => 15000000
}
:munin => {
:plugins => {
:exim_mailqueue => {
:mails => {
:warning => 500,
:critical => 1000
}
:sysctl => {
:kvm => {
:comment => "Tuning for KVM guest",
:parameters => {
"kernel.sched_min_granularity_ns" => 10000000,
"kernel.sched_wakeup_granularity_ns" => 15000000
}
:sysctl => {
:kvm => {
:comment => "Tuning for KVM guest",
:parameters => {
"kernel.sched_min_granularity_ns" => 10000000,
"kernel.sched_wakeup_granularity_ns" => 15000000
}
:sysctl => {
:kvm => {
:comment => "Tuning for KVM guest",
:parameters => {
"kernel.sched_min_granularity_ns" => 10000000,
"kernel.sched_wakeup_granularity_ns" => 15000000
}
:sysctl => {
:postgres => {
:comment => "Increase shared memory for postgres",
:parameters => {
"kernel.shmmax" => 26 * 1024 * 1024 * 1024,
"kernel.shmall" => 26 * 1024 * 1024 * 1024 / 4096
}
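# (shmmax is in bytes while shmall is counted in 4 KiB pages, hence the
# division by 4096 — both settings here allow the same 26 GiB.)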
},
:network_conntrack_time_wait => {
:comment => "Only track completed connections for 30 seconds",
:parameters => {
"net.netfilter.nf_conntrack_tcp_timeout_time_wait" => "30"
}
},
:sysctl => {
:postgres => {
:comment => "Increase shared memory for postgres",
:parameters => {
"kernel.shmmax" => 9 * 1024 * 1024 * 1024,
"kernel.shmall" => 9 * 1024 * 1024 * 1024 / 4096
}
:sysctl => {
:postgres => {
:comment => "Increase shared memory for postgres",
:parameters => {
"kernel.shmmax" => 16 * 1024 * 1024 * 1024,
"kernel.shmall" => 16 * 1024 * 1024 * 1024 / 4096
}
:sysctl => {
:postgres => {
:comment => "Increase shared memory for postgres",
:parameters => {
"kernel.shmmax" => 66 * 1024 * 1024 * 1024,
"kernel.shmall" => 66 * 1024 * 1024 * 1024 / 4096
}
},
:sensors_volt => {
:contacts => "null",
:volt10 => {
:warning => "3.11:3.50",
:critical => "2.98:3.63"
}
:sysctl => {
:postgres => {
:comment => "Increase shared memory for postgres",
:parameters => {
"kernel.shmmax" => 17 * 1024 * 1024 * 1024,
"kernel.shmall" => 17 * 1024 * 1024 * 1024 / 4096
}
default_attributes(
:stats => {
:sites => [
{
:name => "planet.openstreetmap.org",
:log_pattern => "%YYYY-168-%MM-168-%DD-168.gz",
:days => "7"
},
{
:name => "www.openstreetmap.org",
:log_pattern => "*-%YYYY-48-%MM-48-%DD-48.gz",
:days => "*"
:sysctl => {
:network_conntrack_time_wait => {
:comment => "Only track completed connections for 30 seconds",
:parameters => {
"net.netfilter.nf_conntrack_tcp_timeout_time_wait" => "30"
}
},
},
:squid_swappiness => {
:comment => "Prefer not to swapout to free memory",
:parameters => {
"vm.swappiness" => "30"
}
}
],
:sensors => {
"jc42-*" => {
:temps => {
"temp1" => { :max => 75 }
}
},
:sysctl => {
:sack => {
:comment => "Disable SACK as the UCL firewall breaks it",
:parameters => {
"net.ipv4.tcp_sack" => "0"
}
}
:munin => {
:plugins => {
:cpu => {
:system => {
:warning => 500,
:critical => 600
}
},
:load => {
:load => {
:warning => 150,
:critical => 200
}
:sysctl => {
:postgres => {
:comment => "Increase shared memory for postgres",
:parameters => {
"kernel.shmmax" => 4 * 1024 * 1024 * 1024,
"kernel.shmall" => 4 * 1024 * 1024 * 1024 / 4096
}