# limitations under the License.
#
-include_recipe "tools"
+include_recipe "git"
include_recipe "munin"
+include_recipe "prometheus"
+include_recipe "sysfs"
+include_recipe "tools"
ohai_plugin "hardware" do
template "ohai.rb.erb"
units = []
-if node[:roles].include?("bytemark") || node[:roles].include?("exonetric")
+if node[:roles].include?("bytemark") || node[:roles].include?("exonetric") || node[:roles].include?("prgmr")
units << "0"
end
case manufacturer
-when "HP"
+when "HP", "HPE"
+ include_recipe "apt::management-component-pack"
+
package "hponcfg"
+ execute "update-ilo" do
+ action :nothing
+ command "/usr/sbin/hponcfg -f /etc/ilo-defaults.xml"
+ not_if { kitchen? }
+ end
+
+ template "/etc/ilo-defaults.xml" do
+ source "ilo-defaults.xml.erb"
+ owner "root"
+ group "root"
+ mode "644"
+ notifies :run, "execute[update-ilo]"
+ end
+
package "hp-health" do
action :install
notifies :restart, "service[hp-health]"
+ only_if { node[:lsb][:release].to_f < 22.04 }
end
service "hp-health" do
action [:enable, :start]
supports :status => true, :restart => true
+ only_if { node[:lsb][:release].to_f < 22.04 }
end
if product.end_with?("Gen8", "Gen9")
action [:enable, :start]
supports :status => true, :restart => true
end
+ elsif product.end_with?("Gen10")
+ package "amsd" do
+ action :install
+ notifies :restart, "service[amsd]"
+ end
+
+ service "amsd" do
+ action [:enable, :start]
+ supports :status => true, :restart => true
+ end
end
- units << "1"
+ units << if product.end_with?("Gen10")
+ "0"
+ else
+ "1"
+ end
when "TYAN"
units << "0"
when "TYAN Computer Corporation"
units << "0"
when "Supermicro"
- case product
- when "H8DGU", "X9SCD", "X7DBU", "X7DW3", "X9DR7/E-(J)LN4F", "X9DR3-F", "X9DRW", "SYS-1028U-TN10RT+", "SYS-2028U-TN24R4T+", "SYS-1029P-WTRT", "Super Server"
- units << "1"
- else
- units << "0"
- end
+ units << "1"
when "IBM"
units << "0"
+when "VMware, Inc."
+ package "open-vm-tools"
+
+ # Remove timeSync plugin completely
+ # https://github.com/vmware/open-vm-tools/issues/302
+ file "/usr/lib/open-vm-tools/plugins/vmsvc/libtimeSync.so" do
+ action :delete
+ notifies :restart, "service[open-vm-tools]"
+ end
+
+ # Attempt to tell Host we are not interested in timeSync
+ execute "vmware-toolbox-cmd-timesync-disable" do
+ command "/usr/bin/vmware-toolbox-cmd timesync disable"
+ ignore_failure true
+ end
+
+ service "open-vm-tools" do
+ action [:enable, :start]
+ supports :status => true, :restart => true
+ end
end
units.sort.uniq.each do |unit|
service "serial-getty@ttyS#{unit}" do
action [:enable, :start]
+ not_if { kitchen? }
end
end
execute "update-grub" do
action :nothing
command "/usr/sbin/update-grub"
+ not_if { kitchen? }
end
template "/etc/default/grub" do
source "grub.erb"
owner "root"
group "root"
- mode 0o644
+ mode "644"
variables :units => units, :entry => grub_entry
notifies :run, "execute[update-grub]"
end
source "initramfs-mdadm.erb"
owner "root"
group "root"
- mode 0o644
+ mode "644"
notifies :run, "execute[update-initramfs]"
end
action [:enable, :start]
end
-package "ipmitool" if node[:kernel][:modules].include?("ipmi_si")
+if node[:kernel][:modules].include?("ipmi_si")
+ package "ipmitool"
+ package "freeipmi-tools"
-package "irqbalance"
+ template "/etc/prometheus/ipmi_local.yml" do
+ source "ipmi_local.yml.erb"
+ owner "root"
+ group "root"
+ mode "644"
+ end
-template "/etc/default/irqbalance" do
- source "irqbalance.erb"
- owner "root"
- group "root"
- mode 0o644
+ prometheus_exporter "ipmi" do
+ port 9290
+ user "root"
+ private_devices false
+ protect_clock false
+ system_call_filter ["@system-service", "@raw-io"]
+ options "--config.file=/etc/prometheus/ipmi_local.yml"
+ subscribes :restart, "template[/etc/prometheus/ipmi_local.yml]"
+ end
end
+package "irqbalance"
+
service "irqbalance" do
action [:start, :enable]
supports :status => false, :restart => true, :reload => false
- subscribes :restart, "template[/etc/default/irqbalance]"
end
-# Link Layer Discovery Protocol Daemon
package "lldpd"
+
service "lldpd" do
action [:start, :enable]
supports :status => true, :restart => true, :reload => true
end
+ohai_plugin "lldp" do
+ template "lldp.rb.erb"
+end
+
+package %w[
+ rasdaemon
+ ruby-sqlite3
+]
+
+service "rasdaemon" do
+ action [:enable, :start]
+end
+
+prometheus_exporter "rasdaemon" do
+ port 9797
+ user "root"
+end
+
tools_packages = []
status_packages = {}
-node[:kernel][:modules].each_key do |modname|
- case modname
- when "cciss"
- tools_packages << "ssacli"
- status_packages["cciss-vol-status"] ||= []
- when "hpsa"
- tools_packages << "ssacli"
- status_packages["cciss-vol-status"] ||= []
- when "mptsas"
- tools_packages << "lsiutil"
- status_packages["mpt-status"] ||= []
- when "mpt2sas", "mpt3sas"
- tools_packages << "sas2ircu"
- status_packages["sas2ircu-status"] ||= []
- when "megaraid_mm"
- tools_packages << "megactl"
- status_packages["megaraid-status"] ||= []
- when "megaraid_sas"
- tools_packages << "megacli"
- status_packages["megaclisas-status"] ||= []
- when "aacraid"
- tools_packages << "arcconf"
- status_packages["aacraid-status"] ||= []
- when "arcmsr"
- tools_packages << "areca"
- end
-end
-
-node[:block_device].each do |name, attributes|
- next unless attributes[:vendor] == "HP" && attributes[:model] == "LOGICAL VOLUME"
-
- if name =~ /^cciss!(c[0-9]+)d[0-9]+$/
- status_packages["cciss-vol-status"] |= ["cciss/#{Regexp.last_match[1]}d0"]
- else
- Dir.glob("/sys/block/#{name}/device/scsi_generic/*").each do |sg|
- status_packages["cciss-vol-status"] |= [File.basename(sg)]
+if node[:virtualization][:role] != "guest" ||
+ (node[:virtualization][:system] != "lxc" &&
+ node[:virtualization][:system] != "lxd" &&
+ node[:virtualization][:system] != "openvz")
+
+ node[:kernel][:modules].each_key do |modname|
+ case modname
+ when "cciss"
+ tools_packages << "ssacli"
+ status_packages["cciss-vol-status"] ||= []
+ when "hpsa"
+ tools_packages << "ssacli"
+ status_packages["cciss-vol-status"] ||= []
+ when "mptsas"
+ tools_packages << "lsiutil"
+ status_packages["mpt-status"] ||= []
+ when "mpt2sas", "mpt3sas"
+ tools_packages << "sas2ircu"
+ status_packages["sas2ircu-status"] ||= []
+ when "megaraid_sas"
+ tools_packages << "megacli"
+ status_packages["megaclisas-status"] ||= []
+ when "aacraid"
+ tools_packages << "arcconf"
+ status_packages["aacraid-status"] ||= []
+ when "arcmsr"
+ tools_packages << "areca"
+ end
+ end
+
+ node[:block_device].each do |name, attributes|
+ next unless attributes[:vendor] == "HP" && attributes[:model] == "LOGICAL VOLUME"
+
+ if name =~ /^cciss!(c[0-9]+)d[0-9]+$/
+ status_packages["cciss-vol-status"] |= ["cciss/#{Regexp.last_match[1]}d0"]
+ else
+ Dir.glob("/sys/block/#{name}/device/scsi_generic/*").each do |sg|
+ status_packages["cciss-vol-status"] |= [File.basename(sg)]
+ end
end
end
end
git "/opt/areca" do
action :sync
repository "https://git.openstreetmap.org/private/areca.git"
+ depth 1
user "root"
group "root"
+ not_if { kitchen? }
end
else
directory "/opt/areca" do
end
end
+include_recipe "apt::hwraid" unless status_packages.empty?
+
if status_packages.include?("cciss-vol-status")
template "/usr/local/bin/cciss-vol-statusd" do
source "cciss-vol-statusd.erb"
owner "root"
group "root"
- mode 0o755
+ mode "755"
notifies :restart, "service[cciss-vol-statusd]"
end
systemd_service "cciss-vol-statusd" do
description "Check cciss_vol_status values in the background"
exec_start "/usr/local/bin/cciss-vol-statusd"
+ nice 10
private_tmp true
protect_system "full"
protect_home true
no_new_privileges true
notifies :restart, "service[cciss-vol-statusd]"
end
+else
+ systemd_service "cciss-vol-statusd" do
+ action :delete
+ end
+
+ template "/usr/local/bin/cciss-vol-statusd" do
+ action :delete
+ end
end
-%w[cciss-vol-status mpt-status sas2ircu-status megaraid-status megaclisas-status aacraid-status].each do |status_package|
+%w[cciss-vol-status mpt-status sas2ircu-status megaclisas-status aacraid-status].each do |status_package|
if status_packages.include?(status_package)
package status_package
source "raid.default.erb"
owner "root"
group "root"
- mode 0o644
+ mode "644"
variables :devices => status_packages[status_package]
end
[]
end
-# intel_ssds = disks.select { |d| d[:vendor] == "INTEL" && d[:model] =~ /^SSD/ }
-#
-# nvmes = if node[:hardware][:pci]
-# node[:hardware][:pci].values.select { |pci| pci[:driver] == "nvme" }
-# else
-# []
-# end
-#
-# intel_nvmes = nvmes.select { |pci| pci[:vendor_name] == "Intel Corporation" }
-#
-# if !intel_ssds.empty? || !intel_nvmes.empty?
-# package "unzip"
-#
-# intel_ssd_tool_version = "3.0.21"
-#
-# remote_file "#{Chef::Config[:file_cache_path]}/Intel_SSD_Data_Center_Tool_#{intel_ssd_tool_version}_Linux.zip" do
-# source "https://downloadmirror.intel.com/29115/eng/Intel_SSD_Data_Center_Tool_#{intel_ssd_tool_version}_Linux.zip"
-# end
-#
-# execute "#{Chef::Config[:file_cache_path]}/Intel_SSD_Data_Center_Tool_#{intel_ssd_tool_version}_Linux.zip" do
-# command "unzip Intel_SSD_Data_Center_Tool_#{intel_ssd_tool_version}_Linux.zip isdct_#{intel_ssd_tool_version}-1_amd64.deb"
-# cwd Chef::Config[:file_cache_path]
-# user "root"
-# group "root"
-# not_if { File.exist?("#{Chef::Config[:file_cache_path]}/isdct_#{intel_ssd_tool_version}-1_amd64.deb") }
-# end
-#
-# dpkg_package "isdct" do
-# version "#{intel_ssd_tool_version}-1"
-# source "#{Chef::Config[:file_cache_path]}/isdct_#{intel_ssd_tool_version}-1_amd64.deb"
-# end
-# end
+intel_ssds = disks.select { |d| d[:vendor] == "INTEL" && d[:model] =~ /^SSD/ }
+
+nvmes = if node[:hardware][:pci]
+ node[:hardware][:pci].values.select { |pci| pci[:driver] == "nvme" }
+ else
+ []
+ end
+
+unless nvmes.empty?
+ package "nvme-cli"
+end
+
+intel_nvmes = nvmes.select { |pci| pci[:vendor_name] == "Intel Corporation" }
+
+if !intel_ssds.empty? || !intel_nvmes.empty?
+ package "unzip"
+
+ sst_tool_version = "1.3"
+ sst_package_version = "#{sst_tool_version}.208-0"
+
+ # remote_file "#{Chef::Config[:file_cache_path]}/SST_CLI_Linux_#{sst_tool_version}.zip" do
+ # source "https://downloadmirror.intel.com/743764/SST_CLI_Linux_#{sst_tool_version}.zip"
+ # end
+
+ execute "#{Chef::Config[:file_cache_path]}/SST_CLI_Linux_#{sst_tool_version}.zip" do
+ command "unzip SST_CLI_Linux_#{sst_tool_version}.zip sst_#{sst_package_version}_amd64.deb"
+ cwd Chef::Config[:file_cache_path]
+ user "root"
+ group "root"
+ not_if { ::File.exist?("#{Chef::Config[:file_cache_path]}/sst_#{sst_package_version}_amd64.deb") }
+ end
+
+ dpkg_package "sst" do
+    version sst_package_version
+ source "#{Chef::Config[:file_cache_path]}/sst_#{sst_package_version}_amd64.deb"
+ end
+
+ dpkg_package "intelmas" do
+ action :purge
+ end
+end
disks = disks.map do |disk|
next if disk[:state] == "spun_down" || %w[unconfigured failed].any?(disk[:status])
elsif smart =~ %r{^.*,(\d+)/(\d+)$}
munin = "#{device}-#{Regexp.last_match(1)}:#{Regexp.last_match(2)}"
end
+ elsif disk[:device]
+ device = disk[:device].sub("/dev/", "")
+ smart = disk[:smart_device]
+
+ if smart =~ /^.*,(\d+),(\d+),(\d+)$/
+ munin = "#{device}-#{Regexp.last_match(1)}:#{Regexp.last_match(2)}:#{Regexp.last_match(3)}"
+ end
end
elsif disk[:device] =~ %r{^/dev/(nvme\d+)n\d+$}
device = Regexp.last_match(1)
if disks.count.positive?
package "smartmontools"
+ template "/etc/cron.daily/update-smart-drivedb" do
+ source "update-smart-drivedb.erb"
+ owner "root"
+ group "root"
+ mode "755"
+ end
+
template "/usr/local/bin/smartd-mailer" do
source "smartd-mailer.erb"
owner "root"
group "root"
- mode 0o755
+ mode "755"
end
template "/etc/smartd.conf" do
source "smartd.conf.erb"
owner "root"
group "root"
- mode 0o644
+ mode "644"
variables :disks => disks
end
source "smartmontools.erb"
owner "root"
group "root"
- mode 0o644
+ mode "644"
end
- service "smartd" do
+ service "smartmontools" do
action [:enable, :start]
subscribes :reload, "template[/etc/smartd.conf]"
subscribes :restart, "template[/etc/default/smartmontools]"
end
+ template "/etc/prometheus/collectors/smart.devices" do
+ source "smart.devices.erb"
+ owner "root"
+ group "root"
+ mode "644"
+ variables :disks => disks
+ end
+
+ prometheus_collector "smart" do
+ interval "15m"
+ user "root"
+ capability_bounding_set "CAP_SYS_ADMIN"
+ private_devices false
+ private_users false
+ protect_clock false
+ end
+
# Don't try and do munin monitoring of disks behind
# an Areca controller as they only allow one thing to
# talk to the controller at a time and smartd will
file "/etc/mdadm/mdadm.conf" do
owner "root"
group "root"
- mode 0o644
+ mode "644"
content mdadm_conf
end
- service "mdadm" do
+ service "mdmonitor" do
action :nothing
subscribes :restart, "file[/etc/mdadm/mdadm.conf]"
end
end
-template "/etc/modules" do
- source "modules.erb"
- owner "root"
- group "root"
- mode 0o644
+file "/etc/modules" do
+ action :delete
end
-service "kmod" do
- action :nothing
- subscribes :start, "template[/etc/modules]"
+node[:hardware][:modules].each do |module_name|
+ kernel_module module_name do
+ action :install
+ not_if { kitchen? }
+ end
+end
+
+node[:hardware][:blacklisted_modules].each do |module_name|
+ kernel_module module_name do
+ action :blacklist
+ end
end
if node[:hardware][:watchdog]
source "watchdog.erb"
owner "root"
group "root"
- mode 0o644
+ mode "644"
variables :module => node[:hardware][:watchdog]
end
source "sensors.conf.erb"
owner "root"
group "root"
- mode 0o644
+ mode "644"
notifies :run, "execute[/etc/sensors.d/chef.conf]"
end
end
if node[:hardware][:shm_size]
+ execute "remount-dev-shm" do
+ action :nothing
+ command "/bin/mount -o remount /dev/shm"
+ user "root"
+ group "root"
+ end
+
mount "/dev/shm" do
- action [:mount, :enable]
+ action :enable
device "tmpfs"
fstype "tmpfs"
options "rw,nosuid,nodev,size=#{node[:hardware][:shm_size]}"
+ notifies :run, "execute[remount-dev-shm]"
end
end
+
+prometheus_collector "ohai" do
+ interval "15m"
+ user "root"
+ proc_subset "all"
+ capability_bounding_set "CAP_SYS_ADMIN"
+ private_devices false
+ private_users false
+ protect_clock false
+ protect_kernel_modules false
+end