+disks = if node[:hardware][:disk]
+          node[:hardware][:disk][:disks]
+        else
+          []
+        end
+
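+# Pick out any Intel SSDs from the detected disk hardware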
+intel_ssds = disks.select { |d| d[:vendor] == "INTEL" && d[:model] =~ /^SSD/ }
+
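+# Find NVMe devices among the detected PCI devices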
+nvmes = if node[:hardware][:pci]
+          node[:hardware][:pci].values.select { |pci| pci[:driver] == "nvme" }
+        else
+          []
+        end
+
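+# Narrow the NVMe devices down to those made by Intel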
+intel_nvmes = nvmes.select { |pci| pci[:vendor_name] == "Intel Corporation" }
+
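+# If there are any Intel SSDs or Intel NVMe devices then install the
+# Intel SSD Data Center Tool, converting the upstream RPM to a deb
+# package with alien before installing it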
+if !intel_ssds.empty? || !intel_nvmes.empty?
+  package "unzip"
+  package "alien"
+
+  remote_file "#{Chef::Config[:file_cache_path]}/DataCenterTool_3_0_0_Linux.zip" do
+    source "https://downloadmirror.intel.com/23931/eng/DataCenterTool_3_0_0_Linux.zip"
+  end
+
+  execute "unzip-DataCenterTool" do
+    command "unzip DataCenterTool_3_0_0_Linux.zip isdct-3.0.0.400-15.x86_64.rpm"
+    cwd Chef::Config[:file_cache_path]
+    user "root"
+    group "root"
+    not_if { File.exist?("#{Chef::Config[:file_cache_path]}/isdct-3.0.0.400-15.x86_64.rpm") }
+  end
+
+  execute "alien-isdct" do
+    command "alien --to-deb isdct-3.0.0.400-15.x86_64.rpm"
+    cwd Chef::Config[:file_cache_path]
+    user "root"
+    group "root"
+    not_if { File.exist?("#{Chef::Config[:file_cache_path]}/isdct_3.0.0.400-16_amd64.deb") }
+  end
+
+  dpkg_package "isdct" do
+    source "#{Chef::Config[:file_cache_path]}/isdct_3.0.0.400-16_amd64.deb"
+  end
+end
+
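+# Build a normalised record for each disk that is not spun down, with
+# the device name, the smart device type, and the corresponding munin
+# and hddtemp identifiers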
+disks = disks.map do |disk|
+  next if disk[:state] == "spun_down"
+
+  if disk[:smart_device]
+    controller = node[:hardware][:disk][:controllers][disk[:controller]]
+    device = controller[:device].sub("/dev/", "")
+    smart = disk[:smart_device]
+
+    if device.start_with?("cciss/") && smart =~ /^cciss,(\d+)$/
+      array = node[:hardware][:disk][:arrays][disk[:arrays].first]
+      munin = "cciss-3#{array[:wwn]}-#{Regexp.last_match(1)}"
+    elsif smart =~ /^.*,(\d+)$/
+      munin = "#{device}-#{Regexp.last_match(1)}"
+    elsif smart =~ %r{^.*,(\d+)/(\d+)$}
+      munin = "#{device}-#{Regexp.last_match(1)}:#{Regexp.last_match(2)}"
+    end
+  elsif disk[:device]
+    device = disk[:device].sub("/dev/", "")
+    munin = device
+  end
+
+  next if device.nil?
+
+  Hash[
+    :device => device,
+    :smart => smart,
+    :munin => munin,
+    :hddtemp => munin.tr("-:", "_")
+  ]
+end
+
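+# The service installed by smartmontools has a different name on older
+# Ubuntu releases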
+smartd_service = if node[:lsb][:release].to_f >= 16.04
+                   "smartd"
+                 else
+                   "smartmontools"
+                 end
+
+disks = disks.compact
+
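+# If there are disks to monitor then install and configure smartd and
+# the per-disk munin plugins, otherwise make sure smartd is not running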
+if disks.count > 0
+  package "smartmontools"
+
+  template "/usr/local/bin/smartd-mailer" do
+    source "smartd-mailer.erb"
+    owner "root"
+    group "root"
+    mode 0o755
+  end
+
+ template "/etc/smartd.conf" do
+ source "smartd.conf.erb"
+ owner "root"
+ group "root"
+ mode 0o644
+ variables :disks => disks
+ end
+
+ template "/etc/default/smartmontools" do
+ source "smartmontools.erb"
+ owner "root"
+ group "root"
+ mode 0o644
+ end
+
+  service smartd_service do
+    action [:enable, :start]
+    subscribes :reload, "template[/etc/smartd.conf]"
+    subscribes :restart, "template[/etc/default/smartmontools]"
+  end
+
+  # Don't try to do munin monitoring of disks behind
+  # an Areca controller as they only allow one thing to
+  # talk to the controller at a time, and smartd will
+  # throw errors if it clashes with munin
+  disks = disks.reject { |disk| disk[:smart] && disk[:smart].start_with?("areca,") }
+
+  disks.each do |disk|
+    munin_plugin "smart_#{disk[:munin]}" do
+      target "smart_"
+      conf "munin.smart.erb"
+      conf_variables :disk => disk
+    end
+  end
+else
+  service smartd_service do
+    action [:stop, :disable]
+  end
+end
+
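+# Enable the hddtemp munin plugin if there are disks, otherwise remove it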
+if disks.count > 0
+  munin_plugin "hddtemp_smartctl" do
+    conf "munin.hddtemp.erb"
+    conf_variables :disks => disks
+  end
+else
+  munin_plugin "hddtemp_smartctl" do
+    action :delete
+    conf "munin.hddtemp.erb"
+  end
+end
+
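+# Remove munin smart_ plugins for any disks that are no longer present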
+plugins = Dir.glob("/etc/munin/plugins/smart_*").map { |p| File.basename(p) } -
+          disks.map { |d| "smart_#{d[:munin]}" }
+
+plugins.each do |plugin|
+  munin_plugin plugin do
+    action :delete
+  end
+end
+