Add various bits of setup automation #61

Closed · wants to merge 14 commits
6 changes: 6 additions & 0 deletions attributes/mds.rb
@@ -0,0 +1,6 @@
case node['platform']
when 'ubuntu'
default["ceph"]["mds"]["init_style"] = "upstart"
else
default["ceph"]["mds"]["init_style"] = "sysvinit"
end
25 changes: 25 additions & 0 deletions attributes/pools.rb
@@ -0,0 +1,25 @@
#
# Override the following default to have pools setup automatically by the
# ceph::pools recipe.
#

default["ceph"]["pools"] = []

#
# Below is a sample of how the override can be done.
# Uncomment it to try it out.
#

#default["ceph"]["pools"] = [
# {
# "name" => "test1"
# },
# { "name" => "test2",
# "pg-num" => 10
# },
# {
# "name" => "test3",
# "pg-num" => 20,
# "pgp-num" => 15
# }
#]
48 changes: 48 additions & 0 deletions providers/manage_pool.rb
@@ -0,0 +1,48 @@
#
# Author:: Jesse Pretorius <[email protected]>
# Cookbook Name:: ceph
# Provider:: manage_pool
#
# Copyright 2013, Business Connexion (Pty) Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

action :create do
name = new_resource.name
pg_num = new_resource.pg_num
pgp_num = new_resource.pgp_num
min_size = new_resource.min_size

if pgp_num.nil?
Log.debug("Setting ceph-pool #{name} pgp_num to #{pg_num} as no value was provided.")
pgp_num = new_resource.pg_num
elsif pgp_num > pg_num
Log.warn("Setting ceph-pool #{name} pgp_num to #{pg_num} as it cannot be a larger value.")
pgp_num = new_resource.pg_num
end

execute "create ceph pool #{name} with pg_num #{pg_num} and pgp_num #{pgp_num}" do
command "ceph osd pool create #{name} #{pg_num} #{pgp_num}"
end

if min_size.nil?
Log.debug("Leaving ceph-pool #{name} min_size at the default value.")
else
execute "set ceph pool #{name} to min_size #{min_size}" do
command "ceph osd pool set #{name} min_size #{min_size}"
end
end

new_resource.updated_by_last_action(true)
end
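
Note: this provider relies on a companion resource definition (resources/manage_pool.rb) that is not shown in this view. A minimal sketch of what it would need to expose, with an assumed default for pg_num, might look like this:

# resources/manage_pool.rb -- hypothetical sketch only; the file shipped with
# this PR may differ. It just needs to expose the attributes the provider reads.
actions :create
default_action :create

attribute :name,     :kind_of => String,  :name_attribute => true
attribute :pg_num,   :kind_of => Integer, :default => 8     # assumed default
attribute :pgp_num,  :kind_of => Integer, :default => nil
attribute :min_size, :kind_of => Integer, :default => nil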
19 changes: 17 additions & 2 deletions recipes/conf.rb
@@ -1,5 +1,20 @@
raise "fsid must be set in config" if node["ceph"]["config"]['fsid'].nil?
raise "mon_initial_members must be set in config" if node["ceph"]["config"]['mon_initial_members'].nil?
if node['ceph']['config']['fsid'].nil? || node['ceph']['config']['mon_initial_members'].nil?
Log.debug("ceph-mds: Trying to retrieve fsid and mon from the ceph-setup role")
search_query = "roles:ceph-setup AND chef_environment:#{node.chef_environment}"
search_result = search(:node, search_query)
if search_result.length < 1
msg = "ceph-conf: The ceph fsid and mon_initial_members must be set in the config, or the ceph-setup role must be applied to a node."
Chef::Application.fatal! msg
end

fsid = search_result[0]['ceph']['config']['fsid']
Log.debug("ceph-mds: Found ceph fsid #{fsid} from the ceph-setup role")
node.set['ceph']['config']['fsid'] = fsid

mon_initial_members = search_result[0]['ceph']['config']['mon_initial_members']
Log.debug("ceph-mds: Found ceph mon_initial_members #{mon_initial_members} from the ceph-setup role")
node.set['ceph']['config']['mon_initial_members'] = mon_initial_members
end

mon_addresses = get_mon_addresses()

80 changes: 79 additions & 1 deletion recipes/mds.rb
@@ -1,9 +1,11 @@
#
# Author:: Kyle Bader <[email protected]>
# Author:: Jesse Pretorius <[email protected]>
# Cookbook Name:: ceph
# Recipe:: mds
#
# Copyright 2011, DreamHost Web Hosting
# Copyright 2013, Business Connexion (Pty) Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,5 +18,81 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

include_recipe "ceph::default"
include_recipe "ceph::conf"

package 'ceph-mds' do
action :install
end

service_type = node["ceph"]["mds"]["init_style"]

mons = get_mon_nodes("bootstrap_mds_key:*")

if mons.empty? then
Log.warn("ceph-mds: No ceph mds bootstrap key found.")
else
mds_bootstrap_directory = "/var/lib/ceph/bootstrap-mds"

directory "#{mds_bootstrap_directory}" do
owner "root"
group "root"
mode "0755"
end

# TODO cluster name
cluster = 'ceph'

execute "create the local keyring file" do
command "ceph-authtool '#{mds_bootstrap_directory}/#{cluster}.keyring' --create-keyring --name=client.bootstrap-mds --add-key='#{mons[0]["ceph"]["bootstrap_mds_key"]}'"
creates "#{mds_bootstrap_directory}/#{cluster}.keyring"
end

mds_directory = "/var/lib/ceph/mds/#{cluster}-#{node['hostname']}"

directory "#{mds_directory}" do
owner "root"
group "root"
mode "0755"
recursive true
action :create
end

unless File.exists?("#{mds_directory}/done")
execute "get or create mds keyring" do
command "ceph --cluster #{cluster} --name client.bootstrap-mds --keyring #{mds_bootstrap_directory}/#{cluster}.keyring auth get-or-create mds.#{node['hostname']} osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o #{mds_directory}/keyring"
creates "#{mds_directory}/keyring"
end
ruby_block "finalise" do
block do
["done", service_type].each do |ack|
File.open("#{mds_directory}/#{ack}", "w").close()
end
end
end
end

if service_type == "upstart"
service "ceph-mds" do
provider Chef::Provider::Service::Upstart
action :enable
end
service "ceph-mds-all" do
provider Chef::Provider::Service::Upstart
supports :status => true
action [ :enable, :start ]
end
end

service "ceph_mds" do
if service_type == "upstart"
service_name "ceph-mds-all-starter"
provider Chef::Provider::Service::Upstart
else
service_name "ceph"
end
supports :restart => true, :status => true
action [ :enable, :start ]
end
end
17 changes: 14 additions & 3 deletions recipes/mon.rb
@@ -90,9 +90,8 @@
end
end

# The key is going to be automatically
# created,
# We store it when it is created
# The osd and mds keys are going to be created automatically.
# We store them on the mon node object once they are created.
ruby_block "get osd-bootstrap keyring" do
block do
run_out = ""
@@ -105,3 +104,15 @@
end
not_if { node['ceph']['bootstrap_osd_key'] }
end
ruby_block "get mds-bootstrap keyring" do
block do
run_out = ""
while run_out.empty?
run_out = Mixlib::ShellOut.new("ceph auth get-key client.bootstrap-mds").run_command.stdout.strip
sleep 2
end
node.override['ceph']['bootstrap_mds_key'] = run_out
node.save
end
not_if { node['ceph']['bootstrap_mds_key'] }
end
73 changes: 54 additions & 19 deletions recipes/osd.rb
@@ -34,21 +34,61 @@
include_recipe "ceph::default"
include_recipe "ceph::conf"

if !node["ceph"]["osd_devices"].nil?
node["ceph"]["osd_devices"].each do |osd_device|
Log.debug("ceph-osd: #{osd_device}")
end
elsif node["ceph"]["osd_autoprepare"]
# set node["ceph"]["osd_autoprepare"] to true to enable automated osd disk
# discovery and preparation
osd_devices = Array.new
node['block_device'].select{|device,info| device =~ /^[hvs]d[^a]$/ and info['size'].to_i > 0}.each do |device,info|
Contributor:
This regex can be quite a problem.
I don't have a Chef server to check with, but doesn't Ohai give you a device type or something like that?

Contributor Author:
Unfortunately no - ohai gives output like this:

{
  "vda": {
    "size": "52428800",
    "removable": "0",
    "vendor": "0x1af4"
  },
  "vdb": {
    "size": "52428800",
    "removable": "0",
    "vendor": "0x1af4"
  },
  "dm-0": {
    "size": "36962304",
    "removable": "0"
  },
  "dm-1": {
    "size": "2088960",
    "removable": "0"
  }
}

My thinking was that although this regex is pretty liberal, the use of it is optional and the user can always submit a patch to improve its flexibility. For now I'm happy to scratch my own itch. :)

Of course if you can think of a way to improve it right now, then I'll gladly patch it.

Member:
We could maybe use a loop that looks into /sys/block/<device>/. If that directory contains a partition entry, the disk has partitions, so we probably do not want to touch it.

Contributor Author:
How about this as a strategy?

  1. I'll make the regex an attribute of some kind so that it's easier to override through the end-user's environment.
  2. I'll add another attribute to enable zapping the disks that match the regex, regardless of what's on them. Enabling forced zap is useful in testing environments and also when re-purposing disks.
  3. If forced zap is not enabled, a check will be done for whether the disk has anything on it before preparing it; if it does, that will be logged as a WARN. A rough sketch of this is shown below.
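
A rough sketch of that strategy, with the attribute names osd_device_match and osd_force_zap being purely illustrative (neither exists in the cookbook yet):

# Hypothetical attributes -- names are illustrative only.
default['ceph']['osd_device_match'] = '^[hvs]d[^a]$'
default['ceph']['osd_force_zap']    = false

# In recipes/osd.rb: pick candidates via the overridable pattern and skip
# disks that already carry partitions unless a forced zap was requested.
pattern = Regexp.new(node['ceph']['osd_device_match'])
osd_devices = []
node['block_device'].each do |device, info|
  next unless device =~ pattern && info['size'].to_i > 0
  if Dir.glob("/sys/block/#{device}/#{device}*").any? && !node['ceph']['osd_force_zap']
    Chef::Log.warn("ceph-osd: /dev/#{device} has existing partitions, skipping it")
    next
  end
  osd_devices << { 'device' => "/dev/#{device}" }
end
node.set['ceph']['osd_devices'] = osd_devices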

Log.debug("ceph-osd: Candidate Device /dev/#{device} found.")
osd_devices << {"device" => "/dev/#{device}"}
end
Log.debug("ceph-osd: New Candidates = #{osd_devices}")
node.set["ceph"]["osd_devices"] = osd_devices
node.save
else
Log.warn('ceph-osd: No ceph osd_devices have been set and ceph osd_autoprepare is not enabled.')
end

package 'gdisk' do
action :upgrade
end

# sometimes there are partitions on the disk that interfere with
# ceph-disk-prepare, so let's make sure there's nothing on each candidate disk
if node["ceph"]["osd_autoprepare"] and !node["ceph"]["osd_devices"].nil?
node["ceph"]["osd_devices"].each do |osd_device|
if osd_device['status'].nil?
ruby_block "ceph-osd: erase #{osd_device['device']} to prepare it as an osd" do
block do
devicewipe = Mixlib::ShellOut.new("sgdisk -oZ #{osd_device['device']}").run_command
Member:
The --zap-disk flag of ceph-disk-prepare does that.
With the current regex used to auto-select the disks to prepare, I'm not sure doing so would be safe, though.

Contributor Author:
Excellent, I'll refactor the disk preparation to incorporate the disk wipe and preparation in one step. 👍
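
A minimal sketch of that combined step, under the assumption that osd_device is the same hash iterated over elsewhere in this recipe:

# Wipe and prepare in one pass: --zap-disk destroys any existing partition
# table before creating the OSD, so a separate sgdisk wipe would no longer
# be needed.
execute "ceph-osd: zap and prepare #{osd_device['device']}" do
  command "ceph-disk-prepare --zap-disk #{osd_device['device']} #{osd_device['journal']}"
  not_if { osd_device['status'] == 'deployed' }
end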

if devicewipe.error?
raise "ceph-osd: erase of #{osd_device['device']} failed!"
end
end
end
elsif osd_device['status'] == 'deployed'
Log.debug("ceph-osd: Not erasing #{osd_device['device']} as it has already been deployed.")
else
Log.debug("ceph-osd: Not erasing #{osd_device['device']} as it has an unrecognised status.")
end
end
end

if !search(:node,"hostname:#{node['hostname']} AND dmcrypt:true").empty?
package 'cryptsetup' do
action :upgrade
end
end

service_type = node["ceph"]["osd"]["init_style"]
mons = get_mon_nodes("ceph_bootstrap_osd_key:*")
mons = get_mon_nodes("bootstrap_osd_key:*")

if mons.empty? then
puts "No ceph-mon found."
Log.warn("ceph-osd: No ceph osd bootstrap key found.")
else

directory "/var/lib/ceph/bootstrap-osd" do
@@ -60,7 +100,7 @@
# TODO cluster name
cluster = 'ceph'

execute "format as keyring" do
execute "create the local keyring file" do
command "ceph-authtool '/var/lib/ceph/bootstrap-osd/#{cluster}.keyring' --create-keyring --name=client.bootstrap-osd --add-key='#{mons[0]["ceph"]["bootstrap_osd_key"]}'"
creates "/var/lib/ceph/bootstrap-osd/#{cluster}.keyring"
end
@@ -107,29 +147,26 @@
unless node["ceph"]["osd_devices"].nil?
node["ceph"]["osd_devices"].each_with_index do |osd_device,index|
if !osd_device["status"].nil?
Log.info("osd: osd_device #{osd_device} has already been setup.")
Log.debug("ceph-osd: osd_device #{osd_device['device']} has already been prepared.")
next
end
dmcrypt = ""
if osd_device["encrypted"] == true
dmcrypt = "--dmcrypt"
end
execute "Creating Ceph OSD on #{osd_device['device']}" do
command "ceph-disk-prepare #{dmcrypt} #{osd_device['device']} #{osd_device['journal']}"
action :run
notifies :create, "ruby_block[save osd_device status]"
end
# we add this status to the node env
# so that we can implement recreate
# and/or delete functionalities in the
# future.
ruby_block "save osd_device status" do

ruby_block "ceph-osd: create osd on #{osd_device['device']}" do
block do
node.normal["ceph"]["osd_devices"][index]["status"] = "deployed"
node.save
deviceprep = Mixlib::ShellOut.new("ceph-disk-prepare #{dmcrypt} #{osd_device['device']} #{osd_device['journal']}").run_command
if deviceprep.error?
raise "ceph-osd: osd creation on #{osd_device['device']} failed!"
else
node.set["ceph"]["osd_devices"][index]["status"] = "deployed"
node.save
end
end
action :nothing
end

end
service "ceph_osd" do
case service_type
@@ -142,8 +179,6 @@
action [ :enable, :start ]
supports :restart => true
end
else
Log.info('node["ceph"]["osd_devices"] empty')
end
end
end
33 changes: 33 additions & 0 deletions recipes/pools.rb
@@ -0,0 +1,33 @@
#
# Cookbook Name:: ceph
# Recipe:: pools
#
# Copyright 2013, Business Connexion (Pty) Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# This recipe uses a LWRP to setup pools defined in the environment

if !node["ceph"]["pools"].nil?
node["ceph"]["pools"].each do |pool|
Log.debug("ceph-pool: #{pool}")
ceph_manage_pool pool["name"] do
pg_num pool["pg_num"]
pgp_num pool["pgp_num"]
min_size pool["min_size"]
action :create
not_if "ceph osd lspools | grep #{pool["name"]}"
Member:
With grep you may get false positives; for instance, if a pool foobar exists and you want to create foo.

Re-running ceph osd pool create is safe, or you could test whether the pool exists with, for instance, ceph osd pool get <name> size: if that doesn't return ENOENT, the pool exists.
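
A minimal sketch of a guard along those lines, keyed on the exit status of an exact pool lookup rather than a substring grep:

# `ceph osd pool get <name> size` exits non-zero (ENOENT) if the pool does
# not exist, so not_if only skips the resource when the pool is really there.
ceph_manage_pool pool["name"] do
  pg_num pool["pg_num"]
  action :create
  not_if "ceph osd pool get #{pool['name']} size"
end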

end
end
end