diff --git a/Puppetfile b/Puppetfile index c14773750..e7481c88c 100644 --- a/Puppetfile +++ b/Puppetfile @@ -42,6 +42,10 @@ mod 'gluster', :commit => '6c962083d8b100dcaeb6f11dbe61e6071f3d13f0', :git => 'https://github.com/purpleidea/puppet-gluster.git' +mod 'gnocchi', + :commit => '9d7e2042282a893da08aa4715a220516036fcc1e', + :git => 'https://github.com/stackforge/puppet-gnocchi.git' + mod 'haproxy', :commit => 'f381510e940ee11feb044c1c728ba2e5af807c79', :git => 'https://github.com/puppetlabs/puppetlabs-haproxy.git' @@ -118,6 +122,10 @@ mod 'ntp', :commit => '8f697e32bc279b36ada752273e6c788716b95315', :git => 'https://github.com/puppetlabs/puppetlabs-ntp' +mod 'openstack_extras', + :commit => '9693efeb878b305bd00e7ef98cb25e1a874d21bf', + :git => 'https://github.com/stackforge/puppet-openstack_extras.git' + mod 'openstacklib', :commit => 'b7d3c8eaaf47ffbddb50cd942f8654e1aa8fe2f2', :git => 'https://github.com/stackforge/puppet-openstacklib.git' @@ -178,6 +186,10 @@ mod 'timezone', :commit => 'e82cd1b32f395217056df492e5a7dac8dc5c683a', :git => 'https://github.com/saz/puppet-timezone.git' +mod 'tripleo', + :commit => '045e853acac8439d00fcd750928342af721582ad', + :git => 'https://github.com/stackforge/puppet-tripleo.git' + mod 'trove', :commit => 'c345e128c7fec585acf00b5a275a78cd1ae89f03', :git => 'https://github.com/stackforge/puppet-trove' @@ -197,3 +209,4 @@ mod 'vswitch', mod 'xinetd', :commit => '6b02de8d4f30a819eb404048e4258e3a5e8023c8', :git => 'https://github.com/puppetlabs/puppetlabs-xinetd.git' + diff --git a/gnocchi/.fixtures.yml b/gnocchi/.fixtures.yml new file mode 100644 index 000000000..fe0c152a8 --- /dev/null +++ b/gnocchi/.fixtures.yml @@ -0,0 +1,11 @@ +fixtures: + repositories: + 'inifile': 'git://github.com/puppetlabs/puppetlabs-inifile' + 'concat': 'git://github.com/puppetlabs/puppetlabs-concat.git' + 'keystone': 'git://github.com/stackforge/puppet-keystone.git' + 'mysql': 'git://github.com/puppetlabs/puppetlabs-mysql.git' + 'openstacklib': 'git://github.com/stackforge/puppet-openstacklib.git' + 'postgresql': 'git://github.com/puppetlabs/puppet-postgresql.git' + 'stdlib': 'git://github.com/puppetlabs/puppetlabs-stdlib.git' + symlinks: + 'gnocchi': "#{source_dir}" diff --git a/gnocchi/.gitignore b/gnocchi/.gitignore new file mode 100644 index 000000000..da4238187 --- /dev/null +++ b/gnocchi/.gitignore @@ -0,0 +1,7 @@ +*.swp +spec/fixtures/modules/* +spec/fixtures/manifests/site.pp +Gemfile.lock +.vendor +.bundle/ +vendor/ diff --git a/gnocchi/.gitreview b/gnocchi/.gitreview new file mode 100644 index 000000000..ad856333a --- /dev/null +++ b/gnocchi/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.openstack.org +port=29418 +project=stackforge/puppet-gnocchi.git diff --git a/gnocchi/Gemfile b/gnocchi/Gemfile new file mode 100644 index 000000000..e4c078a33 --- /dev/null +++ b/gnocchi/Gemfile @@ -0,0 +1,19 @@ +source 'https://rubygems.org' + +group :development, :test do + gem 'puppetlabs_spec_helper', :require => false + gem 'puppet-lint', '~> 1.1' + gem 'puppet-lint-param-docs', '1.1.0' + gem 'rake', '10.1.1' + gem 'rspec', '< 2.99' + gem 'json' + gem 'webmock' +end + +if puppetversion = ENV['PUPPET_GEM_VERSION'] + gem 'puppet', puppetversion, :require => false +else + gem 'puppet', :require => false +end + +# vim:ft=ruby diff --git a/gnocchi/LICENSE b/gnocchi/LICENSE new file mode 100644 index 000000000..68c771a09 --- /dev/null +++ b/gnocchi/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR 
USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + diff --git a/gnocchi/README.md b/gnocchi/README.md new file mode 100644 index 000000000..6c98a4ef8 --- /dev/null +++ b/gnocchi/README.md @@ -0,0 +1,55 @@ +puppet-gnocchi +============== + +#### Table of Contents + +1. [Overview - What is the gnocchi module?](#overview) +2. [Module Description - What does the module do?](#module-description) +3. [Setup - The basics of getting started with gnocchi](#setup) +4. [Implementation - An under-the-hood peek at what the module is doing](#implementation) +5. [Limitations - OS compatibility, etc.](#limitations) +6. [Development - Guide for contributing to the module](#development) +7. [Contributors - Those with commits](#contributors) +8. [Release Notes - Notes on the most recent updates to the module](#release-notes) + +Overview +-------- + +The gnocchi module is a part of [Stackforge](https://github.com/stackforge), an effort by the Openstack infrastructure team to provide continuous integration testing and code review for Openstack and Openstack community projects not part of the core software. The module itself is used to flexibly configure and manage the management service for Openstack. 
+
+Module Description
+------------------
+
+Setup
+-----
+
+**What the gnocchi module affects:**
+
+* gnocchi, the HTTP API to store metrics and index resources for OpenStack
+  (OpenStack Datapoint Service).
+
+Implementation
+--------------
+
+### gnocchi
+
+gnocchi is a combination of Puppet manifests and Ruby code to deliver configuration and extra functionality through types and providers.
+
+Limitations
+-----------
+
+Development
+-----------
+
+Developer documentation for the entire puppet-openstack project:
+
+* https://wiki.openstack.org/wiki/Puppet-openstack#Developer_documentation
+
+Contributors
+------------
+
+* https://github.com/stackforge/puppet-gnocchi/graphs/contributors
+
+Release Notes
+-------------
+
diff --git a/gnocchi/Rakefile b/gnocchi/Rakefile
new file mode 100644
index 000000000..84c9a7046
--- /dev/null
+++ b/gnocchi/Rakefile
@@ -0,0 +1,9 @@
+require 'puppetlabs_spec_helper/rake_tasks'
+require 'puppet-lint/tasks/puppet-lint'
+
+PuppetLint.configuration.fail_on_warnings = true
+PuppetLint.configuration.send('disable_80chars')
+PuppetLint.configuration.send('disable_class_parameter_defaults')
+
+task(:default).clear
+task :default => [:spec, :lint]
diff --git a/gnocchi/examples/site.pp b/gnocchi/examples/site.pp
new file mode 100644
index 000000000..523783c91
--- /dev/null
+++ b/gnocchi/examples/site.pp
@@ -0,0 +1,19 @@
+# This is an example of site.pp to deploy Gnocchi
+
+class { 'gnocchi::keystone::auth':
+  admin_address    => '10.0.0.1',
+  internal_address => '10.0.0.1',
+  public_address   => '10.0.0.1',
+  password         => 'verysecrete',
+  region           => 'OpenStack'
+}
+
+class { 'gnocchi':
+  database_connection => 'mysql://gnocchi:secrete@10.0.0.1/gnocchi?charset=utf8',
+}
+
+class { 'gnocchi::api':
+  bind_host         => '10.0.0.1',
+  identity_uri      => 'https://identity.openstack.org:35357',
+  keystone_password => 'verysecrete'
+}
diff --git a/gnocchi/lib/puppet/provider/gnocchi.rb b/gnocchi/lib/puppet/provider/gnocchi.rb
new file mode 100644
index 000000000..f96982b1d
--- /dev/null
+++ b/gnocchi/lib/puppet/provider/gnocchi.rb
@@ -0,0 +1,113 @@
+require 'json'
+require 'puppet/util/inifile'
+
+class Puppet::Provider::Gnocchi < Puppet::Provider
+
+  def self.conf_filename
+    '/etc/gnocchi/gnocchi.conf'
+  end
+
+  def self.withenv(hash, &block)
+    saved = ENV.to_hash
+    hash.each do |name, val|
+      ENV[name.to_s] = val
+    end
+
+    yield
+  ensure
+    ENV.clear
+    saved.each do |name, val|
+      ENV[name] = val
+    end
+  end
+
+  def self.gnocchi_credentials
+    @gnocchi_credentials ||= get_gnocchi_credentials
+  end
+
+  def self.get_gnocchi_credentials
+    auth_keys = ['auth_host', 'auth_port', 'auth_protocol',
+                 'admin_tenant_name', 'admin_user', 'admin_password']
+    conf = gnocchi_conf
+    if conf and conf['keystone_authtoken'] and
+        auth_keys.all?{|k| !conf['keystone_authtoken'][k].nil?}
+      return Hash[ auth_keys.map \
+        { |k| [k, conf['keystone_authtoken'][k].strip] } ]
+    else
+      raise(Puppet::Error, "File: #{conf_filename} does not contain all \
Gnocchi types will not work if gnocchi is not \ +correctly configured.") + end + end + + def gnocchi_credentials + self.class.gnocchi_credentials + end + + def self.auth_endpoint + @auth_endpoint ||= get_auth_endpoint + end + + def self.get_auth_endpoint + q = gnocchi_credentials + "#{q['auth_protocol']}://#{q['auth_host']}:#{q['auth_port']}/v2.0/" + end + + def self.gnocchi_conf + return @gnocchi_conf if @gnocchi_conf + @gnocchi_conf = Puppet::Util::IniConfig::File.new + @gnocchi_conf.read(conf_filename) + @gnocchi_conf + end + + def self.auth_gnocchi(*args) + q = gnocchi_credentials + authenv = { + :OS_AUTH_URL => self.auth_endpoint, + :OS_USERNAME => q['admin_user'], + :OS_TENANT_NAME => q['admin_tenant_name'], + :OS_PASSWORD => q['admin_password'] + } + begin + withenv authenv do + gnocchi(args) + end + rescue Exception => e + if (e.message =~ /\[Errno 111\] Connection refused/) or + (e.message =~ /\(HTTP 400\)/) + sleep 10 + withenv authenv do + gnocchi(args) + end + else + raise(e) + end + end + end + + def auth_gnocchi(*args) + self.class.auth_gnocchi(args) + end + + def gnocchi_manage(*args) + cmd = args.join(" ") + output = `#{cmd}` + $?.exitstatus + end + + def self.reset + @gnocchi_conf = nil + @gnocchi_credentials = nil + end + + def self.list_gnocchi_resources(type, *args) + json = auth_gnocchi("--json", "#{type}-list", *args) + return JSON.parse(json) + end + + def self.get_gnocchi_resource_attrs(type, id) + json = auth_gnocchi("--json", "#{type}-show", id) + return JSON.parse(json) + end + +end diff --git a/gnocchi/lib/puppet/provider/gnocchi_config/ini_setting.rb b/gnocchi/lib/puppet/provider/gnocchi_config/ini_setting.rb new file mode 100644 index 000000000..0fee4b125 --- /dev/null +++ b/gnocchi/lib/puppet/provider/gnocchi_config/ini_setting.rb @@ -0,0 +1,27 @@ +Puppet::Type.type(:gnocchi_config).provide( + :ini_setting, + :parent => Puppet::Type.type(:ini_setting).provider(:ruby) +) do + + def section + resource[:name].split('/', 2).first + end + + def setting + resource[:name].split('/', 2).last + end + + def separator + '=' + end + + def self.file_path + '/etc/gnocchi/gnocchi.conf' + end + + # added for backwards compatibility with older versions of inifile + def file_path + self.class.file_path + end + +end diff --git a/gnocchi/lib/puppet/type/gnocchi_config.rb b/gnocchi/lib/puppet/type/gnocchi_config.rb new file mode 100644 index 000000000..9fbf10c0f --- /dev/null +++ b/gnocchi/lib/puppet/type/gnocchi_config.rb @@ -0,0 +1,42 @@ +Puppet::Type.newtype(:gnocchi_config) do + + ensurable + + newparam(:name, :namevar => true) do + desc 'Section/setting name to manage from /etc/gnocchi/gnocchi.conf' + newvalues(/\S+\/\S+/) + end + + newproperty(:value) do + desc 'The value of the setting to be defined.' + munge do |value| + value = value.to_s.strip + value.capitalize! if value =~ /^(true|false)$/i + value + end + + def is_to_s( currentvalue ) + if resource.secret? + return '[old secret redacted]' + else + return currentvalue + end + end + + def should_to_s( newvalue ) + if resource.secret? + return '[new secret redacted]' + else + return newvalue + end + end + end + + newparam(:secret, :boolean => true) do + desc 'Whether to hide the value from Puppet logs. Defaults to `false`.' 
+
+    newvalues(:true, :false)
+
+    defaultto false
+  end
+end
diff --git a/gnocchi/manifests/api.pp b/gnocchi/manifests/api.pp
new file mode 100644
index 000000000..5a46cf6a2
--- /dev/null
+++ b/gnocchi/manifests/api.pp
@@ -0,0 +1,177 @@
+#
+# Copyright (C) 2014 eNovance SAS
+#
+# Author: Emilien Macchi
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# == Class gnocchi::api
+#
+# Configure API service in gnocchi
+#
+# == Parameters
+#
+# [*manage_service*]
+#   (optional) Whether to start/stop the service
+#   Defaults to true
+#
+# [*ensure_package*]
+#   (optional) Whether the gnocchi api package will be installed
+#   Defaults to 'present'
+#
+# [*keystone_password*]
+#   (required) Password used for authentication.
+#
+# [*verbose*]
+#   (optional) Whether to log the gnocchi api service at verbose level.
+#   Default: false
+#
+# [*debug*]
+#   (optional) Whether to log the gnocchi api service at debug level.
+#   Default: false
+#
+# [*log_file*]
+#   (optional) The path of the file used for logging.
+#   If set to boolean false, it will not log to any file.
+#   Default: /var/log/gnocchi/gnocchi-api.log
+#
+# [*log_dir*]
+#   (optional) Directory to which gnocchi logs are sent.
+#   If set to boolean false, it will not log to any directory.
+#   Defaults to '/var/log/gnocchi'
+#
+# [*keystone_tenant*]
+#   (optional) Tenant to authenticate to.
+#   Defaults to services.
+#
+# [*keystone_user*]
+#   (optional) User to authenticate as with keystone.
+#   Defaults to 'gnocchi'.
+#
+# [*enabled*]
+#   (optional) Whether to enable services.
+#   Defaults to true.
+#
+# [*use_syslog*]
+#   (optional) Use syslog for logging.
+#   Defaults to false.
+#
+# [*log_facility*]
+#   (optional) Syslog facility to receive log lines.
+#   Defaults to 'LOG_USER'.
+#
+# [*purge_config*]
+#   (optional) Whether to set only the specified config options
+#   in the api config.
+#   Defaults to false.
+#
+# [*identity_uri*]
+#   (optional) Complete admin Identity API endpoint.
+#   Defaults to 'http://127.0.0.1:35357'.
+#
+class gnocchi::api(
+  $keystone_password,
+  $verbose          = false,
+  $debug            = false,
+  $log_file         = '/var/log/gnocchi/gnocchi-api.log',
+  $log_dir          = '/var/log/gnocchi',
+  $keystone_tenant  = 'services',
+  $keystone_user    = 'gnocchi',
+  $identity_uri     = 'http://127.0.0.1:35357',
+  $enabled          = true,
+  $use_syslog       = false,
+  $log_facility     = 'LOG_USER',
+  $purge_config     = false,
+  $manage_service   = true,
+  $ensure_package   = 'present',
+) inherits gnocchi {
+
+  require keystone::python
+  include gnocchi::params
+
+  Gnocchi_config<||> ~> Exec['post-gnocchi_config']
+  Gnocchi_config<||> ~> Service['gnocchi-api']
+  Package['gnocchi-api'] -> Gnocchi_config<||>
+
+  if $::gnocchi::database_connection {
+    if($::gnocchi::database_connection =~ /mysql:\/\/\S+:\S+@\S+\/\S+/) {
+      require 'mysql::bindings'
+      require 'mysql::bindings::python'
+    } elsif($::gnocchi::database_connection =~ /postgresql:\/\/\S+:\S+@\S+\/\S+/) {
+
+    } elsif($::gnocchi::database_connection =~ /sqlite:\/\//) {
+
+    } else {
+      fail("Invalid db connection ${::gnocchi::database_connection}")
+    }
+    gnocchi_config {
+      'database/sql_connection':   value => $::gnocchi::database_connection, secret => true;
+      'database/sql_idle_timeout': value => $::gnocchi::database_idle_timeout;
+    }
+  }
+
+  # basic service config
+  gnocchi_config {
+    'DEFAULT/verbose':                      value => $verbose;
+    'DEFAULT/debug':                        value => $debug;
+    'keystone_authtoken/identity_uri':      value => $identity_uri;
+    'keystone_authtoken/admin_user':        value => $keystone_user;
+    'keystone_authtoken/admin_password':    value => $keystone_password, secret => true;
+    'keystone_authtoken/admin_tenant_name': value => $keystone_tenant;
+  }
+
+  # Logging
+  if $log_file {
+    gnocchi_config {
+      'DEFAULT/log_file': value => $log_file;
+    }
+  } else {
+    gnocchi_config {
+      'DEFAULT/log_file': ensure => absent;
+    }
+  }
+
+  if $log_dir {
+    gnocchi_config {
+      'DEFAULT/log_dir': value => $log_dir;
+    }
+  } else {
+    gnocchi_config {
+      'DEFAULT/log_dir': ensure => absent;
+    }
+  }
+
+  # Syslog
+  if $use_syslog {
+    gnocchi_config {
+      'DEFAULT/use_syslog':          value => true;
+      'DEFAULT/syslog_log_facility': value => $log_facility;
+    }
+  } else {
+    gnocchi_config {
+      'DEFAULT/use_syslog': value => false;
+    }
+  }
+
+  resources { 'gnocchi_config':
+    purge => $purge_config,
+  }
+
+  gnocchi::generic_service { 'api':
+    enabled        => $enabled,
+    manage_service => $manage_service,
+    ensure_package => $ensure_package,
+    package_name   => $::gnocchi::params::api_package_name,
+    service_name   => $::gnocchi::params::api_service_name,
+  }
+}
diff --git a/gnocchi/manifests/db/mysql.pp b/gnocchi/manifests/db/mysql.pp
new file mode 100644
index 000000000..44946643c
--- /dev/null
+++ b/gnocchi/manifests/db/mysql.pp
@@ -0,0 +1,76 @@
+#
+# Copyright (C) 2014 eNovance SAS
+#
+# Author: Emilien Macchi
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# == Class: gnocchi::db::mysql
+#
+# The gnocchi::db::mysql class creates a MySQL database for gnocchi.
+# It must be used on the MySQL server +# +# === Parameters +# +# [*password*] +# (required) Password that will be used for the gnocchi db user. +# +# [*dbname*] +# (optional) Name of gnocchi database. +# Defaults to gnocchi +# +# [*user*] +# (optional) Name of gnocchi user. +# Defaults to gnocchi +# +# [*host*] +# (optional) Host where user should be allowed all privileges for database. +# Defaults to 127.0.0.1 +# +# [*allowed_hosts*] +# (optional) Hosts allowed to use the database +# Defaults to undef. +# +# [*charset*] +# (optional) Charset of gnocchi database +# Defaults 'utf8'. +# +# [*collate*] +# (optional) Charset collate of gnocchi database +# Defaults 'utf8_unicode_ci'. +# +class gnocchi::db::mysql( + $password, + $dbname = 'gnocchi', + $user = 'gnocchi', + $host = '127.0.0.1', + $allowed_hosts = undef, + $charset = 'utf8', + $collate = 'utf8_unicode_ci', + $mysql_module = undef, +) { + + validate_string($password) + + ::openstacklib::db::mysql { 'gnocchi': + user => $user, + password_hash => mysql_password($password), + dbname => $dbname, + host => $host, + charset => $charset, + collate => $collate, + allowed_hosts => $allowed_hosts, + } + + ::Openstacklib::Db::Mysql['gnocchi'] ~> Exec<| title == 'gnocchi-dbsync' |> +} diff --git a/gnocchi/manifests/db/postgresql.pp b/gnocchi/manifests/db/postgresql.pp new file mode 100644 index 000000000..ca31d5542 --- /dev/null +++ b/gnocchi/manifests/db/postgresql.pp @@ -0,0 +1,47 @@ +# == Class: gnocchi::db::postgresql +# +# Class that configures postgresql for gnocchi +# Requires the Puppetlabs postgresql module. +# +# === Parameters +# +# [*password*] +# (Required) Password to connect to the database. +# +# [*dbname*] +# (Optional) Name of the database. +# Defaults to 'gnocchi'. +# +# [*user*] +# (Optional) User to connect to the database. +# Defaults to 'gnocchi'. +# +# [*encoding*] +# (Optional) The charset to use for the database. +# Default to undef. +# +# [*privileges*] +# (Optional) Privileges given to the database user. +# Default to 'ALL' +# +class gnocchi::db::postgresql( + $password, + $dbname = 'gnocchi', + $user = 'gnocchi', + $encoding = undef, + $privileges = 'ALL', +) { + + Class['gnocchi::db::postgresql'] -> Service<| title == 'gnocchi' |> + + ::openstacklib::db::postgresql { 'gnocchi': + password_hash => postgresql_password($user, $password), + dbname => $dbname, + user => $user, + encoding => $encoding, + privileges => $privileges, + } + + ::Openstacklib::Db::Postgresql['gnocchi'] ~> Exec<| title == 'gnocchi-dbsync' |> + +} diff --git a/gnocchi/manifests/db/sync.pp b/gnocchi/manifests/db/sync.pp new file mode 100644 index 000000000..3b75869e4 --- /dev/null +++ b/gnocchi/manifests/db/sync.pp @@ -0,0 +1,28 @@ +# +# Copyright (C) 2014 eNovance SAS +# +# Author: Emilien Macchi +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# Class to execute "gnocchi-dbsync" +# +class gnocchi::db::sync { + exec { 'gnocchi-dbsync': + path => '/usr/bin', + user => 'gnocchi', + refreshonly => true, + subscribe => [Package['gnocchi'], Keystone_config['database/connection']], + require => User['gnocchi'], + } +} diff --git a/gnocchi/manifests/generic_service.pp b/gnocchi/manifests/generic_service.pp new file mode 100644 index 000000000..91e348d4b --- /dev/null +++ b/gnocchi/manifests/generic_service.pp @@ -0,0 +1,71 @@ +# +# Copyright (C) 2014 eNovance SAS +# +# Author: Emilien Macchi +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# == Define: gnocchi::generic_service +# +# This defined type implements basic gnocchi services. +# It is introduced to attempt to consolidate +# common code. +# +# It also allows users to specify ad-hoc services +# as needed +# +# This define creates a service resource with title gnocchi-${name} and +# conditionally creates a package resource with title gnocchi-${name} +# +define gnocchi::generic_service( + $package_name, + $service_name, + $enabled = false, + $manage_service = true, + $ensure_package = 'present' +) { + + include gnocchi::params + include gnocchi::db::sync + + $gnocchi_title = "gnocchi-${name}" + Exec['post-gnocchi_config'] ~> Service<| title == $gnocchi_title |> + Exec<| title == 'gnocchi-dbsync' |> ~> Service<| title == $gnocchi_title |> + + if ($package_name) { + if !defined(Package[$package_name]) { + package { $gnocchi_title: + ensure => $ensure_package, + name => $package_name, + notify => Service[$gnocchi_title], + } + } + } + + if $service_name { + if $manage_service { + if $enabled { + $service_ensure = 'running' + } else { + $service_ensure = 'stopped' + } + } + + service { $gnocchi_title: + ensure => $service_ensure, + name => $service_name, + enable => $enabled, + hasstatus => true, + } + } +} diff --git a/gnocchi/manifests/init.pp b/gnocchi/manifests/init.pp new file mode 100644 index 000000000..031d2b9ee --- /dev/null +++ b/gnocchi/manifests/init.pp @@ -0,0 +1,43 @@ +# +# Copyright (C) 2014 eNovance SAS +# +# Author: Emilien Macchi +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# gnocchi::init +# +# Gnocchi base config +# +# == Parameters +# +# [*database_connection*] +# (optional) Connection url to connect to gnocchi database. +# Defaults to 'sqlite:////var/lib/gnocchi/gnocchi.sqlite' +# +# [*database_idle_timeout*] +# (optional) Timeout before idle db connections are reaped. 
+# Defaults to 3600 +# +class gnocchi( + $database_connection = 'sqlite:////var/lib/gnocchi/gnocchi.sqlite', + $database_idle_timeout = 3600, +) { + include gnocchi::params + + exec { 'post-gnocchi_config': + command => '/bin/echo "Gnocchi config has changed"', + refreshonly => true, + } + +} diff --git a/gnocchi/manifests/keystone/auth.pp b/gnocchi/manifests/keystone/auth.pp new file mode 100644 index 000000000..cb010d9d6 --- /dev/null +++ b/gnocchi/manifests/keystone/auth.pp @@ -0,0 +1,121 @@ +# +# Copyright (C) 2014 eNovance SAS +# +# Author: Emilien Macchi +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# gnocchi::keystone::auth +# +# Configures Gnocchi user, service and endpoint in Keystone. +# +# === Parameters +# +# [*password*] +# (required) Password for Gnocchi user. +# +# [*auth_name*] +# Username for Gnocchi service. Defaults to 'gnocchi'. +# +# [*email*] +# Email for Gnocchi user. Defaults to 'gnocchi@localhost'. +# +# [*tenant*] +# Tenant for Gnocchi user. Defaults to 'services'. +# +# [*configure_endpoint*] +# Should Gnocchi endpoint be configured? Defaults to 'true'. +# +# [*configure_user*] +# Should Gnocchi user be configured? Defaults to 'true'. +# +# [*configure_user_role*] +# Should Gnocchi user/role association be configured? Defaults to 'true'. +# +# [*service_type*] +# Type of service. Defaults to 'gnocchi'. +# +# [*public_protocol*] +# Protocol for public endpoint. Defaults to 'http'. +# +# [*public_address*] +# Public address for endpoint. Defaults to '127.0.0.1'. +# +# [*public_port*] +# Port for public endpoint. +# Defaults to '8041'. +# +# [*admin_protocol*] +# Protocol for admin endpoint. Defaults to 'http'. +# +# [*admin_address*] +# Admin address for endpoint. Defaults to '127.0.0.1'. +# +# [*admin_port*] +# Port for admin endpoint. +# Defaults to '8041'. +# +# [*internal_protocol*] +# Protocol for internal endpoint. Defaults to 'http'. +# +# [*internal_address*] +# Internal address for endpoint. Defaults to '127.0.0.1'. +# +# [*internal_port*] +# Port for internal endpoint. +# Defaults to '8041'. +# +# [*region*] +# Region for endpoint. Defaults to 'RegionOne'. 
+# +# +class gnocchi::keystone::auth ( + $password, + $auth_name = 'gnocchi', + $email = 'gnocchi@localhost', + $tenant = 'services', + $configure_endpoint = true, + $configure_user = true, + $configure_user_role = true, + $service_type = 'gnocchi', + $public_protocol = 'http', + $public_address = '127.0.0.1', + $public_port = '8041', + $admin_protocol = 'http', + $admin_address = '127.0.0.1', + $admin_port = '8041', + $internal_protocol = 'http', + $internal_address = '127.0.0.1', + $internal_port = '8041', + $region = 'RegionOne' +) { + + Keystone_user_role["${auth_name}@${tenant}"] ~> Service <| name == 'gnocchi-api' |> + Keystone_endpoint["${region}/${auth_name}"] ~> Service <| name == 'gnocchi-api' |> + + keystone::resource::service_identity { $auth_name: + configure_user => true, + configure_user_role => true, + configure_endpoint => $configure_endpoint, + service_type => $service_type, + service_description => 'OpenStack Datapoint Service', + region => $region, + password => $password, + email => $email, + tenant => $tenant, + public_url => "${public_protocol}://${public_address}:${public_port}", + internal_url => "${internal_protocol}://${internal_address}:${internal_port}", + admin_url => "${admin_protocol}://${admin_address}:${admin_port}", + } + +} diff --git a/gnocchi/manifests/params.pp b/gnocchi/manifests/params.pp new file mode 100644 index 000000000..ddf81248b --- /dev/null +++ b/gnocchi/manifests/params.pp @@ -0,0 +1,19 @@ +# Parameters for puppet-gnocchi +# +class gnocchi::params { + + case $::osfamily { + 'RedHat': { + $api_package_name = 'openstack-gnocchi-api' + $api_service_name = 'openstack-gnocchi-api' + } + 'Debian': { + $api_package_name = 'gnocchi-api' + $api_service_name = 'gnocchi-api' + } + default: { + fail("Unsupported osfamily: ${::osfamily} operatingsystem") + } + + } # Case $::osfamily +} diff --git a/gnocchi/manifests/storage/file.pp b/gnocchi/manifests/storage/file.pp new file mode 100644 index 000000000..97014256c --- /dev/null +++ b/gnocchi/manifests/storage/file.pp @@ -0,0 +1,37 @@ +# +# Copyright (C) 2014 eNovance SAS +# +# Author: Emilien Macchi +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# gnocchi::storage::file +# +# File driver for Gnocchi +# +# == Parameters +# +# [*file_basepath*] +# (optional) Path used to store gnocchi data files. +# Defaults to '/var/lib/gnocchi'. +# +class gnocchi::storage::file( + $file_basepath = '/var/lib/gnocchi', +) { + + gnocchi_config { + 'storage/driver': value => 'file'; + 'storage/file_basepath': value => $file_basepath; + } + +} diff --git a/gnocchi/manifests/storage/swift.pp b/gnocchi/manifests/storage/swift.pp new file mode 100644 index 000000000..4aec6872d --- /dev/null +++ b/gnocchi/manifests/storage/swift.pp @@ -0,0 +1,61 @@ +# +# Copyright (C) 2014 eNovance SAS +# +# Author: Emilien Macchi +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# gnocchi::storage::swift +# +# Swift driver for Gnocchi +# +# == Parameters +# +# [*swift_auth_version*] +# (optional) 'Swift authentication version to user. +# Defaults to '1'. +# +# [*swift_authurl*] +# (optional) Swift auth URL. +# Defaults to 'http://localhost:8080/auth/v1.0'. +# +# [*swift_user*] +# (optional) Swift user. +# Defaults to 'admin:admin' +# +# [*swift_key*] +# (optional) Swift key. +# Defaults to 'admin' +# +# [*swift_tenant_name*] +# (optional) Swift tenant name, only used if swift_auth_version is '2'. +# Defaults to undef +# +class gnocchi::storage::swift( + $swift_auth_version = '1', + $swift_authurl = 'http://localhost:8080/auth/v1.0', + $swift_user = 'admin:admin', + $swift_key = 'admin', + $swift_tenant_name = undef, +) { + + gnocchi_config { + 'storage/driver': value => 'swift'; + 'storage/swift_user': value => $swift_user; + 'storage/swift_key': value => $swift_key; + 'storage/swift_tenant_name': value => $swift_tenant_name; + 'storage/swift_auth_version': value => $swift_auth_version; + 'storage/swift_authurl': value => $swift_authurl; + } + +} diff --git a/gnocchi/metadata.json b/gnocchi/metadata.json new file mode 100644 index 000000000..65d4fedb2 --- /dev/null +++ b/gnocchi/metadata.json @@ -0,0 +1,39 @@ +{ + "name": "stackforge-gnocchi", + "version": "5.0.0", + "author": "eNovance and StackForge Contributors", + "summary": "Puppet module for OpenStack Gnocchi", + "license": "Apache License 2.0", + "source": "git://github.com/stackforge/puppet-gnocchi.git", + "project_page": "https://launchpad.net/puppet-gnocchi", + "issues_url": "https://bugs.launchpad.net/puppet-gnocchi", + "requirements": [ + { "name": "pe","version_requirement": "3.x" }, + { "name": "puppet","version_requirement": "3.x" } + ], + "operatingsystem_support": [ + { + "operatingsystem": "Debian", + "operatingsystemrelease": ["8"] + }, + { + "operatingsystem": "Fedora", + "operatingsystemrelease": ["20"] + }, + { + "operatingsystem": "RedHat", + "operatingsystemrelease": ["7"] + }, + { + "operatingsystem": "Ubuntu", + "operatingsystemrelease": ["14.04"] + } + ], + "description": "Installs and configures OpenStack Gnocchi (Metric & index storage API).", + "dependencies": [ + { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, + { "name": "stackforge/keystone", "version_requirement": ">=5.0.0 <6.0.0" }, + { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, + { "name": "stackforge/openstacklib", "version_requirement": ">=5.0.0" } + ] +} diff --git a/gnocchi/spec/classes/gnocchi_api_spec.rb b/gnocchi/spec/classes/gnocchi_api_spec.rb new file mode 100644 index 000000000..619cafd1f --- /dev/null +++ b/gnocchi/spec/classes/gnocchi_api_spec.rb @@ -0,0 +1,87 @@ +# +# Unit tests for gnocchi::api +# +require 'spec_helper' + +describe 'gnocchi::api' do + + let :params do + { :keystone_password => 'passw0rd', + :keystone_user => 'gnocchi', + :identity_uri => 'https://identity.os.net:5000', + :keystone_tenant => '_services_', + } + end + + shared_examples 'gnocchi-api' do + + context 'with default parameters' do + + it 'installs gnocchi-api package and 
service' do + should contain_service('gnocchi-api').with( + :name => platform_params[:api_service_name], + :ensure => 'running', + :hasstatus => true, + :enable => true + ) + should contain_package('gnocchi-api').with( + :name => platform_params[:api_package_name], + :ensure => 'present', + :notify => 'Service[gnocchi-api]' + ) + end + + it 'configures gnocchi-api with default parameters' do + should contain_gnocchi_config('DEFAULT/verbose').with_value(false) + should contain_gnocchi_config('DEFAULT/debug').with_value(false) + should contain_gnocchi_config('keystone_authtoken/identity_uri').with_value(params[:identity_uri]) + should contain_gnocchi_config('keystone_authtoken/admin_tenant_name').with_value(params[:keystone_tenant]) + should contain_gnocchi_config('keystone_authtoken/admin_user').with_value(params[:keystone_user]) + should contain_gnocchi_config('keystone_authtoken/admin_password').with_value(params[:keystone_password]) + should contain_gnocchi_config('keystone_authtoken/admin_password').with_value(params[:keystone_password]).with_secret(true) + end + + context 'when using MySQL' do + let :pre_condition do + "class { 'gnocchi': + database_connection => 'mysql://gnocchi:pass@10.0.0.1/gnocchi'}" + end + it 'configures gnocchi-api with RabbitMQ' do + should contain_gnocchi_config('database/sql_connection').with_value('mysql://gnocchi:pass@10.0.0.1/gnocchi') + should contain_gnocchi_config('database/sql_connection').with_value('mysql://gnocchi:pass@10.0.0.1/gnocchi').with_secret(true) + end + end + end + end + + context 'on Debian platforms' do + let :facts do + { + :osfamily => 'Debian' + } + end + + let :platform_params do + { :api_package_name => 'gnocchi-api', + :api_service_name => 'gnocchi-api' } + end + + it_configures 'gnocchi-api' + end + + context 'on RedHat platforms' do + let :facts do + { + :osfamily => 'RedHat' + } + end + + let :platform_params do + { :api_package_name => 'openstack-gnocchi-api', + :api_service_name => 'openstack-gnocchi-api' } + end + + it_configures 'gnocchi-api' + end + +end diff --git a/gnocchi/spec/classes/gnocchi_db_mysql_spec.rb b/gnocchi/spec/classes/gnocchi_db_mysql_spec.rb new file mode 100644 index 000000000..d4814ab60 --- /dev/null +++ b/gnocchi/spec/classes/gnocchi_db_mysql_spec.rb @@ -0,0 +1,95 @@ +# +# Unit tests for gnocchi::db::mysql +# +require 'spec_helper' + +describe 'gnocchi::db::mysql' do + + let :pre_condition do + [ + 'include mysql::server', + 'include gnocchi::db::sync' + ] + end + + let :params do + { :dbname => 'gnocchi', + :password => 's3cr3t', + :user => 'gnocchi', + :charset => 'utf8', + :collate => 'utf8_unicode_ci', + :host => '127.0.0.1', + } + end + + shared_examples_for 'gnocchi mysql database' do + + context 'when omiting the required parameter password' do + before { params.delete(:password) } + it { expect { should raise_error(Puppet::Error) } } + end + + it 'creates a mysql database' do + should contain_openstacklib__db__mysql('gnocchi').with( + :user => params[:user], + :dbname => params[:dbname], + :password_hash => '*58C036CDA51D8E8BBBBF2F9EA5ABF111ADA444F0', + :host => params[:host], + :charset => params[:charset] + ) + end + + context 'overriding allowed_hosts param to array' do + before :each do + params.merge!( + :allowed_hosts => ['127.0.0.1','%'] + ) + end + + it { + should contain_openstacklib__db__mysql('gnocchi').with( + :user => params[:user], + :dbname => params[:dbname], + :password_hash => '*58C036CDA51D8E8BBBBF2F9EA5ABF111ADA444F0', + :host => params[:host], + :charset => params[:charset], 
+ :allowed_hosts => ['127.0.0.1','%'] + )} + end + + context 'overriding allowed_hosts param to string' do + before :each do + params.merge!( + :allowed_hosts => '192.168.1.1' + ) + end + + it { + should contain_openstacklib__db__mysql('gnocchi').with( + :user => params[:user], + :dbname => params[:dbname], + :password_hash => '*58C036CDA51D8E8BBBBF2F9EA5ABF111ADA444F0', + :host => params[:host], + :charset => params[:charset], + :allowed_hosts => '192.168.1.1' + )} + end + + end + + context 'on Debian platforms' do + let :facts do + { :osfamily => 'Debian' } + end + + it_configures 'gnocchi mysql database' + end + + context 'on RedHat platforms' do + let :facts do + { :osfamily => 'RedHat' } + end + + it_configures 'gnocchi mysql database' + end +end diff --git a/gnocchi/spec/classes/gnocchi_db_postgresql_spec.rb b/gnocchi/spec/classes/gnocchi_db_postgresql_spec.rb new file mode 100644 index 000000000..c1f958ad5 --- /dev/null +++ b/gnocchi/spec/classes/gnocchi_db_postgresql_spec.rb @@ -0,0 +1,58 @@ +require 'spec_helper' + +describe 'gnocchi::db::postgresql' do + + let :req_params do + { :password => 'pw' } + end + + let :pre_condition do + 'include postgresql::server' + end + + context 'on a RedHat osfamily' do + let :facts do + { + :osfamily => 'RedHat', + :operatingsystemrelease => '7.0', + :concat_basedir => '/var/lib/puppet/concat' + } + end + + context 'with only required parameters' do + let :params do + req_params + end + + it { should contain_postgresql__server__db('gnocchi').with( + :user => 'gnocchi', + :password => 'md590440288cb225f56d585b88ad270cd37' + )} + end + + end + + context 'on a Debian osfamily' do + let :facts do + { + :operatingsystemrelease => '7.8', + :operatingsystem => 'Debian', + :osfamily => 'Debian', + :concat_basedir => '/var/lib/puppet/concat' + } + end + + context 'with only required parameters' do + let :params do + req_params + end + + it { should contain_postgresql__server__db('gnocchi').with( + :user => 'gnocchi', + :password => 'md590440288cb225f56d585b88ad270cd37' + )} + end + + end + +end diff --git a/gnocchi/spec/classes/gnocchi_init_spec.rb b/gnocchi/spec/classes/gnocchi_init_spec.rb new file mode 100644 index 000000000..7f82ae36f --- /dev/null +++ b/gnocchi/spec/classes/gnocchi_init_spec.rb @@ -0,0 +1,30 @@ +# +# Unit tests for gnocchi::init +# +require 'spec_helper' + +describe 'gnocchi' do + + shared_examples_for 'gnocchi' do + it { + should contain_class('gnocchi::params') + should contain_exec('post-gnocchi_config') + } + end + + context 'on Debian platforms' do + let :facts do + { :osfamily => 'Debian' } + end + + it_configures 'gnocchi' + end + + context 'on RedHat platforms' do + let :facts do + { :osfamily => 'RedHat' } + end + + it_configures 'gnocchi' + end +end diff --git a/gnocchi/spec/classes/gnocchi_keystone_auth_spec.rb b/gnocchi/spec/classes/gnocchi_keystone_auth_spec.rb new file mode 100644 index 000000000..1b5f1246b --- /dev/null +++ b/gnocchi/spec/classes/gnocchi_keystone_auth_spec.rb @@ -0,0 +1,74 @@ +# +# Unit tests for gnocchi::keystone::auth +# +require 'spec_helper' + +describe 'gnocchi::keystone::auth' do + + let :facts do + { :osfamily => 'Debian' } + end + + describe 'with default class parameters' do + let :params do + { :password => 'gnocchi_password', + :tenant => 'foobar' } + end + + it { should contain_keystone_user('gnocchi').with( + :ensure => 'present', + :password => 'gnocchi_password', + :tenant => 'foobar' + ) } + + it { should contain_keystone_user_role('gnocchi@foobar').with( + :ensure => 'present', + 
:roles => 'admin' + )} + + it { should contain_keystone_service('gnocchi').with( + :ensure => 'present', + :type => 'gnocchi', + :description => 'OpenStack Datapoint Service' + ) } + + it { should contain_keystone_endpoint('RegionOne/gnocchi').with( + :ensure => 'present', + :public_url => "http://127.0.0.1:8041", + :admin_url => "http://127.0.0.1:8041", + :internal_url => "http://127.0.0.1:8041" + ) } + end + + describe 'when overriding public_protocol, public_port and public_address' do + let :params do + { :password => 'gnocchi_password', + :public_protocol => 'https', + :public_port => '80', + :public_address => '10.10.10.10', + :admin_port => '81', + :internal_port => '82', + :internal_address => '10.10.10.11', + :admin_address => '10.10.10.12' } + end + + it { should contain_keystone_endpoint('RegionOne/gnocchi').with( + :ensure => 'present', + :public_url => "https://10.10.10.10:80", + :internal_url => "http://10.10.10.11:82", + :admin_url => "http://10.10.10.12:81" + ) } + end + + describe 'when overriding auth name' do + let :params do + { :password => 'foo', + :auth_name => 'gnocchy' } + end + + it { should contain_keystone_user('gnocchy') } + it { should contain_keystone_user_role('gnocchy@services') } + it { should contain_keystone_service('gnocchy') } + it { should contain_keystone_endpoint('RegionOne/gnocchy') } + end +end diff --git a/gnocchi/spec/classes/gnocchi_storage_file_spec.rb b/gnocchi/spec/classes/gnocchi_storage_file_spec.rb new file mode 100644 index 000000000..0fca0b744 --- /dev/null +++ b/gnocchi/spec/classes/gnocchi_storage_file_spec.rb @@ -0,0 +1,42 @@ +# +# Unit tests for gnocchi::storage::file +# +require 'spec_helper' + +describe 'gnocchi::storage::file' do + + let :params do + {} + end + + shared_examples 'gnocchi storage file' do + + context 'with default parameters' do + it 'configures gnocchi-api with default parameters' do + should contain_gnocchi_config('storage/driver').with_value('file') + should contain_gnocchi_config('storage/file_basepath').with_value('/var/lib/gnocchi') + end + end + end + + context 'on Debian platforms' do + let :facts do + { + :osfamily => 'Debian' + } + end + + it_configures 'gnocchi storage file' + end + + context 'on RedHat platforms' do + let :facts do + { + :osfamily => 'RedHat' + } + end + + it_configures 'gnocchi storage file' + end + +end diff --git a/gnocchi/spec/classes/gnocchi_storage_swift_spec.rb b/gnocchi/spec/classes/gnocchi_storage_swift_spec.rb new file mode 100644 index 000000000..ea8fe31ae --- /dev/null +++ b/gnocchi/spec/classes/gnocchi_storage_swift_spec.rb @@ -0,0 +1,45 @@ +# +# Unit tests for gnocchi::storage::swift +# +require 'spec_helper' + +describe 'gnocchi::storage::swift' do + + let :params do + {} + end + + shared_examples 'gnocchi storage swift' do + + context 'with default parameters' do + it 'configures gnocchi-api with default parameters' do + should contain_gnocchi_config('storage/driver').with_value('swift') + should contain_gnocchi_config('storage/swift_user').with_value('admin:admin') + should contain_gnocchi_config('storage/swift_key').with_value('admin') + should contain_gnocchi_config('storage/swift_authurl').with_value('http://localhost:8080/auth/v1.0') + should contain_gnocchi_config('storage/swift_auth_version').with_value('1') + end + end + end + + context 'on Debian platforms' do + let :facts do + { + :osfamily => 'Debian' + } + end + + it_configures 'gnocchi storage swift' + end + + context 'on RedHat platforms' do + let :facts do + { + :osfamily => 'RedHat' + } + end + + 
it_configures 'gnocchi storage swift' + end + +end diff --git a/gnocchi/spec/shared_examples.rb b/gnocchi/spec/shared_examples.rb new file mode 100644 index 000000000..f458f0fd2 --- /dev/null +++ b/gnocchi/spec/shared_examples.rb @@ -0,0 +1,56 @@ +shared_examples_for "a Puppet::Error" do |description| + it "with message matching #{description.inspect}" do + expect { should have_class_count(1) }.to raise_error(Puppet::Error, description) + end +end + +shared_examples 'generic Gnocchi service' do |service| + + context 'with default parameters' do + it 'installs package and service' do + should contain_package(service[:name]).with({ + :name => service[:package_name], + :ensure => 'present', + :notify => "Service[#{service[:name]}]" + }) + should contain_service(service[:name]).with({ + :name => service[:service_name], + :ensure => 'stopped', + :hasstatus => true, + :enable => false + }) + end + end + + context 'with overridden parameters' do + let :params do + { :enabled => true, + :ensure_package => '2014.1-1' } + end + + it 'installs package and service' do + should contain_package(service[:name]).with({ + :name => service[:package_name], + :ensure => '2014.1-1', + :notify => "Service[#{service[:name]}]" + }) + should contain_service(service[:name]).with({ + :name => service[:service_name], + :ensure => 'running', + :hasstatus => true, + :enable => true + }) + end + end + + context 'while not managing service state' do + let :params do + { :enabled => false, + :manage_service => false } + end + + it 'does not control service state' do + should contain_service(service[:name]).without_ensure + end + end +end diff --git a/gnocchi/spec/spec_helper.rb b/gnocchi/spec/spec_helper.rb new file mode 100644 index 000000000..53d4dd02d --- /dev/null +++ b/gnocchi/spec/spec_helper.rb @@ -0,0 +1,7 @@ +require 'puppetlabs_spec_helper/module_spec_helper' +require 'shared_examples' + +RSpec.configure do |c| + c.alias_it_should_behave_like_to :it_configures, 'configures' + c.alias_it_should_behave_like_to :it_raises, 'raises' +end diff --git a/gnocchi/spec/unit/provider/gnocchi_config/ini_setting_spec.rb b/gnocchi/spec/unit/provider/gnocchi_config/ini_setting_spec.rb new file mode 100644 index 000000000..fa617c6bc --- /dev/null +++ b/gnocchi/spec/unit/provider/gnocchi_config/ini_setting_spec.rb @@ -0,0 +1,37 @@ +# these tests are a little concerning b/c they are hacking around the +# modulepath, so these tests will not catch issues that may eventually arise +# related to loading these plugins. 
+# I could not, for the life of me, figure out how to programatcally set the modulepath +$LOAD_PATH.push( + File.join( + File.dirname(__FILE__), + '..', + '..', + '..', + 'fixtures', + 'modules', + 'inifile', + 'lib') +) +require 'spec_helper' +provider_class = Puppet::Type.type(:gnocchi_config).provider(:ini_setting) +describe provider_class do + + it 'should default to the default setting when no other one is specified' do + resource = Puppet::Type::Gnocchi_config.new( + {:name => 'DEFAULT/foo', :value => 'bar'} + ) + provider = provider_class.new(resource) + provider.section.should == 'DEFAULT' + provider.setting.should == 'foo' + end + + it 'should allow setting to be set explicitly' do + resource = Puppet::Type::Gnocchi_config.new( + {:name => 'dude/foo', :value => 'bar'} + ) + provider = provider_class.new(resource) + provider.section.should == 'dude' + provider.setting.should == 'foo' + end +end diff --git a/gnocchi/spec/unit/provider/gnocchi_spec.rb b/gnocchi/spec/unit/provider/gnocchi_spec.rb new file mode 100644 index 000000000..7c6e0d02e --- /dev/null +++ b/gnocchi/spec/unit/provider/gnocchi_spec.rb @@ -0,0 +1,14 @@ +require 'puppet' +require 'spec_helper' +require 'puppet/provider/gnocchi' + + +klass = Puppet::Provider::Gnocchi + +describe Puppet::Provider::Gnocchi do + + after :each do + klass.reset + end + +end diff --git a/gnocchi/spec/unit/type/gnocchi_config_spec.rb b/gnocchi/spec/unit/type/gnocchi_config_spec.rb new file mode 100644 index 000000000..24a0caf75 --- /dev/null +++ b/gnocchi/spec/unit/type/gnocchi_config_spec.rb @@ -0,0 +1,52 @@ +require 'puppet' +require 'puppet/type/gnocchi_config' +describe 'Puppet::Type.type(:gnocchi_config)' do + before :each do + @gnocchi_config = Puppet::Type.type(:gnocchi_config).new(:name => 'DEFAULT/foo', :value => 'bar') + end + + it 'should require a name' do + expect { + Puppet::Type.type(:gnocchi_config).new({}) + }.to raise_error(Puppet::Error, 'Title or name must be provided') + end + + it 'should not expect a name with whitespace' do + expect { + Puppet::Type.type(:gnocchi_config).new(:name => 'f oo') + }.to raise_error(Puppet::Error, /Parameter name failed/) + end + + it 'should fail when there is no section' do + expect { + Puppet::Type.type(:gnocchi_config).new(:name => 'foo') + }.to raise_error(Puppet::Error, /Parameter name failed/) + end + + it 'should not require a value when ensure is absent' do + Puppet::Type.type(:gnocchi_config).new(:name => 'DEFAULT/foo', :ensure => :absent) + end + + it 'should accept a valid value' do + @gnocchi_config[:value] = 'bar' + @gnocchi_config[:value].should == 'bar' + end + + it 'should not accept a value with whitespace' do + @gnocchi_config[:value] = 'b ar' + @gnocchi_config[:value].should == 'b ar' + end + + it 'should accept valid ensure values' do + @gnocchi_config[:ensure] = :present + @gnocchi_config[:ensure].should == :present + @gnocchi_config[:ensure] = :absent + @gnocchi_config[:ensure].should == :absent + end + + it 'should not accept invalid ensure values' do + expect { + @gnocchi_config[:ensure] = :latest + }.to raise_error(Puppet::Error, /Invalid value/) + end +end diff --git a/openstack_extras/.fixtures.yml b/openstack_extras/.fixtures.yml new file mode 100644 index 000000000..22f2f08fd --- /dev/null +++ b/openstack_extras/.fixtures.yml @@ -0,0 +1,7 @@ +fixtures: + repositories: + 'corosync': 'https://github.com/puppetlabs/puppetlabs-corosync' + 'apt' : 'git://github.com/puppetlabs/puppetlabs-apt' + 'stdlib' : 'git://github.com/puppetlabs/puppetlabs-stdlib' + symlinks: + 
'openstack_extras': "#{source_dir}" diff --git a/openstack_extras/.gitignore b/openstack_extras/.gitignore new file mode 100644 index 000000000..8dee88223 --- /dev/null +++ b/openstack_extras/.gitignore @@ -0,0 +1,5 @@ +spec/fixtures/ +pkg +Gemfile.lock +*.swp +.idea diff --git a/openstack_extras/.gitreview b/openstack_extras/.gitreview new file mode 100644 index 000000000..441ad434f --- /dev/null +++ b/openstack_extras/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.openstack.org +port=29418 +project=stackforge/puppet-openstack_extras.git diff --git a/openstack_extras/Gemfile b/openstack_extras/Gemfile new file mode 100644 index 000000000..d965fa900 --- /dev/null +++ b/openstack_extras/Gemfile @@ -0,0 +1,18 @@ +source 'https://rubygems.org' + +group :development, :test do + gem 'puppetlabs_spec_helper', :require => false + gem 'puppet-lint', '~> 0.3.2' + gem 'rake', '10.1.1' + gem 'rspec', '< 2.99' + gem 'json' + gem 'webmock' +end + +if puppetversion = ENV['PUPPET_GEM_VERSION'] + gem 'puppet', puppetversion, :require => false +else + gem 'puppet', :require => false +end + +# vim:ft=ruby diff --git a/openstack_extras/LICENSE b/openstack_extras/LICENSE new file mode 100644 index 000000000..96f12d349 --- /dev/null +++ b/openstack_extras/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 OpenStack Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/openstack_extras/README.md b/openstack_extras/README.md new file mode 100644 index 000000000..c18905067 --- /dev/null +++ b/openstack_extras/README.md @@ -0,0 +1,148 @@ +openstack_extras +============ + +5.0.0 - 2014.2.0 - Juno + +#### Table of Contents + +1. [Overview - What is the openstack_extras module?](#overview) +2. [Module Description - What does the module do?](#module-description) +3. [Setup - The basics of getting started with openstack_extras](#setup) +4. [Implementation - An under-the-hood peek at what the module is doing](#implementation) +5. [Limitations - OS compatibility, etc.](#limitations) +6. [Development - Guide for contributing to the module](#development) +7. [Contributors - Those with commits](#contributors) +8. 
[Release Notes - Notes on the most recent updates to the module](#release-notes) + +Overview +-------- + +The openstack_extras module is a part of [Stackforge](https://github.com/stackforge), +an effort by the OpenStack infrastructure team to provide continuous integration +testing and code review for OpenStack and OpenStack community projects that are not part +of the core software. The module itself adds useful utilities for +composing and deploying OpenStack with the Stackforge OpenStack modules. + +Module Description +------------------ + +The openstack_extras module is intended to provide useful utilities to help +with OpenStack deployments, including composition classes, HA utilities, +monitoring functionality, and so on. + +This module combines other modules in order to build and leverage an entire +OpenStack software stack. This module replaces functionality from the +deprecated [stackforge/puppet-openstack module](https://github.com/stackforge/puppet-openstack). + +Setup +----- + +### Installing openstack_extras + + example% puppet module install puppetlabs/openstack_extras + +### Beginning with openstack_extras + +Instructions for beginning with openstack_extras will be added later. + +Implementation +-------------- + +### openstack_extras + +openstack_extras is a combination of Puppet manifests and Ruby code that delivers +configuration and extra functionality through types and providers. + +**HA configuration for OpenStack services** + +This module allows you to configure OpenStack services in HA. Please refer to the [ha-guide](http://docs.openstack.org/high-availability-guide/content/index.html) for details. +If you have a Corosync/Pacemaker cluster with several nodes joined, you may want to use the HA service provider, which allows you to create Pacemaker resources for OpenStack services and run them in HA mode. +An example HA service configuration for the Keystone service: + +```puppet +openstack_extras::pacemaker::service { 'openstack-keystone' : + ensure => present, + metadata => {}, + ms_metadata => {}, + operations => {}, + parameters => {}, + primitive_class => 'systemd', + primitive_provider => false, + primitive_type => 'openstack-keystone', + use_handler => false, + clone => true, + require => Package['openstack-keystone'] +} +``` +This example will create a Pacemaker clone resource named `p_openstack-keystone-clone` and will start it with the help of systemd. + +And this example will create a resource `p_cinder-api-clone` for the Cinder API service with the given OCF script template from a `cluster` module: + +```puppet + $metadata = { + 'resource-stickiness' => '1' + } + $operations = { + 'monitor' => { + 'interval' => '20', + 'timeout' => '30', + }, + 'start' => { + 'timeout' => '60', + }, + 'stop' => { + 'timeout' => '60', + }, + } + $ms_metadata = { + 'interleave' => true, + } + + openstack_extras::pacemaker::service { 'cinder-api' : + primitive_type => 'cinder-api', + metadata => $metadata, + ms_metadata => $ms_metadata, + operations => $operations, + clone => true, + ocf_script_template => 'cluster/cinder_api.ocf.erb', + } + +``` + +Limitations +----------- + +* Limitations will be added as they are discovered. + +Development +----------- + +Developer documentation for the entire puppet-openstack project: 
+ +* https://wiki.openstack.org/wiki/Puppet-openstack#Developer_documentation + +Contributors +------------ + +* https://github.com/stackforge/puppet-openstack_extras/graphs/contributors + +Versioning +---------- + +This module has been given version 5 to track the puppet-openstack modules. The +versioning for the puppet-openstack modules are as follows: + +``` +Puppet Module :: OpenStack Version :: OpenStack Codename +2.0.0 -> 2013.1.0 -> Grizzly +3.0.0 -> 2013.2.0 -> Havana +4.0.0 -> 2014.1.0 -> Icehouse +5.0.0 -> 2014.2.0 -> Juno +``` + +Release Notes +------------- + +**5.0.0** + +* This is the initial release of this module. diff --git a/openstack_extras/Rakefile b/openstack_extras/Rakefile new file mode 100644 index 000000000..0097908ec --- /dev/null +++ b/openstack_extras/Rakefile @@ -0,0 +1,9 @@ +require 'rubygems' +require 'puppetlabs_spec_helper/rake_tasks' +require 'puppet-lint/tasks/puppet-lint' + +PuppetLint.configuration.fail_on_warnings = true +PuppetLint.configuration.send('disable_80chars') +PuppetLint.configuration.send('disable_double_quoted_strings') +PuppetLint.configuration.send('disable_class_inherits_from_params_class') +PuppetLint.configuration.ignore_paths = ["spec/**/*.pp", "pkg/**/*.pp", "examples/**/*.pp", "vendor/**/*.pp"] diff --git a/openstack_extras/examples/redhat_repo.yaml b/openstack_extras/examples/redhat_repo.yaml new file mode 100644 index 000000000..d542b99dd --- /dev/null +++ b/openstack_extras/examples/redhat_repo.yaml @@ -0,0 +1,33 @@ +# This is an example of how to define your own repos in hiera +# in addition to RDO when using the redhat repo class +# assuming you have included the class somewhere +# +# This is taken from the aptira hiera data files in +# puppet_openstack_builder and may go out of date. +# +# Set up repositories using openstack_extras +openstack_extras::repo::redhat::redhat::repo_hash: + 'CentOS-Base': + 'descr': 'CentOS-$releasever - Base' + 'baseurl': "%{hiera('yum_base_mirror')}/$releasever/os/$basearch/" + 'CentOS-Updates': + 'descr': 'CentOS-$releasever - Updates' + 'baseurl': "%{hiera('yum_base_mirror')}/$releasever/updates/$basearch/" + 'CentOS-Extras': + 'descr': 'CentOS-$releasever - Extras' + 'baseurl': "%{hiera('yum_base_mirror')}/$releasever/extras/$basearch/" + 'epel': + 'descr': 'Extra Packages for Enterprise Linux 6 - $basearch' + 'baseurl': "%{hiera('yum_epel_mirror')}/$releasever/$basearch/" + 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6' + 'failovermethod': 'priority' + +openstack_extras::repo::redhat::redhat::repo_defaults: + 'proxy': "http://%{hiera('proxy_host')}:%{hiera('proxy_port')}" + +openstack_extras::repo::redhat::redhat::gpgkey_hash: + '/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6': + 'source': 'puppet:///modules/openstack_extras/RPM-GPG-KEY-EPEL-6' + +openstack_extras::repo::redhat::redhat::purge_unmanaged: true +openstack_extras::repo::redhat::redhat::package_require: true diff --git a/openstack_extras/files/RPM-GPG-KEY-EPEL-6 b/openstack_extras/files/RPM-GPG-KEY-EPEL-6 new file mode 100644 index 000000000..7a2030489 --- /dev/null +++ b/openstack_extras/files/RPM-GPG-KEY-EPEL-6 @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.4.5 (GNU/Linux) + +mQINBEvSKUIBEADLGnUj24ZVKW7liFN/JA5CgtzlNnKs7sBg7fVbNWryiE3URbn1 +JXvrdwHtkKyY96/ifZ1Ld3lE2gOF61bGZ2CWwJNee76Sp9Z+isP8RQXbG5jwj/4B +M9HK7phktqFVJ8VbY2jfTjcfxRvGM8YBwXF8hx0CDZURAjvf1xRSQJ7iAo58qcHn +XtxOAvQmAbR9z6Q/h/D+Y/PhoIJp1OV4VNHCbCs9M7HUVBpgC53PDcTUQuwcgeY6 +pQgo9eT1eLNSZVrJ5Bctivl1UcD6P6CIGkkeT2gNhqindRPngUXGXW7Qzoefe+fV 
+QqJSm7Tq2q9oqVZ46J964waCRItRySpuW5dxZO34WM6wsw2BP2MlACbH4l3luqtp +Xo3Bvfnk+HAFH3HcMuwdaulxv7zYKXCfNoSfgrpEfo2Ex4Im/I3WdtwME/Gbnwdq +3VJzgAxLVFhczDHwNkjmIdPAlNJ9/ixRjip4dgZtW8VcBCrNoL+LhDrIfjvnLdRu +vBHy9P3sCF7FZycaHlMWP6RiLtHnEMGcbZ8QpQHi2dReU1wyr9QgguGU+jqSXYar +1yEcsdRGasppNIZ8+Qawbm/a4doT10TEtPArhSoHlwbvqTDYjtfV92lC/2iwgO6g +YgG9XrO4V8dV39Ffm7oLFfvTbg5mv4Q/E6AWo/gkjmtxkculbyAvjFtYAQARAQAB +tCFFUEVMICg2KSA8ZXBlbEBmZWRvcmFwcm9qZWN0Lm9yZz6JAjYEEwECACAFAkvS +KUICGw8GCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRA7Sd8qBgi4lR/GD/wLGPv9 +qO39eyb9NlrwfKdUEo1tHxKdrhNz+XYrO4yVDTBZRPSuvL2yaoeSIhQOKhNPfEgT +9mdsbsgcfmoHxmGVcn+lbheWsSvcgrXuz0gLt8TGGKGGROAoLXpuUsb1HNtKEOwP +Q4z1uQ2nOz5hLRyDOV0I2LwYV8BjGIjBKUMFEUxFTsL7XOZkrAg/WbTH2PW3hrfS +WtcRA7EYonI3B80d39ffws7SmyKbS5PmZjqOPuTvV2F0tMhKIhncBwoojWZPExft +HpKhzKVh8fdDO/3P1y1Fk3Cin8UbCO9MWMFNR27fVzCANlEPljsHA+3Ez4F7uboF +p0OOEov4Yyi4BEbgqZnthTG4ub9nyiupIZ3ckPHr3nVcDUGcL6lQD/nkmNVIeLYP +x1uHPOSlWfuojAYgzRH6LL7Idg4FHHBA0to7FW8dQXFIOyNiJFAOT2j8P5+tVdq8 +wB0PDSH8yRpn4HdJ9RYquau4OkjluxOWf0uRaS//SUcCZh+1/KBEOmcvBHYRZA5J +l/nakCgxGb2paQOzqqpOcHKvlyLuzO5uybMXaipLExTGJXBlXrbbASfXa/yGYSAG +iVrGz9CE6676dMlm8F+s3XXE13QZrXmjloc6jwOljnfAkjTGXjiB7OULESed96MR +XtfLk0W5Ab9pd7tKDR6QHI7rgHXfCopRnZ2VVQ== +=V/6I +-----END PGP PUBLIC KEY BLOCK----- diff --git a/openstack_extras/files/RPM-GPG-KEY-EPEL-7 b/openstack_extras/files/RPM-GPG-KEY-EPEL-7 new file mode 100644 index 000000000..f205ede46 --- /dev/null +++ b/openstack_extras/files/RPM-GPG-KEY-EPEL-7 @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.4.11 (GNU/Linux) + +mQINBFKuaIQBEAC1UphXwMqCAarPUH/ZsOFslabeTVO2pDk5YnO96f+rgZB7xArB +OSeQk7B90iqSJ85/c72OAn4OXYvT63gfCeXpJs5M7emXkPsNQWWSju99lW+AqSNm +jYWhmRlLRGl0OO7gIwj776dIXvcMNFlzSPj00N2xAqjMbjlnV2n2abAE5gq6VpqP +vFXVyfrVa/ualogDVmf6h2t4Rdpifq8qTHsHFU3xpCz+T6/dGWKGQ42ZQfTaLnDM +jToAsmY0AyevkIbX6iZVtzGvanYpPcWW4X0RDPcpqfFNZk643xI4lsZ+Y2Er9Yu5 +S/8x0ly+tmmIokaE0wwbdUu740YTZjCesroYWiRg5zuQ2xfKxJoV5E+Eh+tYwGDJ +n6HfWhRgnudRRwvuJ45ztYVtKulKw8QQpd2STWrcQQDJaRWmnMooX/PATTjCBExB +9dkz38Druvk7IkHMtsIqlkAOQMdsX1d3Tov6BE2XDjIG0zFxLduJGbVwc/6rIc95 +T055j36Ez0HrjxdpTGOOHxRqMK5m9flFbaxxtDnS7w77WqzW7HjFrD0VeTx2vnjj +GqchHEQpfDpFOzb8LTFhgYidyRNUflQY35WLOzLNV+pV3eQ3Jg11UFwelSNLqfQf +uFRGc+zcwkNjHh5yPvm9odR1BIfqJ6sKGPGbtPNXo7ERMRypWyRz0zi0twARAQAB +tChGZWRvcmEgRVBFTCAoNykgPGVwZWxAZmVkb3JhcHJvamVjdC5vcmc+iQI4BBMB +AgAiBQJSrmiEAhsPBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRBqL66iNSxk +5cfGD/4spqpsTjtDM7qpytKLHKruZtvuWiqt5RfvT9ww9GUUFMZ4ZZGX4nUXg49q +ixDLayWR8ddG/s5kyOi3C0uX/6inzaYyRg+Bh70brqKUK14F1BrrPi29eaKfG+Gu +MFtXdBG2a7OtPmw3yuKmq9Epv6B0mP6E5KSdvSRSqJWtGcA6wRS/wDzXJENHp5re +9Ism3CYydpy0GLRA5wo4fPB5uLdUhLEUDvh2KK//fMjja3o0L+SNz8N0aDZyn5Ax +CU9RB3EHcTecFgoy5umRj99BZrebR1NO+4gBrivIfdvD4fJNfNBHXwhSH9ACGCNv +HnXVjHQF9iHWApKkRIeh8Fr2n5dtfJEF7SEX8GbX7FbsWo29kXMrVgNqHNyDnfAB +VoPubgQdtJZJkVZAkaHrMu8AytwT62Q4eNqmJI1aWbZQNI5jWYqc6RKuCK6/F99q +thFT9gJO17+yRuL6Uv2/vgzVR1RGdwVLKwlUjGPAjYflpCQwWMAASxiv9uPyYPHc +ErSrbRG0wjIfAR3vus1OSOx3xZHZpXFfmQTsDP7zVROLzV98R3JwFAxJ4/xqeON4 +vCPFU6OsT3lWQ8w7il5ohY95wmujfr6lk89kEzJdOTzcn7DBbUru33CQMGKZ3Evt +RjsC7FDbL017qxS+ZVA/HGkyfiu4cpgV8VUnbql5eAZ+1Ll6Dw== +=hdPa +-----END PGP PUBLIC KEY BLOCK----- diff --git a/openstack_extras/files/RPM-GPG-KEY-RDO-Havana b/openstack_extras/files/RPM-GPG-KEY-RDO-Havana new file mode 100644 index 000000000..59a2a5af6 --- /dev/null +++ b/openstack_extras/files/RPM-GPG-KEY-RDO-Havana @@ -0,0 +1,52 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.4.11 (GNU/Linux) + 
+mQINBFHvxL0BEADUX2nizL2nXQDR+c4msIcBdvXx67Q9MUXgPzaTWIB9EPxrnJVb +hrZva6JKKrt9djG3k7qeUdy7qwMT6OwZ5LswmcVKEQ91+sgO9GazUSmdZIb+e9ag +vmEnkPgeUCI7UlmNqpoPjfvn5msgcJGFGyLHoNGONs88Jo8TWkc145d+P2UJC6Kx +hNAHNIntE40eebA/mHW8NWySMQy9UPLYqw1TEawv5PTDGViaM08gEhvH1lEMOpD9 +nIYTeYw9JCXSPqG7NcpvF3q2gzew3sw1dYuXkowOybSSTJCAPGhuaRMcBzTOFhLD +1NRzeBXOHYKg7lxVSDtdH0wljNleR4IzdH6R+vR5XEddmqqIAZJ/8I8T9fxq18De +hVLvSuRh+UcVehjjHucmLNskTzDE+8oC7WdI2SoTQaPfa0xVcYvM+zWZ4OSVqoiW +i4/fIwIArFiuUqu7E9trackxdtzEIjdJnWzjdlNSZ9S8wqyt0ncjgyTnCzcoFPvq +HTnRmIR3ldxxlTKEXdTw/v3TobN6Giu+Iqu4vFpyP8j/z/YJQJoDCnLX+6Dsj4Ko +JDHBfaCLVnHRUVdnowtd37qN13x+w5Bj/u7td9SRvsHyVYmM7WxlDFjTYvo685IZ +hhu5qyrvMReXciQfGNkwEGpRzniY3PKJFJy0jCjiAiT3pjhUC2XWkjHGaQARAQAB +tCVyZG8taGF2YW5hLXNpZ24gPHJkby1pbmZvQHJlZGhhdC5jb20+iQI4BBMBAgAi +BQJR78S9AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRD6Fnt8K8fIAV5L +D/9T1ZWgP38an1pF2rzklTu09ET5e5B55/Jm2bBt1jLp55iF2R/N/G3EEZQM2UzQ +9u3NqY9JdEtdcGUuBP46+DhO6y5HpwvFug/s+ZL4QnlU//tLa8aJXCycyct3tE82 +3tGv37ToAQYucrJkWKNyxOK6SPj/wCKv0sySJsCstRB4ygQWEB8Y7U48kfybAriS +4lwfAceoDLTui2JNfolKeiYfY1iZn4m7q5a2CC8ZhpuWW9W4myUEA7pwKnOJRNr1 +JxF4eDo4SBlwG1eHH/Eg99QodzG49OerOZ4cLozTp9gz9kD70Ki7OXDS4vCsk4Xm +O0z243PK6WKVgmuWs9BYjyvojlvdHrr9UB4xBu9tVSdwJdnYKHunyN4F54IIby6b ++x2J5yTb36gSuQXywkLPuzQJ3qVtlyk1BMl9y8ZSAmiNtfUx2LzIPv558yogsky3 +0onTSFYBlvieo51qbDNmyCsqoClZ9EmGVDjzZpRyJvG/kzISGHStlwrC5ZlDzt/9 +dGfGh4AEOP0ISoCJHcUBfcoPQ0lZmIHUg50ZID1dcrttFdvKWP3mQ5PhjbJ2jm7u +THUSAXYNZRAS4p5NL55+7nVtT0Yu7+rWkgkIOEih5O4VCH56QOV4dfgJndXMzHRt +VTioriA/wrMLiyCw6RLP3iXfzyXch5mjAIeG+7YD8WnFvrkCDQRR78S9ARAAotGX +fwwB+o8bkPK2T2QfIsCg81oyfn5ka1VCeRX05ggQ+KscDWbYDSe/CndRSGoYnSAQ +1GvfDNRcoAB7ZLFU/plQxQEj2dmOAJ6fVvebi8ZP7wU4zOBEm5ijEGTsUGqqdye1 +F0hn/aFomvbMVCrnpThSTTY31c1BRlP7vJFn+21MuohI+/WaDmXq+eM55UlgqksM +3iJrexQRyzGj7cwt3kpFITYOsog6r5AgURsKfyRUM0mk3I2bjvpEu94HBk35RJoS +SacesgmWIscFF52I4PyaXZIo0tz3M8O1lk8y1J7Nl16LfvjEk9nxf9isxc8XgJ+f +C7O1zTxV4nmZqbxphbOfNACfmdlcp9BXg0znvVTgarU5QEdIPb+yhF4ilZItqRIo +feXeb4JuAfZhZNSVeAmqMMydDGkJ2IGL3ahtP4baBTEgdB5xPhm9HQnqLEdoXZZi +1HvRpT8eBsbR6EfhITNosVHy6zqbe3BcGLaDMu3PJAhziIGwtFAXuFhCGnXTtJHm +4A/2VMhgM4Zpf6kVijQ6APJn7X4iq6qeVWTOh/h2HsXanvbv0b0zfKBRCV4tgTnw +S0CO1tO3LVWJSbE+qYc1ZDEqN+0pPc0dIeBAhoBP54KPyc3s/BJrs4YBKkR/cROn +y8S+utUnW4h8cZbWBpfCTDjVBZTQYFyAoHeFJkcAEQEAAYkCHwQYAQIACQUCUe/E +vQIbDAAKCRD6Fnt8K8fIAVlRD/9lgKWuoU1iUdKBg25fM7HTGUhiUzddT/0rFnjp +jOIjeCguc8yX3tekgO+hY6+xM/OOc2BfGSmVXg88u9+aG97KInP2nAPCnxYSWMaQ +Wo1I9066K7nRfZ7PNYB3/lhDkPy0E2ha79SUnWUjlGnswzsNtSt8GxRETAEAv5jo +m9Jbep62jxl3M+f8Z817452dwaUoNNSrfTUKP5FMO95gkHS7sWG2t7X+K5c9/vX5 +GTv3SLQMbHivrRm0yCzFfQpQfAkYAZNahiLp/89RKwyySiQeDzeeqy581U7uLxwA +Uu/QXZH0k3RIZGI/JdOQ3Yk09wzh5SQeOcUs51Jk/O34wu+LrKwFvSgkP1Ld7hqB +j4A8LKn/tJDESOomPlljb8D/sfFb7K7g+sO8GY5Z8RiJKxQT8NXpw+st7QIa2XGV +5i2uhhbPVFaPly8bwtLstoaF84hokOSv4/cMfRbsUIQJMaxtcMwqf9H+eOas0uKh +D3gDZODEve5hYEabTFbVUrJ8N61qyVm3s1kbYBS4q1pM8pPzOnSqKsGnahHDpwhD +vXXbLsOsskZR629yTT/ZbPXMExPUnczhuGzEkCj2tDYF6n63nHSZmClPToXKxkPP +KIaHfraZoe+pO1XjRRSrT+Ax68FlnxoJqLBcuIWpzylnLpXldYqtVXFgXBy9bQ1A +WRhdyg== +=3A9V +-----END PGP PUBLIC KEY BLOCK----- diff --git a/openstack_extras/files/RPM-GPG-KEY-RDO-Juno b/openstack_extras/files/RPM-GPG-KEY-RDO-Juno new file mode 100644 index 000000000..17970c668 --- /dev/null +++ b/openstack_extras/files/RPM-GPG-KEY-RDO-Juno @@ -0,0 +1,52 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQINBFQaEA4BEADNjmFy/gNt+j/3Npji2Yae9RJedb1jvuulTC3udfyRsqiZP47c +jRFQ6GmqTGqRdn2tLAaWZiwS6hcCtRjqa1K275DnL1jWEd+YFNrggQsd5atbuikm 
++TYz/sMVYrSicawCzQnc2lYOJyF45tpfNy5u1Hwfne6Mt66BHUAFoCnSgzQ5b4Wo +mvr9znJSE+vySHVOQxPcaZIpN4jRYi4thUlAn09LM1i9i/1APD6wuOz9xSe9ZkTz +SZVPbPcztOc0JJ53CAoPjhjZJaWJ6Jvlte+blcvlZ4go2Pq1ws8ouBTo1hC3CeVP +SDK+2c234KOZcOL3eh5VAT0ztACI1kbhThZHY6jGKJJjRcUZO+/DGHPwhdO4nANc +nhihown7/r1yluMamf+Yk7IeVuJUNU8ObPXn9fYZ2jJeAFLHxwDt6gOItiwbED7r +JAX9gOE0eJsMsJfa1FE9/b08gvaIC20+b5jL2ElAmdp8Kxwg7iGGgn6XljxrAhH7 +3AsjmerQQbrcCCOFen9L0t9mfkuI4E55yx+2y1gN8Mis02+oNp2UmyZGZH+T/GTA +MGljXmE5BuBwbqgP6guIubMyUklDnoOZbTuYE2RK2YGPIPMmy+RL/rZvCJYFFh0w +SvQYpMqNmr7DdDppYhnxApWkFfid/93DrXqmeHQYMLP3PqN1mNptLL2acQARAQAB +tCNyZG8tanVuby1zaWduIDxyZG8taW5mb0ByZWRoYXQuY29tPokCOAQTAQIAIgUC +VBoQDgIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQ7srF1d9mdOO2chAA +zLahBIt+5Wdg8yBAUCXQOM+DDPu5LHsE7skWMxNexeyRfxv9LDaJ00F7jwt8qf3R +LqVEIwJ9eMieuugRtjTgHUsrRE7/UscnqYHTe34X/E5p+ONvtEN2CzlMqATlxf4h +2PR5pi3SJyMHAOY+MpGvtgnuYpUsvkuBOnpBNdYmLPhNqGK4kl3FVLubJCNFfjpf +ycwziUcMAkB788SUQMwIAuwGvGe0/zDIVYuQjnRcbaQA1PllHlBOJJPiIe39+mx/ +uORO3WNAvNLhOUGQ0uiLl8pHay5T8HpXDpRFcIb3QXkYrkw0W3EXx6MKoCSMzCq7 +U78prykxrYp+sTjFxBCgl7xvY6Z/wxITfnwNLyf+H/04jCkg1wLPbj1TOWX793d9 +fW3z/DWGxr3Xv1VHepJg+vTUP66mYThWOe0WhDH3ofkUswLEnqxQt/VFuVS+ycpg +xnS3/BplleZHFl0k1uGTk/sO7Amz2ZYv+/OdqBdkpaZCbGEeiV36R6M7sliZ/pVD +VN5Ng18MG1TSyvN0HTCLJpcBT8i3KU07zmAsLVjuN134ObA5Gc3H0Jg7P54gPsxd +QRb1h3j005JYFObEwNtlDlo1FqtKOsc+MsTMMww3f5y7CXLYQHsHjPJ0oXgmmdc2 +ann24uMxOkwg5KjOy/1tko+AxGcdNfmDz1ZFXlKkdBK5Ag0EVBoQDgEQAK9ne5tk +yb1LuOy4fmLp54vCWI8LdY7RyeCBn55JvyOL4XE7FMKS0ct1fVdEjl6d0PxU+GYD +iGP/nEPfCa7UFZ9Za5mrIR8ezQRV4rBsZGKy+mNS+xRI2marad7RCiYcx/XD4Drr +LvAAWk9rTC6ffj9SwvDMdoQf/E65k6AP4YQnCRs0yscsV3fSDVq2pz9gRZJkHz9e +1WvVWvnYZI+FYRw+aNZCJkD5I9pY4pYmXRn34XM72V7DJoLr8YZQ+Qhn+ToqqKcI +dCON1n0kl7qucrosv51LuNEz9lMBJTkZqJZRALsz8qhxZxBxGPbhB7foRnbRWNoC +udth7kVrvIHPABWz/r8oTD+YAQawvcE+Giy8q1k+MiaQzba8lbVSPY2Q8F/0Pjr3 +V9xz8n1FS8omPUUdlGb9+uF9TIvqo0UKTy6CbCikkFq6URR2GBvOFp2IPevBU/SV +qV2EvopNv6wov6DkDMvKD7BdrU/r/gYZ3r2hJDtkgRLL5agNrWZbc+MmNNlEUqNY +A30n49hOsez5QY5Il+hoeDMsmHF1XQI/SPACM51EsQAH5zApwL0FeTtoyqxMsWus +/oH+hhEBptViAy2ZXx42BR2f98vS2WuCRurTd1Ro/UOJ9TlZlxXzrjiIozvfogqr +PGFzwr36Ra+rlD+IJ/5AkY1G687tAgHWCaAPABEBAAGJAh8EGAECAAkFAlQaEA4C +GwwACgkQ7srF1d9mdOO0vA/8DJY8evIz3AHIeUQestoy4cxxoN8XgsnlNQmnNkNw +V0ow2YaQVjMpNaWfpx5Ij5IfmKKh1R7UkwMEyuzLPLV6vyqHGKmuQSuhniN6rr3Y +pZ4Aht5u+uMmAAFrQORx6vSmyHN2uLaAXMhLcQ+Ec4f1IJn0uZUPI2aS0isshqBG +INQJCeiTsrJtatlG5kM6tmpRZnhjN6ybaLjPOdfqZV9oInnSrw75JLQvGFkW8pGc +co/7njvZEoGTgYKh610EE1DO3Bv5XVUhnstfu+Zzn4WSH2KbbdonUPYcsdKNjmdo +oyhgVyzR4K2cwxp4r4gEe8O8YiNRJ3b9/41Dfn6pu/DVdjGolr9hokDKPYbrnXw0 +ZpcDlYT0INA/9r1V37nTXZ0CiOLm0ZHkreG5dn67bWYnFj7j3qUTsjpBFZ2Z5mmC +BDProaNYsTQS0Oyb26d4xaYDAXpga/LAsHs+qZBxVP1ExBEvMJ2Piu9Vr4Pn/euv +uw1aZtXZ1R7Sky6ksek4mgpmyIlEY3s8PLZuHX4LNlc8FKXmrXbAyIIlfUv0oeZV +m+0ImNUeTgmA/HPIHHhamaiT4emBLpdf+ddZMaQFITo8kaY1eCIH7xaBdIsDMQrS +1nkiCWreksxfyQS7mD3+sW8BFFFrlbFIgz/EXu+JfCP4jdbKaIsyzQuihWZVgDoK +tqA= +=vBYB +-----END PGP PUBLIC KEY BLOCK----- diff --git a/openstack_extras/lib/puppet/parser/functions/validate_yum_hash.rb b/openstack_extras/lib/puppet/parser/functions/validate_yum_hash.rb new file mode 100644 index 000000000..ea5cc775f --- /dev/null +++ b/openstack_extras/lib/puppet/parser/functions/validate_yum_hash.rb @@ -0,0 +1,59 @@ +module Puppet::Parser::Functions + + yumrepo_arguments = [ + 'name', + 'ensure', + 'baseurl', + 'cost', + 'descr', + 'enabled', + 'enablegroups', + 'exclude', + 'failovermethod', + 'gpgcheck', + 'gpgkey', + 'http_caching', + 'include', + 'includepkgs', + 'keepalive', + 'metadata_expire', + 'metalink', 
+ 'mirrorlist', + 'priority', + 'protect', + 'provider', + 'proxy', + 'proxy_password', + 'proxy_username', + 'repo_gpgcheck', + 's3_enabled', + 'skip_if_unavailable', + 'sslcacert', + 'sslclientcert', + 'sslclientkey', + 'sslverify', + 'target', + 'timeout' + ] + + newfunction(:validate_yum_hash) do |args| + if args.size > 1 + raise Puppet::Error, "validate_yum_hash takes only a single argument, #{args.size} provided" + end + arg = args[0] + + if not arg.kind_of?(Hash) + raise Puppet::Error, "non-hash argument provided to validate_yum_hash" + end + + if arg.size > 0 + arg.each do |title, params| + params.each do |param, value| + if ! yumrepo_arguments.include?(param) + raise Puppet::Error, "Parameter #{param} is not valid for the yumrepo type" + end + end + end + end + end +end diff --git a/openstack_extras/lib/puppet/provider/pacemaker_common.rb b/openstack_extras/lib/puppet/provider/pacemaker_common.rb new file mode 100644 index 000000000..e9f5d877e --- /dev/null +++ b/openstack_extras/lib/puppet/provider/pacemaker_common.rb @@ -0,0 +1,723 @@ +require 'rexml/document' + +class Puppet::Provider::Pacemaker_common < Puppet::Provider + + @raw_cib = nil + @cib = nil + @primitives = nil + @primitives_structure = nil + + RETRY_COUNT = 100 + RETRY_STEP = 6 + + # get a raw CIB from cibadmin + # or from a debug file if raw_cib_file is set + # @return [String] cib xml + def raw_cib + @raw_cib = cibadmin '-Q' + if @raw_cib == '' or not @raw_cib + fail 'Could not dump CIB XML using "cibadmin -Q" command!' + end + @raw_cib + end + + # create a new REXML CIB document + # @return [REXML::Document] at '/' + def cib + return @cib if @cib + @cib = REXML::Document.new(raw_cib) + end + + # reset all saved variables to obtain new data + def cib_reset + # Puppet.debug 'Reset CIB memoization' + @raw_cib = nil + @cib = nil + @primitives = nil + @primitives_structure = nil + @nodes_structure = nil + end + + # get status CIB section + # @return [REXML::Element] at /cib/status + def cib_section_status + REXML::XPath.match cib, '/cib/status' + end + + # get lrm_rsc_ops section from lrm_resource section CIB section + # @param lrm_resource [REXML::Element] + # at /cib/status/node_state/lrm[@id="node-name"]/lrm_resources/lrm_resource[@id="resource-name"]/lrm_rsc_op + # @return [REXML::Element] + def cib_section_lrm_rsc_ops(lrm_resource) + REXML::XPath.match lrm_resource, 'lrm_rsc_op' + end + + # get node_state CIB section + # @return [REXML::Element] at /cib/status/node_state + def cib_section_nodes_state + REXML::XPath.match cib_section_status, 'node_state' + end + + # get primitives CIB section + # @return [REXML::Element] at /cib/configuration/resources/primitive + def cib_section_primitives + REXML::XPath.match cib, '//primitive' + end + + # get lrm_rsc_ops section from lrm_resource section CIB section + # @param lrm [REXML::Element] + # at /cib/status/node_state/lrm[@id="node-name"]/lrm_resources/lrm_resource + # @return [REXML::Element] + def cib_section_lrm_resources(lrm) + REXML::XPath.match lrm, 'lrm_resources/lrm_resource' + end + + # determine the status of a single operation + # @param op [Hash String>] + # @return ['start','stop','master',nil] + def operation_status(op) + # skip incomplete ops + return unless op['op-status'] == '0' + + if op['operation'] == 'monitor' + # for monitor operation status is determined by its rc-code + # 0 - start, 8 - master, 7 - stop, else - error + case op['rc-code'] + when '0' + 'start' + when '7' + 'stop' + when '8' + 'master' + else + # not entirely correct but count 
failed monitor as 'stop' + 'stop' + end + elsif %w(start stop promote).include? op['operation'] + # for start/stop/promote status is set if op was successful + # use master instead of promote + return unless %w(0 7 8).include? op['rc-code'] + if op['operation'] == 'promote' + 'master' + else + op['operation'] + end + else + # other operations are irrelevant + nil + end + end + + # determine resource status by parsing last operations + # @param ops [Array] + # @return ['start','stop','master',nil] + # nil means that status is unknown + def determine_primitive_status(ops) + status = nil + ops.each do |op| + op_status = operation_status op + status = op_status if op_status + end + status + end + + # check if operations have same failed operations + # that should be cleaned up later + # @param ops [Array] + # @return [TrueClass,FalseClass] + def failed_operations_found?(ops) + ops.each do |op| + # skip incompleate ops + next unless op['op-status'] == '0' + # skip useless ops + next unless %w(start stop monitor promote).include? op['operation'] + + # are there failed start, stop + if %w(start stop promote).include? op['operation'] + return true if op['rc-code'] != '0' + end + + # are there failed monitors + if op['operation'] == 'monitor' + return true unless %w(0 7 8).include? op['rc-code'] + end + end + false + end + + # convert elements's attributes to hash + # @param element [REXML::Element] + # @return [Hash String>] + def attributes_to_hash(element) + hash = {} + element.attributes.each do |a, v| + hash.store a.to_s, v.to_s + end + hash + end + + # convert element's children to hash + # of their attributes using key and hash key + # @param element [REXML::Element] + # @param key + # @return [Hash String>] + def elements_to_hash(element, key, tag = nil) + elements = {} + children = element.get_elements tag + return elements unless children + children.each do |child| + child_structure = attributes_to_hash child + name = child_structure[key] + next unless name + elements.store name, child_structure + end + elements + end + + # decode lrm_resources section of CIB + # @param lrm_resources [REXML::Element] + # @return [Hash Hash>] + def decode_lrm_resources(lrm_resources) + resources = {} + lrm_resources.each do |lrm_resource| + resource = attributes_to_hash lrm_resource + id = resource['id'] + next unless id + lrm_rsc_ops = cib_section_lrm_rsc_ops lrm_resource + ops = decode_lrm_rsc_ops lrm_rsc_ops + resource.store 'ops', ops + resource.store 'status', determine_primitive_status(ops) + resource.store 'failed', failed_operations_found?(ops) + resources.store id, resource + end + resources + end + + # decode lrm_rsc_ops section of the resource's CIB + # @param lrm_rsc_ops [REXML::Element] + # @return [Array] + def decode_lrm_rsc_ops(lrm_rsc_ops) + ops = [] + lrm_rsc_ops.each do |lrm_rsc_op| + op = attributes_to_hash lrm_rsc_op + next unless op['call-id'] + ops << op + end + ops.sort { |a,b| a['call-id'].to_i <=> b['call-id'].to_i } + end + + # get nodes structure with resources and their statuses + # @return [Hash Hash>] + def nodes + return @nodes_structure if @nodes_structure + @nodes_structure = {} + cib_section_nodes_state.each do |node_state| + node = attributes_to_hash node_state + id = node['id'] + next unless id + lrm = node_state.elements['lrm'] + lrm_resources = cib_section_lrm_resources lrm + resources = decode_lrm_resources lrm_resources + node.store 'primitives', resources + @nodes_structure.store id, node + end + @nodes_structure + end + + # get primitives configuration structure 
with primitives and their attributes + # @return [Hash Hash>] + def primitives + return @primitives_structure if @primitives_structure + @primitives_structure = {} + cib_section_primitives.each do |primitive| + primitive_structure = {} + id = primitive.attributes['id'] + next unless id + primitive_structure.store 'name', id + primitive.attributes.each do |k, v| + primitive_structure.store k.to_s, v + end + + if primitive.parent.name and primitive.parent.attributes['id'] + parent_structure = { + 'id' => primitive.parent.attributes['id'], + 'type' => primitive.parent.name + } + primitive_structure.store 'name', parent_structure['id'] + primitive_structure.store 'parent', parent_structure + end + + instance_attributes = primitive.elements['instance_attributes'] + if instance_attributes + instance_attributes_structure = elements_to_hash instance_attributes, 'name', 'nvpair' + primitive_structure.store 'instance_attributes', instance_attributes_structure + end + + meta_attributes = primitive.elements['meta_attributes'] + if meta_attributes + meta_attributes_structure = elements_to_hash meta_attributes, 'name', 'nvpair' + primitive_structure.store 'meta_attributes', meta_attributes_structure + end + + operations = primitive.elements['operations'] + if operations + operations_structure = elements_to_hash operations, 'id', 'op' + primitive_structure.store 'operations', operations_structure + end + + @primitives_structure.store id, primitive_structure + end + @primitives_structure + end + + # check if primitive is clone or multistate + # @param primitive [String] primitive id + # @return [TrueClass,FalseClass] + def primitive_is_complex?(primitive) + return unless primitive_exists? primitive + primitives[primitive].key? 'parent' + end + + # check if primitive is clone + # @param primitive [String] primitive id + # @return [TrueClass,FalseClass] + def primitive_is_clone?(primitive) + is_complex = primitive_is_complex? primitive + return is_complex unless is_complex + primitives[primitive]['parent']['type'] == 'clone' + end + + # check if primitive is multistate + # @param primitive [String] primitive id + # @return [TrueClass,FalseClass] + def primitive_is_multistate?(primitive) + is_complex = primitive_is_complex? primitive + return is_complex unless is_complex + primitives[primitive]['parent']['type'] == 'master' + end + + # return primitive class + # @param primitive [String] primitive id + # @return [String] + def primitive_class(primitive) + return unless primitive_exists? 
primitive + primitives[primitive]['class'] + end + + # disable this primitive + # @param primitive [String] + def disable_primitive(primitive) + retry_command { + pcs 'resource', 'disable', primitive + } + end + alias :stop_primitive :disable_primitive + + # enable this primitive + # @param primitive [String] + def enable_primitive(primitive) + retry_command { + pcs 'resource', 'enable', primitive + } + end + alias :start_primitive :enable_primitive + + # ban this primitive + # @param primitive [String] + def ban_primitive(primitive, node = '') + retry_command { + pcs 'resource', 'ban', primitive, node + } + end + + # move this primitive + # @param primitive [String] + def move_primitive(primitive, node = '') + retry_command { + pcs 'resource', 'move', primitive, node + } + end + + # unban/unmove this primitive + # @param primitive [String] + def unban_primitive(primitive, node = '') + retry_command { + pcs 'resource', 'clear', primitive, node + } + end + alias :clear_primitive :unban_primitive + alias :unmove_primitive :unban_primitive + + # cleanup this primitive + # @param primitive [String] + def cleanup_primitive(primitive, node = '') + opts = ['--cleanup', "--resource=#{primitive}"] + opts << "--node=#{node}" if ! node.empty? + retry_command { + crm_resource opts + } + end + + # manage this primitive + # @param primitive [String] + def manage_primitive(primitive) + retry_command { + pcs 'resource', 'manage', primitive + } + end + + # unamanage this primitive + # @param primitive [String] + def unmanage_primitive(primitive) + retry_command { + pcs 'resource', 'unmanage', primitive + } + end + + # set quorum_policy of the cluster + # @param primitive [String] + def no_quorum_policy(primitive) + retry_command { + pcs 'property', 'set', "no-quorum-policy=#{primitive}" + } + end + + # set maintenance_mode of the cluster + # @param primitive [TrueClass,FalseClass] + def maintenance_mode(primitive) + retry_command { + pcs 'property', 'set', "maintenance-mode=#{primitive}" + } + end + + # add a location constraint + # @param primitive [String] the primitive's name + # @param node [String] the node's name + # @param score [Numeric,String] score value + def constraint_location_add(primitive, node, score = 100) + id = "#{primitive}_on_#{node}" + retry_command { + pcs 'constraint', 'location', 'add', id, primitive, node, score + } + end + + # remove a location constraint + # @param primitive [String] the primitive's name + # @param node [String] the node's name + def constraint_location_remove(primitive, node) + id = "#{primitive}_on_#{node}" + retry_command { + pcs 'constraint', 'location', 'remove', id + } + end + + # get a status of a primitive on the entire cluster + # of on a node if node name param given + # @param primitive [String] + # @param node [String] + # @return [String] + def primitive_status(primitive, node = nil) + if node + found_node = nil + nodes.each do |k, v| + if v.fetch("uname", {}).eql? node + found_node = v + end + end + return unless found_node + found_node. + fetch('primitives',{}). + fetch(primitive, {}). + fetch('status', nil) + else + statuses = [] + nodes.each do |k,v| + status = v.fetch('primitives',{}). + fetch(primitive, {}). 
+ fetch('status', nil) + statuses << status + end + status_values = { + 'stop' => 0, + 'start' => 1, + 'master' => 2, + } + statuses.max_by do |status| + return unless status + status_values[status] + end + end + end + + # generate report of primitive statuses by node + # mostly for debugging + # @return [Hash] + def primitives_status_by_node + report = {} + return unless nodes.is_a? Hash + nodes.each do |node_name, node_data| + primitives_of_node = node_data['primitives'] + next unless primitives_of_node.is_a? Hash + primitives_of_node.each do |primitive, primitive_data| + primitive_status = primitive_data['status'] + report[primitive] = {} unless report[primitive].is_a? Hash + report[primitive][node_name] = primitive_status + end + end + report + end + + # form a cluster status report for debugging + # @return [String] + def get_cluster_debug_report + report = "\n" + primitives_status_by_node.each do |primitive, data| + primitive_name = primitive + primitive_name = primitives[primitive]['name'] if primitives[primitive]['name'] + primitive_type = 'Simple' + primitive_type = 'Cloned' if primitive_is_clone? primitive + primitive_type = 'Multistate' if primitive_is_multistate? primitive + primitive_status = primitive_status primitive + + report += "-> #{primitive_type} primitive '#{primitive_name}' global status: #{primitive_status}" + report += ' (UNMANAGE)' unless primitive_is_managed? primitive + report += "\n" + report += ' ' if data.any? + nodes = [] + data.keys.sort.each do |node_name| + node_status = data.fetch node_name + node_block = "#{node_name}: #{node_status}" + node_block += ' (FAIL)' if primitive_has_failures? primitive, node_name + nodes << node_block + end + report += nodes.join ' | ' + report += "\n" + end + report + end + + # does this primitive have failed operations? + # @param primitive [String] primitive name + # @param node [String] on this node if given + # @return [TrueClass,FalseClass] + def primitive_has_failures?(primitive, node = nil) + return unless primitive_exists? primitive + if node + nodes. + fetch(node, {}). + fetch('primitives',{}). + fetch(primitive, {}). + fetch('failed', nil) + else + nodes.each do |k,v| + failed = v.fetch('primitives',{}). + fetch(primitive, {}). + fetch('failed', nil) + return true if failed + end + false + end + end + + # determine if a primitive is running on the entire cluster + # of on a node if node name param given + # @param primitive [String] primitive id + # @param node [String] on this node if given + # @return [TrueClass,FalseClass] + def primitive_is_running?(primitive, node = nil) + return unless primitive_exists? primitive + status = primitive_status primitive, node + return status unless status + %w(start master).include? status + end + + # check if primitive is running as a master + # either anywhere or on the give node + # @param primitive [String] primitive id + # @param node [String] on this node if given + # @return [TrueClass,FalseClass] + def primitive_has_master_running?(primitive, node = nil) + is_multistate = primitive_is_multistate? primitive + return is_multistate unless is_multistate + status = primitive_status primitive, node + return status unless status + status == 'master' + end + + # return service status value expected by Puppet + # puppet wants :running or :stopped symbol + # @param primitive [String] primitive id + # @param node [String] on this node if given + # @return [:running,:stopped] + def get_primitive_puppet_status(primitive, node = nil) + if primitive_is_running? 
primitive, node + :running + else + :stopped + end + end + + # return service enabled status value expected by Puppet + # puppet wants :true or :false symbols + # @param primitive [String] + # @return [:true,:false] + def get_primitive_puppet_enable(primitive) + if primitive_is_managed? primitive + :true + else + :false + end + end + + # check if primitive exists in the confiuguration + # @param primitive primitive id or name + def primitive_exists?(primitive) + primitives.key? primitive + end + + # determine if primitive is managed + # @param primitive [String] primitive id + # @return [TrueClass,FalseClass] + # TODO: will not work correctly if cluster is in management mode + def primitive_is_managed?(primitive) + return unless primitive_exists? primitive + is_managed = primitives.fetch(primitive).fetch('meta_attributes', {}).fetch('is-managed', {}).fetch('value', 'true') + is_managed == 'true' + end + + # determine if primitive has target-state started + # @param primitive [String] primitive id + # @return [TrueClass,FalseClass] + # TODO: will not work correctly if target state is set globally to stopped + def primitive_is_started?(primitive) + return unless primitive_exists? primitive + target_role = primitives.fetch(primitive).fetch('meta_attributes', {}).fetch('target-role', {}).fetch('value', 'Started') + target_role == 'Started' + end + + # check if pacemaker is online + # and we can work with it + # @return [TrueClass,FalseClass] + def is_online? + begin + cibadmin '-Q' + true + rescue Puppet::ExecutionFailure + false + else + true + end + end + + # retry the given command until it runs without errors + # or for RETRY_COUNT times with RETRY_STEP sec step + # print cluster status report on fail + # returns normal command output on success + # @return [String] + def retry_command + (0..RETRY_COUNT).each do + begin + out = yield + rescue Puppet::ExecutionFailure => e + Puppet.debug "Command failed: #{e.message}" + sleep RETRY_STEP + else + return out + end + end + Puppet.debug get_cluster_debug_report if is_online? + fail "Execution timeout after #{RETRY_COUNT * RETRY_STEP} seconds!" + end + + # retry the given block until it returns true + # or for RETRY_COUNT times with RETRY_STEP sec step + # print cluster status report on fail + def retry_block_until_true + (0..RETRY_COUNT).each do + return if yield + sleep RETRY_STEP + end + Puppet.debug get_cluster_debug_report if is_online? + fail "Execution timeout after #{RETRY_COUNT * RETRY_STEP} seconds!" + end + + # wait for pacemaker to become online + def wait_for_online + Puppet.debug "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for Pacemaker to become online" + retry_block_until_true do + is_online? + end + Puppet.debug 'Pacemaker is online' + end + + # cleanup a primitive and then wait until + # we can get it's status again because + # cleanup blocks operations sections for a while + # @param primitive [String] primitive name + def cleanup_with_wait(primitive, node = '') + node_msgpart = node.empty? ? 
'' : " on node '#{node}'" + Puppet.debug "Cleanup primitive '#{primitive}'#{node_msgpart} and wait until cleanup finishes" + cleanup_primitive(primitive, node) + retry_block_until_true do + cib_reset + primitive_status(primitive) != nil + end + Puppet.debug "Primitive '#{primitive}' have been cleaned up#{node_msgpart} and is online again" + end + + # wait for primitive to start + # if node is given then start on this node + # @param primitive [String] primitive id + # @param node [String] on this node if given + def wait_for_start(primitive, node = nil) + message = "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for service '#{primitive}' to start" + message += " on node '#{node}'" if node + Puppet.debug message + retry_block_until_true do + cib_reset + primitive_is_running? primitive, node + end + Puppet.debug get_cluster_debug_report + message = "Service '#{primitive}' have started" + message += " on node '#{node}'" if node + Puppet.debug message + end + + # wait for primitive to start as a master + # if node is given then start as a master on this node + # @param primitive [String] primitive id + # @param node [String] on this node if given + def wait_for_master(primitive, node = nil) + message = "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for service '#{primitive}' to start master" + message += " on node '#{node}'" if node + Puppet.debug message + retry_block_until_true do + cib_reset + primitive_has_master_running? primitive, node + end + Puppet.debug get_cluster_debug_report + message = "Service '#{primitive}' have started master" + message += " on node '#{node}'" if node + Puppet.debug message + end + + # wait for primitive to stop + # if node is given then start on this node + # @param primitive [String] primitive id + # @param node [String] on this node if given + def wait_for_stop(primitive, node = nil) + message = "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for service '#{primitive}' to stop" + message += " on node '#{node}'" if node + Puppet.debug message + retry_block_until_true do + cib_reset + result = primitive_is_running? primitive, node + result.is_a? FalseClass + end + Puppet.debug get_cluster_debug_report + message = "Service '#{primitive}' was stopped" + message += " on node '#{node}'" if node + Puppet.debug message + end + +end diff --git a/openstack_extras/lib/puppet/provider/service/pacemaker.rb b/openstack_extras/lib/puppet/provider/service/pacemaker.rb new file mode 100644 index 000000000..3139867c2 --- /dev/null +++ b/openstack_extras/lib/puppet/provider/service/pacemaker.rb @@ -0,0 +1,203 @@ +require File.join File.dirname(__FILE__), '../pacemaker_common.rb' + +Puppet::Type.type(:service).provide :pacemaker, :parent => Puppet::Provider::Pacemaker_common do + + has_feature :enableable + has_feature :refreshable + + commands :uname => 'uname' + commands :pcs => 'pcs' + commands :crm_resource => 'crm_resource' + commands :cibadmin => 'cibadmin' + + # hostname of the current node + # @return [String] + def hostname + return @hostname if @hostname + @hostname = (uname '-n').chomp.strip + end + + # original name passed from the type + # @return [String] + def title + @resource[:name] + end + + # primitive name with 'p_' added if needed + # @return [String] + def name + return @name if @name + primitive_name = title + if primitive_exists? primitive_name + Puppet.debug "Primitive with title '#{primitive_name}' was found in CIB" + @name = primitive_name + return @name + end + primitive_name = "p_#{primitive_name}" + if primitive_exists? 
primitive_name + Puppet.debug "Using '#{primitive_name}' name instead of '#{title}'" + @name = primitive_name + return @name + end + fail "Primitive '#{title}' was not found in CIB!" + end + + # full name of the primitive + # if resource is complex use group name + # @return [String] + def full_name + return @full_name if @full_name + if primitive_is_complex? name + full_name = primitives[name]['name'] + Puppet.debug "Using full name '#{full_name}' for complex primitive '#{name}'" + @full_name = full_name + else + @full_name = name + end + end + + # name of the basic service without 'p_' prefix + # used to disable the basic service + # @return [String] + def basic_service_name + return @basic_service_name if @basic_service_name + if name.start_with? 'p_' + basic_service_name = name.gsub /^p_/, '' + Puppet.debug "Using '#{basic_service_name}' as the basic service name for primitive '#{name}'" + @basic_service_name = basic_service_name + else + @basic_service_name = name + end + end + + # called by Puppet to determine if the service + # is running on the local node + # @return [:running,:stopped] + def status + wait_for_online + Puppet.debug "Call: 'status' for Pacemaker service '#{name}' on node '#{hostname}'" + cib_reset + out = get_primitive_puppet_status name, hostname + Puppet.debug get_cluster_debug_report + Puppet.debug "Return: '#{out}' (#{out.class})" + out + end + + # called by Puppet to start the service + def start + Puppet.debug "Call 'start' for Pacemaker service '#{name}' on node '#{hostname}'" + enable unless primitive_is_managed? name + disable_basic_service + constraint_location_add name, hostname + unban_primitive name, hostname + start_primitive name + cleanup_with_wait(name, hostname) if primitive_has_failures?(name, hostname) + + if primitive_is_multistate? name + Puppet.debug "Choose master start for Pacemaker service '#{name}'" + wait_for_master name + else + Puppet.debug "Choose global start for Pacemaker service '#{name}'" + wait_for_start name + end + end + + # called by Puppet to stop the service + def stop + Puppet.debug "Call 'stop' for Pacemaker service '#{name}' on node '#{hostname}'" + enable unless primitive_is_managed? name + cleanup_with_wait(name, hostname) if primitive_has_failures?(name, hostname) + + if primitive_is_complex? name + Puppet.debug "Choose local stop for Pacemaker service '#{name}' on node '#{hostname}'" + ban_primitive name, hostname + wait_for_stop name, hostname + else + Puppet.debug "Choose global stop for Pacemaker service '#{name}'" + stop_primitive name + wait_for_stop name + end + end + + # called by Puppet to restart the service + def restart + Puppet.debug "Call 'restart' for Pacemaker service '#{name}' on node '#{hostname}'" + unless primitive_is_running? name, hostname + Puppet.info "Pacemaker service '#{name}' is not running on node '#{hostname}'. Skipping restart!" + return + end + + begin + stop + rescue + nil + ensure + start + end + end + + # called by Puppet to enable the service + def enable + Puppet.debug "Call 'enable' for Pacemaker service '#{name}' on node '#{hostname}'" + manage_primitive name + end + + # called by Puppet to disable the service + def disable + Puppet.debug "Call 'disable' for Pacemaker service '#{name}' on node '#{hostname}'" + unmanage_primitive name + end + alias :manual_start :disable + + # called by Puppet to determine if the service is enabled + # @return [:true,:false] + def enabled? + Puppet.debug "Call 'enabled?' 
for Pacemaker service '#{name}' on node '#{hostname}'" + out = get_primitive_puppet_enable name + Puppet.debug "Return: '#{out}' (#{out.class})" + out + end + + # create an extra provider instance to deal with the basic service + # the provider will be chosen to match the current system + # @return [Puppet::Type::Service::Provider] + def extra_provider(provider_name = nil) + return @extra_provider if @extra_provider + begin + param_hash = {} + param_hash.store :name, basic_service_name + param_hash.store :provider, provider_name if provider_name + type = Puppet::Type::Service.new param_hash + @extra_provider = type.provider + rescue => e + Puppet.warning "Could not get extra provider for Pacemaker primitive '#{name}': #{e.message}" + @extra_provider = nil + end + end + + # disable and stop the basic service + def disable_basic_service + return unless extra_provider + begin + if extra_provider.enableable? and extra_provider.enabled? == :true + Puppet.info "Disable basic service '#{extra_provider.name}' using provider '#{extra_provider.class.name}'" + extra_provider.disable + else + Puppet.info "Basic service '#{extra_provider.name}' is disabled as reported by '#{extra_provider.class.name}' provider" + end + if extra_provider.status == :running + if not ['lsb','systemd','upstart'].include?(primitive_class name) + Puppet.info "Stop basic service '#{extra_provider.name}' using provider '#{extra_provider.class.name}'" + extra_provider.stop + else + Puppet.info "Not stopping basic service '#{extra_provider.name}', since its Pacemaker primitive is using primitive_class '#{extra_provider.class.name}'" + end + else + Puppet.info "Basic service '#{extra_provider.name}' is stopped as reported by '#{extra_provider.class.name}' provider" + end + rescue => e + Puppet.warning "Could not disable basic service for Pacemaker primitive '#{name}' using '#{extra_provider.class.name}' provider: #{e.message}" + end + end + +end diff --git a/openstack_extras/manifests/auth_file.pp b/openstack_extras/manifests/auth_file.pp new file mode 100644 index 000000000..193df366b --- /dev/null +++ b/openstack_extras/manifests/auth_file.pp @@ -0,0 +1,92 @@ +# == Class: openstack_extras::auth_file +# +# Creates an auth file that can be used to export +# environment variables that can be used to authenticate +# against a keystone server. +# +# === Parameters +# +# [*password*] +# (required) Password for this account as defined in keystone +# +# [*auth_url*] +# (optional) URL to authenticate against +# Defaults to 'http://127.0.0.1:5000/v2.0/' +# +# [*service_token*] +# (optional) Keystone service token +# NOTE: This setting will trigger a warning from keystone. +# Authentication credentials will be ignored by keystone client +# in favor of token authentication. +# Defaults to undef. +# +# [*service_endpoint*] +# (optional) Keystone service endpoint +# Defaults to 'http://127.0.0.1:35357/v2.0/' +# +# [*username*] +# (optional) Username for this account as defined in keystone +# Defaults to 'admin'. +# +# [*tenant_name*] +# (optional) Tenant for this account as defined in keystone +# Defaults to 'openstack'. +# +# [*region_name*] +# (optional) Openstack region to use +# Defaults to 'RegionOne'. +# +# [*use_no_cache*] +# (optional) Do not use the auth token cache. +# Defaults to true. +# +# [*cinder_endpoint_type*] +# (optional) The Cinder endpoint to use +# Defaults to 'publicURL'. +# +# [*glance_endpoint_type*] +# (optional) The Glance endpoint to use +# Defaults to 'publicURL'. 
+# +# [*keystone_endpoint_type*] +# (optional) The Keystone endpoint to use +# Defaults to 'publicURL'. +# +# [*nova_endpoint_type*] +# (optional) The Nova endpoint to use +# Defaults to 'publicURL'. +# +# [*neutron_endpoint_type*] +# (optional) The Neutron endpoint to use +# Defaults to 'publicURL'. +# +# [*auth_strategy*] +# (optional) The method to use for authentication +# Defaults to 'keystone'. +# +class openstack_extras::auth_file( + $password = undef, + $auth_url = 'http://127.0.0.1:5000/v2.0/', + $service_token = undef, + $service_endpoint = 'http://127.0.0.1:35357/v2.0/', + $username = 'admin', + $tenant_name = 'openstack', + $region_name = 'RegionOne', + $use_no_cache = true, + $cinder_endpoint_type = 'publicURL', + $glance_endpoint_type = 'publicURL', + $keystone_endpoint_type = 'publicURL', + $nova_endpoint_type = 'publicURL', + $neutron_endpoint_type = 'publicURL', + $auth_strategy = 'keystone', +) { + if ! $password { + fail('You must specify a password for openstack_extras::auth_file') + } + file { '/root/openrc': + owner => 'root', + group => 'root', + mode => '0700', + content => template('openstack_extras/openrc.erb') + } +} diff --git a/openstack_extras/manifests/pacemaker/service.pp b/openstack_extras/manifests/pacemaker/service.pp new file mode 100644 index 000000000..8e17b2a0d --- /dev/null +++ b/openstack_extras/manifests/pacemaker/service.pp @@ -0,0 +1,213 @@ +# == Class: openstack_extras::pacemaker::service +# +# Configures Pacemaker resource for a specified service and +# overrides its service provider to Pacemaker. +# Assumes there is a service already exists in the Puppet catalog. +# For example, the one, such as nova-api, heat-engine, neutron-agent-l3 +# and so on, created by other core Puppet modules for Openstack. +# +# === Parameters +# +# [*ensure*] +# (optional) The state of the service provided by Pacemaker +# Defaults to present +# +# [*ocf_root_path*] +# (optional) The path for OCF scripts +# Defaults to /usr/lib/ocf +# +# [*primitive_class*] +# (optional) The class of Pacemaker resource (primitive) +# Defaults to ocf +# +# [*primitive_provider*] +# (optional) The provider of OCF scripts +# Defaults to pacemaker +# +# [*primitive_type*] +# (optional) The type of the primitive (OCF file name). +# Used with the other parameters as a full path to OCF script: +# primitive_class/primitive_provider/primitive_type +# resided at ocf_root_path/resource.d +# Defaults to false +# +# [*parameters*] +# (optional) The hash of parameters for a primitive +# Defaults to false +# +# [*operations*] +# (optional) The hash of operations for a primitive +# Defaults to false +# +# [*metadata*] +# (optional) The hash of metadata for a primitive +# Defaults to false +# +# [*ms_metadata*] +# (optional) The hash of ms_metadata for a primitive +# Defaults to false +# +# [*use_handler*] +# (optional) The handler (wrapper script) for OCF script +# Could be useful for debug and informational purposes. 
+# It sets some default values like OCF_ROOT in order to +# simplify debugging of OCF scripts +# Defaults to true +# +# [*handler_root_path*] +# (optional) The path for a handler script +# Defaults to /usr/local/bin +# +# [*ocf_script_template*] +# (optional) ERB template for OCF script for Pacemaker +# resource +# Defaults to false +# +# [*ocf_script_file*] +# (optional) OCF file for Pacemaker resource +# Defaults to false +# +# [*create_primitive*] +# (optional) Controls Pacemaker primitive creation +# Defaults to true +# +# [*clone*] +# (optional) Create a cloned primitive +# Defaults to false +# +# === Examples +# +# Will create resource and ensure Pacemaker provider for +# 'some-api-service' with the given OCF scripte template and +# parameters: +# +# $metadata = { +# 'resource-stickiness' => '1' +# } +# $operations = { +# 'monitor' => { +# 'interval' => '20', +# 'timeout' => '30', +# }, +# 'start' => { +# 'timeout' => '60', +# }, +# 'stop' => { +# 'timeout' => '60', +# }, +# } +# $ms_metadata = { +# 'interleave' => true, +# } +# +# openstack_extras::pacemaker::service { 'some-api-service' : +# primitive_type => 'some-api-service', +# metadata => $metadata, +# ms_metadata => $ms_metadata, +# operations => $operations, +# clone => true, +# ocf_script_template => 'some_module/some_api_service.ocf.erb', +# } +# +define openstack_extras::pacemaker::service ( + $ensure = 'present', + $ocf_root_path = '/usr/lib/ocf', + $primitive_class = 'ocf', + $primitive_provider = 'pacemaker', + $primitive_type = false, + $parameters = false, + $operations = false, + $metadata = false, + $ms_metadata = false, + $use_handler = true, + $handler_root_path = '/usr/local/bin', + $ocf_script_template = false, + $ocf_script_file = false, + $create_primitive = true, + $clone = false, +) { + + $service_name = $title + $primitive_name = "p_${service_name}" + $ocf_script_name = "${service_name}-ocf-file" + $ocf_handler_name = "ocf_handler_${service_name}" + + $ocf_dir_path = "${ocf_root_path}/resource.d" + $ocf_script_path = "${ocf_dir_path}/${primitive_provider}/${$primitive_type}" + $ocf_handler_path = "${handler_root_path}/${ocf_handler_name}" + + Service<| title == $service_name |> { + provider => 'pacemaker', + } + + Service<| name == $service_name |> { + provider => 'pacemaker', + } + + if $create_primitive { + cs_primitive { $primitive_name : + ensure => $ensure, + primitive_class => $primitive_class, + primitive_type => $primitive_type, + provided_by => $primitive_provider, + parameters => $parameters, + operations => $operations, + metadata => $metadata, + ms_metadata => $ms_metadata, + } + + $clone_name="${primitive_name}-clone" + if $clone { + cs_clone { $clone_name : + ensure => present, + primitive => $primitive_name, + require => Cs_primitive[$primitive_name] + } + } + else { + cs_clone { $clone_name : + ensure => absent, + require => Cs_primitive[$primitive_name] + } + } + } + + if $ocf_script_template or $ocf_script_file { + file { $ocf_script_name : + ensure => $ensure, + path => $ocf_script_path, + mode => '0755', + owner => 'root', + group => 'root', + } + + if $ocf_script_template { + File[$ocf_script_name] { + content => template($ocf_script_template), + } + } elsif $ocf_script_file { + File[$ocf_script_name] { + source => "puppet:///modules/${ocf_script_file}", + } + } + + } + + if ($primitive_class == 'ocf') and ($use_handler) { + file { $ocf_handler_name : + ensure => present, + path => $ocf_handler_path, + owner => 'root', + group => 'root', + mode => '0700', + content => 
template('openstack_extras/ocf_handler.erb'), + } + } + + File<| title == $ocf_script_name |> -> + Cs_primitive<| title == $primitive_name |> + File<| title == $ocf_script_name |> ~> Service[$service_name] + Cs_primitive<| title == $primitive_name |> -> Service[$service_name] + File<| title == $ocf_handler_name |> -> Service[$service_name] + +} diff --git a/openstack_extras/manifests/repo/debian/debian.pp b/openstack_extras/manifests/repo/debian/debian.pp new file mode 100644 index 000000000..2204dc8c2 --- /dev/null +++ b/openstack_extras/manifests/repo/debian/debian.pp @@ -0,0 +1,59 @@ +# == Class: openstack_extras::repo::debian::debian +# +# This repo sets up apt sources for use with the debian +# osfamily and debian operatingsystem +# +# === Parameters: +# +# [*release*] +# (optional) The OpenStack release to add a +# Debian Wheezy apt source for. +# Defaults to 'icehouse' +# +# [*manage_whz*] +# (optional) Whether or not to add the default +# Debian Wheezy APT source +# Defaults to true +# +# [*source_hash*] +# (optional) A hash of apt::source resources to +# create and manage +# Defaults to {} +# +# [*source_defaults*] +# (optional) A hash of defaults to use for all apt::source +# resources created by this class +# Defaults to {} +# +# [*package_require*] +# (optional) Whether or not to run 'apt-get update' before +# installing any packages. +# Defaults to false +# +class openstack_extras::repo::debian::debian( + $release = $::openstack_extras::repo::debian::params::release, + $manage_whz = true, + $source_hash = {}, + $source_defaults = {}, + $package_require = false +) inherits openstack_extras::repo::debian::params { + if $manage_whz { + apt::source { $::openstack_extras::repo::debian::params::whz_name: + location => $::openstack_extras::repo::debian::params::whz_location, + release => $release, + repos => $::openstack_extras::repo::debian::params::whz_repos, + required_packages => $::openstack_extras::repo::debian::params::whz_required_packages + } -> + apt::source { "${::openstack_extras::repo::debian::params::whz_name}_backports": + location => $::openstack_extras::repo::debian::params::whz_location, + release => "${release}-backports", + repos => $::openstack_extras::repo::debian::params::whz_repos, + } + } + + create_resources('apt::source', $source_hash, $source_defaults) + + if $package_require { + Exec['apt_update'] -> Package<||> + } +} diff --git a/openstack_extras/manifests/repo/debian/params.pp b/openstack_extras/manifests/repo/debian/params.pp new file mode 100644 index 000000000..73732279e --- /dev/null +++ b/openstack_extras/manifests/repo/debian/params.pp @@ -0,0 +1,18 @@ +# == Class: openstack_extras::repo::debian::params +# +# This repo sets defaults for the debian osfamily +# +class openstack_extras::repo::debian::params +{ + $release = 'icehouse' + + $uca_name = 'ubuntu-cloud-archive' + $uca_location = 'http://ubuntu-cloud.archive.canonical.com/ubuntu' + $uca_repos = 'main' + $uca_required_packages = 'ubuntu-cloud-keyring' + + $whz_name = 'debian_wheezy' + $whz_location = 'http://archive.gplhost.com/debian' + $whz_repos = 'main' + $whz_required_packages = 'gplhost-archive-keyring' +} diff --git a/openstack_extras/manifests/repo/debian/ubuntu.pp b/openstack_extras/manifests/repo/debian/ubuntu.pp new file mode 100644 index 000000000..41726b754 --- /dev/null +++ b/openstack_extras/manifests/repo/debian/ubuntu.pp @@ -0,0 +1,54 @@ +# == Class: openstack_extras::repo::debian::ubuntu +# +# This repo sets up apt sources for use with the debian +# osfamily and ubuntu 
operatingsystem +# +# === Parameters: +# +# [*release*] +# (optional) The OpenStack release to add an +# Ubuntu Cloud Archive APT source for. +# Defaults to 'icehouse' +# +# [*manage_uca*] +# (optional) Whether or not to add the default +# Ubuntu Cloud Archive APT source +# Defaults to true +# +# [*source_hash*] +# (optional) A hash of apt::source resources to +# create and manage +# Defaults to {} +# +# [*source_defaults*] +# (optional) A hash of defaults to use for all apt::source +# resources created by this class +# Defaults to {} +# +# [*package_require*] +# (optional) Whether or not to run 'apt-get update' before +# installing any packages. +# Defaults to false +# +class openstack_extras::repo::debian::ubuntu( + $release = $::openstack_extras::repo::debian::params::release, + $manage_uca = true, + $source_hash = {}, + $source_defaults = {}, + $package_require = false +) inherits openstack_extras::repo::debian::params { + if $manage_uca { + apt::source { $::openstack_extras::repo::debian::params::uca_name: + location => $::openstack_extras::repo::debian::params::uca_location, + release => "${::lsbdistcodename}-updates/${release}", + repos => $::openstack_extras::repo::debian::params::uca_repos, + required_packages => $::openstack_extras::repo::debian::params::uca_required_packages + } + } + + create_resources('apt::source', $source_hash, $source_defaults) + + if $package_require { + Exec['apt_update'] -> Package<||> + } +} diff --git a/openstack_extras/manifests/repo/redhat/params.pp b/openstack_extras/manifests/repo/redhat/params.pp new file mode 100644 index 000000000..ece8e620b --- /dev/null +++ b/openstack_extras/manifests/repo/redhat/params.pp @@ -0,0 +1,32 @@ +# == Class: openstack_extras::repo::redhat::params +# +# This repo sets defaults for use with the redhat +# osfamily repo classes. +# +class openstack_extras::repo::redhat::params +{ + $release = 'juno' + + $repo_defaults = { 'enabled' => '1', + 'gpgcheck' => '1', + 'notify' => "Exec[yum_refresh]", + 'mirrorlist' => 'absent', + 'require' => "Anchor[openstack_extras_redhat]" + } + + $gpgkey_defaults = { 'owner' => 'root', + 'group' => 'root', + 'mode' => '0644', + 'before' => "Anchor[openstack_extras_redhat]" + } + + case $::operatingsystem { + centos, redhat, scientific, slc: { $dist = 'epel' } + fedora: { $dist = 'fedora' } + default: { + warning('Unrecognised operatingsystem') + } + } + + $rdo_priority = 98 +} diff --git a/openstack_extras/manifests/repo/redhat/redhat.pp b/openstack_extras/manifests/repo/redhat/redhat.pp new file mode 100644 index 000000000..303e7e25b --- /dev/null +++ b/openstack_extras/manifests/repo/redhat/redhat.pp @@ -0,0 +1,134 @@ +# == Class: openstack_extras::repo::redhat::redhat +# +# This repo sets up yum repos for use with the redhat +# osfamily and redhat operatingsystem. +# +# === Parameters: +# +# [*release*] +# (optional) The openstack release to use if managing rdo +# Defaults to $::openstack_extras::repo::redhat::params::release +# +# [*manage_rdo*] +# (optional) Whether to create a predefined yumrepo resource +# for the RDO OpenStack repository provided by RedHat +# Defaults to true +# +# [*repo_hash*] +# (optional) A hash of yumrepo resources that will be passed to +# create_resource. See examples folder for some useful examples. +# Defaults to {} +# +# [*repo_defaults*] +# (optional) The defaults for the yumrepo resources that will be +# created using create_resource. 
+# Defaults to $::openstack_extras::repo::redhat::params::repo_defaults +# +# [*gpgkey_hash*] +# (optional) A hash of file resources that will be passed to +# create_resource. See examples folder for some useful examples. +# Defaults to {} +# +# [*gpgkey_defaults*] +# (optional) The default resource attributes to +# create gpgkeys with. +# Defaults to $::openstack_extras::repo::redhat::params::gpgkey_defaults +# +# [*purge_unmanaged*] +# (optional) Purge the yum.repos.d directory of +# all repositories not managed by Puppet +# Defaults to false +# +# [*package_require*] +# (optional) Set all packages to require all +# yumrepos be set. +# Defaults to false +# +class openstack_extras::repo::redhat::redhat( + $release = $::openstack_extras::repo::redhat::params::release, + $manage_rdo = true, + $manage_epel = true, + $repo_hash = {}, + $repo_defaults = {}, + $gpgkey_hash = {}, + $gpgkey_defaults = {}, + $purge_unmanaged = false, + $package_require = false +) inherits openstack_extras::repo::redhat::params { + + validate_string($release) + validate_bool($manage_rdo) + validate_bool($manage_epel) + validate_hash($repo_hash) + validate_hash($repo_defaults) + validate_hash($gpgkey_hash) + validate_hash($gpgkey_defaults) + validate_bool($purge_unmanaged) + validate_bool($package_require) + + $_repo_defaults = merge($::openstack_extras::repo::redhat::params::repo_defaults, $repo_defaults) + $_gpgkey_defaults = merge($::openstack_extras::repo::redhat::params::gpgkey_defaults, $gpgkey_defaults) + + anchor { 'openstack_extras_redhat': } + + if $manage_rdo { + $release_cap = capitalize($release) + $_dist = $::openstack_extras::repo::redhat::params::dist + + $rdo_hash = { 'rdo-release' => { + 'baseurl' => "http://repos.fedorapeople.org/repos/openstack/openstack-${release}/${_dist}-${::operatingsystemmajrelease}/", + 'descr' => "OpenStack ${release_cap} Repository", + 'priority' => $::openstack_extras::repo::redhat::params::rdo_priority, + 'gpgkey' => "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RDO-${release_cap}", + } + } + + $rdokey_hash = { "/etc/pki/rpm-gpg/RPM-GPG-KEY-RDO-${release_cap}" => { + 'source' => "puppet:///modules/openstack_extras/RPM-GPG-KEY-RDO-${release_cap}" + } + } + + create_resources('file', $rdokey_hash, $_gpgkey_defaults) + create_resources('yumrepo', $rdo_hash, $_repo_defaults) + } + + if $manage_epel { + if ($::osfamily == 'RedHat' and + $::operatingsystem != 'Fedora') + { + $epel_hash = { 'epel' => { + 'baseurl' => "https://download.fedoraproject.org/pub/epel/${::operatingsystemmajrelease}/\$basearch", + 'descr' => "Extra Packages for Enterprise Linux ${::operatingsystemmajrelease} - \$basearch", + 'gpgkey' => "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-${::operatingsystemmajrelease}", + 'failovermethod' => 'priority' + } + } + + $epelkey_hash = { "/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-${::operatingsystemmajrelease}" => { + 'source' => "puppet:///modules/openstack_extras/RPM-GPG-KEY-EPEL-${::operatingsystemmajrelease}" + } + } + + create_resources('file', $epelkey_hash, $_gpgkey_defaults) + create_resources('yumrepo', $epel_hash, $_repo_defaults) + } + } + + validate_yum_hash($repo_hash) + create_resources('yumrepo', $repo_hash, $_repo_defaults) + create_resources('file', $gpgkey_hash, $_gpgkey_defaults) + + if ((versioncmp($::puppetversion, '3.5') > 0) and $purge_unmanaged) { + resources { 'yumrepo': purge => true } + } + + if $package_require { + Yumrepo<||> -> Package<||> + } + + exec { 'yum_refresh': + command => '/usr/bin/yum clean all', + refreshonly => true, + } -> Package <||> +} + 
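A minimal usage sketch of the repo class added above, assuming the defaults from openstack_extras::repo::redhat::params; the release value and the extra mirror URL are illustrative only and not part of this patch:

    class { 'openstack_extras::repo::redhat::redhat':
      release         => 'juno',
      package_require => true,
      repo_hash       => {
        'centos-base' => {
          'baseurl' => 'http://mirror.example.com/centos/$releasever/os/$basearch/',
          'descr'   => 'CentOS-$releasever - Base',
          'gpgkey'  => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6',
        },
      },
    }

With these values the class manages the RDO Juno and EPEL yumrepos, adds the extra mirror, and orders every Package resource after the repositories.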
diff --git a/openstack_extras/metadata.json b/openstack_extras/metadata.json new file mode 100644 index 000000000..4c36ba5b4 --- /dev/null +++ b/openstack_extras/metadata.json @@ -0,0 +1,36 @@ +{ + "name": "stackforge-openstack_extras", + "version": "5.0.0", + "author": "StackForge Contributors", + "summary": "Puppet OpenStack Extras Module", + "license": "Apache License 2.0", + "source": "git://github.com/stackforge/puppet-openstack_extras.git", + "project_page": "https://launchpad.net/puppet-openstack_extras", + "issues_url": "https://bugs.launchpad.net/puppet-openstack_extras", + "requirements": [ + { "name": "pe","version_requirement": "3.x" }, + { "name": "puppet","version_requirement": "3.x" } + ], + "operatingsystem_support": [ + { + "operatingsystem": "Debian", + "operatingsystemrelease": ["7"] + }, + { + "operatingsystem": "Fedora", + "operatingsystemrelease": ["20"] + }, + { + "operatingsystem": "RedHat", + "operatingsystemrelease": ["6.5","7"] + }, + { + "operatingsystem": "Ubuntu", + "operatingsystemrelease": ["12.04","14.04"] + } + ], + "description": "Puppet module to add useful utilities for OpenStack deployments", + "dependencies": [ + { "name": "puppetlabs/corosync", "version_requirement": ">=0.1.0" } + ] +} diff --git a/openstack_extras/spec/classes/openstack_extras_auth_file_spec.rb b/openstack_extras/spec/classes/openstack_extras_auth_file_spec.rb new file mode 100644 index 000000000..6052b07c4 --- /dev/null +++ b/openstack_extras/spec/classes/openstack_extras_auth_file_spec.rb @@ -0,0 +1,86 @@ +require 'spec_helper' + +describe 'openstack_extras::auth_file' do + + describe "when only passing default class parameters" do + + let :params do + { :password => 'admin' } + end + + it 'should create a openrc file' do + verify_contents(catalogue, '/root/openrc', [ + 'export OS_NO_CACHE=\'true\'', + 'export OS_TENANT_NAME=\'openstack\'', + 'export OS_USERNAME=\'admin\'', + 'export OS_PASSWORD=\'admin\'', + 'export OS_AUTH_URL=\'http://127.0.0.1:5000/v2.0/\'', + 'export OS_AUTH_STRATEGY=\'keystone\'', + 'export OS_REGION_NAME=\'RegionOne\'', + 'export CINDER_ENDPOINT_TYPE=\'publicURL\'', + 'export GLANCE_ENDPOINT_TYPE=\'publicURL\'', + 'export KEYSTONE_ENDPOINT_TYPE=\'publicURL\'', + 'export NOVA_ENDPOINT_TYPE=\'publicURL\'', + 'export NEUTRON_ENDPOINT_TYPE=\'publicURL\'' + ]) + end + end + + describe 'when overriding parameters' do + + let :params do + { + :password => 'admin', + :auth_url => 'http://127.0.0.2:5000/v2.0/', + :service_token => 'servicetoken', + :service_endpoint => 'http://127.0.0.2:35357/v2.0/', + :username => 'myuser', + :tenant_name => 'mytenant', + :region_name => 'myregion', + :use_no_cache => 'false', + :cinder_endpoint_type => 'internalURL', + :glance_endpoint_type => 'internalURL', + :keystone_endpoint_type => 'internalURL', + :nova_endpoint_type => 'internalURL', + :neutron_endpoint_type => 'internalURL', + :auth_strategy => 'no_auth', + } + end + + it 'should create a openrc file' do + verify_contents(catalogue, '/root/openrc', [ + 'export OS_SERVICE_TOKEN=\'servicetoken\'', + 'export OS_SERVICE_ENDPOINT=\'http://127.0.0.2:35357/v2.0/\'', + 'export OS_NO_CACHE=\'false\'', + 'export OS_TENANT_NAME=\'mytenant\'', + 'export OS_USERNAME=\'myuser\'', + 'export OS_PASSWORD=\'admin\'', + 'export OS_AUTH_URL=\'http://127.0.0.2:5000/v2.0/\'', + 'export OS_AUTH_STRATEGY=\'no_auth\'', + 'export OS_REGION_NAME=\'myregion\'', + 'export CINDER_ENDPOINT_TYPE=\'internalURL\'', + 'export GLANCE_ENDPOINT_TYPE=\'internalURL\'', + 'export 
KEYSTONE_ENDPOINT_TYPE=\'internalURL\'', + 'export NOVA_ENDPOINT_TYPE=\'internalURL\'', + 'export NEUTRON_ENDPOINT_TYPE=\'internalURL\'' + ]) + end + end + + describe "handle password and token with single quotes" do + + let :params do + { + :password => 'singlequote\'', + :service_token => 'key\'stone' + } + end + + it 'should create a openrc file' do + verify_contents(catalogue, '/root/openrc', [ + 'export OS_SERVICE_TOKEN=\'key\\\'stone\'', + 'export OS_PASSWORD=\'singlequote\\\'\'', + ]) + end + end +end diff --git a/openstack_extras/spec/classes/openstack_extras_repo_debian_debian_spec.rb b/openstack_extras/spec/classes/openstack_extras_repo_debian_debian_spec.rb new file mode 100644 index 000000000..e38fff2d8 --- /dev/null +++ b/openstack_extras/spec/classes/openstack_extras_repo_debian_debian_spec.rb @@ -0,0 +1,132 @@ +require 'spec_helper' + +describe 'openstack_extras::repo::debian::debian' do + let :class_params do + { + :manage_whz => true, + :source_hash => {}, + :source_defaults => {}, + :package_require => false + } + end + + let :paramclass_defaults do + { + :release => 'icehouse' + } + end + + let :default_params do + class_params.merge!(paramclass_defaults) + end + + context 'on Debian platforms' do + let :facts do + { + :osfamily => 'Debian', + :operatingsystem => 'Debian', + :lsbdistid => 'Debian' + } + end + + describe 'with default parameters' do + let :params do + {}.merge!(default_params) + end + + it { should contain_apt__source('debian_wheezy').with( + :location => 'http://archive.gplhost.com/debian', + :release => 'icehouse', + :repos => 'main', + :required_packages => 'gplhost-archive-keyring' + )} + + it { should contain_apt__source('debian_wheezy_backports').with( + :location => 'http://archive.gplhost.com/debian', + :release => 'icehouse-backports', + :repos => 'main' + )} + + end + + describe 'with overridden release' do + let :params do + default_params.merge!({ :release => 'juno' }) + end + + it { should contain_apt__source('debian_wheezy').with( + :location => 'http://archive.gplhost.com/debian', + :release => 'juno', + :repos => 'main', + :required_packages => 'gplhost-archive-keyring' + )} + + it { should contain_apt__source('debian_wheezy_backports').with( + :location => 'http://archive.gplhost.com/debian', + :release => 'juno-backports', + :repos => 'main' + )} + + end + + describe 'with overridden source hash' do + let :params do + default_params.merge!({ :source_hash => { + 'debian_unstable' => { + 'location' => 'http://mymirror/debian/', + 'repos' => 'main', + 'release' => 'unstable' + }, + 'puppetlabs' => { + 'location' => 'http://apt.puppetlabs.com', + 'repos' => 'main', + 'release' => 'wheezy', + 'key' => '4BD6EC30', + 'key_server' => 'pgp.mit.edu' + } + } + }) + end + + it { should contain_apt__source('debian_unstable').with( + :location => 'http://mymirror/debian/', + :release => 'unstable', + :repos => 'main' + )} + + it { should contain_apt__source('puppetlabs').with( + :location => 'http://apt.puppetlabs.com', + :repos => 'main', + :release => 'wheezy', + :key => '4BD6EC30', + :key_server => 'pgp.mit.edu' + )} + + end + + describe 'with overridden source default' do + let :params do + default_params.merge!({ :source_defaults => { + 'include_src' => 'true' + } + }) + end + + it { should contain_apt__source('debian_wheezy').with( + :location => 'http://archive.gplhost.com/debian', + :release => 'icehouse', + :repos => 'main', + :required_packages => 'gplhost-archive-keyring', + :include_src => 'true' + )} + + it { should 
contain_apt__source('debian_wheezy_backports').with( + :location => 'http://archive.gplhost.com/debian', + :release => 'icehouse-backports', + :repos => 'main', + :include_src => 'true' + )} + + end + end +end diff --git a/openstack_extras/spec/classes/openstack_extras_repo_debian_ubuntu_spec.rb b/openstack_extras/spec/classes/openstack_extras_repo_debian_ubuntu_spec.rb new file mode 100644 index 000000000..f62fa01af --- /dev/null +++ b/openstack_extras/spec/classes/openstack_extras_repo_debian_ubuntu_spec.rb @@ -0,0 +1,114 @@ +require 'spec_helper' + +describe 'openstack_extras::repo::debian::ubuntu' do + let :class_params do + { + :manage_uca => true, + :source_hash => {}, + :source_defaults => {}, + :package_require => false + } + end + + let :paramclass_defaults do + { + :release => 'icehouse' + } + end + + let :default_params do + class_params.merge!(paramclass_defaults) + end + + context 'on Debian platforms' do + let :facts do + { + :osfamily => 'Debian', + :operatingsystem => 'Ubuntu', + :lsbdistid => 'Ubuntu', + :lsbdistcodename => 'trusty' + } + end + + describe 'with default parameters' do + let :params do + {}.merge!(default_params) + end + + it { should contain_apt__source('ubuntu-cloud-archive').with( + :location => 'http://ubuntu-cloud.archive.canonical.com/ubuntu', + :release => 'trusty-updates/icehouse', + :repos => 'main', + :required_packages => 'ubuntu-cloud-keyring' + )} + + end + + describe 'with overridden release' do + let :params do + default_params.merge!({ :release => 'juno' }) + end + + it { should contain_apt__source('ubuntu-cloud-archive').with( + :location => 'http://ubuntu-cloud.archive.canonical.com/ubuntu', + :release => 'trusty-updates/juno', + :repos => 'main', + :required_packages => 'ubuntu-cloud-keyring' + )} + + end + + describe 'with overridden source hash' do + let :params do + default_params.merge!({ :source_hash => { + 'local_mirror' => { + 'location' => 'http://mymirror/ubuntu/', + 'repos' => 'main', + 'release' => 'trusty' + }, + 'puppetlabs' => { + 'location' => 'http://apt.puppetlabs.com', + 'repos' => 'main', + 'release' => 'trusty', + 'key' => '4BD6EC30', + 'key_server' => 'pgp.mit.edu' + } + } + }) + end + + it { should contain_apt__source('local_mirror').with( + :location => 'http://mymirror/ubuntu/', + :release => 'trusty', + :repos => 'main' + )} + + it { should contain_apt__source('puppetlabs').with( + :location => 'http://apt.puppetlabs.com', + :release => 'trusty', + :repos => 'main', + :key => '4BD6EC30', + :key_server => 'pgp.mit.edu' + )} + + end + + describe 'with overridden source default' do + let :params do + default_params.merge!({ :source_defaults => { + 'include_src' => 'true' + } + }) + end + + it { should contain_apt__source('ubuntu-cloud-archive').with( + :include_src => 'true', + :location => 'http://ubuntu-cloud.archive.canonical.com/ubuntu', + :release => 'trusty-updates/icehouse', + :repos => 'main', + :required_packages => 'ubuntu-cloud-keyring' + )} + + end + end +end diff --git a/openstack_extras/spec/classes/openstack_extras_repo_redhat_redhat_spec.rb b/openstack_extras/spec/classes/openstack_extras_repo_redhat_redhat_spec.rb new file mode 100644 index 000000000..b4dd6a8b5 --- /dev/null +++ b/openstack_extras/spec/classes/openstack_extras_repo_redhat_redhat_spec.rb @@ -0,0 +1,187 @@ +require 'spec_helper' + +describe 'openstack_extras::repo::redhat::redhat' do + let :class_params do + { + :manage_rdo => true, + :manage_epel => true, + :repo_hash => {}, + :gpgkey_hash => {}, + :purge_unmanaged => false, + 
:package_require => false + } + end + + let :paramclass_defaults do + { + :release => 'icehouse', + :repo_defaults => { 'enabled' => '1', + 'gpgcheck' => '1', + 'notify' => "Exec[yum_refresh]", + 'mirrorlist' => 'absent', + 'require' => "Anchor[openstack_extras_redhat]" + }, + :gpgkey_defaults => { 'owner' => 'root', + 'group' => 'root', + 'mode' => '0644', + 'before' => "Anchor[openstack_extras_redhat]" + } + } + end + + let :default_params do + class_params.merge!(paramclass_defaults) + end + + context 'on RedHat platforms' do + let :facts do + { + :osfamily => 'RedHat', + :operatingsystem => 'RedHat', + :operatingsystemrelease => '6.5', + :operatingsystemmajrelease => '6' + } + end + + describe 'with default parameters' do + let :params do + {}.merge!(default_params) + end + + it { should contain_yumrepo('rdo-release').with( + :baseurl => "http://repos.fedorapeople.org/repos/openstack/openstack-icehouse/epel-6/", + :descr => "OpenStack Icehouse Repository", + :priority => 98, + :gpgkey => "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RDO-Icehouse", + :enabled => '1', + :gpgcheck => '1', + :mirrorlist => 'absent', + :require => "Anchor[openstack_extras_redhat]", + :notify => "Exec[yum_refresh]" + )} + + it { should contain_yumrepo('epel').with( + :baseurl => 'https://download.fedoraproject.org/pub/epel/6/$basearch', + :descr => 'Extra Packages for Enterprise Linux 6 - $basearch', + :gpgkey => "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6", + :failovermethod => 'priority', + :enabled => '1', + :gpgcheck => '1', + :mirrorlist => 'absent', + :require => "Anchor[openstack_extras_redhat]", + :notify => "Exec[yum_refresh]" + )} + + it { should contain_file('/etc/pki/rpm-gpg/RPM-GPG-KEY-RDO-Icehouse').with( + :source => "puppet:///modules/openstack_extras/RPM-GPG-KEY-RDO-Icehouse", + :owner => 'root', + :group => 'root', + :mode => '0644', + :before => "Anchor[openstack_extras_redhat]" + )} + + end + + describe 'with overridden release' do + let :params do + default_params.merge!({ :release => 'juno' }) + end + + it { should contain_yumrepo('rdo-release').with( + :baseurl => "http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6/", + :descr => "OpenStack Juno Repository", + :gpgkey => "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RDO-Juno" + )} + + it { should contain_file('/etc/pki/rpm-gpg/RPM-GPG-KEY-RDO-Juno').with( + :source => "puppet:///modules/openstack_extras/RPM-GPG-KEY-RDO-Juno" + )} + end + + describe 'with overridden repo hash' do + let :params do + default_params.merge!({ :repo_hash => { + 'CentOS-Base' => { + 'baseurl' => 'http://mymirror/$releasever/os/$basearch/', + 'descr' => 'CentOS-$releasever - Base', + 'gpgkey' => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6' + }, + 'CentOS-Updates' => { + 'baseurl' => 'http://mymirror/$releasever/updates/$basearch/', + 'descr' => 'CentOS-$releasever - Updates', + 'gpgkey' => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6', + } + } + }) + end + + it { should contain_yumrepo('CentOS-Base').with( + :baseurl => "http://mymirror/$releasever/os/$basearch/", + :descr => "CentOS-$releasever - Base", + :enabled => '1', + :gpgcheck => '1', + :gpgkey => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6', + :mirrorlist => 'absent', + :require => "Anchor[openstack_extras_redhat]", + :notify => "Exec[yum_refresh]" + )} + + it { should contain_yumrepo('CentOS-Updates').with( + :baseurl => "http://mymirror/$releasever/updates/$basearch/", + :descr => "CentOS-$releasever - Updates", + :enabled => '1', + :gpgcheck => '1', + :gpgkey => 
'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6', + :mirrorlist => 'absent', + :require => "Anchor[openstack_extras_redhat]", + :notify => "Exec[yum_refresh]" + )} + + end + + describe 'with overridden repo default' do + let :params do + default_params.merge!({ :repo_defaults => { + 'proxy' => 'http://my.proxy.com:8000' + } + }) + end + + it { should contain_yumrepo('rdo-release').with( + :baseurl => "http://repos.fedorapeople.org/repos/openstack/openstack-icehouse/epel-6/", + :descr => "OpenStack Icehouse Repository", + :gpgkey => "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RDO-Icehouse", + :proxy => "http://my.proxy.com:8000" + )} + end + + describe 'with overridden gpgkey default' do + let :params do + default_params.merge!({ :gpgkey_defaults => { + 'owner' => 'steve' + } + }) + end + + it { should contain_file('/etc/pki/rpm-gpg/RPM-GPG-KEY-RDO-Icehouse').with( + :owner => "steve" + )} + end + + describe 'with epel management disabled' do + let :params do + default_params.merge!({ :manage_epel => false }) + end + + it { should_not contain_yumrepo('epel') } + end + + describe 'with rdo management disabled' do + let :params do + default_params.merge!({ :manage_rdo => false }) + end + + it { should_not contain_yumrepo('rdo-release') } + end + end +end diff --git a/openstack_extras/spec/defines/openstack_extras_pacemaker_service_spec.rb b/openstack_extras/spec/defines/openstack_extras_pacemaker_service_spec.rb new file mode 100644 index 000000000..62ebe49ef --- /dev/null +++ b/openstack_extras/spec/defines/openstack_extras_pacemaker_service_spec.rb @@ -0,0 +1,162 @@ +require 'spec_helper' + +describe 'openstack_extras::pacemaker::service', :type => :define do + + let :pre_condition do + "class { 'foo': }" + end + + let (:title) { 'foo-api' } + + let :default_params do + { + :ensure => 'present', + :ocf_root_path => '/usr/lib/ocf', + :primitive_class => 'ocf', + :primitive_provider => 'pacemaker', + :primitive_type => false, + :parameters => false, + :operations => false, + :metadata => false, + :ms_metadata => false, + :use_handler => true, + :handler_root_path => '/usr/local/bin', + :ocf_script_template => false, + :ocf_script_file => false, + :create_primitive => true, + :clone => false + } + end + + context 'with defaults' do + it 'should contain openstack_extras::pacemaker::service definition' do + should contain_openstack_extras__pacemaker__service(title).with(default_params) + end + + it 'should override existing service provider' do + should contain_service('foo-api').with( + { + :provider => 'pacemaker' + }) + end + + it 'should create a pacemaker primitive' do + should contain_cs_primitive('p_foo-api').with( + { + 'ensure' => default_params[:ensure], + 'primitive_class' => default_params[:primitive_class], + 'primitive_type' => default_params[:primitive_type], + 'provided_by' => default_params[:primitive_provider], + 'parameters' => default_params[:parameters], + 'operations' => default_params[:operations], + 'metadata' => default_params[:metadata], + 'ms_metadata' => default_params[:ms_metadata], + }) + end + it 'should not create a cloned resource' do + should contain_cs_clone('p_foo-api-clone').with( + { + 'ensure' => 'absent', + }) + end + end + + context 'with custom OCF file' do + let :params do + default_params.merge( + { + :ocf_script_file => 'foo/scripts/foo.ocf' + } + ) + end + let (:ocf_dir_path) { "#{params[:ocf_root_path]}/resource.d" } + let (:ocf_script_path) { "#{ocf_dir_path}/#{params[:primitive_provider]}/#{params[:primitive_type]}" } + let (:ocf_handler_name) { 
"ocf_handler_#{title}" } + let (:ocf_handler_path) { "#{params[:handler_root_path]}/#{ocf_handler_name}" } + + it 'should create an OCF file' do + should contain_file("#{title}-ocf-file").with( + { + 'ensure' => 'present', + 'path' => ocf_script_path, + 'mode' => '0755', + 'owner' => 'root', + 'group' => 'root', + 'source' => "puppet:///modules/#{params[:ocf_script_file]}" + }) + end + + it 'should create a handler file' do + should contain_file("#{ocf_handler_name}").with( + { + 'ensure' => 'present', + 'path' => ocf_handler_path, + 'owner' => 'root', + 'group' => 'root', + 'mode' => '0700', + }).with_content(/OCF_ROOT/) + end + + end + + context 'with custom OCF path, provider, erb and w/o a wrapper' do + let(:params) do + default_params.merge( + { + :ocf_script_template => 'foo/foo.ocf.erb', + :use_handler => false, + :primitive_provider => 'some_provider', + :ocf_root_path => '/usr/lib/some_path', + }) + end + let (:ocf_dir_path) { "#{params[:ocf_root_path]}/resource.d" } + let (:ocf_script_path) { + "#{ocf_dir_path}/#{params[:primitive_provider]}/#{params[:primitive_type]}" + } + + it 'should create an OCF file from template' do + should contain_file("#{title}-ocf-file").with( + { + 'path' => ocf_script_path, + 'mode' => '0755', + 'owner' => 'root', + 'group' => 'root' + }).with_content(/erb/) + end + + it 'should not create a handler file' do + should_not contain_file("#{params[:ocf_handler_name]}") + end + + it 'should create a pacemaker primitive' do + should contain_cs_primitive('p_foo-api').with( + { + 'ensure' => params[:ensure], + 'primitive_class' => params[:primitive_class], + 'primitive_type' => params[:primitive_type], + 'provided_by' => params[:primitive_provider], + 'parameters' => params[:parameters], + 'operations' => params[:operations], + 'metadata' => params[:metadata], + 'ms_metadata' => params[:ms_metadata], + }) + end + end + + context 'with cloned resources' do + let (:params) do + default_params.merge( + { + :clone => true, + }) + end + it 'should create a cloned resource' do + should contain_cs_clone('p_foo-api-clone').with( + { + 'ensure' => 'present', + 'primitive' => 'p_foo-api', + }) + end + end + +end diff --git a/openstack_extras/spec/fixtures/manifests/site.pp b/openstack_extras/spec/fixtures/manifests/site.pp new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/openstack_extras/spec/fixtures/manifests/site.pp @@ -0,0 +1 @@ + diff --git a/openstack_extras/spec/fixtures/modules/foo/files/scripts/foo.ocf b/openstack_extras/spec/fixtures/modules/foo/files/scripts/foo.ocf new file mode 100644 index 000000000..e69de29bb diff --git a/openstack_extras/spec/fixtures/modules/foo/manifests/init.pp b/openstack_extras/spec/fixtures/modules/foo/manifests/init.pp new file mode 100644 index 000000000..1e367d176 --- /dev/null +++ b/openstack_extras/spec/fixtures/modules/foo/manifests/init.pp @@ -0,0 +1,3 @@ +class foo () { + service { 'foo-api': } +} diff --git a/openstack_extras/spec/fixtures/modules/foo/templates/foo.ocf.erb b/openstack_extras/spec/fixtures/modules/foo/templates/foo.ocf.erb new file mode 100644 index 000000000..c1506805d --- /dev/null +++ b/openstack_extras/spec/fixtures/modules/foo/templates/foo.ocf.erb @@ -0,0 +1 @@ +erb diff --git a/openstack_extras/spec/spec_helper.rb b/openstack_extras/spec/spec_helper.rb new file mode 100644 index 000000000..3d9200524 --- /dev/null +++ b/openstack_extras/spec/spec_helper.rb @@ -0,0 +1 @@ +require 'puppetlabs_spec_helper/module_spec_helper' \ No newline at end of file diff --git 
a/openstack_extras/spec/unit/puppet/provider/cib.xml b/openstack_extras/spec/unit/puppet/provider/cib.xml new file mode 100644 index 000000000..4fdbf3d6f --- /dev/null +++ b/openstack_extras/spec/unit/puppet/provider/cib.xml @@ -0,0 +1,483 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/openstack_extras/spec/unit/puppet/provider/pacemaker_common_spec.rb b/openstack_extras/spec/unit/puppet/provider/pacemaker_common_spec.rb new file mode 100644 index 000000000..902cc2766 --- /dev/null +++ b/openstack_extras/spec/unit/puppet/provider/pacemaker_common_spec.rb @@ -0,0 +1,222 @@ +require 'spec_helper' +require File.expand_path(File.join(File.dirname(__FILE__), '../../../../lib/puppet/provider/pacemaker_common.rb')) + +describe Puppet::Provider::Pacemaker_common do + + cib_xml_file = File.join File.dirname(__FILE__), 'cib.xml' + + let(:raw_cib) do + File.read cib_xml_file + end + + let(:resources_regexp) do + %r{nova|cinder|glance|keystone|neutron|sahara|murano|ceilometer|heat|swift} + end + + ########################### + + #-> Cloned primitive 'clone_p_neutron-plugin-openvswitch-agent' global status: start + #node-1: start | node-2: stop | node-3: stop + #-> Cloned primitive 'clone_ping_vip__public' global status: start + #node-1: start | node-2: start | node-3: start + #-> Cloned primitive 'clone_p_neutron-metadata-agent' global status: start + #node-1: start | node-2: stop | node-3: stop + #-> Simple primitive 'vip__management' global status: start + #node-1: start | node-2: stop | node-3: stop + #-> Cloned primitive 'clone_p_mysql' global status: start + #node-1: start | node-2: start | node-3: stop + #-> Multistate primitive 'master_p_rabbitmq-server' global status: master + #node-1: master | node-2: start | node-3: stop + #-> Cloned primitive 'clone_p_haproxy' global status: start + #node-1: start | node-2: start | node-3: stop + #-> Simple primitive 'p_ceilometer-alarm-evaluator' global status: stop + #node-1: stop | node-2: stop (FAIL) | node-3: stop (FAIL) + #-> Simple primitive 'p_ceilometer-agent-central' global status: stop + #node-1: stop | node-2: stop (FAIL) | node-3: stop (FAIL) + #-> Cloned primitive 'clone_p_neutron-l3-agent' global status: start + #node-1: start | node-2: stop | node-3: stop + #-> Simple primitive 'p_neutron-dhcp-agent' global status: start + #node-1: start | node-2: stop | node-3: stop + #-> Simple primitive 'vip__public' global status: start + #node-1: start | node-2: stop | node-3: stop + #-> Simple primitive 'p_heat-engine' global status: start + #node-1: start | node-2: stop | node-3: stop + + before(:each) do + @class = subject + 
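+    # The stubs below feed the provider the bundled cib.xml fixture and
+    # short-circuit the pcs command, so the CIB parser can be exercised
+    # without a live Pacemaker cluster.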
@class.stubs(:raw_cib).returns raw_cib + @class.stubs(:pcs).returns true + end + + context 'configuration parser' do + it 'can obtain a CIB XML object' do + expect(@class.cib.to_s).to include '' + expect(@class.cib.to_s).to include '' + expect(@class.cib.to_s).to include '' + expect(@class.cib.to_s).to include '' + expect(@class.cib.to_s).to include '' + end + + it 'can get primitives section of CIB XML' do + expect(@class.cib_section_primitives).to be_a(Array) + expect(@class.cib_section_primitives.first.to_s).to start_with '' + end + + it 'can get primitives configuration' do + expect(@class.primitives).to be_a Hash + expect(@class.primitives['vip__public']).to be_a Hash + expect(@class.primitives['vip__public']['meta_attributes']).to be_a Hash + expect(@class.primitives['vip__public']['instance_attributes']).to be_a Hash + expect(@class.primitives['vip__public']['instance_attributes']['ip']).to be_a Hash + expect(@class.primitives['vip__public']['operations']).to be_a Hash + expect(@class.primitives['vip__public']['meta_attributes']['resource-stickiness']).to be_a Hash + expect(@class.primitives['vip__public']['operations']['vip__public-start-0']).to be_a Hash + end + + it 'can determine is primitive is simple or complex' do + expect(@class.primitive_is_complex? 'p_haproxy').to eq true + expect(@class.primitive_is_complex? 'vip__management').to eq false + end + end + + context 'node status parser' do + it 'can produce nodes structure' do + expect(@class.nodes).to be_a Hash + expect(@class.nodes['node-1']['primitives']['p_heat-engine']['status']).to eq('start') + #puts @class.get_cluster_debug_report + end + + it 'can determite a global primitive status' do + expect(@class.primitive_status 'p_heat-engine').to eq('start') + expect(@class.primitive_is_running? 'p_heat-engine').to eq true + expect(@class.primitive_status 'p_ceilometer-agent-central').to eq('stop') + expect(@class.primitive_is_running? 'p_ceilometer-agent-central').to eq false + expect(@class.primitive_is_running? 'UNKNOWN').to eq nil + expect(@class.primitive_status 'UNKNOWN').to eq nil + end + + it 'can determine a local primitive status on a node' do + expect(@class.primitive_status 'p_heat-engine', 'node-1').to eq('start') + expect(@class.primitive_is_running? 'p_heat-engine', 'node-1').to eq true + expect(@class.primitive_status 'p_heat-engine', 'node-2').to eq('stop') + expect(@class.primitive_is_running? 'p_heat-engine', 'node-2').to eq false + expect(@class.primitive_is_running? 'UNKNOWN', 'node-1').to eq nil + expect(@class.primitive_status 'UNKNOWN', 'node-1').to eq nil + end + + it 'can determine if primitive is managed or not' do + expect(@class.primitive_is_managed? 'p_heat-engine').to eq true + expect(@class.primitive_is_managed? 'p_haproxy').to eq true + expect(@class.primitive_is_managed? 'UNKNOWN').to eq nil + end + + it 'can determine if primitive is started or not' do + expect(@class.primitive_is_started? 'p_heat-engine').to eq true + expect(@class.primitive_is_started? 'p_haproxy').to eq true + expect(@class.primitive_is_started? 'UNKNOWN').to eq nil + end + + it 'can determine if primitive is failed or not globally' do + expect(@class.primitive_has_failures? 'p_ceilometer-agent-central').to eq true + expect(@class.primitive_has_failures? 'p_heat-engine').to eq false + expect(@class.primitive_has_failures? 'UNKNOWN').to eq nil + end + + it 'can determine if primitive is failed or not locally' do + expect(@class.primitive_has_failures? 
'p_ceilometer-agent-central', 'node-1').to eq false + expect(@class.primitive_has_failures? 'p_ceilometer-agent-central', 'node-2').to eq true + expect(@class.primitive_has_failures? 'p_heat-engine', 'node-1').to eq false + expect(@class.primitive_has_failures? 'p_heat-engine', 'node-2').to eq false + expect(@class.primitive_has_failures? 'UNKNOWN', 'node-1').to eq nil + end + + it 'can determine that primitive is complex' do + expect(@class.primitive_is_complex? 'p_haproxy').to eq true + expect(@class.primitive_is_complex? 'p_heat-engine').to eq false + expect(@class.primitive_is_complex? 'p_rabbitmq-server').to eq true + expect(@class.primitive_is_complex? 'UNKNOWN').to eq nil + end + + it 'can determine that primitive is multistate' do + expect(@class.primitive_is_multistate? 'p_haproxy').to eq false + expect(@class.primitive_is_multistate? 'p_heat-engine').to eq false + expect(@class.primitive_is_multistate? 'p_rabbitmq-server').to eq true + expect(@class.primitive_is_multistate? 'UNKNOWN').to eq nil + end + + it 'can determine that primitive has master running' do + expect(@class.primitive_has_master_running? 'p_rabbitmq-server').to eq true + expect(@class.primitive_has_master_running? 'p_heat-engine').to eq false + expect(@class.primitive_has_master_running? 'UNKNOWN').to eq nil + end + + it 'can determine that primitive is clone' do + expect(@class.primitive_is_clone? 'p_haproxy').to eq true + expect(@class.primitive_is_clone? 'p_heat-engine').to eq false + expect(@class.primitive_is_clone? 'p_rabbitmq-server').to eq false + expect(@class.primitive_is_clone? 'UNKNOWN').to eq nil + end + + end + + context 'cluster control' do + it 'can enable maintenance mode' do + @class.expects(:pcs).with 'property', 'set', 'maintenance-mode=true' + @class.maintenance_mode 'true' + end + + it 'can disable maintenance mode' do + @class.expects(:pcs).with 'property', 'set', 'maintenance-mode=false' + @class.maintenance_mode 'false' + end + + it 'can set no-quorum policy' do + @class.expects(:pcs).with 'property', 'set', 'no-quorum-policy=ignore' + @class.no_quorum_policy 'ignore' + end + end + + context 'constraints control' do + it 'can add location constraint' do + @class.expects(:pcs).with 'constraint', 'location', 'add', 'myprimitive_on_mynode', 'myprimitive', 'mynode', '200' + @class.constraint_location_add 'myprimitive', 'mynode', '200' + end + + it 'can remove location constraint' do + @class.expects(:pcs).with 'constraint', 'location', 'remove', 'myprimitive_on_mynode' + @class.constraint_location_remove 'myprimitive', 'mynode' + end + end + + context 'wait functions' do + it 'retries block until it becomes true' do + @class.retry_block_until_true { true } + end + + it 'waits for Pacemaker to become ready' do + @class.stubs(:is_online?).returns true + @class.wait_for_online + end + + it 'cleanups primitive and waits for it to become online again' do + @class.stubs(:cleanup_primitive).with('myprimitive', 'mynode').returns true + @class.stubs(:cib_reset).returns true + @class.stubs(:primitive_status).returns 'stopped' + @class.cleanup_with_wait 'myprimitive', 'mynode' + end + + it 'waits for the service to start' do + @class.stubs(:cib_reset).returns true + @class.stubs(:primitive_is_running?).with('myprimitive', nil).returns true + @class.wait_for_start 'myprimitive' + end + + it 'waits for the service to stop' do + @class.stubs(:cib_reset).returns true + @class.stubs(:primitive_is_running?).with('myprimitive', nil).returns false + @class.wait_for_stop 'myprimitive' + end + end + +end diff 
--git a/openstack_extras/spec/unit/puppet/provider/service/pacemaker_spec.rb b/openstack_extras/spec/unit/puppet/provider/service/pacemaker_spec.rb new file mode 100644 index 000000000..96e443176 --- /dev/null +++ b/openstack_extras/spec/unit/puppet/provider/service/pacemaker_spec.rb @@ -0,0 +1,235 @@ +require 'spec_helper' + +describe Puppet::Type.type(:service).provider(:pacemaker) do + + let(:resource) { Puppet::Type.type(:service).new(:name => title, :provider=> :pacemaker) } + let(:provider) { resource.provider } + let(:title) { 'myservice' } + let(:full_name) { 'clone-p_myservice' } + let(:name) { 'p_myservice' } + let(:hostname) { 'mynode' } + let(:primitive_class) { 'ocf' } + + before :each do + @class = provider + + @class.stubs(:title).returns(title) + @class.stubs(:hostname).returns(hostname) + @class.stubs(:name).returns(name) + @class.stubs(:full_name).returns(full_name) + @class.stubs(:basic_service_name).returns(title) + @class.stubs(:primitive_class).returns(primitive_class) + + @class.stubs(:cib_reset).returns(true) + + @class.stubs(:wait_for_online).returns(true) + @class.stubs(:cleanup_with_wait).returns(true) + @class.stubs(:wait_for_start).returns(true) + @class.stubs(:wait_for_stop).returns(true) + + @class.stubs(:disable_basic_service).returns(true) + @class.stubs(:get_primitive_puppet_status).returns(:started) + @class.stubs(:get_primitive_puppet_enable).returns(:true) + + @class.stubs(:primitive_is_managed?).returns(true) + @class.stubs(:primitive_is_running?).returns(true) + @class.stubs(:primitive_has_failures?).returns(false) + @class.stubs(:primitive_is_complex?).returns(false) + @class.stubs(:primitive_is_multistate?).returns(false) + @class.stubs(:primitive_is_clone?).returns(false) + + @class.stubs(:unban_primitive).returns(true) + @class.stubs(:ban_primitive).returns(true) + @class.stubs(:start_primitive).returns(true) + @class.stubs(:stop_primitive).returns(true) + @class.stubs(:enable).returns(true) + @class.stubs(:disable).returns(true) + + @class.stubs(:constraint_location_add).returns(true) + @class.stubs(:constraint_location_remove).returns(true) + + @class.stubs(:get_cluster_debug_report).returns(true) + end + + context 'service name mangling' do + it 'uses title as the service name if it is found in CIB' do + @class.unstub(:name) + @class.stubs(:primitive_exists?).with(title).returns(true) + expect(@class.name).to eq(title) + end + + it 'uses "p_" prefix with name if found name with prefix' do + @class.unstub(:name) + @class.stubs(:primitive_exists?).with(title).returns(false) + @class.stubs(:primitive_exists?).with(name).returns(true) + expect(@class.name).to eq(name) + end + + it 'uses name without "p_" to disable basic service' do + @class.stubs(:name).returns(name) + expect(@class.basic_service_name).to eq(title) + end + end + + context '#status' do + it 'should wait for pacemaker to become online' do + @class.expects(:wait_for_online) + @class.status + end + + it 'should reset cib mnemoization on every call' do + @class.expects(:cib_reset) + @class.status + end + + it 'gets service status locally' do + @class.expects(:get_primitive_puppet_status).with name, hostname + @class.status + end + + end + + context '#start' do + it 'tries to enable service if it is not enabled to work with it' do + @class.stubs(:primitive_is_managed?).returns(false) + @class.expects(:enable).once + @class.start + @class.stubs(:primitive_is_managed?).returns(true) + @class.expects(:enable).never + @class.start + end + + it 'tries to disable a basic service with the same 
name' do + @class.expects(:disable_basic_service) + @class.start + end + + it 'should cleanup a primitive only if there are errors' do + @class.stubs(:primitive_has_failures?).returns(true) + @class.expects(:cleanup_with_wait).once + @class.start + @class.stubs(:primitive_has_failures?).returns(false) + @class.expects(:cleanup_with_wait).never + @class.start + end + + it 'tries to unban the service on the node by the name' do + @class.expects(:unban_primitive).with(name, hostname) + @class.start + end + + it 'tries to start the service by its name' do + @class.expects(:start_primitive).with(name) + @class.start + end + + it 'adds a location constraint for the service by its name' do + @class.expects(:constraint_location_add).with(name, hostname) + @class.start + end + + it 'waits for the service to start locally if primitive is clone' do + @class.stubs(:primitive_is_clone?).returns(true) + @class.stubs(:primitive_is_multistate?).returns(false) + @class.stubs(:primitive_is_complex?).returns(true) + @class.expects(:wait_for_start).with name + @class.start + end + + it 'waits for the service to start master anywhere if primitive is multistate' do + @class.stubs(:primitive_is_clone?).returns(false) + @class.stubs(:primitive_is_multistate?).returns(true) + @class.stubs(:primitive_is_complex?).returns(true) + @class.expects(:wait_for_master).with name + @class.start + end + + it 'waits for the service to start anywhere if primitive is simple' do + @class.stubs(:primitive_is_clone?).returns(false) + @class.stubs(:primitive_is_multistate?).returns(false) + @class.stubs(:primitive_is_complex?).returns(false) + @class.expects(:wait_for_start).with name + @class.start + end + end + + context '#stop' do + it 'tries to enable service if it is not enabled to work with it' do + @class.stubs(:primitive_is_managed?).returns(false) + @class.expects(:enable).once + @class.start + @class.stubs(:primitive_is_managed?).returns(true) + @class.expects(:enable).never + @class.start + end + + it 'should cleanup a primitive only if there are errors' do + @class.stubs(:primitive_has_failures?).returns(true) + @class.expects(:cleanup_with_wait).once + @class.start + @class.stubs(:primitive_has_failures?).returns(false) + @class.expects(:cleanup_with_wait).never + @class.start + end + + it 'uses Ban to stop the service and waits for it to stop locally if service is complex' do + @class.stubs(:primitive_is_complex?).returns(true) + @class.expects(:wait_for_stop).with name, hostname + @class.expects(:ban_primitive).with name, hostname + @class.stop + end + + it 'uses Stop to stop the service and waits for it to stop globally if service is simple' do + @class.stubs(:primitive_is_complex?).returns(false) + @class.expects(:wait_for_stop).with name + @class.expects(:stop_primitive).with name + @class.stop + end + end + + context '#restart' do + it 'does not stop or start the service if it is not locally running' do + @class.stubs(:primitive_is_running?).with(name, hostname).returns(false) + @class.expects(:stop).never + @class.expects(:start).never + @class.restart + end + + it 'stops and start the service if it is locally running' do + @class.stubs(:primitive_is_running?).with(name, hostname).returns(true) + restart_sequence = sequence('restart') + @class.expects(:stop).in_sequence(restart_sequence) + @class.expects(:start).in_sequence(restart_sequence) + @class.restart + end + end + + context 'basic service handling' do + before :each do + @class.unstub(:disable_basic_service) + 
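+      # Simulate an enabled, running basic (non-Pacemaker) service so that
+      # disable_basic_service has something to disable and stop.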
@class.extra_provider.stubs(:enableable?).returns true + @class.extra_provider.stubs(:enabled?).returns :true + @class.extra_provider.stubs(:disable).returns true + @class.extra_provider.stubs(:stop).returns true + @class.extra_provider.stubs(:status).returns :running + end + + it 'tries to disable the basic service if it is enabled' do + @class.extra_provider.expects(:disable) + @class.disable_basic_service + end + + it 'tries to stop the service if it is running' do + @class.extra_provider.expects(:stop) + @class.disable_basic_service + end + + it 'does not try to stop a systemd running service' do + @class.stubs(:primitive_class).returns('systemd') + @class.extra_provider.expects(:stop).never + @class.disable_basic_service + end + end + +end + diff --git a/openstack_extras/templates/ocf_handler.erb b/openstack_extras/templates/ocf_handler.erb new file mode 100644 index 000000000..cd22f0d3b --- /dev/null +++ b/openstack_extras/templates/ocf_handler.erb @@ -0,0 +1,118 @@ +#!/bin/sh +export PATH='/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin' +export OCF_ROOT='<%= @ocf_root_path %>' +export OCF_RA_VERSION_MAJOR='1' +export OCF_RA_VERSION_MINOR='0' +export OCF_RESOURCE_INSTANCE='<%= @primitive_name %>' + +# OCF Parameters +<% if @parameters.is_a? Hash -%> + <% @parameters.each do |k,v| -%> + <% v = v.to_s -%> + <% v = v + "'" unless v.end_with? "'" -%> + <% v = "'" + v unless v.start_with? "'" -%> + <%= "export OCF_RESKEY_#{k}=#{v}" %> + <% end -%> +<% end -%> + +help() { +cat< Pacemaker primitive + +Usage: <%= @ocf_handler_name %> [-dh] (action) + +Options: +-d - Use set -x to debug the shell script +-h - Show this help + +Main actions: +* start +* stop +* monitor +* meta-data +* validate-all + +Multistate: +* promote +* demote +* notify + +Migration: +* migrate_to +* migrate_from + +Optional and unused: +* usage +* help +* status +* reload +* restart +* recover +EOF +} + +red() { + echo -e "\033[31m${1}\033[0m" +} + +green() { + echo -e "\033[32m${1}\033[0m" +} + +blue() { + echo -e "\033[34m${1}\033[0m" +} + +ec2error() { + case "${1}" in + 0) green 'Success' ;; + 1) red 'Error: Generic' ;; + 2) red 'Error: Arguments' ;; + 3) red 'Error: Unimplemented' ;; + 4) red 'Error: Permissions' ;; + 5) red 'Error: Installation' ;; + 6) red 'Error: Configuration' ;; + 7) blue 'Not Running' ;; + 8) green 'Master Running' ;; + 9) red 'Master Failed' ;; + *) red "Unknown" ;; + esac +} + +DEBUG='0' +while getopts ':dh' opt; do + case $opt in + d) + DEBUG='1' + ;; + h) + help + exit 0 + ;; + \?) 
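+    # Unknown flags: report the invalid option, print the usage text and exit with an error.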
+ echo "Invalid option: -${OPTARG}" >&2 + help + exit 1 + ;; + esac +done + +shift "$((OPTIND - 1))" + +ACTION="${1}" + +# set default action to monitor +if [ "${ACTION}" = '' ]; then + ACTION='monitor' +fi + +if [ "${DEBUG}" = '1' ]; then + bash -x <%= @ocf_script_path %> "${ACTION}" +else + <%= @ocf_script_path %> "${ACTION}" +fi +ec="${?}" + +message="$(ec2error ${ec})" +echo "Exit status: ${message} (${ec})" +exit "${ec}" diff --git a/openstack_extras/templates/openrc.erb b/openstack_extras/templates/openrc.erb new file mode 100644 index 000000000..9b39f39e1 --- /dev/null +++ b/openstack_extras/templates/openrc.erb @@ -0,0 +1,17 @@ +#!/bin/sh +<% if @service_token -%> +export OS_SERVICE_TOKEN='<%= @service_token.gsub(/'/){ %q(\') } %>' +export OS_SERVICE_ENDPOINT='<%= @service_endpoint %>' +<% end -%> +export OS_NO_CACHE='<%= @use_no_cache %>' +export OS_TENANT_NAME='<%= @tenant_name %>' +export OS_USERNAME='<%= @username %>' +export OS_PASSWORD='<%= @password.gsub(/'/){ %q(\') } %>' +export OS_AUTH_URL='<%= @auth_url %>' +export OS_AUTH_STRATEGY='<%= @auth_strategy %>' +export OS_REGION_NAME='<%= @region_name %>' +export CINDER_ENDPOINT_TYPE='<%= @cinder_endpoint_type %>' +export GLANCE_ENDPOINT_TYPE='<%= @glance_endpoint_type %>' +export KEYSTONE_ENDPOINT_TYPE='<%= @keystone_endpoint_type %>' +export NOVA_ENDPOINT_TYPE='<%= @nova_endpoint_type %>' +export NEUTRON_ENDPOINT_TYPE='<%= @neutron_endpoint_type %>' diff --git a/tripleo/.gitignore b/tripleo/.gitignore new file mode 100644 index 000000000..9897b6f7e --- /dev/null +++ b/tripleo/.gitignore @@ -0,0 +1,5 @@ +*.swp +spec/fixtures/modules/* +pkg +Gemfile.lock +.vagrant/* diff --git a/tripleo/.gitreview b/tripleo/.gitreview new file mode 100644 index 000000000..fd60bc5a1 --- /dev/null +++ b/tripleo/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.openstack.org +port=29418 +project=stackforge/puppet-tripleo.git diff --git a/tripleo/Gemfile b/tripleo/Gemfile new file mode 100644 index 000000000..790afc57c --- /dev/null +++ b/tripleo/Gemfile @@ -0,0 +1,24 @@ +source 'https://rubygems.org' + +group :development, :test do + gem 'puppetlabs_spec_helper', :require => false + gem 'puppet-lint' + gem 'puppet-lint-param-docs', '1.1.0' + gem 'metadata-json-lint' + gem 'rake', '10.1.1' + gem 'puppet-syntax' + gem 'rspec-puppet', :git => 'https://github.com/rodjek/rspec-puppet.git' + gem 'rspec' + gem 'json' + gem 'webmock' + gem 'r10k' + gem 'librarian-puppet-simple', '~> 0.0.3' +end + +if puppetversion = ENV['PUPPET_GEM_VERSION'] + gem 'puppet', puppetversion, :require => false +else + gem 'puppet', :require => false +end + +# vim:ft=ruby diff --git a/tripleo/LICENSE b/tripleo/LICENSE new file mode 100644 index 000000000..68c771a09 --- /dev/null +++ b/tripleo/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + diff --git a/tripleo/README.md b/tripleo/README.md new file mode 100644 index 000000000..2d7d568d6 --- /dev/null +++ b/tripleo/README.md @@ -0,0 +1,3 @@ +# puppet-tripleo + +Lightweight composition layer for Puppet TripleO. diff --git a/tripleo/Rakefile b/tripleo/Rakefile new file mode 100644 index 000000000..56dc07de3 --- /dev/null +++ b/tripleo/Rakefile @@ -0,0 +1,7 @@ +require 'puppetlabs_spec_helper/rake_tasks' +require 'puppet-lint/tasks/puppet-lint' + +PuppetLint.configuration.fail_on_warnings = true +PuppetLint.configuration.send('disable_80chars') +PuppetLint.configuration.send('disable_class_parameter_defaults') +PuppetLint.configuration.send('disable_class_inherits_from_params_class') diff --git a/tripleo/lib/puppet/provider/package/norpm.rb b/tripleo/lib/puppet/provider/package/norpm.rb new file mode 100644 index 000000000..1616e5720 --- /dev/null +++ b/tripleo/lib/puppet/provider/package/norpm.rb @@ -0,0 +1,36 @@ +# Copyright 2015 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+require 'puppet/provider/package'
+
+Puppet::Type.type(:package).provide :norpm, :source => :rpm, :parent => :rpm do
+ desc "RPM packaging provider that does not install anything."
+
+ def latest
+ @resource.fail "'latest' is unsupported by this provider."
+ end
+
+ def install
+ true
+ end
+
+ def uninstall
+ true
+ end
+
+ def update
+ true
+ end
+
+end
diff --git a/tripleo/manifests/database/mysql.pp b/tripleo/manifests/database/mysql.pp new file mode 100644 index 000000000..dae80b3e5 --- /dev/null +++ b/tripleo/manifests/database/mysql.pp @@ -0,0 +1,367 @@
+#
+# Copyright (C) 2015 eNovance SAS
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# == Class: tripleo::database::mysql
+#
+# Configure a MySQL server for TripleO, with or without HA.
+#
+# === Parameters
+#
+# [*bind_address*]
+# (optional) IP address to bind the MySQL daemon to.
+# Defaults to undef
+#
+# [*mysql_root_password*]
+# (optional) MySQL root password.
+# Defaults to 'secrete'
+#
+# [*mysql_sys_maint_password*]
+# (optional) The MySQL debian-sys-maint password.
+# Debian-only parameter.
+# Defaults to 'sys-maint'
+#
+# [*galera_clustercheck_dbpassword*]
+# (optional) The MySQL password for the Galera cluster check.
+# Defaults to 'secrete'
+#
+# [*galera_clustercheck_dbuser*]
+# (optional) The MySQL username for the Galera cluster check (using the monitoring database).
+# Defaults to 'clustercheck'
+#
+# [*galera_clustercheck_ipaddress*]
+# (optional) The name or IP address of the host running the monitoring database (clustercheck).
+# Defaults to undef
+#
+# [*galera_gcache*]
+# (optional) Size of the Galera gcache, passed to
+# wsrep_provider_options, for master/slave mode.
+# Defaults to '1G'
+#
+# [*galera_master*]
+# (optional) Hostname or IP of the Galera master node; database and user
+# resources are created on this node and propagated to the cluster.
+# Setting this to false disables MySQL HA and runs a single-node setup.
+# Defaults to false
+#
+# [*controller_host*]
+# (optional) Array of internal IPs of the controller nodes.
+# They need access to all OpenStack databases.
+# Defaults to false
+#
+# [*database_host*]
+# (optional) Array of internal IPs of the database nodes.
+# Used to bootstrap the Galera cluster.
+# Defaults to false
+#
+# [*ceilometer_database_connection*]
+# (optional) URL to connect at Ceilometer database.
+# Example: 'mysql://user:password@host/database'
+# Defaults to undef
+#
+# [*cinder_database_connection*]
+# (optional) URL to connect at Cinder database.
+# Example: 'mysql://user:password@host/database'
+# Defaults to undef
+#
+# [*glance_database_connection*]
+# (optional) URL to connect at Glance database.
+# Example: 'mysql://user:password@host/database' +# Defaults to undef +# +# [*heat_database_connection*] +# (optional) URL to connect at Heat database. +# Example: 'mysql://user:password@host/database' +# Defaults to undef +# +# [*keystone_database_connection*] +# (optional) URL to connect at Keystone database. +# Example: 'mysql://user:password@host/database' +# Defaults to undef +# +# [*neutron_database_connection*] +# (optional) URL to connect at Neutron database. +# Example: 'mysql://user:password@host/database' +# Defaults to undef +# +# [*nova_database_connection*] +# (optional) URL to connect at Nova database. +# Example: 'mysql://user:password@host/database' +# Defaults to undef +# +class tripleo::database::mysql ( + $bind_address = undef, + $mysql_root_password = 'secrete', + $mysql_sys_maint_password = 'sys-maint', + $galera_clustercheck_dbpassword = 'secrete', + $galera_clustercheck_dbuser = 'clustercheck', + $galera_clustercheck_ipaddress = undef, + $galera_gcache = '1G', + $galera_master = false, + $controller_host = false, + $database_host = false, + $ceilometer_database_connection = undef, + $cinder_database_connection = undef, + $glance_database_connection = undef, + $heat_database_connection = undef, + $keystone_database_connection = undef, + $neutron_database_connection = undef, + $nova_database_connection = undef, +) { + + include ::xinetd + + $gcomm_definition = inline_template('<%= @database_host.join(",") + "?pc.wait_prim=no" -%>') + + # If HA enabled + if $galera_master { + # Specific to Galera master node + if $::hostname == $galera_master { + mysql_database { 'monitoring': + ensure => 'present', + charset => 'utf8', + collate => 'utf8_unicode_ci', + require => File['/root/.my.cnf'] + } + mysql_user { "${galera_clustercheck_dbuser}@localhost": + ensure => 'present', + password_hash => mysql_password($galera_clustercheck_dbpassword), + require => File['/root/.my.cnf'] + } + mysql_grant { "${galera_clustercheck_dbuser}@localhost/monitoring": + ensure => 'present', + options => ['GRANT'], + privileges => ['ALL'], + table => 'monitoring.*', + user => "${galera_clustercheck_dbuser}@localhost", + } + Database_user<<| |>> + } else { + # NOTE(sileht): Only the master must create the password + # into the database, slave nodes must just use the password. + # The one in the database have been retrieved via galera. + file { "${::root_home}/.my.cnf": + content => "[client]\nuser=root\nhost=localhost\npassword=${mysql_root_password}\n", + owner => 'root', + mode => '0600', + } + } + + # Specific to Red Hat or Debian systems + case $::osfamily { + 'RedHat': { + $mysql_server_package_name = 'mariadb-galera-server' + $mysql_client_package_name = 'mariadb' + $wsrep_provider = '/usr/lib64/galera/libgalera_smm.so' + $mysql_server_config_file = '/etc/my.cnf' + $mysql_init_file = '/usr/lib/systemd/system/mysql-bootstrap.service' + + if $::hostname == $galera_master { + $mysql_service_name = 'mysql-bootstrap' + } else { + $mysql_service_name = 'mariadb' + } + + # In Red Hat, the package does not perform the mysql db installation. + # We need to do this manually. + # Note: in MariaDB repository, package perform this action in post-install, + # but MariaDB is not packaged for Red Hat / CentOS 7 in MariaDB repository. 
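+ # The exec below initializes the system tables once, by running
+ # /usr/bin/mysql_install_db --rpm --user=mysql on a fresh node;
+ # the 'unless' guard skips it when /var/lib/mysql/mysql already exists.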
+ exec { 'bootstrap-mysql': + command => '/usr/bin/mysql_install_db --rpm --user=mysql', + unless => 'test -d /var/lib/mysql/mysql', + before => Service['mysqld'], + require => [Package[$mysql_server_package_name], File[$mysql_server_config_file]] + } + + } + 'Debian': { + $mysql_server_package_name = 'mariadb-galera-server' + $mysql_client_package_name = 'mariadb-client' + $wsrep_provider = '/usr/lib/galera/libgalera_smm.so' + $mysql_server_config_file = '/etc/mysql/my.cnf' + $mysql_init_file = '/etc/init.d/mysql-bootstrap' + + if $::hostname == $galera_master { + $mysql_service_name = 'mysql-bootstrap' + } else { + $mysql_service_name = 'mysql' + } + + mysql_user { 'debian-sys-maint@localhost': + ensure => 'present', + password_hash => mysql_password($mysql_sys_maint_password), + require => File['/root/.my.cnf'] + } + + file{'/etc/mysql/debian.cnf': + ensure => file, + content => template('tripleo/database/debian.cnf.erb'), + owner => 'root', + group => 'root', + mode => '0600', + require => Exec['clean-mysql-binlog'], + } + } + default: { + err "${::osfamily} not supported yet" + } + } + + file { $mysql_init_file : + content => template("tripleo/database/etc_initd_mysql_${::osfamily}"), + owner => 'root', + mode => '0755', + group => 'root', + notify => Service['mysqld'], + before => Package[$mysql_server_package_name], + } + + class { 'mysql::server': + manage_config_file => false, + config_file => $mysql_server_config_file, + package_name => $mysql_server_package_name, + service_name => $mysql_service_name, + override_options => { + 'mysqld' => { + 'bind-address' => $bind_address, + } + }, + root_password => $mysql_root_password, + notify => Service['xinetd'], + } + + file { $mysql_server_config_file: + content => template('tripleo/database/mysql.conf.erb'), + mode => '0644', + owner => 'root', + group => 'root', + notify => [Service['mysqld'],Exec['clean-mysql-binlog']], + require => Package[$mysql_server_package_name], + } + + class { 'mysql::client': + package_name => $mysql_client_package_name, + } + + # Haproxy http monitoring + augeas { 'mysqlchk': + context => '/files/etc/services', + changes => [ + 'ins service-name after service-name[last()]', + 'set service-name[last()] "mysqlchk"', + 'set service-name[. = "mysqlchk"]/port 9200', + 'set service-name[. = "mysqlchk"]/protocol tcp', + ], + onlyif => 'match service-name[. 
= "mysqlchk"] size == 0', + notify => [ Service['xinetd'], Exec['reload_xinetd'] ] + } + file { + '/etc/xinetd.d/mysqlchk': + content => template('tripleo/database/mysqlchk.erb'), + owner => 'root', + group => 'root', + mode => '0755', + require => File['/usr/bin/clustercheck'], + notify => [ Service['xinetd'], Exec['reload_xinetd'] ]; + '/usr/bin/clustercheck': + ensure => present, + content => template('tripleo/database/clustercheck.erb'), + mode => '0755', + owner => 'root', + group => 'root'; + } + + exec{'clean-mysql-binlog': + # first sync take a long time + command => "/bin/bash -c '/usr/bin/mysqladmin --defaults-file=/root/.my.cnf shutdown ; /bin/rm ${::mysql::params::datadir}/ib_logfile*'", + path => '/usr/bin', + notify => Service['mysqld'], + refreshonly => true, + onlyif => "stat ${::mysql::params::datadir}/ib_logfile0 && test `du -sh ${::mysql::params::datadir}/ib_logfile0 | cut -f1` != '256M'", + } + } else { + # When HA is disabled + class { 'mysql::server': + override_options => { + 'mysqld' => { + 'bind-address' => $bind_address, + } + }, + root_password => $mysql_root_password, + } + } + + # On master node (when using Galera) or single node (when no HA) + if $galera_master == $::hostname or ! $galera_master { + # Create all the database schemas + $allowed_hosts = ['%',$controller_host] + $keystone_dsn = split($keystone_database_connection, '[@:/?]') + class { 'keystone::db::mysql': + user => $keystone_dsn[3], + password => $keystone_dsn[4], + host => $keystone_dsn[5], + dbname => $keystone_dsn[6], + allowed_hosts => $allowed_hosts, + } + $glance_dsn = split($glance_database_connection, '[@:/?]') + class { 'glance::db::mysql': + user => $glance_dsn[3], + password => $glance_dsn[4], + host => $glance_dsn[5], + dbname => $glance_dsn[6], + allowed_hosts => $allowed_hosts, + } + $nova_dsn = split($nova_database_connection, '[@:/?]') + class { 'nova::db::mysql': + user => $nova_dsn[3], + password => $nova_dsn[4], + host => $nova_dsn[5], + dbname => $nova_dsn[6], + allowed_hosts => $allowed_hosts, + } + $neutron_dsn = split($neutron_database_connection, '[@:/?]') + class { 'neutron::db::mysql': + user => $neutron_dsn[3], + password => $neutron_dsn[4], + host => $neutron_dsn[5], + dbname => $neutron_dsn[6], + allowed_hosts => $allowed_hosts, + } + $cinder_dsn = split($cinder_database_connection, '[@:/?]') + class { 'cinder::db::mysql': + user => $cinder_dsn[3], + password => $cinder_dsn[4], + host => $cinder_dsn[5], + dbname => $cinder_dsn[6], + allowed_hosts => $allowed_hosts, + } + $heat_dsn = split($heat_database_connection, '[@:/?]') + class { 'heat::db::mysql': + user => $heat_dsn[3], + password => $heat_dsn[4], + host => $heat_dsn[5], + dbname => $heat_dsn[6], + allowed_hosts => $allowed_hosts, + } + $ceilometer_dsn = split($ceilometer_database_connection, '[@:/?]') + class { 'ceilometer::db::mysql': + user => $ceilometer_dsn[3], + password => $ceilometer_dsn[4], + host => $ceilometer_dsn[5], + dbname => $ceilometer_dsn[6], + allowed_hosts => $allowed_hosts, + } + } + +} diff --git a/tripleo/manifests/init.pp b/tripleo/manifests/init.pp new file mode 100644 index 000000000..9f6d77523 --- /dev/null +++ b/tripleo/manifests/init.pp @@ -0,0 +1,23 @@ +# +# Copyright (C) 2015 eNovance SAS +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# == Class: tripleo +# +# Installs the system requirements +# + +class tripleo{ + +} diff --git a/tripleo/manifests/loadbalancer.pp b/tripleo/manifests/loadbalancer.pp new file mode 100644 index 000000000..8a227997e --- /dev/null +++ b/tripleo/manifests/loadbalancer.pp @@ -0,0 +1,519 @@ +# Copyright 2014 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# == Class: tripleo::loadbalancer +# +# Configure an HAProxy/keepalived loadbalancer for TripleO. +# +# === Parameters: +# +# [*controller_host*] +# (optional) Host or group of hosts to load-balance the services +# Can be a string or an array. +# Defaults to undef +# +# [*controller_virtual_ip*] +# (optional) Control IP or group of IPs to bind the pools +# Can be a string or an array. +# Defaults to undef +# +# [*control_virtual_interface*] +# (optional) Interface to bind the control VIP +# Can be a string or an array. +# Defaults to undef +# +# [*public_virtual_interface*] +# (optional) Interface to bind the public VIP +# Can be a string or an array. +# Defaults to undef +# +# [*public_virtual_ip*] +# (optional) Public IP or group of IPs to bind the pools +# Can be a string or an array. 
+# Defaults to undef +# +# [*keystone_admin*] +# (optional) Enable or not Keystone Admin API binding +# Defaults to false +# +# [*keystone_public*] +# (optional) Enable or not Keystone Public API binding +# Defaults to false +# +# [*neutron*] +# (optional) Enable or not Neutron API binding +# Defaults to false +# +# [*cinder*] +# (optional) Enable or not Cinder API binding +# Defaults to false +# +# [*glance_api*] +# (optional) Enable or not Glance API binding +# Defaults to false +# +# [*glance_registry*] +# (optional) Enable or not Glance registry binding +# Defaults to false +# +# [*nova_ec2*] +# (optional) Enable or not Nova EC2 API binding +# Defaults to false +# +# [*nova_osapi*] +# (optional) Enable or not Nova API binding +# Defaults to false +# +# [*nova_metadata*] +# (optional) Enable or not Nova metadata binding +# Defaults to false +# +# [*nova_novncproxy*] +# (optional) Enable or not Nova novncproxy binding +# Defaults to false +# +# [*ceilometer*] +# (optional) Enable or not Ceilometer API binding +# Defaults to false +# +# [*swift_proxy_server*] +# (optional) Enable or not Swift API binding +# Defaults to false +# +# [*heat_api*] +# (optional) Enable or not Heat API binding +# Defaults to false +# +# [*heat_cloudwatch*] +# (optional) Enable or not Heat Cloudwatch API binding +# Defaults to false +# +# [*heat_cfn*] +# (optional) Enable or not Heat CFN API binding +# Defaults to false +# +# [*horizon*] +# (optional) Enable or not Horizon dashboard binding +# Defaults to false +# +# [*mysql*] +# (optional) Enable or not MySQL Galera binding +# Defaults to false +# +# [*rabbitmq*] +# (optional) Enable or not RabbitMQ binding +# Defaults to false +# +class tripleo::loadbalancer ( + $controller_host = undef, + $controller_virtual_ip = undef, + $control_virtual_interface = undef, + $public_virtual_interface = undef, + $public_virtual_ip = undef, + $keystone_admin = false, + $keystone_public = false, + $neutron = false, + $cinder = false, + $glance_api = false, + $glance_registry = false, + $nova_ec2 = false, + $nova_osapi = false, + $nova_metadata = false, + $nova_novncproxy = false, + $ceilometer = false, + $swift_proxy_server = false, + $heat_api = false, + $heat_cloudwatch = false, + $heat_cfn = false, + $horizon = false, + $mysql = false, + $rabbitmq = false, +) { + + case $::osfamily { + 'RedHat': { + $keepalived_name_is_process = false + $keepalived_vrrp_script = 'systemctl status haproxy.service' + } # RedHat + 'Debian': { + $keepalived_name_is_process = true + $keepalived_vrrp_script = undef + } + default: { + warning('Please configure keepalived defaults in tripleo::loadbalancer.') + $keepalived_name_is_process = undef + $keepalived_vrrp_script = undef + } + } + + class { 'keepalived': } + keepalived::vrrp_script { 'haproxy': + name_is_process => $keepalived_name_is_process, + script => $keepalived_vrrp_script, + } + + # KEEPALIVE INSTANCE CONTROL + keepalived::instance { '51': + interface => $control_virtual_interface, + virtual_ips => [join([$controller_virtual_ip, ' dev ', $control_virtual_interface])], + state => 'MASTER', + track_script => ['haproxy'], + priority => 101, + } + + # KEEPALIVE INSTANCE PUBLIC + keepalived::instance { '52': + interface => $public_virtual_interface, + virtual_ips => [join([$public_virtual_ip, ' dev ', $public_virtual_interface])], + state => 'MASTER', + track_script => ['haproxy'], + priority => 101, + } + + sysctl::value { 'net.ipv4.ip_nonlocal_bind': value => '1' } + + class { 'haproxy': + global_options => { + 'log' => '/dev/log 
local0', + 'pidfile' => '/var/run/haproxy.pid', + 'user' => 'haproxy', + 'group' => 'haproxy', + 'daemon' => '', + 'maxconn' => '4000', + }, + defaults_options => { + 'mode' => 'tcp', + 'log' => 'global', + 'retries' => '3', + 'maxconn' => '150', + 'option' => [ 'tcpka', 'tcplog' ], + 'timeout' => [ 'http-request 10s', 'queue 1m', 'connect 10s', 'client 1m', 'server 1m', 'check 10s' ], + }, + } + + haproxy::listen { 'haproxy.stats': + ipaddress => '*', + ports => '1993', + mode => 'http', + options => { + 'stats' => 'enable', + }, + collect_exported => false, + } + + if $keystone_admin { + haproxy::listen { 'keystone_admin': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 35357, + options => { + 'option' => [ 'httpchk GET /' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'keystone_admin': + listening_service => 'keystone_admin', + ports => '35357', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $keystone_public { + haproxy::listen { 'keystone_public': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 5000, + options => { + 'option' => [ 'httpchk GET /' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'keystone_public': + listening_service => 'keystone_public', + ports => '5000', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $neutron { + haproxy::listen { 'neutron': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 9696, + options => { + 'option' => [ 'httpchk GET /' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'neutron': + listening_service => 'neutron', + ports => '9696', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $cinder { + haproxy::listen { 'cinder': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 8776, + options => { + 'option' => [ 'httpchk GET /' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'cinder': + listening_service => 'cinder', + ports => '8776', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $glance_api { + haproxy::listen { 'glance_api': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 9292, + options => { + 'option' => [ 'httpchk GET /' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'glance_api': + listening_service => 'glance_api', + ports => '9292', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $glance_registry { + haproxy::listen { 'glance_registry': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 9191, + options => { + 'option' => [ 'httpchk GET /' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'glance_registry': + listening_service => 'glance_registry', + ports => '9191', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $nova_ec2 { + haproxy::listen { 'nova_ec2': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 8773, + options => { + 'option' => [ 'httpchk GET /' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'nova_ec2': + listening_service => 'nova_ec2', + ports => '8773', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $nova_osapi { + 
haproxy::listen { 'nova_osapi': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 8774, + options => { + 'option' => [ 'httpchk GET /' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'nova_osapi': + listening_service => 'nova_osapi', + ports => '8774', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $nova_metadata { + haproxy::listen { 'nova_metadata': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 8775, + options => { + 'option' => [ 'httpchk GET /' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'nova_metadata': + listening_service => 'nova_metadata', + ports => '8775', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $nova_novncproxy { + haproxy::listen { 'nova_novncproxy': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 6080, + options => { + 'option' => [ 'httpchk GET /' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'nova_novncproxy': + listening_service => 'nova_novncproxy', + ports => '6080', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $ceilometer { + haproxy::listen { 'ceilometer': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 8777, + collect_exported => false, + } + haproxy::balancermember { 'ceilometer': + listening_service => 'ceilometer', + ports => '8777', + ipaddresses => $controller_host, + options => [], + } + } + + if $swift_proxy_server { + haproxy::listen { 'swift_proxy_server': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 8080, + options => { + 'option' => [ 'httpchk GET /info' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'swift_proxy_server': + listening_service => 'swift_proxy_server', + ports => '8080', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $heat_api { + haproxy::listen { 'heat_api': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 8004, + options => { + 'option' => [ 'httpchk GET /' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'heat_api': + listening_service => 'heat_api', + ports => '8004', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $heat_cloudwatch { + haproxy::listen { 'heat_cloudwatch': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 8003, + options => { + 'option' => [ 'httpchk GET /' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'heat_cloudwatch': + listening_service => 'heat_cloudwatch', + ports => '8003', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $heat_cfn { + haproxy::listen { 'heat_cfn': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 8000, + options => { + 'option' => [ 'httpchk GET /' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'heat_cfn': + listening_service => 'heat_cfn', + ports => '8000', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $horizon { + haproxy::listen { 'horizon': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 80, + options => { + 'option' => [ 'httpchk GET /' ] + }, + collect_exported => false, + } + haproxy::balancermember { 
'horizon': + listening_service => 'horizon', + ports => '80', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $mysql { + haproxy::listen { 'mysql': + ipaddress => [$controller_virtual_ip], + ports => 3306, + options => { + 'timeout' => [ 'client 0', 'server 0' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'mysql': + listening_service => 'mysql', + ports => '3306', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + + if $rabbitmq { + haproxy::listen { 'rabbitmq': + ipaddress => [$controller_virtual_ip, $public_virtual_ip], + ports => 5672, + options => { + 'timeout' => [ 'client 0', 'server 0' ] + }, + collect_exported => false, + } + haproxy::balancermember { 'rabbitmq': + listening_service => 'rabbitmq', + ports => '5672', + ipaddresses => $controller_host, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + +} diff --git a/tripleo/spec/shared_examples.rb b/tripleo/spec/shared_examples.rb new file mode 100644 index 000000000..fec0eacc9 --- /dev/null +++ b/tripleo/spec/shared_examples.rb @@ -0,0 +1,5 @@ +shared_examples_for "a Puppet::Error" do |description| + it "with message matching #{description.inspect}" do + expect { is_expected.to have_class_count(1) }.to raise_error(Puppet::Error, description) + end +end diff --git a/tripleo/spec/spec_helper.rb b/tripleo/spec/spec_helper.rb new file mode 100644 index 000000000..0171d5dd0 --- /dev/null +++ b/tripleo/spec/spec_helper.rb @@ -0,0 +1,16 @@ +require 'puppetlabs_spec_helper/module_spec_helper' +require 'shared_examples' + +RSpec.configure do |c| + c.alias_it_should_behave_like_to :it_configures, 'configures' + c.alias_it_should_behave_like_to :it_raises, 'raises' + + c.default_facts = { + :kernel => 'Linux', + :concat_basedir => '/var/lib/puppet/concat', + :memorysize => '1000 MB', + :processorcount => '1', + :puppetversion => '3.7.3', + :uniqueid => '123' + } +end diff --git a/tripleo/templates/database/clustercheck.erb b/tripleo/templates/database/clustercheck.erb new file mode 100644 index 000000000..cc3231b3b --- /dev/null +++ b/tripleo/templates/database/clustercheck.erb @@ -0,0 +1,56 @@ +#!/bin/bash +# Managed by puppet +# +# Script to make a proxy (ie HAProxy) capable of monitoring Galera cluster +# +# Author: Olaf van Zandwijk +# Mehdi Abaakouk +# +# Documentation and download: https://github.com/olafz/percona-clustercheck +# +# Based on the original script from Unai Rodriguez +# +MYSQL_USERNAME='<%= @galera_clustercheck_dbuser %>' +MYSQL_PASSWORD='<%= @galera_clustercheck_dbpassword %>' + +TIMEOUT=10 +ERR_FILE="/dev/null" +AVAILABLE_WHEN_DONOR=0 + +MYSQL_CMDLINE="mysql -nNE --connect-timeout=$TIMEOUT --user=${MYSQL_USERNAME} --password=${MYSQL_PASSWORD} " + +mysql_get_status(){ + ( $MYSQL_CMDLINE -e "SHOW STATUS LIKE '$1';" | tail -1 ) 2>>${ERR_FILE} +} +mysql_get_var(){ + ( $MYSQL_CMDLINE -e "SHOW GLOBAL VARIABLES LIKE '$1';" | tail -1 ) 2>>${ERR_FILE} +} + +http_response(){ + status=$1 + shift + msg="$@" + if [ "$status" == 200 ]; then + /bin/echo -en "HTTP/1.1 200 OK\r\n" + else + /bin/echo -en "HTTP/1.1 503 Service Unavailable\r\n" + fi + /bin/echo -en "Content-Type: text/plain\r\n" + /bin/echo -en "\r\n" + /bin/echo -en "$msg\r\n" + /bin/echo -en "\r\n" +} + + +WSREP_LOCAL_STATE=$(mysql_get_status wsrep_local_state) +WSREP_READY=$(mysql_get_status wsrep_ready) +WSREP_CONNECTED=$(mysql_get_status wsrep_connected) +READY_ONLY=$(mysql_get_var read_only) + +case 
${AVAILABLE_WHEN_DONOR}-${WSREP_LOCAL_STATE}-${WSREP_READY}-${WSREP_CONNECTED}-${READY_ONLY} in + 1-2-ON-ON-OFF|0-4-ON-ON-OFF) http_response 200 "Mariadb Cluster Node is synced, ready and connected." ;; + *-*-OFF-*-*) http_response 503 "Mariadb Cluster Node is not ready." ;; + *-*-*-OFF-*) http_response 503 "Mariadb Cluster Node is not connected" ;; + *-*-*-*-ON) http_response 503 "Mariadb Cluster Node is readonly" ;; + *) http_response 503 "Mariadb Cluster Node is not synced" ;; +esac diff --git a/tripleo/templates/database/debian.cnf.erb b/tripleo/templates/database/debian.cnf.erb new file mode 100644 index 000000000..ddcc44240 --- /dev/null +++ b/tripleo/templates/database/debian.cnf.erb @@ -0,0 +1,13 @@ +# Managed by Puppet +# +[client] +host = localhost +user = debian-sys-maint +password = <%= @mysql_sys_maint_password %> +socket = /var/run/mysqld/mysqld.sock +[mysql_upgrade] +host = localhost +user = debian-sys-maint +password = <%= @mysql_sys_maint_password %> +socket = /var/run/mysqld/mysqld.sock +basedir = /usr diff --git a/tripleo/templates/database/etc_initd_mysql_Debian b/tripleo/templates/database/etc_initd_mysql_Debian new file mode 100755 index 000000000..292251f3f --- /dev/null +++ b/tripleo/templates/database/etc_initd_mysql_Debian @@ -0,0 +1,202 @@ +#!/bin/bash +# +### BEGIN INIT INFO +# Provides: mysql-bootstrap +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Should-Start: $network $named $time +# Should-Stop: $network $named $time +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start and stop the mysql database server daemon +# Description: Controls the main MariaDB database server daemon "mysqld" +# and its wrapper script "mysqld_safe". +### END INIT INFO +# +# Managed by Puppet +# +MYSQLD_STARTUP_TIMEOUT=${MYSQLD_STARTUP_TIMEOUT:-60} +[ -e /etc/mysql/my.cnf ] && \ + MYSQLD_DATA_DIR=$(awk -F= '/^datadir/{print $2}' /etc/mysql/my.cnf | sed -e 's/^ *//') +MYSQLD_DATA_DIR=${MYSQLD_DATA_DIR:-<%= scope.lookupvar('::mysql::datadir') %>} +set -e +set -u +${DEBIAN_SCRIPT_DEBUG:+ set -v -x} + +test -x /usr/sbin/mysqld || exit 0 + +. /lib/lsb/init-functions + +SELF=$(cd $(dirname $0); pwd -P)/$(basename $0) +CONF=/etc/mysql/my.cnf +MYADMIN="/usr/bin/mysqladmin --defaults-file=/etc/mysql/debian.cnf" + +# priority can be overriden and "-s" adds output to stderr +ERR_LOGGER="logger -p daemon.err -t /etc/init.d/mysql -i" + +# Safeguard (relative paths, core dumps..) +cd / +umask 077 + +# mysqladmin likes to read /root/.my.cnf. This is usually not what I want +# as many admins e.g. only store a password without a username there and +# so break my scripts. +export HOME=/etc/mysql/ + +## Fetch a particular option from mysql's invocation. +# +# Usage: void mysqld_get_param option +mysqld_get_param() { + /usr/sbin/mysqld --print-defaults \ + | tr " " "\n" \ + | grep -- "--$1" \ + | tail -n 1 \ + | cut -d= -f2 +} + +## Do some sanity checks before even trying to start mysqld. +sanity_checks() { + # check for config file + if [ ! -r /etc/mysql/my.cnf ]; then + log_warning_msg "$0: WARNING: /etc/mysql/my.cnf cannot be read. See README.Debian.gz" + echo "WARNING: /etc/mysql/my.cnf cannot be read. See README.Debian.gz" | $ERR_LOGGER + fi + + # check for diskspace shortage + datadir=`mysqld_get_param datadir` + if LC_ALL=C BLOCKSIZE= df --portability $datadir/. | tail -n 1 | awk '{ exit ($4>4096) }'; then + log_failure_msg "$0: ERROR: The partition with $datadir is too full!" + echo "ERROR: The partition with $datadir is too full!" 
| $ERR_LOGGER + exit 1 + fi +} + +## Checks if there is a server running and if so if it is accessible. +# +# check_alive insists on a pingable server +# check_dead also fails if there is a lost mysqld in the process list +# +# Usage: boolean mysqld_status [check_alive|check_dead] [warn|nowarn] +mysqld_status () { + ping_output=`$MYADMIN ping 2>&1`; ping_alive=$(( ! $? )) + + ps_alive=0 + pidfile=`mysqld_get_param pid-file` + if [ -f "$pidfile" ] && ps `cat $pidfile` >/dev/null 2>&1; then ps_alive=1; fi + + if [ "$1" = "check_alive" -a $ping_alive = 1 ] || + [ "$1" = "check_dead" -a $ping_alive = 0 -a $ps_alive = 0 ]; then + return 0 # EXIT_SUCCESS + else + if [ "$2" = "warn" ]; then + echo -e "$ps_alive processes alive and '$MYADMIN ping' resulted in\n$ping_output\n" | $ERR_LOGGER -p daemon.debug + fi + return 1 # EXIT_FAILURE + fi +} + +# +# main() +# + +case "${1:-''}" in + 'start') + sanity_checks; + # Start daemon + log_daemon_msg "Starting MariaDB database server" "mysqld" + if mysqld_status check_alive nowarn; then + log_progress_msg "already running" + log_end_msg 0 + else + # Could be removed during boot + test -e /var/run/mysqld || install -m 755 -o mysql -g root -d /var/run/mysqld + + # Start MariaDB! in a Galera setup we want to use + # new-cluster only when the galera cluster hasn't been + # bootstraped + if [ -e ${MYSQLD_DATA_DIR}/grastate.dat ]; then + # normal boot + /usr/bin/mysqld_safe "${@:2}" > /dev/null 2>&1 & + else + # bootstrap boot + log_progress_msg " (Galera bootstrap) " + /usr/bin/mysqld_safe "${@:2}" --wsrep-new-cluster > /dev/null 2>&1 & + fi + + # 6s was reported in #352070 to be too few when using ndbcluster + for i in $(seq 1 "${MYSQLD_STARTUP_TIMEOUT:-30}"); do + sleep 1 + if mysqld_status check_alive nowarn ; then break; fi + log_progress_msg "." + done + if mysqld_status check_alive warn; then + log_end_msg 0 + # Now start mysqlcheck or whatever the admin wants. + output=$(/etc/mysql/debian-start) + [ -n "$output" ] && log_action_msg "$output" + else + log_end_msg 1 + log_failure_msg "Please take a look at the syslog" + fi + fi + ;; + + 'stop') + # * As a passwordless mysqladmin (e.g. via ~/.my.cnf) must be possible + # at least for cron, we can rely on it here, too. (although we have + # to specify it explicit as e.g. sudo environments points to the normal + # users home and not /root) + log_daemon_msg "Stopping MariaDB database server" "mysqld" + if ! mysqld_status check_dead nowarn; then + set +e + shutdown_out=`$MYADMIN shutdown 2>&1`; r=$? + set -e + if [ "$r" -ne 0 ]; then + log_end_msg 1 + [ "$VERBOSE" != "no" ] && log_failure_msg "Error: $shutdown_out" + log_daemon_msg "Killing MariaDB database server by signal" "mysqld" + killall -15 mysqld + server_down= + for i in `seq 1 600`; do + sleep 1 + if mysqld_status check_dead nowarn; then server_down=1; break; fi + done + if test -z "$server_down"; then killall -9 mysqld; fi + fi + fi + + if ! mysqld_status check_dead warn; then + log_end_msg 1 + log_failure_msg "Please stop MariaDB manually and read /usr/share/doc/mariadb-server-5.5/README.Debian.gz!" + exit -1 + else + log_end_msg 0 + fi + ;; + + 'restart') + set +e; $SELF stop; set -e + $SELF start + ;; + + 'reload'|'force-reload') + log_daemon_msg "Reloading MariaDB database server" "mysqld" + $MYADMIN reload + log_end_msg 0 + ;; + + 'status') + if mysqld_status check_alive nowarn; then + log_action_msg "$($MYADMIN version)" + else + log_action_msg "MariaDB is stopped." 
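+ # LSB init scripts signal "program is not running" with exit code 3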
+ exit 3 + fi + ;; + + *) + echo "Usage: $SELF start|stop|restart|reload|force-reload|status" + exit 1 + ;; +esac + diff --git a/tripleo/templates/database/etc_initd_mysql_RedHat b/tripleo/templates/database/etc_initd_mysql_RedHat new file mode 100755 index 000000000..5efdde284 --- /dev/null +++ b/tripleo/templates/database/etc_initd_mysql_RedHat @@ -0,0 +1,48 @@ +# It's not recommended to modify this file in-place, because it will be +# overwritten during package upgrades. If you want to customize, the +# best way is to create a file "/etc/systemd/system/mariadb.service", +# containing +# .include /lib/systemd/system/mariadb.service +# ...make your changes here... +# or create a file "/etc/systemd/system/mariadb.service.d/foo.conf", +# which doesn't need to include ".include" call and which will be parsed +# after the file mariadb.service itself is parsed. +# +# For more info about custom unit files, see systemd.unit(5) or +# http://fedoraproject.org/wiki/Systemd#How_do_I_customize_a_unit_file.2F_add_a_custom_unit_file.3F +# For example, if you want to increase mysql's open-files-limit to 10000, +# you need to increase systemd's LimitNOFILE setting, so create a file named +# "/etc/systemd/system/mariadb.service.d/limits.conf" containing: +# [Service] +# LimitNOFILE=10000 +# Note: /usr/lib/... is recommended in the .include line though /lib/... +# still works. +# Don't forget to reload systemd daemon after you change unit configuration: +# root> systemctl --system daemon-reload +# +# Managed by Puppet +# + +[Unit] +Description=MariaDB database server +After=syslog.target +After=network.target + +[Service] +Type=simple +User=mysql +Group=mysql +ExecStartPre=/usr/libexec/mariadb-prepare-db-dir %n +# Note: we set --basedir to prevent probes that might trigger SELinux alarms, +# per bug #547485 +ExecStart=/usr/bin/mysqld_safe --wsrep-new-cluster --basedir=/usr +ExecStartPost=/usr/libexec/mariadb-wait-ready $MAINPID + +# Give a reasonable amount of time for the server to start up/shut down +TimeoutSec=60 + +# Place temp files in a secure directory, not /tmp +PrivateTmp=true + +[Install] +WantedBy=multi-user.target diff --git a/tripleo/templates/database/mysql.conf.erb b/tripleo/templates/database/mysql.conf.erb new file mode 100644 index 000000000..a997a7b7e --- /dev/null +++ b/tripleo/templates/database/mysql.conf.erb @@ -0,0 +1,69 @@ +# MANAGED BY PUPPET +# +[mysqld] +bind-address = <%= @bind_address %> +default_storage_engine = innodb +collation_server = utf8_general_ci +init_connect = 'SET NAMES utf8' +character_set_server = utf8 +max_connections = 1000 +skip_name_resolve = 1 +connect_timeout = 5 +wait_timeout = 600 +max_allowed_packet = 64M +thread_cache_size = 128 +sort_buffer_size = 4M +bulk_insert_buffer_size = 16M +tmp_table_size = 512M +max_heap_table_size = 128M +query_cache_type = 0 +myisam_recover = BACKUP +key_buffer_size = 16M +open_files_limit = 65535 +table_open_cache = 1024 +table_definition_cache = 500 +myisam_sort_buffer_size = 512M +concurrent_insert = 2 +read_buffer_size = 2M +read_rnd_buffer_size = 1M +slow_query_log = 1 +slow_query_log_file = /var/log/mysql/slow.log +log_error = /var/log/mysql/error.log +long_query_time = 1 +log_slow_verbosity = query_plan +innodb_buffer_pool_size = 512M +innodb_flush_log_at_trx_commit = 1 +innodb_lock_wait_timeout = 50 +innodb_thread_concurrency = 48 +innodb_file_per_table = 1 +innodb_open_files = 65535 +innodb_io_capacity = 1000 +innodb_file_format = Barracuda +innodb_file_format_max = Barracuda +innodb_max_dirty_pages_pct = 50 
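+# The settings below are needed for Galera replication: ROW binlog format
+# and interleaved (mode 2) auto-increment locking are required by wsrep,
+# and the wsrep_* options join this node to the cluster address rendered
+# from the manifest's gcomm definition.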
+binlog_format = ROW
+innodb_autoinc_lock_mode = 2
+innodb_locks_unsafe_for_binlog = 1
+wsrep_provider = "<%= @wsrep_provider %>"
+wsrep_cluster_name = "galera_cluster"
+wsrep_cluster_address = "gcomm://<%= @gcomm_definition %>"
+wsrep_sst_auth = root:<%= @mysql_root_password %>
+wsrep_drupal_282555_workaround = 0
+wsrep_sst_method = rsync
+wsrep_node_address = "<%= @bind_address %>"
+wsrep_node_incoming_address = "<%= @bind_address %>"
+# This is the minimal value (2 * processor count)
+wsrep_slave_threads = "<%= @processorcount.to_i * 2 %>"
+
+# These tweaks assume that the Galera cluster is used in master/slave mode
+wsrep_provider_options = "gcache.size=<%= @galera_gcache %>;gcs.fc_master_slave=1;gcs.fc_limit=256;gcs.fc_factor=0.9"
+
+# These values are used by /usr/bin/innobackupex;
+# wsrep_sst_xtrabackup takes only one configuration file and uses the last one
+# (/etc/mysql/my.cnf is not used)
+datadir = /var/lib/mysql
+tmpdir = /tmp/
+innodb_flush_method = O_DIRECT
+innodb_log_buffer_size = 32M
+innodb_log_file_size = 256M
+innodb_log_files_in_group = 2
diff --git a/tripleo/templates/database/mysqlchk.erb b/tripleo/templates/database/mysqlchk.erb new file mode 100644 index 000000000..f9fea1d01 --- /dev/null +++ b/tripleo/templates/database/mysqlchk.erb @@ -0,0 +1,23 @@
+# Managed by puppet
+#
+# default: on
+# description: mysqlchk
+service mysqlchk
+{
+# this is a config for xinetd, place it in /etc/xinetd.d/
+ disable = no
+ flags = REUSE
+ socket_type = stream
+ port = 9200
+ wait = no
+ user = nobody
+ server = /usr/bin/clustercheck
+ log_on_failure += USERID
+ log_on_success =
+ #FIXME(sbadia) Security: Restrict this parameter to HAProxy pool.
+ only_from = 0.0.0.0/0
+ bind = <%= @galera_clustercheck_ipaddress %>
+ # recommended to list only the IPs that need
+ # to connect (for security purposes)
+ per_source = UNLIMITED
+}
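
A minimal usage sketch for the new tripleo classes follows (illustrative only, not part of the change set above). The host names, interface names, IP addresses, passwords and database URLs are placeholders; a real deployment would take these from its own hiera data or node definitions.

class { '::tripleo::loadbalancer':
  controller_host           => ['192.0.2.11', '192.0.2.12'],
  controller_virtual_ip     => '192.0.2.10',
  public_virtual_ip         => '203.0.113.10',
  control_virtual_interface => 'eth1',
  public_virtual_interface  => 'eth0',
  keystone_admin            => true,
  keystone_public           => true,
  glance_api                => true,
  nova_osapi                => true,
  horizon                   => true,
  mysql                     => true,
  rabbitmq                  => true,
}

class { '::tripleo::database::mysql':
  bind_address                   => '192.0.2.11',
  mysql_root_password            => 'rootpass',
  galera_master                  => 'overcloud-controller-0',
  controller_host                => ['192.0.2.11', '192.0.2.12'],
  database_host                  => ['192.0.2.11', '192.0.2.12'],
  keystone_database_connection   => 'mysql://keystone:secret@192.0.2.10/keystone',
  glance_database_connection     => 'mysql://glance:secret@192.0.2.10/glance',
  nova_database_connection       => 'mysql://nova:secret@192.0.2.10/nova',
  neutron_database_connection    => 'mysql://neutron:secret@192.0.2.10/neutron',
  cinder_database_connection     => 'mysql://cinder:secret@192.0.2.10/cinder',
  heat_database_connection       => 'mysql://heat:secret@192.0.2.10/heat',
  ceilometer_database_connection => 'mysql://ceilometer:secret@192.0.2.10/ceilometer',
}

Note that the database connection URLs must follow the user:password@host/dbname form shown above, since tripleo::database::mysql splits each URL on '[@:/?]' and feeds fields 3-6 (user, password, host, dbname) to the per-service db::mysql classes.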