diff --git a/Puppetfile b/Puppetfile index c7d33ed6c..044a0a081 100644 --- a/Puppetfile +++ b/Puppetfile @@ -1,9 +1,9 @@ mod 'aodh', - :commit => '54f857d36bfa8567e9f6f5f4f8d778377229c702', + :commit => '86d3e7e214a536a88ee4f4c4e26b9e1e36d09ea8', :git => 'https://github.com/openstack/puppet-aodh.git' mod 'apache', - :commit => '13b79e2446ca1ee08301c03bc2ee2579114da84a', + :commit => '13797dadb81b99bd16375ef2d15edd9976edf326', :git => 'https://github.com/puppetlabs/puppetlabs-apache.git' mod 'aviator', @@ -15,11 +15,11 @@ mod 'cassandra', :git => 'https://github.com/locp/cassandra.git' mod 'ceilometer', - :commit => '9869fbee668d35ff1f1838bec6545b1e13c0bfe3', + :commit => 'be054317ef0f2b760bf5ecf43c58faa22a26cc19', :git => 'https://github.com/openstack/puppet-ceilometer.git' mod 'ceph', - :commit => 'c60c6209ac799555b94bf62ce1c2cf00084e42d2', + :commit => 'e4b05caf4825af51f679f0618229dce4c3388a5f', :git => 'https://github.com/stackforge/puppet-ceph.git' mod 'certmonger', @@ -27,7 +27,7 @@ mod 'certmonger', :git => 'https://github.com/rcritten/puppet-certmonger.git' mod 'cinder', - :commit => '181de2ef5119afab5157c290f10e9750507d95d2', + :commit => 'fbcd3d7e0c574865753b51bfab144afe0ded488c', :git => 'https://github.com/openstack/puppet-cinder.git' mod 'common', @@ -39,7 +39,7 @@ mod 'concat', :git => 'https://github.com/puppetlabs/puppetlabs-concat.git' mod 'contrail', - :commit => '63803ba710ade71ce93c6e5c94054f193d66d56e', + :commit => '83471677d5b7b7a1e26c7ccb20f5ab355f41efae', :git => 'https://github.com/redhat-cip/puppet-contrail.git' mod 'corosync', @@ -51,7 +51,7 @@ mod 'datacat', :git => 'http://github.com/richardc/puppet-datacat' mod 'elasticsearch', - :commit => 'b930ab8', + :commit => '171a4a4dc89ad4543f486ed90040b5fbea8d81f0', :git => 'https://github.com/elastic/puppet-elasticsearch.git' mod 'firewall', @@ -67,7 +67,7 @@ mod 'galera', :git => 'https://github.com/redhat-openstack/puppet-galera.git' mod 'glance', - :commit => 'da26542fbbdcf93e454f3c91313c7c2f778f63d0', + :commit => 'c3b685ba0dfd4a0ac78642844d9c16e5f472a78f', :git => 'https://github.com/openstack/puppet-glance.git' mod 'gluster', @@ -75,7 +75,7 @@ mod 'gluster', :git => 'https://github.com/purpleidea/puppet-gluster.git' mod 'gnocchi', - :commit => '3594acae464c01f2475daf853b10ce777e5c3ed0', + :commit => '3b46e6845a7caf355b870ad0cb4b21eb83ef0cbc', :git => 'https://github.com/openstack/puppet-gnocchi.git' mod 'haproxy', @@ -83,11 +83,11 @@ mod 'haproxy', :git => 'https://github.com/puppetlabs/puppetlabs-haproxy.git' mod 'heat', - :commit => '35bf220a375e42ecb175481aa4d97839b4a4e382', + :commit => '057649984af58c5dec0d0466547d993792f42e18', :git => 'https://github.com/openstack/puppet-heat.git' mod 'horizon', - :commit => '93b54f518f4eb283191478d5b2d0b1f9fd9e6389', + :commit => 'c6c0d1aa9b45cb4763d5e43810618e720908b5c7', :git => 'https://github.com/openstack/puppet-horizon.git' mod 'inifile', @@ -99,7 +99,7 @@ mod 'ipa', :git => 'https://github.com/xbezdick/puppet-ipa-1.git' mod 'ironic', - :commit => '5f9c8fc38485e9d974a889de7ea7cbbb1c51fa3c', + :commit => '2c8397256728024860fa9f9a2018b4b5f0d21f15', :git => 'https://github.com/openstack/puppet-ironic.git' mod 'java', @@ -115,15 +115,15 @@ mod 'keepalived', :git => 'https://github.com/Unyonsys/puppet-module-keepalived.git' mod 'keystone', - :commit => 'bee02643562f38af2c5b2ebb4ba65c0e86388952', + :commit => '62f3f6e0fcbfef4563d632867d4a0d8592c6d1a2', :git => 'https://github.com/openstack/puppet-keystone.git' mod 'kibana3', - :commit => '6ca9631', + :commit => 
'6ca9631fbe82766134f98e2e8780bb91e7cd3f0e', :git => 'https://github.com/thejandroman/puppet-kibana3.git' mod 'manila', - :commit => 'b77f1736bc0221acb433b5ffb6aa8f291d83ff13', + :commit => 'b3667a28e570e3889bb5a8a3859808dd3ca88f30', :git => 'https://github.com/openstack/puppet-manila.git' mod 'memcached', @@ -131,7 +131,7 @@ mod 'memcached', :git => 'https://github.com/saz/puppet-memcached.git' mod 'midonet', - :commit => 'e24515c135a03096fc69651dfadac5b545c74538', + :commit => 'a4bb534be34a806811df51874a5bba132ca24724', :git => 'https://github.com/midonet/puppet-midonet.git' mod 'module-collectd', @@ -159,11 +159,11 @@ mod 'nagios', :git => 'https://github.com/gildub/puppet-nagios-openstack.git' mod 'neutron', - :commit => '11f05fff53f7ec9964494d63def3db37fdf9da39', + :commit => '23875c218f802e3cc8f2f8f6fa09d89e97194878', :git => 'https://github.com/openstack/puppet-neutron.git' mod 'nova', - :commit => '5d42ed567197928e09d731c545877a6b957e04b1', + :commit => 'd93b9709af1786ad3b2401c2de2fef9f96bd6827', :git => 'https://github.com/openstack/puppet-nova.git' mod 'nssdb', @@ -175,15 +175,15 @@ mod 'ntp', :git => 'https://github.com/puppetlabs/puppetlabs-ntp' mod 'opendaylight', - :commit => '3aa0ccf', + :commit => '33353e8d869f4956d706fedd6f4f9c8b4b59a4e0', :git => 'https://github.com/dfarrell07/puppet-opendaylight.git' mod 'openstack_extras', - :commit => 'cdeede97b90b5bbb0a54125ec68c36cf7e249370', + :commit => '9bdabc182d54071973cc1dbdc71cb022e7ed7cd2', :git => 'https://github.com/openstack/puppet-openstack_extras.git' mod 'openstacklib', - :commit => '47a0c6392fad4c1fb11811b17968576033a24baa', + :commit => 'f84baa1f695a94c6357468fcac1309066d11e06d', :git => 'https://github.com/openstack/puppet-openstacklib.git' mod 'pacemaker', @@ -199,11 +199,11 @@ mod 'qpid', :git => 'https://github.com/dprince/puppet-qpid' mod 'rabbitmq', - :commit => '151391f03b1d9dcaf895123aa98b0a0d67e30540', + :commit => '3d74c2d77bd482f59ea919e54d24589487221702', :git => 'https://github.com/puppetlabs/puppetlabs-rabbitmq.git' mod 'redis', - :commit => '47b6fe8a64dce5447105ab59aa1fa2984d6ef1d0', + :commit => '62c0c71e59182ebc252a9405db68b7a1538a745e', :git => 'https://github.com/arioch/puppet-redis.git' mod 'remote', @@ -215,11 +215,11 @@ mod 'rsync', :git => 'https://github.com/puppetlabs/puppetlabs-rsync.git' mod 'sahara', - :commit => '6862557a4db48b01176e30be6877fda5503f085b', + :commit => '0c465a03331b45ad2f8606e202d1b1d3a54ed9f3', :git => 'https://github.com/openstack/puppet-sahara.git' mod 'sensu', - :commit => 'bc5e501e9b6c7ac38181c8749fb6a04b24efdb94', + :commit => '553994fb8724f84ec820a0a36b347a148d4495e3', :git => 'https://github.com/sensu/sensu-puppet.git' mod 'snmp', @@ -239,7 +239,7 @@ mod 'stdlib', :git => 'https://github.com/puppetlabs/puppetlabs-stdlib.git' mod 'swift', - :commit => '6145bf61206381643cdcaaabc285f93d75c0c2e4', + :commit => '772bba90f179e71e24b2e26511e57be3897cefc8', :git => 'https://github.com/openstack/puppet-swift.git' mod 'sysctl', @@ -247,7 +247,7 @@ mod 'sysctl', :git => 'https://github.com/puppetlabs/puppetlabs-sysctl.git' mod 'tempest', - :commit => 'e6edd0871739c0dfe6cdc2b22fa5de3ba269f401', + :commit => '9d2f18df7df8cfb361cffeca9ba0c31151915567', :git => 'https://github.com/openstack/puppet-tempest.git' mod 'timezone', @@ -259,7 +259,7 @@ mod 'tomcat', :git => 'https://github.com/puppetlabs/puppetlabs-tomcat.git' mod 'tripleo', - :commit => 'ae595ce7731f2b286d4ffd280ed735b61d86b63c', + :commit => 'd7e457b8af855b2ecc08d94776532660ef56b736', :git => 
'https://github.com/openstack/puppet-tripleo.git' mod 'trove', @@ -271,7 +271,7 @@ mod 'tuskar', :git => 'https://github.com/openstack/puppet-tuskar.git' mod 'uchiwa', - :commit => 'e3dc4fc3d5291d08a23f16843c673932adf450d5', + :commit => 'b3f9ed80d33f6f4f6ca69b60fc3c5aa74fb3ac85', :git => 'https://github.com/Yelp/puppet-uchiwa.git' mod 'vcsrepo', @@ -283,7 +283,7 @@ mod 'vlan', :git => 'https://github.com/derekhiggins/puppet-vlan.git' mod 'vswitch', - :commit => 'ae87d9b0f8f7dbba9151312fb5205e89caf61109', + :commit => 'd3924b0d4d7fe53ec29e11250d9ef597dba8f8c7', :git => 'https://github.com/openstack/puppet-vswitch.git' mod 'xinetd', diff --git a/aodh/CHANGELOG.md b/aodh/CHANGELOG.md new file mode 100644 index 000000000..1185c957a --- /dev/null +++ b/aodh/CHANGELOG.md @@ -0,0 +1,4 @@ +##2015-11-24 - 7.0.0 +###Summary + +- Initial release of the puppet-aodh module diff --git a/aodh/README.md b/aodh/README.md index 234a0a87d..969aa33d3 100644 --- a/aodh/README.md +++ b/aodh/README.md @@ -1,6 +1,8 @@ AODH ==== +7.0.0 - 2015.2.0 - Liberty + #### Table of Contents 1. [Overview - What is the AODH module?](#overview) diff --git a/aodh/Rakefile b/aodh/Rakefile index 3c3603e3c..ed79bead4 100644 --- a/aodh/Rakefile +++ b/aodh/Rakefile @@ -15,7 +15,7 @@ PuppetLint::RakeTask.new :lint do |config| config.ignore_paths = ["spec/**/*.pp", "vendor/**/*.pp"] config.fail_on_warnings = true config.log_format = '%{path}:%{linenumber}:%{KIND}: %{message}' - config.disable_checks = ["80chars", "class_inherits_from_params_class", "class_parameter_defaults", "only_variable_string"] + config.disable_checks = ["80chars", "class_inherits_from_params_class", "only_variable_string"] end desc "Run acceptance tests" diff --git a/aodh/examples/aodh.pp b/aodh/examples/aodh.pp index 26cbdf1c0..1dcb65eb8 100644 --- a/aodh/examples/aodh.pp +++ b/aodh/examples/aodh.pp @@ -9,3 +9,10 @@ class { '::aodh::wsgi::apache': ssl => false, } +class { '::aodh::auth': + auth_password => 'a_big_secret', +} +class { '::aodh::evaluator': } +class { '::aodh::notifier': } +class { '::aodh::listener': } +class { '::aodh::client': } diff --git a/aodh/manifests/api.pp b/aodh/manifests/api.pp index 3b9576569..7d7f5044b 100644 --- a/aodh/manifests/api.pp +++ b/aodh/manifests/api.pp @@ -36,7 +36,7 @@ # # [*port*] # (optional) The aodh api port. -# Defaults to 8777 +# Defaults to 8042 # # [*package_ensure*] # (optional) ensure state for package. @@ -61,7 +61,7 @@ $keystone_auth_uri = false, $keystone_identity_uri = false, $host = '0.0.0.0', - $port = '8777', + $port = '8042', $service_name = $::aodh::params::api_service_name, ) inherits aodh::params { diff --git a/aodh/manifests/auth.pp b/aodh/manifests/auth.pp new file mode 100644 index 000000000..d83551f16 --- /dev/null +++ b/aodh/manifests/auth.pp @@ -0,0 +1,73 @@ +# The aodh::auth class helps configure auth settings +# +# == Parameters +# [*auth_url*] +# the keystone public endpoint +# Optional. Defaults to 'http://localhost:5000/v2.0' +# +# [*auth_region*] +# the keystone region of this node +# Optional. Defaults to 'RegionOne' +# +# [*auth_user*] +# the keystone user for aodh services +# Optional. Defaults to 'aodh' +# +# [*auth_password*] +# the keystone password for aodh services +# Required. +# +# [*auth_tenant_name*] +# the keystone tenant name for aodh services +# Optional. Defaults to 'services' +# +# [*auth_tenant_id*] +# the keystone tenant id for aodh services. +# Optional. Defaults to undef. +# +# [*auth_cacert*] +# Certificate chain for SSL validation. 
Optional; Defaults to 'undef' +# +# [*auth_endpoint_type*] +# Type of endpoint in Identity service catalog to use for +# communication with OpenStack services. +# Optional. Defaults to undef. +# +class aodh::auth ( + $auth_password, + $auth_url = 'http://localhost:5000/v2.0', + $auth_region = 'RegionOne', + $auth_user = 'aodh', + $auth_tenant_name = 'services', + $auth_tenant_id = undef, + $auth_cacert = undef, + $auth_endpoint_type = undef, +) { + + if $auth_cacert { + aodh_config { 'service_credentials/os_cacert': value => $auth_cacert } + } else { + aodh_config { 'service_credentials/os_cacert': ensure => absent } + } + + aodh_config { + 'service_credentials/os_auth_url' : value => $auth_url; + 'service_credentials/os_region_name' : value => $auth_region; + 'service_credentials/os_username' : value => $auth_user; + 'service_credentials/os_password' : value => $auth_password, secret => true; + 'service_credentials/os_tenant_name' : value => $auth_tenant_name; + } + + if $auth_tenant_id { + aodh_config { + 'service_credentials/os_tenant_id' : value => $auth_tenant_id; + } + } + + if $auth_endpoint_type { + aodh_config { + 'service_credentials/os_endpoint_type' : value => $auth_endpoint_type; + } + } + +} diff --git a/aodh/manifests/client.pp b/aodh/manifests/client.pp new file mode 100644 index 000000000..ebffe6c76 --- /dev/null +++ b/aodh/manifests/client.pp @@ -0,0 +1,22 @@ +# +# Installs the aodh python library. +# +# == parameters +# [*ensure*] +# ensure state for pachage. +# +class aodh::client ( + $ensure = 'present' +) { + + include ::aodh::params + + # there is no aodhclient yet + package { 'python-ceilometerclient': + ensure => $ensure, + name => $::aodh::params::client_package_name, + tag => 'openstack', + } + +} + diff --git a/aodh/manifests/db.pp b/aodh/manifests/db.pp index 6d5d521b3..a5ceba4a3 100644 --- a/aodh/manifests/db.pp +++ b/aodh/manifests/db.pp @@ -43,6 +43,8 @@ $database_max_overflow = 20, ) { + include ::aodh::params + $database_connection_real = pick($::aodh::database_connection, $database_connection) $database_idle_timeout_real = pick($::aodh::database_idle_timeout, $database_idle_timeout) $database_min_pool_size_real = pick($::aodh::database_min_pool_size, $database_min_pool_size) @@ -52,7 +54,7 @@ $database_max_overflow_real = pick($::aodh::database_max_overflow, $database_max_overflow) validate_re($database_connection_real, - '(sqlite|mysql|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') + '(sqlite|mysql|postgresql|mongodb):\/\/(\S+:\S+@\S+\/\S+)?') if $database_connection_real { case $database_connection_real { @@ -62,7 +64,11 @@ require 'mysql::bindings::python' } /^postgresql:\/\//: { - $backend_package = $::aodh::params::psycopg_package_name + $backend_package = false + require 'postgresql::lib::python' + } + /^mongodb:\/\//: { + $backend_package = $::aodh::params::pymongo_package_name } /^sqlite:\/\//: { $backend_package = $::aodh::params::sqlite_package_name diff --git a/aodh/manifests/db/mysql.pp b/aodh/manifests/db/mysql.pp index 71078e1bf..6cda0479f 100644 --- a/aodh/manifests/db/mysql.pp +++ b/aodh/manifests/db/mysql.pp @@ -65,5 +65,5 @@ allowed_hosts => $allowed_hosts, } - ::Openstacklib::Db::Mysql['aodh'] ~> Exec<| title == 'aodh-manage db_sync' |> + ::Openstacklib::Db::Mysql['aodh'] ~> Exec<| title == 'aodh-db-sync' |> } diff --git a/aodh/manifests/db/postgresql.pp b/aodh/manifests/db/postgresql.pp index 86e645b41..d34aa9d93 100644 --- a/aodh/manifests/db/postgresql.pp +++ b/aodh/manifests/db/postgresql.pp @@ -50,6 +50,6 @@ privileges => $privileges, } - 
::Openstacklib::Db::Postgresql['aodh'] ~> Exec<| title == 'aodh-manage db_sync' |> + ::Openstacklib::Db::Postgresql['aodh'] ~> Exec<| title == 'aodh-db-sync' |> } diff --git a/aodh/manifests/db/sync.pp b/aodh/manifests/db/sync.pp index 27aa0f80c..a6566c741 100644 --- a/aodh/manifests/db/sync.pp +++ b/aodh/manifests/db/sync.pp @@ -1,14 +1,23 @@ # -# Class to execute "aodh-manage db_sync +# Class to execute "aodh-dbsync" # -class aodh::db::sync { - exec { 'aodh-manage db_sync': +# [*user*] +# (optional) User to run dbsync command. +# Defaults to 'aodh' +# +class aodh::db::sync ( + $user = 'aodh', +){ + exec { 'aodh-db-sync': + command => 'aodh-dbsync --config-file /etc/aodh/aodh.conf', path => '/usr/bin', - user => 'aodh', refreshonly => true, - subscribe => [Package['aodh'], Aodh_config['database/connection']], - require => User['aodh'], + user => $user, + logoutput => on_failure, } - Exec['aodh-manage db_sync'] ~> Service<| title == 'aodh' |> + Package<| tag == 'aodh-package' |> ~> Exec['aodh-db-sync'] + Exec['aodh-db-sync'] ~> Service<| tag == 'aodh-db-sync-service' |> + Aodh_config<||> ~> Exec['aodh-db-sync'] + Aodh_config<| title == 'database/connection' |> ~> Exec['aodh-db-sync'] } diff --git a/aodh/manifests/evaluator.pp b/aodh/manifests/evaluator.pp new file mode 100644 index 000000000..7270b79b5 --- /dev/null +++ b/aodh/manifests/evaluator.pp @@ -0,0 +1,60 @@ +# Installs the aodh evaluator service +# +# == Params +# [*enabled*] +# (optional) Should the service be enabled. +# Defaults to true. +# +# [*manage_service*] +# (optional) Whether the service should be managed by Puppet. +# Defaults to true. +# +# [*package_ensure*] +# (optional) ensure state for package. +# Defaults to 'present' +# +# [*coordination_url*] +# (optional) The url to use for distributed group membership coordination. +# Defaults to undef. +# +class aodh::evaluator ( + $manage_service = true, + $enabled = true, + $package_ensure = 'present', + $coordination_url = undef, +) { + + include ::aodh::params + + Aodh_config<||> ~> Service['aodh-evaluator'] + + if $coordination_url { + aodh_config { + 'coordination/backend_url' : value => $coordination_url; + } + } + + Package[$::aodh::params::evaluator_package_name] -> Service['aodh-evaluator'] + ensure_resource( 'package', [$::aodh::params::evaluator_package_name], + { ensure => $package_ensure, + tag => ['openstack', 'aodh-package'] } + ) + + if $manage_service { + if $enabled { + $service_ensure = 'running' + } else { + $service_ensure = 'stopped' + } + } + + Package['aodh'] -> Service['aodh-evaluator'] + service { 'aodh-evaluator': + ensure => $service_ensure, + name => $::aodh::params::evaluator_service_name, + enable => $enabled, + hasstatus => true, + hasrestart => true, + tag => ['aodh-service','aodh-db-sync-service'] + } +} diff --git a/aodh/manifests/init.pp b/aodh/manifests/init.pp index 58fadaacf..93ab7de18 100644 --- a/aodh/manifests/init.pp +++ b/aodh/manifests/init.pp @@ -89,42 +89,11 @@ # (optional) Define queues as "durable" to rabbitmq. 
# Defaults to false # -# [*qpid_hostname*] -# (optional) Location of qpid server -# Defaults to 'localhost' -# -# [*qpid_port*] -# (optional) Port for qpid server -# Defaults to '5672' -# -# [*qpid_username*] -# (optional) Username to use when connecting to qpid -# Defaults to 'guest' -# -# [*qpid_password*] -# (optional) Password to use when connecting to qpid -# Defaults to 'guest' -# -# [*qpid_heartbeat*] -# (optional) Seconds between connection keepalive heartbeats -# Defaults to 60 -# -# [*qpid_protocol*] -# (optional) Transport to use, either 'tcp' or 'ssl'' -# Defaults to 'tcp' -# -# [*qpid_sasl_mechanisms*] -# (optional) Enable one or more SASL mechanisms -# Defaults to false -# -# [*qpid_tcp_nodelay*] -# (optional) Disable Nagle algorithm -# Defaults to true -# # [*log_dir*] # (optional) Directory where logs should be stored. -# If set to boolean false, it will not log to any directory. -# Defaults to undef +# If set to boolean false or the $::os_service_default, it will not log to +# any directory. +# Defaults to undef. # # [*state_path*] # (optional) Directory for storing state. @@ -196,6 +165,40 @@ # (optional) If set, use this value for max_overflow with sqlalchemy. # Defaults to: undef. # +# DEPRECATED PARAMETERS +# +# [*qpid_hostname*] +# (optional) Location of qpid server +# Defaults to undef +# +# [*qpid_port*] +# (optional) Port for qpid server +# Defaults to undef +# +# [*qpid_username*] +# (optional) Username to use when connecting to qpid +# Defaults to undef +# +# [*qpid_password*] +# (optional) Password to use when connecting to qpid +# Defaults to undef +# +# [*qpid_heartbeat*] +# (optional) Seconds between connection keepalive heartbeats +# Defaults to undef +# +# [*qpid_protocol*] +# (optional) Transport to use, either 'tcp' or 'ssl'' +# Defaults to undef +# +# [*qpid_sasl_mechanisms*] +# (optional) Enable one or more SASL mechanisms +# Defaults to undef +# +# [*qpid_tcp_nodelay*] +# (optional) Disable Nagle algorithm +# Defaults to undef +# class aodh ( $ensure_package = 'present', $rpc_backend = 'rabbit', @@ -215,19 +218,12 @@ $kombu_ssl_version = 'TLSv1', $kombu_reconnect_delay = '1.0', $amqp_durable_queues = false, - $qpid_hostname = 'localhost', - $qpid_port = '5672', - $qpid_username = 'guest', - $qpid_password = 'guest', - $qpid_sasl_mechanisms = false, - $qpid_heartbeat = 60, - $qpid_protocol = 'tcp', - $qpid_tcp_nodelay = true, $verbose = undef, $debug = undef, $use_syslog = undef, $use_stderr = undef, $log_facility = undef, + $log_dir = undef, $notification_driver = undef, $notification_topics = 'notifications', $database_connection = undef, @@ -238,6 +234,15 @@ $database_max_retries = undef, $database_retry_interval = undef, $database_max_overflow = undef, + # DEPRECATED PARAMETERS + $qpid_hostname = undef, + $qpid_port = undef, + $qpid_username = undef, + $qpid_password = undef, + $qpid_sasl_mechanisms = undef, + $qpid_heartbeat = undef, + $qpid_protocol = undef, + $qpid_tcp_nodelay = undef, ) inherits aodh::params { include ::aodh::db @@ -330,30 +335,7 @@ } if $rpc_backend == 'qpid' { - aodh_config { - 'oslo_messaging_qpid/qpid_hostname': value => $qpid_hostname; - 'oslo_messaging_qpid/qpid_port': value => $qpid_port; - 'oslo_messaging_qpid/qpid_username': value => $qpid_username; - 'oslo_messaging_qpid/qpid_password': value => $qpid_password, secret => true; - 'oslo_messaging_qpid/qpid_heartbeat': value => $qpid_heartbeat; - 'oslo_messaging_qpid/qpid_protocol': value => $qpid_protocol; - 'oslo_messaging_qpid/qpid_tcp_nodelay': value => 
$qpid_tcp_nodelay; - } - if is_array($qpid_sasl_mechanisms) { - aodh_config { - 'oslo_messaging_qpid/qpid_sasl_mechanisms': value => join($qpid_sasl_mechanisms, ' '); - } - } - elsif $qpid_sasl_mechanisms { - aodh_config { - 'oslo_messaging_qpid/qpid_sasl_mechanisms': value => $qpid_sasl_mechanisms; - } - } - else { - aodh_config { - 'oslo_messaging_qpid/qpid_sasl_mechanisms': ensure => absent; - } - } + warning('Qpid driver was removed from Oslo.messaging in Mitaka release') } if $notification_driver { diff --git a/aodh/manifests/keystone/auth.pp b/aodh/manifests/keystone/auth.pp index 9a2747987..d5f83fffc 100644 --- a/aodh/manifests/keystone/auth.pp +++ b/aodh/manifests/keystone/auth.pp @@ -38,15 +38,15 @@ # Defaults to the value of auth_name. # # [*public_url*] -# (optional) The endpoint's public url. (Defaults to 'http://127.0.0.1:9311') +# (optional) The endpoint's public url. (Defaults to 'http://127.0.0.1:8042') # This url should *not* contain any trailing '/'. # # [*admin_url*] -# (optional) The endpoint's admin url. (Defaults to 'http://127.0.0.1:9311') +# (optional) The endpoint's admin url. (Defaults to 'http://127.0.0.1:8042') # This url should *not* contain any trailing '/'. # # [*internal_url*] -# (optional) The endpoint's internal url. (Defaults to 'http://127.0.0.1:9311') +# (optional) The endpoint's internal url. (Defaults to 'http://127.0.0.1:8042') # This url should *not* contain any trailing '/'. # class aodh::keystone::auth ( diff --git a/aodh/manifests/listener.pp b/aodh/manifests/listener.pp new file mode 100644 index 000000000..7de3b4b65 --- /dev/null +++ b/aodh/manifests/listener.pp @@ -0,0 +1,49 @@ +# Installs the aodh listener service +# +# == Params +# [*enabled*] +# (optional) Should the service be enabled. +# Defaults to true. +# +# [*manage_service*] +# (optional) Whether the service should be managed by Puppet. +# Defaults to true. +# +# [*package_ensure*] +# (optional) ensure state for package. +# Defaults to 'present' +# +class aodh::listener ( + $manage_service = true, + $enabled = true, + $package_ensure = 'present', +) { + + include ::aodh::params + + Aodh_config<||> ~> Service['aodh-listener'] + + Package[$::aodh::params::listener_package_name] -> Service['aodh-listener'] + ensure_resource( 'package', [$::aodh::params::listener_package_name], + { ensure => $package_ensure, + tag => ['openstack', 'aodh-package'] } + ) + + if $manage_service { + if $enabled { + $service_ensure = 'running' + } else { + $service_ensure = 'stopped' + } + } + + Package['aodh'] -> Service['aodh-listener'] + service { 'aodh-listener': + ensure => $service_ensure, + name => $::aodh::params::listener_service_name, + enable => $enabled, + hasstatus => true, + hasrestart => true, + tag => 'aodh-service', + } +} diff --git a/aodh/manifests/logging.pp b/aodh/manifests/logging.pp index 88f7cb608..13ece3135 100644 --- a/aodh/manifests/logging.pp +++ b/aodh/manifests/logging.pp @@ -6,59 +6,60 @@ # # [*verbose*] # (Optional) Should the daemons log verbose messages -# Defaults to 'false' +# Defaults to $::os_service_default # # [*debug*] # (Optional) Should the daemons log debug messages -# Defaults to 'false' +# Defaults to $::os_service_default # # [*use_syslog*] # (Optional) Use syslog for logging. -# Defaults to 'false' +# Defaults to $::os_service_default # # [*use_stderr*] # (optional) Use stderr for logging -# Defaults to 'true' +# Defaults to $::os_service_default # # [*log_facility*] # (Optional) Syslog facility to receive log lines. 
-# Defaults to 'LOG_USER' +# Defaults to $::os_service_default # # [*log_dir*] # (optional) Directory where logs should be stored. -# If set to boolean false, it will not log to any directory. -# Defaults to '/var/log/aodh' +# If set to boolean false or the $::os_service_default, it will not log to +# any directory. +# Defaults to '/var/log/aodh'. # # [*logging_context_format_string*] # (optional) Format string to use for log messages with context. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [%(request_id)s %(user_identity)s] %(instance)s%(message)s' # # [*logging_default_format_string*] # (optional) Format string to use for log messages without context. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [-] %(instance)s%(message)s' # # [*logging_debug_format_suffix*] # (optional) Formatted data to append to log format when level is DEBUG. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(funcName)s %(pathname)s:%(lineno)d' # # [*logging_exception_prefix*] # (optional) Prefix each line of exception output with this format. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s' # # [*log_config_append*] # The name of an additional logging configuration file. -# Defaults to undef. +# Defaults to $::os_service_default # See https://docs.python.org/2/howto/logging.html # # [*default_log_levels*] # (optional) Hash of logger (keys) and level (values) pairs. -# Defaults to undef. +# Defaults to $::os_service_default # Example: # { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', # 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', @@ -67,191 +68,81 @@ # # [*publish_errors*] # (optional) Publish error events (boolean value). -# Defaults to undef (false if unconfigured). +# Defaults to $::os_service_default # # [*fatal_deprecations*] # (optional) Make deprecations fatal (boolean value) -# Defaults to undef (false if unconfigured). +# Defaults to $::os_service_default # # [*instance_format*] # (optional) If an instance is passed with the log message, format it # like this (string value). -# Defaults to undef. +# Defaults to $::os_service_default # Example: '[instance: %(uuid)s] ' # # [*instance_uuid_format*] # (optional) If an instance UUID is passed with the log message, format # it like this (string value). -# Defaults to undef. +# Defaults to $::os_service_default # Example: instance_uuid_format='[instance: %(uuid)s] ' # [*log_date_format*] # (optional) Format string for %%(asctime)s in log records. -# Defaults to undef. 
+# Defaults to $::os_service_default # Example: 'Y-%m-%d %H:%M:%S' class aodh::logging( - $use_syslog = false, - $use_stderr = true, - $log_facility = 'LOG_USER', + $use_syslog = $::os_service_default, + $use_stderr = $::os_service_default, + $log_facility = $::os_service_default, $log_dir = '/var/log/aodh', - $verbose = false, - $debug = false, - $logging_context_format_string = undef, - $logging_default_format_string = undef, - $logging_debug_format_suffix = undef, - $logging_exception_prefix = undef, - $log_config_append = undef, - $default_log_levels = undef, - $publish_errors = undef, - $fatal_deprecations = undef, - $instance_format = undef, - $instance_uuid_format = undef, - $log_date_format = undef, + $verbose = $::os_service_default, + $debug = $::os_service_default, + $logging_context_format_string = $::os_service_default, + $logging_default_format_string = $::os_service_default, + $logging_debug_format_suffix = $::os_service_default, + $logging_exception_prefix = $::os_service_default, + $log_config_append = $::os_service_default, + $default_log_levels = $::os_service_default, + $publish_errors = $::os_service_default, + $fatal_deprecations = $::os_service_default, + $instance_format = $::os_service_default, + $instance_uuid_format = $::os_service_default, + $log_date_format = $::os_service_default, ) { # NOTE(spredzy): In order to keep backward compatibility we rely on the pick function # to use aodh:: first then aodh::logging::. - $use_syslog_real = pick($::aodh::use_syslog,$use_syslog) - $use_stderr_real = pick($::aodh::use_stderr,$use_stderr) + $use_syslog_real = pick($::aodh::use_syslog,$use_syslog) + $use_stderr_real = pick($::aodh::use_stderr,$use_stderr) $log_facility_real = pick($::aodh::log_facility,$log_facility) - $log_dir_real = pick($::aodh::log_dir,$log_dir) - $verbose_real = pick($::aodh::verbose,$verbose) - $debug_real = pick($::aodh::debug,$debug) + $log_dir_real = pick($::aodh::log_dir,$log_dir) + $verbose_real = pick($::aodh::verbose,$verbose) + $debug_real = pick($::aodh::debug,$debug) + + if is_service_default($default_log_levels) { + $default_log_levels_real = $default_log_levels + } else { + $default_log_levels_real = join(sort(join_keys_to_values($default_log_levels, '=')), ',') + } aodh_config { - 'DEFAULT/debug' : value => $debug_real; - 'DEFAULT/verbose' : value => $verbose_real; - 'DEFAULT/use_stderr' : value => $use_stderr_real; - 'DEFAULT/use_syslog' : value => $use_syslog_real; - 'DEFAULT/log_dir' : value => $log_dir_real; - 'DEFAULT/syslog_log_facility': value => $log_facility_real; + 'DEFAULT/debug' : value => $debug_real; + 'DEFAULT/verbose' : value => $verbose_real; + 'DEFAULT/use_stderr' : value => $use_stderr_real; + 'DEFAULT/use_syslog' : value => $use_syslog_real; + 'DEFAULT/log_dir' : value => $log_dir_real; + 'DEFAULT/syslog_log_facility' : value => $log_facility_real; + 'DEFAULT/logging_context_format_string' : value => $logging_context_format_string; + 'DEFAULT/logging_default_format_string' : value => $logging_default_format_string; + 'DEFAULT/logging_debug_format_suffix' : value => $logging_debug_format_suffix; + 'DEFAULT/logging_exception_prefix' : value => $logging_exception_prefix; + 'DEFAULT/log_config_append' : value => $log_config_append; + 'DEFAULT/default_log_levels' : value => $default_log_levels_real; + 'DEFAULT/publish_errors' : value => $publish_errors; + 'DEFAULT/fatal_deprecations' : value => $fatal_deprecations; + 'DEFAULT/instance_format' : value => $instance_format; + 'DEFAULT/instance_uuid_format' : value => 
$instance_uuid_format; + 'DEFAULT/log_date_format' : value => $log_date_format; } - - if $logging_context_format_string { - aodh_config { - 'DEFAULT/logging_context_format_string' : - value => $logging_context_format_string; - } - } - else { - aodh_config { - 'DEFAULT/logging_context_format_string' : ensure => absent; - } - } - - if $logging_default_format_string { - aodh_config { - 'DEFAULT/logging_default_format_string' : - value => $logging_default_format_string; - } - } - else { - aodh_config { - 'DEFAULT/logging_default_format_string' : ensure => absent; - } - } - - if $logging_debug_format_suffix { - aodh_config { - 'DEFAULT/logging_debug_format_suffix' : - value => $logging_debug_format_suffix; - } - } - else { - aodh_config { - 'DEFAULT/logging_debug_format_suffix' : ensure => absent; - } - } - - if $logging_exception_prefix { - aodh_config { - 'DEFAULT/logging_exception_prefix' : value => $logging_exception_prefix; - } - } - else { - aodh_config { - 'DEFAULT/logging_exception_prefix' : ensure => absent; - } - } - - if $log_config_append { - aodh_config { - 'DEFAULT/log_config_append' : value => $log_config_append; - } - } - else { - aodh_config { - 'DEFAULT/log_config_append' : ensure => absent; - } - } - - if $default_log_levels { - aodh_config { - 'DEFAULT/default_log_levels' : - value => join(sort(join_keys_to_values($default_log_levels, '=')), ','); - } - } - else { - aodh_config { - 'DEFAULT/default_log_levels' : ensure => absent; - } - } - - if $publish_errors { - aodh_config { - 'DEFAULT/publish_errors' : value => $publish_errors; - } - } - else { - aodh_config { - 'DEFAULT/publish_errors' : ensure => absent; - } - } - - if $fatal_deprecations { - aodh_config { - 'DEFAULT/fatal_deprecations' : value => $fatal_deprecations; - } - } - else { - aodh_config { - 'DEFAULT/fatal_deprecations' : ensure => absent; - } - } - - if $instance_format { - aodh_config { - 'DEFAULT/instance_format' : value => $instance_format; - } - } - else { - aodh_config { - 'DEFAULT/instance_format' : ensure => absent; - } - } - - if $instance_uuid_format { - aodh_config { - 'DEFAULT/instance_uuid_format' : value => $instance_uuid_format; - } - } - else { - aodh_config { - 'DEFAULT/instance_uuid_format' : ensure => absent; - } - } - - if $log_date_format { - aodh_config { - 'DEFAULT/log_date_format' : value => $log_date_format; - } - } - else { - aodh_config { - 'DEFAULT/log_date_format' : ensure => absent; - } - } - - } diff --git a/aodh/manifests/notifier.pp b/aodh/manifests/notifier.pp new file mode 100644 index 000000000..f4629738b --- /dev/null +++ b/aodh/manifests/notifier.pp @@ -0,0 +1,49 @@ +# Installs the aodh notifier service +# +# == Params +# [*enabled*] +# (optional) Should the service be enabled. +# Defaults to true. +# +# [*manage_service*] +# (optional) Whether the service should be managed by Puppet. +# Defaults to true. +# +# [*package_ensure*] +# (optional) ensure state for package. 
+# Defaults to 'present' +# +class aodh::notifier ( + $manage_service = true, + $enabled = true, + $package_ensure = 'present', +) { + + include ::aodh::params + + Aodh_config<||> ~> Service['aodh-notifier'] + + Package[$::aodh::params::notifier_package_name] -> Service['aodh-notifier'] + ensure_resource( 'package', [$::aodh::params::notifier_package_name], + { ensure => $package_ensure, + tag => ['openstack', 'aodh-package'] } + ) + + if $manage_service { + if $enabled { + $service_ensure = 'running' + } else { + $service_ensure = 'stopped' + } + } + + Package['aodh'] -> Service['aodh-notifier'] + service { 'aodh-notifier': + ensure => $service_ensure, + name => $::aodh::params::notifier_service_name, + enable => $enabled, + hasstatus => true, + hasrestart => true, + tag => 'aodh-service', + } +} diff --git a/aodh/manifests/params.pp b/aodh/manifests/params.pp index 1b7d1575b..2bb273d99 100644 --- a/aodh/manifests/params.pp +++ b/aodh/manifests/params.pp @@ -2,6 +2,8 @@ # class aodh::params { + $client_package_name = 'python-ceilometerclient' + case $::osfamily { 'RedHat': { $common_package_name = 'openstack-aodh-common' @@ -17,6 +19,7 @@ $expirer_package_serice = 'openstack-aodh-expirer' $listener_package_name = 'openstack-aodh-listener' $listener_service_name = 'openstack-aodh-listener' + $pymongo_package_name = 'python-pymongo' $aodh_wsgi_script_path = '/var/www/cgi-bin/aodh' $aodh_wsgi_script_source = '/usr/lib/python2.7/site-packages/aodh/api/app.wsgi' } @@ -34,6 +37,7 @@ $expirer_package_serice = 'aodh-expirer' $listener_package_name = 'aodh-listener' $listener_service_name = 'aodh-listener' + $pymongo_package_name = 'python-pymongo' $aodh_wsgi_script_path = '/usr/lib/cgi-bin/aodh' $aodh_wsgi_script_source = '/usr/share/aodh-common/app.wsgi' } diff --git a/aodh/metadata.json b/aodh/metadata.json index 8e41b1553..6557087db 100644 --- a/aodh/metadata.json +++ b/aodh/metadata.json @@ -1,6 +1,6 @@ { - "name": "puppet-aodh", - "version": "0.0.1", + "name": "openstack-aodh", + "version": "7.0.0", "author": "OpenStack Contributors", "summary": "Puppet module for OpenStack Aodh", "license": "Apache-2.0", @@ -29,6 +29,6 @@ "dependencies": [ { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">= 4.2.0 <5.0.0" }, - { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=7.0.0 <8.0.0" } ] } diff --git a/aodh/spec/acceptance/aodh_wsgi_apache_spec.rb b/aodh/spec/acceptance/aodh_wsgi_apache_spec.rb index 0fe2083db..12d29e5d7 100644 --- a/aodh/spec/acceptance/aodh_wsgi_apache_spec.rb +++ b/aodh/spec/acceptance/aodh_wsgi_apache_spec.rb @@ -6,59 +6,11 @@ it 'should work with no errors' do pp= <<-EOS - Exec { logoutput => 'on_failure' } - - # Common resources - case $::osfamily { - 'Debian': { - include ::apt - class { '::openstack_extras::repo::debian::ubuntu': - release => 'liberty', - repo => 'proposed', - package_require => true, - } - $package_provider = 'apt' - } - 'RedHat': { - class { '::openstack_extras::repo::redhat::redhat': - manage_rdo => false, - repo_hash => { - 'openstack-common-testing' => { - 'baseurl' => 'http://cbs.centos.org/repos/cloud7-openstack-common-testing/x86_64/os/', - 'descr' => 'openstack-common-testing', - 'gpgcheck' => 'no', - }, - 'openstack-liberty-testing' => { - 'baseurl' => 'http://cbs.centos.org/repos/cloud7-openstack-liberty-testing/x86_64/os/', - 'descr' => 'openstack-liberty-testing', - 'gpgcheck' 
=> 'no', - }, - 'openstack-liberty-trunk' => { - 'baseurl' => 'http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/', - 'descr' => 'openstack-liberty-trunk', - 'gpgcheck' => 'no', - }, - }, - } - package { 'openstack-selinux': ensure => 'latest' } - $package_provider = 'yum' - } - default: { - fail("Unsupported osfamily (${::osfamily})") - } - } - - class { '::mysql::server': } - - class { '::rabbitmq': - delete_guest_user => true, - package_provider => $package_provider, - } - - rabbitmq_vhost { '/': - provider => 'rabbitmqctl', - require => Class['rabbitmq'], - } + include ::openstack_integration + include ::openstack_integration::repos + include ::openstack_integration::rabbitmq + include ::openstack_integration::mysql + include ::openstack_integration::keystone rabbitmq_user { 'aodh': admin => true, @@ -75,26 +27,6 @@ class { '::rabbitmq': require => Class['rabbitmq'], } - - # Keystone resources, needed by Ceilometer to run - class { '::keystone::db::mysql': - password => 'keystone', - } - class { '::keystone': - verbose => true, - debug => true, - database_connection => 'mysql://keystone:keystone@127.0.0.1/keystone', - admin_token => 'admin_token', - enabled => true, - } - class { '::keystone::roles::admin': - email => 'test@example.tld', - password => 'a_big_secret', - } - class { '::keystone::endpoint': - public_url => "https://${::fqdn}:5000/", - admin_url => "https://${::fqdn}:35357/", - } class { '::aodh': rabbit_userid => 'aodh', rabbit_password => 'an_even_bigger_secret', @@ -119,6 +51,22 @@ class { '::aodh::api': class { '::aodh::wsgi::apache': ssl => false, } + class { '::aodh::auth': + auth_url => 'http://127.0.0.1:5000/v2.0', + auth_password => 'a_big_secret', + } + class { '::aodh::client': } + class { '::aodh::notifier': } + class { '::aodh::listener': } + case $::osfamily { + 'Debian': { + warning('aodh-evaluator cannot be run on ubuntu system, package is broken. 
See LP#1508463') + } + 'RedHat': { + class { '::aodh::evaluator': } + class { '::aodh::db::sync': } + } + } EOS diff --git a/aodh/spec/classes/aodh_api_spec.rb b/aodh/spec/classes/aodh_api_spec.rb index 284ccef40..dd320b4d0 100644 --- a/aodh/spec/classes/aodh_api_spec.rb +++ b/aodh/spec/classes/aodh_api_spec.rb @@ -14,7 +14,7 @@ :keystone_tenant => 'services', :keystone_user => 'aodh', :package_ensure => 'latest', - :port => '8777', + :port => '8042', :host => '0.0.0.0', } end @@ -123,12 +123,14 @@ class { 'aodh': }" context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian', + @default_facts.merge({ + :osfamily => 'Debian', :operatingsystem => 'Debian', :operatingsystemrelease => '8.0', :concat_basedir => '/var/lib/puppet/concat', :fqdn => 'some.host.tld', - :processorcount => 2 } + :processorcount => 2, + }) end let :platform_params do @@ -141,12 +143,14 @@ class { 'aodh': }" context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat', + @default_facts.merge({ + :osfamily => 'RedHat', :operatingsystem => 'RedHat', :operatingsystemrelease => '7.1', :fqdn => 'some.host.tld', :concat_basedir => '/var/lib/puppet/concat', - :processorcount => 2 } + :processorcount => 2, + }) end let :platform_params do @@ -159,7 +163,7 @@ class { 'aodh': }" describe 'with custom auth_uri' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end before do params.merge!({ @@ -173,10 +177,10 @@ class { 'aodh': }" describe "with custom keystone identity_uri" do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end before do - params.merge!({ + params.merge!({ :keystone_identity_uri => 'https://foo.bar:1234/', }) end @@ -187,10 +191,10 @@ class { 'aodh': }" describe "with custom keystone identity_uri and auth_uri" do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end before do - params.merge!({ + params.merge!({ :keystone_identity_uri => 'https://foo.bar:35357/', :keystone_auth_uri => 'https://foo.bar:5000/v2.0/', }) diff --git a/aodh/spec/classes/aodh_auth_spec.rb b/aodh/spec/classes/aodh_auth_spec.rb new file mode 100644 index 000000000..9813451c3 --- /dev/null +++ b/aodh/spec/classes/aodh_auth_spec.rb @@ -0,0 +1,55 @@ +require 'spec_helper' + +describe 'aodh::auth' do + + let :params do + { :auth_url => 'http://localhost:5000/v2.0', + :auth_region => 'RegionOne', + :auth_user => 'aodh', + :auth_password => 'password', + :auth_tenant_name => 'services', + } + end + + shared_examples_for 'aodh-auth' do + + it 'configures authentication' do + is_expected.to contain_aodh_config('service_credentials/os_auth_url').with_value('http://localhost:5000/v2.0') + is_expected.to contain_aodh_config('service_credentials/os_region_name').with_value('RegionOne') + is_expected.to contain_aodh_config('service_credentials/os_username').with_value('aodh') + is_expected.to contain_aodh_config('service_credentials/os_password').with_value('password') + is_expected.to contain_aodh_config('service_credentials/os_password').with_value(params[:auth_password]).with_secret(true) + is_expected.to contain_aodh_config('service_credentials/os_tenant_name').with_value('services') + is_expected.to contain_aodh_config('service_credentials/os_cacert').with(:ensure => 'absent') + end + + context 'when overriding parameters' do + before do + params.merge!( + :auth_cacert => '/tmp/dummy.pem', + :auth_endpoint_type => 'internalURL', + ) + end + it { is_expected.to 
contain_aodh_config('service_credentials/os_cacert').with_value(params[:auth_cacert]) } + it { is_expected.to contain_aodh_config('service_credentials/os_endpoint_type').with_value(params[:auth_endpoint_type]) } + end + + end + + context 'on Debian platforms' do + let :facts do + { :osfamily => 'Debian' } + end + + it_configures 'aodh-auth' + end + + context 'on RedHat platforms' do + let :facts do + { :osfamily => 'RedHat' } + end + + it_configures 'aodh-auth' + end + +end diff --git a/aodh/spec/classes/aodh_client_spec.rb b/aodh/spec/classes/aodh_client_spec.rb new file mode 100644 index 000000000..8c43670f5 --- /dev/null +++ b/aodh/spec/classes/aodh_client_spec.rb @@ -0,0 +1,33 @@ +require 'spec_helper' + +describe 'aodh::client' do + + shared_examples_for 'aodh client' do + + it { is_expected.to contain_class('aodh::params') } + + it 'installs aodh client package' do + is_expected.to contain_package('python-ceilometerclient').with( + :ensure => 'present', + :name => 'python-ceilometerclient', + :tag => 'openstack', + ) + end + end + + context 'on Debian platforms' do + let :facts do + { :osfamily => 'Debian' } + end + + it_configures 'aodh client' + end + + context 'on RedHat platforms' do + let :facts do + { :osfamily => 'RedHat' } + end + + it_configures 'aodh client' + end +end diff --git a/aodh/spec/classes/aodh_db_spec.rb b/aodh/spec/classes/aodh_db_spec.rb new file mode 100644 index 000000000..26e030230 --- /dev/null +++ b/aodh/spec/classes/aodh_db_spec.rb @@ -0,0 +1,112 @@ +require 'spec_helper' + +describe 'aodh::db' do + + shared_examples 'aodh::db' do + + context 'with default parameters' do + + it { is_expected.to contain_class('aodh::params') } + it { is_expected.to contain_aodh_config('database/connection').with_value('sqlite:////var/lib/aodh/aodh.sqlite') } + it { is_expected.to contain_aodh_config('database/idle_timeout').with_value('3600') } + it { is_expected.to contain_aodh_config('database/min_pool_size').with_value('1') } + it { is_expected.to contain_aodh_config('database/max_retries').with_value('10') } + it { is_expected.to contain_aodh_config('database/retry_interval').with_value('10') } + + end + + context 'with specific parameters' do + let :params do + { :database_connection => 'mysql://aodh:aodh@localhost/aodh', + :database_idle_timeout => '3601', + :database_min_pool_size => '2', + :database_max_retries => '11', + :database_retry_interval => '11', + } + end + + it { is_expected.to contain_class('aodh::params') } + it { is_expected.to contain_aodh_config('database/connection').with_value('mysql://aodh:aodh@localhost/aodh').with_secret(true) } + it { is_expected.to contain_aodh_config('database/idle_timeout').with_value('3601') } + it { is_expected.to contain_aodh_config('database/min_pool_size').with_value('2') } + it { is_expected.to contain_aodh_config('database/max_retries').with_value('11') } + it { is_expected.to contain_aodh_config('database/retry_interval').with_value('11') } + + end + + context 'with postgresql backend' do + let :params do + { :database_connection => 'postgresql://localhost:1234/aodh', } + end + + it 'install the proper backend package' do + is_expected.to contain_package('python-psycopg2').with(:ensure => 'present') + end + + end + + context 'with mongodb backend' do + let :params do + { :database_connection => 'mongodb://localhost:1234/aodh', } + end + + it 'installs python-mongodb package' do + is_expected.to contain_package('aodh-backend-package').with( + :ensure => 'present', + :name => 'python-pymongo', + :tag => 'openstack' + 
) + is_expected.to contain_aodh_config('database/connection').with_value('mongodb://localhost:1234/aodh') + is_expected.to contain_aodh_config('database/connection').with_value( params[:database_connection] ).with_secret(true) + end + + end + + context 'with incorrect database_connection string' do + let :params do + { :database_connection => 'redis://aodh:aodh@localhost/aodh', } + end + + it_raises 'a Puppet::Error', /validate_re/ + end + + end + + context 'on Debian platforms' do + let :facts do + { :osfamily => 'Debian', + :operatingsystem => 'Debian', + :operatingsystemrelease => 'jessie', + } + end + + it_configures 'aodh::db' + + context 'with sqlite backend' do + let :params do + { :database_connection => 'sqlite:///var/lib/aodh/aodh.sqlite', } + end + + it 'install the proper backend package' do + is_expected.to contain_package('aodh-backend-package').with( + :ensure => 'present', + :name => 'python-pysqlite2', + :tag => 'openstack' + ) + end + + end + end + + context 'on Redhat platforms' do + let :facts do + { :osfamily => 'RedHat', + :operatingsystemrelease => '7.1', + } + end + + it_configures 'aodh::db' + end + +end + diff --git a/aodh/spec/classes/aodh_evaluator_spec.rb b/aodh/spec/classes/aodh_evaluator_spec.rb new file mode 100644 index 000000000..07450b76e --- /dev/null +++ b/aodh/spec/classes/aodh_evaluator_spec.rb @@ -0,0 +1,113 @@ +require 'spec_helper' +# LP1492636 - Cohabitation of compile matcher and webmock +WebMock.disable_net_connect!(:allow => "169.254.169.254") + +describe 'aodh::evaluator' do + + let :pre_condition do + "class { '::aodh': }" + end + + let :params do + { :enabled => true } + end + + shared_examples_for 'aodh-evaluator' do + + context 'with coordination' do + before do + params.merge!({ :coordination_url => 'redis://localhost:6379' }) + end + + it 'configures backend_url' do + is_expected.to contain_aodh_config('coordination/backend_url').with_value('redis://localhost:6379') + end + end + + context 'when enabled' do + it { is_expected.to contain_class('aodh::params') } + + it 'installs aodh-evaluator package' do + is_expected.to contain_package(platform_params[:evaluator_package_name]).with( + :ensure => 'present', + :tag => ['openstack', 'aodh-package'] + ) + end + + it 'configures aodh-evaluator service' do + is_expected.to contain_service('aodh-evaluator').with( + :ensure => 'running', + :name => platform_params[:evaluator_service_name], + :enable => true, + :hasstatus => true, + :hasrestart => true, + :tag => ['aodh-service','aodh-db-sync-service'] + ) + end + + end + + context 'when disabled' do + let :params do + { :enabled => false } + end + + # Catalog compilation does not crash for lack of aodh::db + it { is_expected.to compile } + it 'configures aodh-evaluator service' do + is_expected.to contain_service('aodh-evaluator').with( + :ensure => 'stopped', + :name => platform_params[:evaluator_service_name], + :enable => false, + :hasstatus => true, + :hasrestart => true, + :tag => ['aodh-service','aodh-db-sync-service'] + ) + end + end + + context 'when service management is disabled' do + let :params do + { :enabled => false, + :manage_service => false } + end + + it 'configures aodh-evaluator service' do + is_expected.to contain_service('aodh-evaluator').with( + :ensure => nil, + :name => platform_params[:evaluator_service_name], + :enable => false, + :hasstatus => true, + :hasrestart => true, + :tag => ['aodh-service','aodh-db-sync-service'] + ) + end + end + end + + context 'on Debian platforms' do + let :facts do + 
@default_facts.merge({ :osfamily => 'Debian' }) + end + + let :platform_params do + { :evaluator_package_name => 'aodh-evaluator', + :evaluator_service_name => 'aodh-evaluator' } + end + + it_configures 'aodh-evaluator' + end + + context 'on RedHat platforms' do + let :facts do + @default_facts.merge({ :osfamily => 'RedHat' }) + end + + let :platform_params do + { :evaluator_package_name => 'openstack-aodh-evaluator', + :evaluator_service_name => 'openstack-aodh-evaluator' } + end + + it_configures 'aodh-evaluator' + end +end diff --git a/aodh/spec/classes/aodh_init_spec.rb b/aodh/spec/classes/aodh_init_spec.rb index f7461b2dc..4ca9f3dad 100644 --- a/aodh/spec/classes/aodh_init_spec.rb +++ b/aodh/spec/classes/aodh_init_spec.rb @@ -210,72 +210,14 @@ is_expected.to contain_aodh_config('oslo_messaging_rabbit/kombu_ssl_version').with_ensure('absent') end end - - context 'with qpid rpc_backend' do - let :params do - { :rpc_backend => 'qpid' } - end - - context 'with default parameters' do - it 'configures qpid' do - is_expected.to contain_aodh_config('DEFAULT/rpc_backend').with_value('qpid') - is_expected.to contain_aodh_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost') - is_expected.to contain_aodh_config('oslo_messaging_qpid/qpid_port').with_value('5672') - is_expected.to contain_aodh_config('oslo_messaging_qpid/qpid_username').with_value('guest') - is_expected.to contain_aodh_config('oslo_messaging_qpid/qpid_password').with_value('guest').with_secret(true) - is_expected.to contain_aodh_config('oslo_messaging_qpid/qpid_heartbeat').with_value('60') - is_expected.to contain_aodh_config('oslo_messaging_qpid/qpid_protocol').with_value('tcp') - is_expected.to contain_aodh_config('oslo_messaging_qpid/qpid_tcp_nodelay').with_value(true) - end - end - - context 'with qpid_password parameter (without qpid_sasl_mechanisms)' do - before do - params.merge!({ :qpid_password => 'guest' }) - end - it { is_expected.to contain_aodh_config('oslo_messaging_qpid/qpid_sasl_mechanisms').with_ensure('absent') } - end - - context 'with qpid_password parameter (with qpid_sasl_mechanisms)' do - before do - params.merge!({ - :qpid_password => 'guest', - :qpid_sasl_mechanisms => 'A' - }) - end - it { is_expected.to contain_aodh_config('oslo_messaging_qpid/qpid_sasl_mechanisms').with_value('A') } - end - - context 'with qpid_password parameter (with array of qpid_sasl_mechanisms)' do - before do - params.merge!({ - :qpid_password => 'guest', - :qpid_sasl_mechanisms => [ 'DIGEST-MD5', 'GSSAPI', 'PLAIN' ] - }) - end - it { is_expected.to contain_aodh_config('oslo_messaging_qpid/qpid_sasl_mechanisms').with_value('DIGEST-MD5 GSSAPI PLAIN') } - end - end - - context 'with qpid rpc_backend with old parameter' do - let :params do - { :rpc_backend => 'aodh.openstack.common.rpc.impl_qpid' } - end - it { is_expected.to contain_aodh_config('DEFAULT/rpc_backend').with_value('aodh.openstack.common.rpc.impl_qpid') } - end - - context 'with rabbitmq rpc_backend with old parameter' do - let :params do - { :rpc_backend => 'aodh.openstack.common.rpc.impl_kombu' } - end - it { is_expected.to contain_aodh_config('DEFAULT/rpc_backend').with_value('aodh.openstack.common.rpc.impl_kombu') } - end end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian', - :operatingsystem => 'Debian' } + @default_facts.merge({ + :osfamily => 'Debian', + :operatingsystem => 'Debian', + }) end let :platform_params do @@ -287,7 +229,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + 
@default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/aodh/spec/classes/aodh_listener_spec.rb b/aodh/spec/classes/aodh_listener_spec.rb new file mode 100644 index 000000000..32f1076e1 --- /dev/null +++ b/aodh/spec/classes/aodh_listener_spec.rb @@ -0,0 +1,99 @@ +require 'spec_helper' +# LP1492636 - Cohabitation of compile matcher and webmock +WebMock.disable_net_connect!(:allow => "169.254.169.254") + +describe 'aodh::listener' do + + let :pre_condition do + "class { '::aodh': }" + end + + shared_examples_for 'aodh-listener' do + + context 'when enabled' do + it { is_expected.to contain_class('aodh::params') } + + it 'installs aodh-listener package' do + is_expected.to contain_package(platform_params[:listener_package_name]).with( + :ensure => 'present', + :tag => ['openstack', 'aodh-package'] + ) + end + + it 'configures aodh-listener service' do + is_expected.to contain_service('aodh-listener').with( + :ensure => 'running', + :name => platform_params[:listener_service_name], + :enable => true, + :hasstatus => true, + :hasrestart => true, + :tag => 'aodh-service', + ) + end + + end + + context 'when disabled' do + let :params do + { :enabled => false } + end + + # Catalog compilation does not crash for lack of aodh::db + it { is_expected.to compile } + it 'configures aodh-listener service' do + is_expected.to contain_service('aodh-listener').with( + :ensure => 'stopped', + :name => platform_params[:listener_service_name], + :enable => false, + :hasstatus => true, + :hasrestart => true, + :tag => 'aodh-service', + ) + end + end + + context 'when service management is disabled' do + let :params do + { :enabled => false, + :manage_service => false } + end + + it 'configures aodh-listener service' do + is_expected.to contain_service('aodh-listener').with( + :ensure => nil, + :name => platform_params[:listener_service_name], + :enable => false, + :hasstatus => true, + :hasrestart => true, + :tag => 'aodh-service', + ) + end + end + end + + context 'on Debian platforms' do + let :facts do + @default_facts.merge({ :osfamily => 'Debian' }) + end + + let :platform_params do + { :listener_package_name => 'aodh-listener', + :listener_service_name => 'aodh-listener' } + end + + it_configures 'aodh-listener' + end + + context 'on RedHat platforms' do + let :facts do + @default_facts.merge({ :osfamily => 'RedHat' }) + end + + let :platform_params do + { :listener_package_name => 'openstack-aodh-listener', + :listener_service_name => 'openstack-aodh-listener' } + end + + it_configures 'aodh-listener' + end +end diff --git a/aodh/spec/classes/aodh_logging_spec.rb b/aodh/spec/classes/aodh_logging_spec.rb index 17d5e000d..4cb4f1a1b 100644 --- a/aodh/spec/classes/aodh_logging_spec.rb +++ b/aodh/spec/classes/aodh_logging_spec.rb @@ -56,12 +56,13 @@ end shared_examples 'basic default logging settings' do - it 'configures aodh logging settins with default values' do - is_expected.to contain_aodh_config('DEFAULT/use_syslog').with(:value => 'false') - is_expected.to contain_aodh_config('DEFAULT/use_stderr').with(:value => 'true') + it 'configures aodh logging settings with default values' do + is_expected.to contain_aodh_config('DEFAULT/use_syslog').with(:value => '') + is_expected.to contain_aodh_config('DEFAULT/use_stderr').with(:value => '') + is_expected.to contain_aodh_config('DEFAULT/syslog_log_facility').with(:value => '') is_expected.to contain_aodh_config('DEFAULT/log_dir').with(:value => '/var/log/aodh') - is_expected.to 
contain_aodh_config('DEFAULT/verbose').with(:value => 'false') - is_expected.to contain_aodh_config('DEFAULT/debug').with(:value => 'false') + is_expected.to contain_aodh_config('DEFAULT/verbose').with(:value => '') + is_expected.to contain_aodh_config('DEFAULT/debug').with(:value => '') end end @@ -120,13 +121,13 @@ :default_log_levels, :fatal_deprecations, :instance_format, :instance_uuid_format, :log_date_format, ].each { |param| - it { is_expected.to contain_aodh_config("DEFAULT/#{param}").with_ensure('absent') } + it { is_expected.to contain_aodh_config("DEFAULT/#{param}").with(:value => '') } } end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'aodh-logging' @@ -134,7 +135,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'aodh-logging' diff --git a/aodh/spec/classes/aodh_notifier_spec.rb b/aodh/spec/classes/aodh_notifier_spec.rb new file mode 100644 index 000000000..6c786c2ab --- /dev/null +++ b/aodh/spec/classes/aodh_notifier_spec.rb @@ -0,0 +1,99 @@ +require 'spec_helper' +# LP1492636 - Cohabitation of compile matcher and webmock +WebMock.disable_net_connect!(:allow => "169.254.169.254") + +describe 'aodh::notifier' do + + let :pre_condition do + "class { '::aodh': }" + end + + shared_examples_for 'aodh-notifier' do + + context 'when enabled' do + it { is_expected.to contain_class('aodh::params') } + + it 'installs aodh-notifier package' do + is_expected.to contain_package(platform_params[:notifier_package_name]).with( + :ensure => 'present', + :tag => ['openstack', 'aodh-package'] + ) + end + + it 'configures aodh-notifier service' do + is_expected.to contain_service('aodh-notifier').with( + :ensure => 'running', + :name => platform_params[:notifier_service_name], + :enable => true, + :hasstatus => true, + :hasrestart => true, + :tag => 'aodh-service', + ) + end + + end + + context 'when disabled' do + let :params do + { :enabled => false } + end + + # Catalog compilation does not crash for lack of aodh::db + it { is_expected.to compile } + it 'configures aodh-notifier service' do + is_expected.to contain_service('aodh-notifier').with( + :ensure => 'stopped', + :name => platform_params[:notifier_service_name], + :enable => false, + :hasstatus => true, + :hasrestart => true, + :tag => 'aodh-service', + ) + end + end + + context 'when service management is disabled' do + let :params do + { :enabled => false, + :manage_service => false } + end + + it 'configures aodh-notifier service' do + is_expected.to contain_service('aodh-notifier').with( + :ensure => nil, + :name => platform_params[:notifier_service_name], + :enable => false, + :hasstatus => true, + :hasrestart => true, + :tag => 'aodh-service', + ) + end + end + end + + context 'on Debian platforms' do + let :facts do + @default_facts.merge({ :osfamily => 'Debian' }) + end + + let :platform_params do + { :notifier_package_name => 'aodh-notifier', + :notifier_service_name => 'aodh-notifier' } + end + + it_configures 'aodh-notifier' + end + + context 'on RedHat platforms' do + let :facts do + @default_facts.merge({ :osfamily => 'RedHat' }) + end + + let :platform_params do + { :notifier_package_name => 'openstack-aodh-notifier', + :notifier_service_name => 'openstack-aodh-notifier' } + end + + it_configures 'aodh-notifier' + end +end diff --git a/aodh/spec/spec_helper.rb b/aodh/spec/spec_helper.rb index 3df4cede1..9bc7bcf96 100644 --- 
a/aodh/spec/spec_helper.rb +++ b/aodh/spec/spec_helper.rb @@ -5,6 +5,9 @@ RSpec.configure do |c| c.alias_it_should_behave_like_to :it_configures, 'configures' c.alias_it_should_behave_like_to :it_raises, 'raises' + c.before :each do + @default_facts = { :os_service_default => '' } + end end at_exit { RSpec::Puppet::Coverage.report! } diff --git a/apache/CHANGELOG.md b/apache/CHANGELOG.md index fb33a1363..db27b34ba 100644 --- a/apache/CHANGELOG.md +++ b/apache/CHANGELOG.md @@ -1,3 +1,19 @@ +## UNRELEASED +### Summary + +TODO + +### + +#### Security + +* apache::mod::php now uses FilesMatch to configure the php handler. This is following the recommended upstream configuration guidelines (http://php.net/manual/en/install.unix.apache2.php#example-20) and distribution's default config (e.g.: http://bazaar.launchpad.net/~ubuntu-branches/ubuntu/vivid/php5/vivid/view/head:/debian/php5.conf). It avoids inadvertently exposing the PHP handler to executing uploads with names like 'file.php.jpg', but might impact setups with unusual requirements. + +## Supported Release 1.7.1 +###Summary + +Small release for support of newer PE versions. This increments the version of PE in the metadata.json file. + ## 2015-11-17 - Supported Release 1.7.0 ### Summary This release includes many new features and bugfixes. There are test, documentation and misc improvements. diff --git a/apache/README.md b/apache/README.md index a8c4b47df..57ba525bf 100644 --- a/apache/README.md +++ b/apache/README.md @@ -51,6 +51,7 @@ [`apache::mod::ext_filter`]: #class-apachemodext_filter [`apache::mod::geoip`]: #class-apachemodgeoip [`apache::mod::itk`]: #class-apachemoditk +[`apache::mod::ldap`]: #class-apachemodldap [`apache::mod::passenger`]: #class-apachemodpassenger [`apache::mod::peruser`]: #class-apachemodperuser [`apache::mod::prefork`]: #class-apachemodprefork @@ -64,6 +65,7 @@ [`apache::params`]: #class-apacheparams [`apache::version`]: #class-apacheversion [`apache::vhost`]: #define-apachevhost +[`apache::vhost::custom`]: #define-apachevhostcustom [`apache::vhost::WSGIImportScript`]: #wsgiimportscript [Apache HTTP Server]: http://httpd.apache.org [Apache modules]: http://httpd.apache.org/docs/current/mod/ @@ -123,6 +125,7 @@ [`KeepAliveTimeout`]: http://httpd.apache.org/docs/current/mod/core.html#keepalivetimeout [`keepalive` parameter]: #keepalive [`keepalive_timeout`]: #keepalive_timeout +[`limitreqfieldsize`]: https://httpd.apache.org/docs/current/mod/core.html#limitrequestfieldsize [`lib`]: #lib [`lib_path`]: #lib_path @@ -154,6 +157,7 @@ [`mod_fcgid`]: https://httpd.apache.org/mod_fcgid/mod/mod_fcgid.html [`mod_geoip`]: http://dev.maxmind.com/geoip/legacy/mod_geoip2/ [`mod_info`]: https://httpd.apache.org/docs/current/mod/mod_info.html +[`mod_ldap`]: https://httpd.apache.org/docs/2.2/mod/mod_ldap.html [`mod_mpm_event`]: https://httpd.apache.org/docs/current/mod/event.html [`mod_negotiation`]: http://httpd.apache.org/docs/current/mod/mod_negotiation.html [`mod_pagespeed`]: https://developers.google.com/speed/pagespeed/module/?hl=en @@ -740,6 +744,7 @@ apache::balancer { 'puppet01': - [Define: apache::mod](#define-apachemod) - [Define: apache::namevirtualhost](#define-apachenamevirtualhost) - [Define: apache::vhost](#define-apachevhost) + - [Define: apache::vhost::custom](#define-apachevhostcustom) - [**Private Defines**](#private-defines) - [Define: apache::default_mods::load](#define-default_mods-load) - [Define: apache::peruser::multiplexer](#define-apacheperusermultiplexer) @@ -1308,7 +1313,7 @@ Installs and manages 
[`mod_alias`][]. #### Class: `apache::mod::disk_cache` -Installs and configures [`mod_disk_cache`][] on Apache 2.2, or [`mod_cache_disk`][] on Apache 2.4. The default cache root depends on the Apache version and operating system: +Installs and configures [`mod_disk_cache`][] on Apache 2.2, or [`mod_cache_disk`][] on Apache 2.4. The default cache root depends on the Apache version and operating system: - **Debian**: `/var/cache/apache2/mod_cache_disk` - **FreeBSD**: `/var/cache/mod_cache_disk` @@ -1476,6 +1481,23 @@ Installs and manages [`mod_info`][], which provides a comprehensive overview of - `apache_version`: Default: `$::apache::apache_version`, - `restrict_access`: Determines whether to enable access restrictions. If 'false', the `allow_from` whitelist is ignored and any IP address can access `/server-info`. Valid options: Boolean. Default: 'true'. +##### Class: `apache::mod::ldap` + +Installs and configures [`mod_ldap`][]. Allows you to modify the +[`LDAPTrustedGlobalCert`](https://httpd.apache.org/docs/2.2/mod/mod_ldap.html#ldaptrustedglobalcert) Directive: + +~~~puppet +class { 'apache::mod::ldap': + ldap_trusted_global_cert_file => '/etc/pki/tls/certs/ldap-trust.crt' + ldap_trusted_global_cert_type => 'CA_DER', +} +~~~ + +**Parameters within `apache::mod::ldap`:** + +- `ldap_trusted_global_cert_file`: Path and file name of the trusted CA certificates to use when establishing SSL or TLS connections to an LDAP server. +- `ldap_trusted_global_cert_type`: The global trust certificate format. Defaults to 'CA_BASE64'. + ##### Class: `apache::mod::negotiation` Installs and configures [`mod_negotiation`][]. @@ -1491,7 +1513,7 @@ Installs and manages [`mod_pagespeed`], a Google module that rewrites web pages While this Apache module requires the `mod-pagespeed-stable` package, Puppet **doesn't** manage the software repositories required to automatically install the package. If you declare this class when the package is either not installed or not available to your package manager, your Puppet run will fail. -**Parameters within `apache::mod::info`**: +**Parameters within `apache::mod::pagespeed`**: - `inherit_vhost_config`: Default: 'on'. - `filter_xhtml`: Default: false. @@ -1525,7 +1547,7 @@ While this Apache module requires the `mod-pagespeed-stable` package, Puppet **d - `allow_pagespeed_console`: Default: []. - `allow_pagespeed_message`: Default: []. - `message_buffer_size`: Default: 100000. -- `additional_configuration`: Default: { }. +- `additional_configuration`: Default: { }. A hash of directive/value pairs or an array of lines to insert at the end of the pagespeed configuration. The class's parameters correspond to the module's directives. See the [module's documentation][`mod_pagespeed`] for details. @@ -2084,10 +2106,6 @@ Specifies the service name that will be used by Apache for authentication. Corre This option enables credential saving functionality. Default is 'off' -##### `limit_request_field_size` - -[Limits](http://httpd.apache.org/docs/2.4/mod/core.html#limitrequestfieldsize) the size of the HTTP request header allowed from the client. Default is 'undef'. - ##### `logroot` Specifies the location of the virtual host's logfiles. Defaults to '/var/log//'. 
@@ -3167,6 +3185,15 @@ Sets the [SSLProxyMachineCertificateFile](http://httpd.apache.org/docs/current/m } ~~~ +##### `ssl_proxy_check_peer_cn` + +Sets the [SSLProxyCheckPeerCN](http://httpd.apache.org/docs/current/mod/mod_ssl.html#sslproxycheckpeercn) directive, which specifies whether the remote server certificate's CN field is compared against the hostname of the request URL. Defaults to 'undef'. + + +##### `ssl_proxy_check_peer_name` + +Sets the [SSLProxyCheckPeerName](http://httpd.apache.org/docs/current/mod/mod_ssl.html#sslproxycheckpeername) directive, which specifies whether the remote server certificate's CN field is compared against the hostname of the request URL. Defaults to 'undef'. + ##### `ssl_options` Sets the [SSLOptions](http://httpd.apache.org/docs/current/mod/mod_ssl.html#ssloptions) directive, which configures various SSL engine run-time options. This is the global setting for the given vhost and can be a string or an array. Defaults to 'undef'. @@ -3201,6 +3228,8 @@ Specifies whether or not to use [SSLProxyEngine](http://httpd.apache.org/docs/cu This type is intended for use with mod_fastcgi. It allows you to define one or more external FastCGI servers to handle specific file types. +**Note:** If using Ubuntu 10.04+, you'll need to manually enable the multiverse repository. + Ex: ~~~ puppet @@ -3248,6 +3277,26 @@ A unique alias. This is used internally to link the action with the FastCGI serv The MIME-type of the file to be processed by the FastCGI server. +#### Define: `apache::vhost::custom` + +The `apache::vhost::custom` define is a thin wrapper around the `apache::custom_config` +define. We are simply overriding some of the default settings specific to the +vhost directory in Apache. + +**Parameters within `apache::vhost::custom`**: + +##### `content` + +Sets the configuration file's content. + +##### `ensure` + +Specifies if the vhost file is present or absent. Defaults to 'present'. + +##### `priority` + +Sets the relative load-order for Apache HTTPD VirtualHost configuration files. Defaults to '25'.
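To make the new define concrete, here is a minimal usage sketch; the vhost name and the `site/...` template path are hypothetical placeholders, not examples shipped with the module:

~~~ puppet
# Drop a hand-written vhost file into Apache's vhost directory and,
# where an enable directory is used, symlink it into place.
apache::vhost::custom { 'virt.example.org':
  content => template('site/apache/virt.example.org.conf.erb'),
}
~~~

With the default `priority` of '25', this should end up as `25-virt.example.org.conf` under `$::apache::vhost_dir`, mirroring the naming that `apache::vhost` itself uses for generated configuration.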
+ ### Private Defines #### Define: `apache::peruser::multiplexer` diff --git a/apache/manifests/init.pp b/apache/manifests/init.pp index 5a865b761..bb50d0b2e 100644 --- a/apache/manifests/init.pp +++ b/apache/manifests/init.pp @@ -63,6 +63,7 @@ $keepalive = $::apache::params::keepalive, $keepalive_timeout = $::apache::params::keepalive_timeout, $max_keepalive_requests = $::apache::params::max_keepalive_requests, + $limitreqfieldsize = '8190', $logroot = $::apache::params::logroot, $logroot_mode = $::apache::params::logroot_mode, $log_level = $::apache::params::log_level, @@ -92,7 +93,7 @@ default => '(event|itk|prefork|worker)' } - if $mpm_module { + if $mpm_module and $mpm_module != 'false' { # lint:ignore:quoted_booleans validate_re($mpm_module, $valid_mpms_re) } @@ -346,7 +347,7 @@ class { '::apache::default_confd_files': all => $default_confd_files } - if $mpm_module { + if $mpm_module and $mpm_module != 'false' { # lint:ignore:quoted_booleans class { "::apache::mod::${mpm_module}": } } diff --git a/apache/manifests/mod/itk.pp b/apache/manifests/mod/itk.pp index 2be7d832d..990f853ea 100644 --- a/apache/manifests/mod/itk.pp +++ b/apache/manifests/mod/itk.pp @@ -17,6 +17,17 @@ if defined(Class['apache::mod::prefork']) { fail('May not include both apache::mod::itk and apache::mod::prefork on the same node') } + } else { + # prefork is a requirement for itk in 2.4; except on FreeBSD and Gentoo, which are special + if $::osfamily =~ /^(FreeBSD|Gentoo)/ { + if defined(Class['apache::mod::prefork']) { + fail('May not include both apache::mod::itk and apache::mod::prefork on the same node') + } + } else { + if ! defined(Class['apache::mod::prefork']) { + fail('apache::mod::prefork is a prerequisite for apache::mod::itk, please arrange for it to be included.') + } + } } if defined(Class['apache::mod::worker']) { fail('May not include both apache::mod::itk and apache::mod::worker on the same node') diff --git a/apache/manifests/mod/ldap.pp b/apache/manifests/mod/ldap.pp index fbd56d539..d08418671 100644 --- a/apache/manifests/mod/ldap.pp +++ b/apache/manifests/mod/ldap.pp @@ -1,6 +1,11 @@ class apache::mod::ldap ( - $apache_version = $::apache::apache_version, + $apache_version = $::apache::apache_version, + $ldap_trusted_global_cert_file = undef, + $ldap_trusted_global_cert_type = 'CA_BASE64', ){ + if ($ldap_trusted_global_cert_file) { + validate_string($ldap_trusted_global_cert_type) + } ::apache::mod { 'ldap': } # Template uses $apache_version file { 'ldap.conf': diff --git a/apache/manifests/mpm.pp b/apache/manifests/mpm.pp index 9e7734945..153540910 100644 --- a/apache/manifests/mpm.pp +++ b/apache/manifests/mpm.pp @@ -47,17 +47,34 @@ } if $mpm == 'itk' { - file { "${lib_path}/mod_mpm_itk.so": - ensure => link, - target => "${lib_path}/mpm_itk.so" - } + file { "${lib_path}/mod_mpm_itk.so": + ensure => link, + target => "${lib_path}/mpm_itk.so", + require => Package['httpd'], + before => Class['apache::service'], + } } } - if versioncmp($apache_version, '2.4') < 0 { + if $mpm == 'itk' and $::operatingsystem == 'Ubuntu' and $::operatingsystemrelease == '14.04' { + # workaround https://bugs.launchpad.net/ubuntu/+source/mpm-itk/+bug/1286882 + exec { + '/usr/sbin/a2dismod mpm_event': + onlyif => '/usr/bin/test -e /etc/apache2/mods-enabled/mpm_event.load', + require => Package['httpd'], + before => Package['apache2-mpm-itk'], + } + } + + if versioncmp($apache_version, '2.4') < 0 or $mpm == 'itk' { package { "apache2-mpm-${mpm}": ensure => present, } + if $::apache::mod_enable_dir { + 
Package["apache2-mpm-${mpm}"] { + before => File[$::apache::mod_enable_dir], + } + } } } 'freebsd': { @@ -65,6 +82,9 @@ mpm_module => $mpm } } + 'gentoo': { + # so we don't fail + } 'redhat': { # so we don't fail } @@ -87,10 +107,10 @@ } if $mpm == 'itk' { - file { "${lib_path}/mod_mpm_itk.so": - ensure => link, - target => "${lib_path}/mpm_itk.so" - } + file { "${lib_path}/mod_mpm_itk.so": + ensure => link, + target => "${lib_path}/mpm_itk.so" + } } } diff --git a/apache/manifests/vhost.pp b/apache/manifests/vhost.pp index e2725b8ae..7e48317a8 100644 --- a/apache/manifests/vhost.pp +++ b/apache/manifests/vhost.pp @@ -25,6 +25,8 @@ $ssl_honorcipherorder = undef, $ssl_verify_client = undef, $ssl_verify_depth = undef, + $ssl_proxy_check_peer_cn = undef, + $ssl_proxy_check_peer_name = undef, $ssl_proxy_machine_cert = undef, $ssl_options = undef, $ssl_openssl_conf_cmd = undef, @@ -134,7 +136,6 @@ $krb_verify_kdc = 'on', $krb_servicename = 'HTTP', $krb_save_credentials = 'off', - $limit_request_field_size = undef, ) { # The base class must be included first because it is used by parameter defaults if ! defined(Class['apache']) { @@ -227,15 +228,19 @@ validate_bool($auth_kerb) - if $limit_request_field_size { - validate_integer($limit_request_field_size) - } - # Validate the docroot as a string if: # - $manage_docroot is true if $manage_docroot { validate_string($docroot) } + + if $ssl_proxy_check_peer_cn { + validate_re($ssl_proxy_check_peer_cn,'(^on$|^off$)',"${ssl_proxy_check_peer_cn} is not permitted for ssl_proxy_check_peer_cn. Allowed values are 'on' or 'off'.") + } + if $ssl_proxy_check_peer_name { + validate_re($ssl_proxy_check_peer_name,'(^on$|^off$)',"${ssl_proxy_check_peer_name} is not permitted for ssl_proxy_check_peer_name. Allowed values are 'on' or 'off'.") + } + # Input validation ends if $ssl and $ensure == 'present' { @@ -497,6 +502,8 @@ require => Package['httpd'], notify => Class['apache::service'], } + # NOTE(pabelanger): This code is duplicated in ::apache::vhost::custom and + # needs to be converted into something generic. if $::apache::vhost_enable_dir { $vhost_enable_dir = $::apache::vhost_enable_dir $vhost_symlink_ensure = $ensure ? 
{ @@ -778,13 +785,11 @@ # - $ssl_crl_path # - $ssl_crl # - $ssl_crl_check - # - $ssl_proxyengine # - $ssl_protocol # - $ssl_cipher # - $ssl_honorcipherorder # - $ssl_verify_client # - $ssl_verify_depth - # - $ssl_proxy_machine_cert # - $ssl_options # - $ssl_openssl_conf_cmd # - $apache_version @@ -796,6 +801,19 @@ } } + # Template uses: + # - $ssl_proxyengine + # - $ssl_proxy_check_peer_cn + # - $ssl_proxy_check_peer_name + # - $ssl_proxy_machine_cert + if $ssl_proxyengine { + concat::fragment { "${name}-sslproxy": + target => "${priority_real}${filename}.conf", + order => 210, + content => template('apache/vhost/_sslproxy.erb'), + } + } + # Template uses: # - $auth_kerb # - $krb_method_negotiate @@ -963,15 +981,6 @@ content => template('apache/vhost/_filters.erb'), } } - # Template uses: - # - $limit_request_field_size - if $limit_request_field_size { - concat::fragment { "${name}-limits": - target => "${priority_real}${filename}.conf", - order => 330, - content => template('apache/vhost/_limits.erb'), - } - } # Template uses no variables concat::fragment { "${name}-file_footer": diff --git a/apache/manifests/vhost/custom.pp b/apache/manifests/vhost/custom.pp new file mode 100644 index 000000000..d85e4d091 --- /dev/null +++ b/apache/manifests/vhost/custom.pp @@ -0,0 +1,37 @@ +# See README.md for usage information +define apache::vhost::custom( + $content, + $ensure = 'present', + $priority = '25', +) { + include ::apache + + ## Apache include does not always work with spaces in the filename + $filename = regsubst($name, ' ', '_', 'G') + + ::apache::custom_config { $filename: + ensure => $ensure, + confdir => $::apache::vhost_dir, + content => $content, + priority => $priority, + } + + # NOTE(pabelanger): This code is duplicated in ::apache::vhost and needs to + # converted into something generic. + if $::apache::vhost_enable_dir { + $vhost_symlink_ensure = $ensure ? { + present => link, + default => $ensure, + } + + file { "${priority}-${filename}.conf symlink": + ensure => $vhost_symlink_ensure, + path => "${::apache::vhost_enable_dir}/${priority}-${filename}.conf", + target => "${::apache::vhost_dir}/${priority}-${filename}.conf", + owner => 'root', + group => $::apache::params::root_group, + mode => '0644', + require => Apache::Custom_config[$filename], + } + } +} diff --git a/apache/metadata.json b/apache/metadata.json index f06e6d529..85e688949 100644 --- a/apache/metadata.json +++ b/apache/metadata.json @@ -1,6 +1,6 @@ { "name": "puppetlabs-apache", - "version": "1.7.0", + "version": "1.7.1", "author": "puppetlabs", "summary": "Installs, configures, and manages Apache virtual hosts, web services, and modules.", "license": "Apache-2.0", @@ -68,7 +68,7 @@ "requirements": [ { "name": "pe", - "version_requirement": ">= 3.7.0 < 2015.3.0" + "version_requirement": ">= 3.7.0 < 2015.4.0" }, { "name": "puppet", diff --git a/apache/spec/acceptance/apache_parameters_spec.rb b/apache/spec/acceptance/apache_parameters_spec.rb index c46a22043..00e486fb6 100644 --- a/apache/spec/acceptance/apache_parameters_spec.rb +++ b/apache/spec/acceptance/apache_parameters_spec.rb @@ -1,7 +1,7 @@ require 'spec_helper_acceptance' require_relative './version.rb' -describe 'apache parameters', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do +describe 'apache parameters' do # Currently this test only does something on FreeBSD. 
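The vhost.pp hunks above wire `ssl_proxy_check_peer_cn` and `ssl_proxy_check_peer_name` into a new `${name}-sslproxy` concat fragment that is rendered whenever `ssl_proxyengine` is set. As a rough sketch of a declaration exercising them (hostnames and docroot are made up, and `proxy_pass` is simply the module's existing reverse-proxy parameter included for context):

~~~ puppet
apache::vhost { 'proxy.example.com':
  docroot                   => '/var/www/proxy.example.com',
  ssl_proxyengine           => true,
  ssl_proxy_check_peer_cn   => 'on',
  ssl_proxy_check_peer_name => 'on',
  proxy_pass                => [
    { 'path' => '/', 'url' => 'https://backend.example.com/' },
  ],
}
~~~

Because the proxy directives now live in their own fragment, they are emitted even when `ssl` is false for the front-end vhost, which the added 'ssl_proxyengine without ssl' spec context further down checks.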
describe 'default_confd_files => false' do @@ -354,6 +354,20 @@ class { 'apache': end end + describe 'limitrequestfieldsize' do + describe 'setup' do + it 'applies cleanly' do + pp = "class { 'apache': limitreqfieldsize => '16830' }" + apply_manifest(pp, :catch_failures => true) + end + end + + describe file($conf_file) do + it { is_expected.to be_file } + it { is_expected.to contain 'LimitRequestFieldSize 16830' } + end + end + describe 'logging' do describe 'setup' do it 'applies cleanly' do diff --git a/apache/spec/acceptance/apache_ssl_spec.rb b/apache/spec/acceptance/apache_ssl_spec.rb index f8023fa24..ccf65c727 100644 --- a/apache/spec/acceptance/apache_ssl_spec.rb +++ b/apache/spec/acceptance/apache_ssl_spec.rb @@ -8,7 +8,7 @@ vhostd = '/etc/apache2/sites-available' end -describe 'apache ssl', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do +describe 'apache ssl' do describe 'ssl parameters' do it 'runs without error' do diff --git a/apache/spec/acceptance/class_spec.rb b/apache/spec/acceptance/class_spec.rb index 0cf1c36f9..47b0d36fa 100644 --- a/apache/spec/acceptance/class_spec.rb +++ b/apache/spec/acceptance/class_spec.rb @@ -1,6 +1,6 @@ require 'spec_helper_acceptance' -describe 'apache class', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do +describe 'apache class' do case fact('osfamily') when 'RedHat' package_name = 'httpd' diff --git a/apache/spec/acceptance/custom_config_spec.rb b/apache/spec/acceptance/custom_config_spec.rb index 683e6b648..fe73d111b 100644 --- a/apache/spec/acceptance/custom_config_spec.rb +++ b/apache/spec/acceptance/custom_config_spec.rb @@ -1,7 +1,7 @@ require 'spec_helper_acceptance' require_relative './version.rb' -describe 'apache::custom_config define', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do +describe 'apache::custom_config define' do context 'invalid config' do it 'should not add the config' do pp = <<-EOS diff --git a/apache/spec/acceptance/default_mods_spec.rb b/apache/spec/acceptance/default_mods_spec.rb index 0199f180b..c2d6a8c26 100644 --- a/apache/spec/acceptance/default_mods_spec.rb +++ b/apache/spec/acceptance/default_mods_spec.rb @@ -15,7 +15,7 @@ servicename = 'apache2' end -describe 'apache::default_mods class', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do +describe 'apache::default_mods class' do describe 'no default mods' do # Using puppet_apply as a helper it 'should apply with no errors' do diff --git a/apache/spec/acceptance/itk_spec.rb b/apache/spec/acceptance/itk_spec.rb index 2dde8f407..c220a70f8 100644 --- a/apache/spec/acceptance/itk_spec.rb +++ b/apache/spec/acceptance/itk_spec.rb @@ -3,26 +3,41 @@ case fact('osfamily') when 'Debian' service_name = 'apache2' + majrelease = fact('operatingsystemmajrelease') + if [ '6', '7', '10.04', '12.04'].include?(majrelease) + variant = :itk_only + else + variant = :prefork + end when 'FreeBSD' service_name = 'apache24' -else - # Not implemented yet - service_name = :skip + majrelease = fact('operatingsystemmajrelease') + variant = :prefork end -describe 'apache::mod::itk class', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) or service_name.equal? 
:skip do +describe 'apache::mod::itk class', :if => service_name do describe 'running puppet code' do # Using puppet_apply as a helper it 'should work with no errors' do - pp = <<-EOS - class { 'apache': - mpm_module => 'itk', - } - EOS + pp = case variant + when :prefork + <<-EOS + class { 'apache': + mpm_module => 'prefork', + } + class { 'apache::mod::itk': } + EOS + when :itk_only + <<-EOS + class { 'apache': + mpm_module => 'itk', + } + EOS + end # Run it twice and test for idempotency apply_manifest(pp, :catch_failures => true) - expect(apply_manifest(pp, :catch_failures => true).exit_code).to be_zero + apply_manifest(pp, :catch_changes => true) end end diff --git a/apache/spec/acceptance/mod_fcgid_spec.rb b/apache/spec/acceptance/mod_fcgid_spec.rb index e99a7f299..ce3b5b5b2 100644 --- a/apache/spec/acceptance/mod_fcgid_spec.rb +++ b/apache/spec/acceptance/mod_fcgid_spec.rb @@ -1,7 +1,7 @@ require 'spec_helper_acceptance' -describe 'apache::mod::fcgid class', :unless => (UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) or (fact('operatingsystem') == 'OracleLinux' and fact('operatingsystemmajrelease') == '7')) do - context "default fcgid config", :if => (fact('osfamily') == 'RedHat' and fact('operatingsystemmajrelease') != '5') do +describe 'apache::mod::fcgid class', :if => ((fact('osfamily') == 'RedHat' and fact('operatingsystemmajrelease') != '5') and !(fact('operatingsystem') == 'OracleLinux' and fact('operatingsystemmajrelease') == '7')) do + context "default fcgid config" do it 'succeeds in puppeting fcgid' do pp = <<-EOS class { 'epel': } # mod_fcgid lives in epel diff --git a/apache/spec/acceptance/mod_mime_spec.rb b/apache/spec/acceptance/mod_mime_spec.rb index 89b3c800f..e47360b5e 100644 --- a/apache/spec/acceptance/mod_mime_spec.rb +++ b/apache/spec/acceptance/mod_mime_spec.rb @@ -1,6 +1,6 @@ require 'spec_helper_acceptance' -describe 'apache::mod::mime class', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do +describe 'apache::mod::mime class' do case fact('osfamily') when 'Debian' mod_dir = '/etc/apache2/mods-available' diff --git a/apache/spec/acceptance/mod_negotiation_spec.rb b/apache/spec/acceptance/mod_negotiation_spec.rb index a44edbf26..48eb896b6 100644 --- a/apache/spec/acceptance/mod_negotiation_spec.rb +++ b/apache/spec/acceptance/mod_negotiation_spec.rb @@ -1,6 +1,6 @@ require 'spec_helper_acceptance' -describe 'apache::mod::negotiation class', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do +describe 'apache::mod::negotiation class' do case fact('osfamily') when 'Debian' vhost_dir = '/etc/apache2/sites-enabled' diff --git a/apache/spec/acceptance/mod_pagespeed_spec.rb b/apache/spec/acceptance/mod_pagespeed_spec.rb index 5fa342e9a..f8060a167 100644 --- a/apache/spec/acceptance/mod_pagespeed_spec.rb +++ b/apache/spec/acceptance/mod_pagespeed_spec.rb @@ -1,6 +1,6 @@ require 'spec_helper_acceptance' -describe 'apache::mod::pagespeed class', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do +describe 'apache::mod::pagespeed class' do case fact('osfamily') when 'Debian' vhost_dir = '/etc/apache2/sites-enabled' diff --git a/apache/spec/acceptance/mod_passenger_spec.rb b/apache/spec/acceptance/mod_passenger_spec.rb index 5af9113e5..df9cd9e13 100644 --- a/apache/spec/acceptance/mod_passenger_spec.rb +++ b/apache/spec/acceptance/mod_passenger_spec.rb @@ -1,6 +1,6 @@ require 'spec_helper_acceptance' -describe 'apache::mod::passenger class', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do +describe 
'apache::mod::passenger class' do case fact('osfamily') when 'Debian' service_name = 'apache2' diff --git a/apache/spec/acceptance/mod_php_spec.rb b/apache/spec/acceptance/mod_php_spec.rb index 502ec1ddc..a42f52373 100644 --- a/apache/spec/acceptance/mod_php_spec.rb +++ b/apache/spec/acceptance/mod_php_spec.rb @@ -1,6 +1,6 @@ require 'spec_helper_acceptance' -describe 'apache::mod::php class', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do +describe 'apache::mod::php class' do case fact('osfamily') when 'Debian' vhost_dir = '/etc/apache2/sites-enabled' diff --git a/apache/spec/acceptance/mod_proxy_html_spec.rb b/apache/spec/acceptance/mod_proxy_html_spec.rb index e45600089..840ea563f 100644 --- a/apache/spec/acceptance/mod_proxy_html_spec.rb +++ b/apache/spec/acceptance/mod_proxy_html_spec.rb @@ -1,6 +1,6 @@ require 'spec_helper_acceptance' -describe 'apache::mod::proxy_html class', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do +describe 'apache::mod::proxy_html class' do case fact('osfamily') when 'Debian' service_name = 'apache2' diff --git a/apache/spec/acceptance/mod_security_spec.rb b/apache/spec/acceptance/mod_security_spec.rb index 67ad7d5b8..4fcf0f551 100644 --- a/apache/spec/acceptance/mod_security_spec.rb +++ b/apache/spec/acceptance/mod_security_spec.rb @@ -1,6 +1,6 @@ require 'spec_helper_acceptance' -describe 'apache::mod::security class', :unless => (UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) or (fact('osfamily') == 'Debian' and (fact('lsbdistcodename') == 'squeeze' or fact('lsbdistcodename') == 'lucid' or fact('lsbdistcodename') == 'precise' or fact('lsbdistcodename') == 'wheezy'))) do +describe 'apache::mod::security class', :unless => (fact('osfamily') == 'Debian' and (fact('lsbdistcodename') == 'squeeze' or fact('lsbdistcodename') == 'lucid' or fact('lsbdistcodename') == 'precise' or fact('lsbdistcodename') == 'wheezy')) do case fact('osfamily') when 'Debian' mod_dir = '/etc/apache2/mods-available' diff --git a/apache/spec/acceptance/mod_suphp_spec.rb b/apache/spec/acceptance/mod_suphp_spec.rb index 33f57fba6..da2c6042e 100644 --- a/apache/spec/acceptance/mod_suphp_spec.rb +++ b/apache/spec/acceptance/mod_suphp_spec.rb @@ -1,11 +1,9 @@ require 'spec_helper_acceptance' -describe 'apache::mod::suphp class', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do - case fact('operatingsystem') - when 'Ubuntu' - context "default suphp config" do - it 'succeeds in puppeting suphp' do - pp = <<-EOS +describe 'apache::mod::suphp class', :if => fact('operatingsystem') == 'Ubuntu' do + context "default suphp config" do + it 'succeeds in puppeting suphp' do + pp = <<-EOS class { 'apache': mpm_module => 'prefork', } @@ -24,32 +22,32 @@ class { 'apache': } class { 'apache::mod::php': } class { 'apache::mod::suphp': } - EOS - apply_manifest(pp, :catch_failures => true) - end + EOS + apply_manifest(pp, :catch_failures => true) + end - describe service('apache2') do - it { is_expected.to be_enabled } - it { is_expected.to be_running } - end + describe service('apache2') do + it { is_expected.to be_enabled } + it { is_expected.to be_running } + end - it 'should answer to suphp.example.com' do - timeout = 0 - loop do - r = shell('curl suphp.example.com:80') - timeout += 1 - break if r.stdout =~ /^daemon$/ - if timeout > 40 - expect(timeout < 40).to be true - break - end - sleep(1) - end - shell("/usr/bin/curl suphp.example.com:80") do |r| - expect(r.stdout).to match(/^daemon$/) - expect(r.exit_code).to eq(0) - end + it 'should answer to 
suphp.example.com' do + timeout = 0 + loop do + r = shell('curl suphp.example.com:80') + timeout += 1 + break if r.stdout =~ /^daemon$/ + if timeout > 40 + expect(timeout < 40).to be true + break end + sleep(1) end + shell("/usr/bin/curl suphp.example.com:80") do |r| + expect(r.stdout).to match(/^daemon$/) + expect(r.exit_code).to eq(0) + end + end + end end diff --git a/apache/spec/acceptance/prefork_worker_spec.rb b/apache/spec/acceptance/prefork_worker_spec.rb index 0ac27232d..234b6acef 100644 --- a/apache/spec/acceptance/prefork_worker_spec.rb +++ b/apache/spec/acceptance/prefork_worker_spec.rb @@ -36,7 +36,7 @@ class { 'apache': end end -describe 'apache::mod::worker class', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do +describe 'apache::mod::worker class' do describe 'running puppet code' do # Using puppet_apply as a helper it 'should work with no errors' do @@ -58,7 +58,7 @@ class { 'apache': end end -describe 'apache::mod::prefork class', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do +describe 'apache::mod::prefork class' do describe 'running puppet code' do # Using puppet_apply as a helper it 'should work with no errors' do diff --git a/apache/spec/acceptance/service_spec.rb b/apache/spec/acceptance/service_spec.rb index b51ca386f..c3124c846 100644 --- a/apache/spec/acceptance/service_spec.rb +++ b/apache/spec/acceptance/service_spec.rb @@ -1,6 +1,6 @@ require 'spec_helper_acceptance' -describe 'apache::service class', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do +describe 'apache::service class' do describe 'adding dependencies in between the base class and service class' do it 'should work with no errors' do pp = <<-EOS diff --git a/apache/spec/acceptance/unsupported_spec.rb b/apache/spec/acceptance/unsupported_spec.rb deleted file mode 100644 index 085845dbf..000000000 --- a/apache/spec/acceptance/unsupported_spec.rb +++ /dev/null @@ -1,13 +0,0 @@ -require 'spec_helper_acceptance' - -describe 'unsupported distributions and OSes', :if => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do - it 'should fail' do - pp = <<-EOS - class { 'apache': } - apache::vhost { 'test.lan': - docroot => '/var/www', - } - EOS - expect(apply_manifest(pp, :expect_failures => true).stderr).to match(/unsupported/i) - end -end diff --git a/apache/spec/acceptance/vhost_spec.rb b/apache/spec/acceptance/vhost_spec.rb index 5fa91d5dc..a51ab5822 100644 --- a/apache/spec/acceptance/vhost_spec.rb +++ b/apache/spec/acceptance/vhost_spec.rb @@ -1,7 +1,7 @@ require 'spec_helper_acceptance' require_relative './version.rb' -describe 'apache::vhost define', :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do +describe 'apache::vhost define' do context 'no default vhosts' do it 'should create no default vhosts' do pp = <<-EOS @@ -202,8 +202,8 @@ class { 'apache': ip_based => true, docroot => '/var/www/html', } - host { 'ipv4.example.com': ip => '127.0.0.1', } - host { 'ipv6.example.com': ip => '127.0.0.2', } + host { 'host1.example.com': ip => '127.0.0.1', } + host { 'host2.example.com': ip => '127.0.0.2', } file { '/var/www/html/index.html': ensure => file, content => "Hello from vhost\\n", @@ -230,20 +230,20 @@ class { 'apache': it { is_expected.not_to contain 'NameVirtualHost 127.0.0.2:80' } end - it 'should answer to ipv4.example.com' do - shell("/usr/bin/curl ipv4.example.com:80", {:acceptable_exit_codes => 0}) do |r| + it 'should answer to host1.example.com' do + shell("/usr/bin/curl host1.example.com:80", {:acceptable_exit_codes => 0}) do |r| 
expect(r.stdout).to eq("Hello from vhost\n") end end - it 'should answer to ipv6.example.com' do - shell("/usr/bin/curl ipv6.example.com:80", {:acceptable_exit_codes => 0}) do |r| + it 'should answer to host2.example.com' do + shell("/usr/bin/curl host2.example.com:80", {:acceptable_exit_codes => 0}) do |r| expect(r.stdout).to eq("Hello from vhost\n") end end end - context 'new vhost with IPv6 address on port 80' do + context 'new vhost with IPv6 address on port 80', :ipv6 do it 'should configure one apache vhost with an ipv6 address' do pp = <<-EOS class { 'apache': @@ -505,8 +505,8 @@ class { 'apache': } case fact('lsbdistcodename') when 'precise', 'wheezy' - context 'vhost fallbackresource example' do - it 'should configure a vhost with Fallbackresource' do + context 'vhost FallbackResource example' do + it 'should configure a vhost with FallbackResource' do pp = <<-EOS class { 'apache': } apache::vhost { 'fallback.example.net': @@ -1232,54 +1232,58 @@ class { 'apache': } end describe 'wsgi' do - it 'import_script applies cleanly' do - pp = <<-EOS - class { 'apache': } - class { 'apache::mod::wsgi': } - host { 'test.server': ip => '127.0.0.1' } - apache::vhost { 'test.server': - docroot => '/tmp', - wsgi_application_group => '%{GLOBAL}', - wsgi_daemon_process => 'wsgi', - wsgi_daemon_process_options => {processes => '2'}, - wsgi_process_group => 'nobody', - wsgi_script_aliases => { '/test' => '/test1' }, - wsgi_pass_authorization => 'On', - } - EOS - apply_manifest(pp, :catch_failures => true) + context 'on lucid', :if => fact('lsbdistcodename') == 'lucid' do + it 'import_script applies cleanly' do + pp = <<-EOS + class { 'apache': } + class { 'apache::mod::wsgi': } + host { 'test.server': ip => '127.0.0.1' } + apache::vhost { 'test.server': + docroot => '/tmp', + wsgi_application_group => '%{GLOBAL}', + wsgi_daemon_process => 'wsgi', + wsgi_daemon_process_options => {processes => '2'}, + wsgi_process_group => 'nobody', + wsgi_script_aliases => { '/test' => '/test1' }, + wsgi_pass_authorization => 'On', + } + EOS + apply_manifest(pp, :catch_failures => true) + end end - it 'import_script applies cleanly', :unless => (fact('lsbdistcodename') == 'lucid' or UNSUPPORTED_PLATFORMS.include?(fact('osfamily'))) do - pp = <<-EOS - class { 'apache': } - class { 'apache::mod::wsgi': } - host { 'test.server': ip => '127.0.0.1' } - apache::vhost { 'test.server': - docroot => '/tmp', - wsgi_application_group => '%{GLOBAL}', - wsgi_daemon_process => 'wsgi', - wsgi_daemon_process_options => {processes => '2'}, - wsgi_import_script => '/test1', - wsgi_import_script_options => { application-group => '%{GLOBAL}', process-group => 'wsgi' }, - wsgi_process_group => 'nobody', - wsgi_script_aliases => { '/test' => '/test1' }, - wsgi_pass_authorization => 'On', - wsgi_chunked_request => 'On', - } - EOS - apply_manifest(pp, :catch_failures => true) - end + context 'on everything but lucid', :unless => fact('lsbdistcodename') == 'lucid' do + it 'import_script applies cleanly' do + pp = <<-EOS + class { 'apache': } + class { 'apache::mod::wsgi': } + host { 'test.server': ip => '127.0.0.1' } + apache::vhost { 'test.server': + docroot => '/tmp', + wsgi_application_group => '%{GLOBAL}', + wsgi_daemon_process => 'wsgi', + wsgi_daemon_process_options => {processes => '2'}, + wsgi_import_script => '/test1', + wsgi_import_script_options => { application-group => '%{GLOBAL}', process-group => 'wsgi' }, + wsgi_process_group => 'nobody', + wsgi_script_aliases => { '/test' => '/test1' }, + wsgi_pass_authorization => 'On', + 
wsgi_chunked_request => 'On', + } + EOS + apply_manifest(pp, :catch_failures => true) + end - describe file("#{$vhost_dir}/25-test.server.conf"), :unless => (fact('lsbdistcodename') == 'lucid' or UNSUPPORTED_PLATFORMS.include?(fact('osfamily'))) do - it { is_expected.to be_file } - it { is_expected.to contain 'WSGIApplicationGroup %{GLOBAL}' } - it { is_expected.to contain 'WSGIDaemonProcess wsgi processes=2' } - it { is_expected.to contain 'WSGIImportScript /test1 application-group=%{GLOBAL} process-group=wsgi' } - it { is_expected.to contain 'WSGIProcessGroup nobody' } - it { is_expected.to contain 'WSGIScriptAlias /test "/test1"' } - it { is_expected.to contain 'WSGIPassAuthorization On' } - it { is_expected.to contain 'WSGIChunkedRequest On' } + describe file("#{$vhost_dir}/25-test.server.conf") do + it { is_expected.to be_file } + it { is_expected.to contain 'WSGIApplicationGroup %{GLOBAL}' } + it { is_expected.to contain 'WSGIDaemonProcess wsgi processes=2' } + it { is_expected.to contain 'WSGIImportScript /test1 application-group=%{GLOBAL} process-group=wsgi' } + it { is_expected.to contain 'WSGIProcessGroup nobody' } + it { is_expected.to contain 'WSGIScriptAlias /test "/test1"' } + it { is_expected.to contain 'WSGIPassAuthorization On' } + it { is_expected.to contain 'WSGIChunkedRequest On' } + end end end @@ -1326,6 +1330,13 @@ class { 'apache': } describe 'fastcgi' do it 'applies cleanly' do pp = <<-EOS + if ($::operatingsystem == 'Ubuntu' and versioncpm($::operatingsystemrelease, '10.04' >= 0)) { + include ::apt + apt::ppa { 'multiverse': + before => Class['Apache::Mod::Fastcgi'], + } + } + class { 'apache': } class { 'apache::mod::fastcgi': } host { 'test.server': ip => '127.0.0.1' } diff --git a/apache/spec/classes/dev_spec.rb b/apache/spec/classes/dev_spec.rb index eb3d76593..933d67703 100644 --- a/apache/spec/classes/dev_spec.rb +++ b/apache/spec/classes/dev_spec.rb @@ -77,7 +77,6 @@ :osfamily => 'Gentoo', :operatingsystem => 'Gentoo', :operatingsystemrelease => '3.16.1-gentoo', - :concat_basedir => '/dne', :is_pe => false, :concat_basedir => '/foo', :id => 'root', diff --git a/apache/spec/classes/mod/itk_spec.rb b/apache/spec/classes/mod/itk_spec.rb index 164440876..cd2e6c67a 100644 --- a/apache/spec/classes/mod/itk_spec.rb +++ b/apache/spec/classes/mod/itk_spec.rb @@ -37,6 +37,10 @@ end context "with Apache version >= 2.4" do + let :pre_condition do + 'class { "apache": mpm_module => prefork, }' + end + let :params do { :apache_version => '2.4', @@ -52,6 +56,10 @@ end end context "on a FreeBSD OS" do + let :pre_condition do + 'class { "apache": mpm_module => false, }' + end + let :facts do { :osfamily => 'FreeBSD', diff --git a/apache/spec/classes/mod/ldap_spec.rb b/apache/spec/classes/mod/ldap_spec.rb new file mode 100644 index 000000000..2b82d8d1b --- /dev/null +++ b/apache/spec/classes/mod/ldap_spec.rb @@ -0,0 +1,78 @@ +require 'spec_helper' + +describe 'apache::mod::ldap', :type => :class do + let :pre_condition do + 'include apache' + end + + context "on a Debian OS" do + let :facts do + { + :lsbdistcodename => 'squeeze', + :osfamily => 'Debian', + :operatingsystemrelease => '6', + :concat_basedir => '/dne', + :id => 'root', + :kernel => 'Linux', + :operatingsystem => 'Debian', + :path => '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin', + :is_pe => false, + } + end + it { is_expected.to contain_class("apache::params") } + it { is_expected.to contain_class("apache::mod::ldap") } + it { is_expected.to contain_apache__mod('ldap') } + + context 'default 
ldap_trusted_global_cert_file' do + it { is_expected.to contain_file('ldap.conf').without_content(/^LDAPTrustedGlobalCert/) } + end + + context 'ldap_trusted_global_cert_file param' do + let(:params) { { :ldap_trusted_global_cert_file => 'ca.pem' } } + it { is_expected.to contain_file('ldap.conf').with_content(/^LDAPTrustedGlobalCert CA_BASE64 ca\.pem$/) } + end + + context 'ldap_trusted_global_cert_file and ldap_trusted_global_cert_type params' do + let(:params) {{ + :ldap_trusted_global_cert_file => 'ca.pem', + :ldap_trusted_global_cert_type => 'CA_DER' + }} + it { is_expected.to contain_file('ldap.conf').with_content(/^LDAPTrustedGlobalCert CA_DER ca\.pem$/) } + end + end #Debian + + context "on a RedHat OS" do + let :facts do + { + :osfamily => 'RedHat', + :operatingsystemrelease => '6', + :concat_basedir => '/dne', + :id => 'root', + :kernel => 'Linux', + :operatingsystem => 'RedHat', + :path => '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin', + :is_pe => false, + } + end + it { is_expected.to contain_class("apache::params") } + it { is_expected.to contain_class("apache::mod::ldap") } + it { is_expected.to contain_apache__mod('ldap') } + + context 'default ldap_trusted_global_cert_file' do + it { is_expected.to contain_file('ldap.conf').without_content(/^LDAPTrustedGlobalCert/) } + end + + context 'ldap_trusted_global_cert_file param' do + let(:params) { { :ldap_trusted_global_cert_file => 'ca.pem' } } + it { is_expected.to contain_file('ldap.conf').with_content(/^LDAPTrustedGlobalCert CA_BASE64 ca\.pem$/) } + end + + context 'ldap_trusted_global_cert_file and ldap_trusted_global_cert_type params' do + let(:params) {{ + :ldap_trusted_global_cert_file => 'ca.pem', + :ldap_trusted_global_cert_type => 'CA_DER' + }} + it { is_expected.to contain_file('ldap.conf').with_content(/^LDAPTrustedGlobalCert CA_DER ca\.pem$/) } + end + end # Redhat +end diff --git a/apache/spec/classes/mod/pagespeed_spec.rb b/apache/spec/classes/mod/pagespeed_spec.rb index c3f5a4144..44c60053e 100644 --- a/apache/spec/classes/mod/pagespeed_spec.rb +++ b/apache/spec/classes/mod/pagespeed_spec.rb @@ -21,7 +21,16 @@ it { is_expected.to contain_class("apache::params") } it { is_expected.to contain_apache__mod('pagespeed') } it { is_expected.to contain_package("mod-pagespeed-stable") } - it { is_expected.to contain_file('pagespeed.conf') } + + context "when setting additional_configuration to a Hash" do + let :params do { :additional_configuration => { 'Key' => 'Value' } } end + it { is_expected.to contain_file('pagespeed.conf').with_content /Key Value/ } + end + + context "when setting additional_configuration to an Array" do + let :params do { :additional_configuration => [ 'Key Value' ] } end + it { is_expected.to contain_file('pagespeed.conf').with_content /Key Value/ } + end end context "on a RedHat OS" do diff --git a/apache/spec/classes/mod/passenger_spec.rb b/apache/spec/classes/mod/passenger_spec.rb index c438241e0..34dcc5c6f 100644 --- a/apache/spec/classes/mod/passenger_spec.rb +++ b/apache/spec/classes/mod/passenger_spec.rb @@ -14,7 +14,6 @@ :lsbdistcodename => 'squeeze', :operatingsystem => 'Debian', :id => 'root', - :kernel => 'Linux', :path => '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin', :is_pe => false, } diff --git a/apache/spec/classes/mod/php_spec.rb b/apache/spec/classes/mod/php_spec.rb index 246b3bbd4..6814b8a8f 100644 --- a/apache/spec/classes/mod/php_spec.rb +++ b/apache/spec/classes/mod/php_spec.rb @@ -88,7 +88,7 @@ let :params do { :extensions => 
['.php','.php5']} end - it { is_expected.to contain_file("php5.conf").with_content(/AddHandler php5-script .php .php5\n/) } + it { is_expected.to contain_file("php5.conf").with_content(Regexp.new(Regexp.escape(''))) } end context "with specific version" do let :pre_condition do diff --git a/apache/spec/defines/vhost_custom_spec.rb b/apache/spec/defines/vhost_custom_spec.rb new file mode 100644 index 000000000..804be86b8 --- /dev/null +++ b/apache/spec/defines/vhost_custom_spec.rb @@ -0,0 +1,99 @@ +require 'spec_helper' + +describe 'apache::vhost::custom', :type => :define do + let :title do + 'rspec.example.com' + end + let :default_params do + { + :content => 'foobar' + } + end + describe 'os-dependent items' do + context "on RedHat based systems" do + let :default_facts do + { + :osfamily => 'RedHat', + :operatingsystemrelease => '6', + :operatingsystem => 'RedHat', + :concat_basedir => '/dne', + :id => 'root', + :kernel => 'Linux', + :path => '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin', + :is_pe => false, + } + end + let :params do default_params end + let :facts do default_facts end + end + context "on Debian based systems" do + let :default_facts do + { + :osfamily => 'Debian', + :operatingsystemrelease => '6', + :lsbdistcodename => 'squeeze', + :operatingsystem => 'Debian', + :concat_basedir => '/dne', + :id => 'root', + :kernel => 'Linux', + :path => '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin', + :is_pe => false, + } + end + let :params do default_params end + let :facts do default_facts end + it { is_expected.to contain_file("apache_rspec.example.com").with( + :ensure => 'present', + :content => 'foobar', + :path => '/etc/apache2/sites-available/25-rspec.example.com.conf', + ) } + it { is_expected.to contain_file("25-rspec.example.com.conf symlink").with( + :ensure => 'link', + :path => '/etc/apache2/sites-enabled/25-rspec.example.com.conf', + :target => '/etc/apache2/sites-available/25-rspec.example.com.conf' + ) } + end + context "on FreeBSD systems" do + let :default_facts do + { + :osfamily => 'FreeBSD', + :operatingsystemrelease => '9', + :operatingsystem => 'FreeBSD', + :concat_basedir => '/dne', + :id => 'root', + :kernel => 'FreeBSD', + :path => '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin', + :is_pe => false, + } + end + let :params do default_params end + let :facts do default_facts end + it { is_expected.to contain_file("apache_rspec.example.com").with( + :ensure => 'present', + :content => 'foobar', + :path => '/usr/local/etc/apache24/Vhosts/25-rspec.example.com.conf', + ) } + end + context "on Gentoo systems" do + let :default_facts do + { + :osfamily => 'Gentoo', + :operatingsystem => 'Gentoo', + :operatingsystemrelease => '3.16.1-gentoo', + :concat_basedir => '/dne', + :id => 'root', + :kernel => 'Linux', + :path => '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin', + :is_pe => false, + } + end + let :params do default_params end + let :facts do default_facts end + it { is_expected.to contain_file("apache_rspec.example.com").with( + :ensure => 'present', + :content => 'foobar', + :path => '/etc/apache2/vhosts.d/25-rspec.example.com.conf', + ) } + end + end +end diff --git a/apache/spec/defines/vhost_spec.rb b/apache/spec/defines/vhost_spec.rb index 3f5347254..9dd563a9c 100644 --- a/apache/spec/defines/vhost_spec.rb +++ b/apache/spec/defines/vhost_spec.rb @@ -154,7 +154,10 @@ 'ssl_verify_depth' => '3', 'ssl_options' => '+ExportCertData', 'ssl_openssl_conf_cmd' => 'DHParameters "foo.pem"', + 
'ssl_proxy_check_peer_cn' => 'on', + 'ssl_proxy_check_peer_name' => 'on', 'ssl_proxyengine' => true, + 'priority' => '30', 'default_vhost' => true, 'servername' => 'example.com', @@ -323,8 +326,7 @@ 'krb_authoritative' => 'off', 'krb_auth_realms' => ['EXAMPLE.ORG','EXAMPLE.NET'], 'krb_5keytab' => '/tmp/keytab5', - 'krb_local_user_mapping' => 'off', - 'limit_request_field_size' => '54321', + 'krb_local_user_mapping' => 'off' } end let :facts do @@ -432,6 +434,13 @@ it { is_expected.to contain_concat__fragment('rspec.example.com-ssl') } it { is_expected.to contain_concat__fragment('rspec.example.com-ssl').with( :content => /^\s+SSLOpenSSLConfCmd\s+DHParameters "foo.pem"$/ ) } + it { is_expected.to contain_concat__fragment('rspec.example.com-sslproxy') } + it { is_expected.to contain_concat__fragment('rspec.example.com-sslproxy').with( + :content => /^\s+SSLProxyEngine On$/ ) } + it { is_expected.to contain_concat__fragment('rspec.example.com-sslproxy').with( + :content => /^\s+SSLProxyCheckPeerCN\s+on$/ ) } + it { is_expected.to contain_concat__fragment('rspec.example.com-sslproxy').with( + :content => /^\s+SSLProxyCheckPeerName\s+on$/ ) } it { is_expected.to contain_concat__fragment('rspec.example.com-suphp') } it { is_expected.to contain_concat__fragment('rspec.example.com-php_admin') } it { is_expected.to contain_concat__fragment('rspec.example.com-header') } @@ -462,8 +471,6 @@ :content => /^\s+KrbSaveCredentials\soff$/)} it { is_expected.to contain_concat__fragment('rspec.example.com-auth_kerb').with( :content => /^\s+KrbVerifyKDC\son$/)} - it { is_expected.to contain_concat__fragment('rspec.example.com-limits').with( - :content => /^\s+LimitRequestFieldSize\s54321$/)} end context 'vhost with multiple ip addresses' do let :params do @@ -674,6 +681,7 @@ it { is_expected.to_not contain_concat__fragment('rspec.example.com-serveralias') } it { is_expected.to_not contain_concat__fragment('rspec.example.com-setenv') } it { is_expected.to_not contain_concat__fragment('rspec.example.com-ssl') } + it { is_expected.to_not contain_concat__fragment('rspec.example.com-sslproxy') } it { is_expected.to_not contain_concat__fragment('rspec.example.com-suphp') } it { is_expected.to_not contain_concat__fragment('rspec.example.com-php_admin') } it { is_expected.to_not contain_concat__fragment('rspec.example.com-header') } @@ -696,6 +704,18 @@ it { is_expected.to compile } it { is_expected.not_to contain_concat__fragment('rspec.example.com-docroot') } end + context 'ssl_proxyengine without ssl' do + let :params do + { + 'docroot' => '/rspec/docroot', + 'ssl' => false, + 'ssl_proxyengine' => true, + } + end + it { is_expected.to compile } + it { is_expected.not_to contain_concat__fragment('rspec.example.com-ssl') } + it { is_expected.to contain_concat__fragment('rspec.example.com-sslproxy') } + end end describe 'access logs' do let :facts do diff --git a/apache/spec/spec_helper_acceptance.rb b/apache/spec/spec_helper_acceptance.rb index d767b1e60..6bfdefd71 100644 --- a/apache/spec/spec_helper_acceptance.rb +++ b/apache/spec/spec_helper_acceptance.rb @@ -4,9 +4,12 @@ run_puppet_install_helper -UNSUPPORTED_PLATFORMS = ['Suse','windows','AIX','Solaris'] - RSpec.configure do |c| + # apache on Ubuntu 10.04 and 12.04 doesn't like IPv6 VirtualHosts, so we skip ipv6 tests on those systems + if fact('operatingsystem') == 'Ubuntu' and (fact('operatingsystemrelease') == '10.04' or fact('operatingsystemrelease') == '12.04') + c.filter_run_excluding :ipv6 => true + end + # Project root proj_root = 
File.expand_path(File.join(File.dirname(__FILE__), '..')) @@ -24,27 +27,32 @@ apply_manifest_on(agents, pp, :catch_failures => false) end + if fact('osfamily') == 'Debian' + # Make sure snake-oil certs are installed. + shell 'apt-get install -y ssl-cert' + end + # Install module and dependencies hosts.each do |host| copy_module_to(host, :source => proj_root, :module_name => 'apache') + + on host, puppet('module','install','puppetlabs-stdlib') + on host, puppet('module','install','puppetlabs-concat', '--version 1.1.1', '--force') + # Required for mod_passenger tests. if fact('osfamily') == 'RedHat' - on host, puppet('module','install','stahnma/epel'), { :acceptable_exit_codes => [0,1] } - on host, puppet('module','install','puppetlabs/inifile'), { :acceptable_exit_codes => [0,1] } + on host, puppet('module','install','stahnma/epel') + on host, puppet('module','install','puppetlabs/inifile') end + # Required for manifest to make mod_pagespeed repository available if fact('osfamily') == 'Debian' - on host, puppet('module','install','puppetlabs-apt', '--version 1.8.0', '--force'), { :acceptable_exit_codes => [0,1] } - end - on host, puppet('module','install','puppetlabs-stdlib'), { :acceptable_exit_codes => [0,1] } - on host, puppet('module','install','puppetlabs-concat', '--version 1.1.1', '--force'), { :acceptable_exit_codes => [0,1] } - - # Make sure selinux is disabled before each test or apache won't work. - if ! UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) - on host, puppet('apply', '-e', - %{"exec { 'setenforce 0': path => '/bin:/sbin:/usr/bin:/usr/sbin', onlyif => 'which setenforce && getenforce | grep Enforcing', }"}), - { :acceptable_exit_codes => [0] } + on host, puppet('module','install','puppetlabs-apt', '--version 1.8.0', '--force') end + + # Make sure selinux is disabled so the tests work. + on host, puppet('apply', '-e', + %{"exec { 'setenforce 0': path => '/bin:/sbin:/usr/bin:/usr/sbin', onlyif => 'which setenforce && getenforce | grep Enforcing', }"}) end end end diff --git a/apache/templates/httpd.conf.erb b/apache/templates/httpd.conf.erb index 6fb989244..cc6998b9b 100644 --- a/apache/templates/httpd.conf.erb +++ b/apache/templates/httpd.conf.erb @@ -10,6 +10,7 @@ Timeout <%= @timeout %> KeepAlive <%= @keepalive %> MaxKeepAliveRequests <%= @max_keepalive_requests %> KeepAliveTimeout <%= @keepalive_timeout %> +LimitRequestFieldSize <%= @limitreqfieldsize %> <%- if @rewrite_lock and scope.function_versioncmp([@apache_version, '2.2']) <= 0 -%> RewriteLock <%= @rewrite_lock %> diff --git a/apache/templates/mod/fastcgi.conf.erb b/apache/templates/mod/fastcgi.conf.erb index 8d94a2361..93c8d86ab 100644 --- a/apache/templates/mod/fastcgi.conf.erb +++ b/apache/templates/mod/fastcgi.conf.erb @@ -1,6 +1,8 @@ # The Fastcgi Apache module configuration file is being # managed by Puppet and changes will be overwritten. 
- AddHandler fastcgi-script .fcgi + + SetHandler fastcgi-script + FastCgiIpcDir "<%= @fastcgi_lib_path %>" diff --git a/apache/templates/mod/ldap.conf.erb b/apache/templates/mod/ldap.conf.erb index 001977617..fbb4b9213 100644 --- a/apache/templates/mod/ldap.conf.erb +++ b/apache/templates/mod/ldap.conf.erb @@ -9,3 +9,6 @@ Satisfy all <%- end -%> +<% if @ldap_trusted_global_cert_file -%> +LDAPTrustedGlobalCert <%= @ldap_trusted_global_cert_type %> <%= @ldap_trusted_global_cert_file %> +<% end -%> diff --git a/apache/templates/mod/nss.conf.erb b/apache/templates/mod/nss.conf.erb index 22762ccac..b6ea50487 100644 --- a/apache/templates/mod/nss.conf.erb +++ b/apache/templates/mod/nss.conf.erb @@ -84,7 +84,7 @@ NSSRequireSafeNegotiation off ## SSL Virtual Host Context ## -> +> # General setup for the virtual host #DocumentRoot "/etc/httpd/htdocs" diff --git a/apache/templates/mod/pagespeed.conf.erb b/apache/templates/mod/pagespeed.conf.erb index a1b6f117a..051cf5bed 100644 --- a/apache/templates/mod/pagespeed.conf.erb +++ b/apache/templates/mod/pagespeed.conf.erb @@ -7,7 +7,7 @@ AddOutputFilterByType MOD_PAGESPEED_OUTPUT_FILTER application/xhtml+xml <% end -%> ModPagespeedFileCachePath "<%= @cache_path %>" ModPagespeedLogDir "<%= @log_dir %>" - + <% @memcache_servers.each do |server| -%> ModPagespeedMemcachedServers <%= server %> <% end -%> @@ -17,7 +17,7 @@ ModPagespeedRewriteLevel <%= @rewrite_level -%> <% @disable_filters.each do |filter| -%> ModPagespeedDisableFilters <%= filter %> <% end -%> - + <% @enable_filters.each do |filter| -%> ModPagespeedEnableFilters <%= filter %> <% end -%> @@ -93,6 +93,10 @@ ModPagespeedMessageBufferSize <%= @message_buffer_size %> SetHandler mod_pagespeed_message +<% if @additional_configuration.is_a? Array -%> +<%= @additional_configuration.join('\n') %> +<% else -%> <% @additional_configuration.each_pair do |key, value| -%> <%= key %> <%= value %> <% end -%> +<% end -%> diff --git a/apache/templates/mod/php5.conf.erb b/apache/templates/mod/php5.conf.erb index 44df2ae06..3fd100039 100644 --- a/apache/templates/mod/php5.conf.erb +++ b/apache/templates/mod/php5.conf.erb @@ -14,8 +14,9 @@ # # Cause the PHP interpreter to handle files with a .php extension. # -AddHandler php5-script <%= @extensions.flatten.compact.join(' ') %> -AddType text/html .php +)$"> + SetHandler php5-script + # # Add index.php to the list of files that will be served as directory diff --git a/apache/templates/vhost/_directories.erb b/apache/templates/vhost/_directories.erb index b1475513c..c95fda5a1 100644 --- a/apache/templates/vhost/_directories.erb +++ b/apache/templates/vhost/_directories.erb @@ -106,7 +106,9 @@ <%- end -%> <%- if directory['addhandlers'] and ! directory['addhandlers'].empty? 
-%> <%- [directory['addhandlers']].flatten.compact.each do |addhandler| -%> - AddHandler <%= addhandler['handler'] %> <%= Array(addhandler['extensions']).join(' ') %> + )$"> + SetHandler <%= addhandler['handler'] %> + <%- end -%> <%- end -%> <%- if directory['sethandler'] and directory['sethandler'] != '' -%> diff --git a/apache/templates/vhost/_limits.erb b/apache/templates/vhost/_limits.erb deleted file mode 100644 index 0bd56db35..000000000 --- a/apache/templates/vhost/_limits.erb +++ /dev/null @@ -1,5 +0,0 @@ - - ## Limit Request Values -<% if @limit_request_field_size -%> - LimitRequestFieldSize <%= @limit_request_field_size %> -<% end -%> diff --git a/apache/templates/vhost/_ssl.erb b/apache/templates/vhost/_ssl.erb index a3d76fb13..797435cc1 100644 --- a/apache/templates/vhost/_ssl.erb +++ b/apache/templates/vhost/_ssl.erb @@ -22,9 +22,6 @@ <%- if @ssl_crl_check && scope.function_versioncmp([@apache_version, '2.4']) >= 0 -%> SSLCARevocationCheck "<%= @ssl_crl_check %>" <%- end -%> - <%- if @ssl_proxyengine -%> - SSLProxyEngine On - <%- end -%> <%- if @ssl_protocol -%> SSLProtocol <%= [@ssl_protocol].flatten.compact.join(' ') %> <%- end -%> @@ -40,9 +37,6 @@ <%- if @ssl_verify_depth -%> SSLVerifyDepth <%= @ssl_verify_depth %> <%- end -%> - <%- if @ssl_proxy_machine_cert -%> - SSLProxyMachineCertificateFile "<%= @ssl_proxy_machine_cert %>" - <%- end -%> <%- if @ssl_options -%> SSLOptions <%= Array(@ssl_options).join(' ') %> <%- end -%> diff --git a/apache/templates/vhost/_sslproxy.erb b/apache/templates/vhost/_sslproxy.erb new file mode 100644 index 000000000..e58d52d0b --- /dev/null +++ b/apache/templates/vhost/_sslproxy.erb @@ -0,0 +1,14 @@ +<% if @ssl_proxyengine -%> + + # SSL Proxy directives + SSLProxyEngine On + <%- if @ssl_proxy_check_peer_cn -%> + SSLProxyCheckPeerCN <%= @ssl_proxy_check_peer_cn %> + <%- end -%> + <%- if @ssl_proxy_check_peer_name -%> + SSLProxyCheckPeerName <%= @ssl_proxy_check_peer_name %> + <%- end -%> + <%- if @ssl_proxy_machine_cert -%> + SSLProxyMachineCertificateFile "<%= @ssl_proxy_machine_cert %>" + <%- end -%> +<% end -%> diff --git a/ceilometer/CHANGELOG.md b/ceilometer/CHANGELOG.md index 0dc1407d8..461000390 100644 --- a/ceilometer/CHANGELOG.md +++ b/ceilometer/CHANGELOG.md @@ -1,3 +1,50 @@ +##2015-11-25 - 7.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Liberty. 
+ +####Backwards-incompatible changes +- change section name for AMQP qpid parameters +- remove deprecated mysql_module + +####Features +- keystone/auth: make service description configurable +- add support for RabbitMQ connection heartbeat +- simplify parameters for rpc_backend +- add tag to package and service resources +- enable support for memcached_servers +- add ability to specify ttl and timeout parameters +- add ability to manage use_stderr parameter +- creation of ceilometer::db::sync +- reflect provider change in puppet-openstacklib +- make 'alarm_history_time_to_live' parameter configurable +- update ceilometer::db class to match other module pattern +- implement auth_endpoint_type parameter +- stop managing File resources +- put all the logging related parameters to the logging class +- add mongodb_replica_set option +- allow customization of db sync command line + +####Bugfixes +- rely on autorequire for config resource ordering +- compute agent: do not try to configure nova.conf +- agent/auth: bring consistent how we manage empty parameters +- remove the api service subscription on db sync +- wsgi: make sure eventlet process is stopped before httpd +- auth: drop service dependency for Keystone_user_role + +####Maintenance +- fix rspec 3.x syntax +- initial msync run for all Puppet OpenStack modules +- acceptance: enable debug & verbosity for OpenStack logs +- acceptance/eventlet: make sure apache is stopped +- acceptance: use common bits from puppet-openstack-integration +- rspec: run tests for ::ceilometer::agent::auth +- try to use zuul-cloner to prepare fixtures +- spec: Enable webmock connect to IPv4 link-local +- db: Use postgresql lib class for psycopg package +- remove class_parameter_defaults puppet-lint check + ##2015-10-10 - 6.1.0 ###Summary
diff --git a/ceilometer/README.md b/ceilometer/README.md index 8c0b40719..620255734 100644 --- a/ceilometer/README.md +++ b/ceilometer/README.md
@@ -1,7 +1,7 @@ Ceilometer ========== -6.1.0 - 2015.1 - Kilo +7.0.0 - 2015.2 - Liberty #### Table of Contents
@@ -41,12 +41,7 @@ Setup ### Beginning with ceilometer -To utilize the ceilometer module's functionality you will need to declare multiple resources. -The following is a modified excerpt from the [openstack module](httpd://github.com/stackforge/puppet-openstack). -This is not an exhaustive list of all the components needed. We recommend that you consult and understand the [openstack module](https://github.com/stackforge/puppet-openstack) and the [core](http://docs.openstack.org) documentation to assist you in understanding the available deployment options. +To utilize the ceilometer module's functionality you will need to declare multiple resources. This is not an exhaustive list of all the components needed. We recommend that you consult and understand the [core openstack](http://docs.openstack.org) documentation to assist you in understanding the available deployment options.
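As a rough illustration of the Liberty-era options introduced further down in this change set (the pymysql connection string, memcached_servers, and the collector dispatcher parameters), the sketch below uses only parameter names that appear in the diffs which follow; every value is a placeholder, not an upstream default or part of the upstream README.

```puppet
# Illustrative sketch only: parameter names come from the ceilometer classes
# touched later in this change set; all values are placeholders.
class { '::ceilometer':
  metering_secret   => 'secrete',
  rabbit_userid     => 'ceilometer',
  rabbit_password   => 'an_even_bigger_secret',
  rabbit_host       => '127.0.0.1',
  memcached_servers => ['127.0.0.1:11211'],
}
class { '::ceilometer::db':
  # mysql+pymysql:// is now accepted by the connection validation in ceilometer::db
  database_connection => 'mysql+pymysql://ceilometer:a_big_secret@127.0.0.1/ceilometer?charset=utf8',
}
class { '::ceilometer::collector':
  # either a string or an array; arrays are joined into a comma-separated list
  meter_dispatcher => ['database', 'gnocchi'],
  event_dispatcher => 'database',
}
```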
```puppet class { '::ceilometer': diff --git a/ceilometer/examples/ceilometer_with_gnocchi.pp b/ceilometer/examples/ceilometer_with_gnocchi.pp new file mode 100644 index 000000000..be8132ef0 --- /dev/null +++ b/ceilometer/examples/ceilometer_with_gnocchi.pp @@ -0,0 +1,40 @@ +class { '::ceilometer': + metering_secret => 'secrete', + rabbit_userid => 'ceilometer', + rabbit_password => 'an_even_bigger_secret', + rabbit_host => '127.0.0.1', +} +class { '::ceilometer::db::mysql': + password => 'a_big_secret', +} +class { '::ceilometer::db': + database_connection => 'mysql://ceilometer:a_big_secret@127.0.0.1/ceilometer?charset=utf8', +} +class { '::ceilometer::keystone::auth': + password => 'a_big_secret', +} +class { '::ceilometer::client': } +class { '::ceilometer::expirer': } +class { '::ceilometer::agent::central': } +class { '::ceilometer::agent::notification': } +class { '::ceilometer::api': + enabled => true, + keystone_password => 'a_big_secret', + keystone_identity_uri => 'http://127.0.0.1:35357/', + service_name => 'httpd', +} +include ::apache +class { '::ceilometer::wsgi::apache': + ssl => false, +} + +class { '::ceilometer::collector': + meter_dispatcher => ['gnocchi'], +} +class { '::ceilometer::dispatcher::gnocchi': + filter_service_activity => false, + filter_project => true, + url => 'https://gnocchi:8041', + archive_policy => 'high', + resources_definition_file => 'gnocchi.yaml', +} diff --git a/ceilometer/manifests/agent/notification.pp b/ceilometer/manifests/agent/notification.pp index 00702aa2c..0d0af2f23 100644 --- a/ceilometer/manifests/agent/notification.pp +++ b/ceilometer/manifests/agent/notification.pp @@ -39,17 +39,22 @@ # (optional) Save event details. # Defaults to false # +# [*disable_non_metric_meters*] +# (optional) Disable or enable the collection of non-metric meters. +# Default to $::os_service_default +# # [*package_ensure*] # (optional) ensure state for package. # Defaults to 'present' # class ceilometer::agent::notification ( - $manage_service = true, - $enabled = true, - $ack_on_event_error = true, - $store_events = false, - $package_ensure = 'present', + $manage_service = true, + $enabled = true, + $ack_on_event_error = true, + $store_events = false, + $disable_non_metric_meters = $::os_service_default, + $package_ensure = 'present', ) { include ::ceilometer::params @@ -84,8 +89,9 @@ } ceilometer_config { - 'notification/ack_on_event_error': value => $ack_on_event_error; - 'notification/store_events' : value => $store_events; + 'notification/ack_on_event_error' : value => $ack_on_event_error; + 'notification/store_events' : value => $store_events; + 'notification/disable_non_metric_meters': value => $disable_non_metric_meters; } } diff --git a/ceilometer/manifests/collector.pp b/ceilometer/manifests/collector.pp index 8e22a93eb..bcddc7e46 100644 --- a/ceilometer/manifests/collector.pp +++ b/ceilometer/manifests/collector.pp @@ -22,12 +22,24 @@ # (optional) the ceilometer collector udp bind port. # Defaults to '4952' # +# [*meter_dispatcher*] +# (optional) dispatcher driver(s) to process meter data. +# Can be an array or a string. +# Defaults to 'database' +# +# [*event_dispatcher*] +# (optional) dispatcher driver(s) to process event data. +# Can be an array or a string. 
+# Defaults to 'database' +# class ceilometer::collector ( - $manage_service = true, - $enabled = true, - $package_ensure = 'present', - $udp_address = '0.0.0.0', - $udp_port = '4952', + $manage_service = true, + $enabled = true, + $package_ensure = 'present', + $udp_address = '0.0.0.0', + $udp_port = '4952', + $meter_dispatcher = 'database', + $event_dispatcher = 'database', ) { include ::ceilometer::params @@ -41,8 +53,10 @@ } ceilometer_config { - 'collector/udp_address' : value => $udp_address; - 'collector/udp_port' : value => $udp_port; + 'collector/udp_address': value => $udp_address; + 'collector/udp_port': value => $udp_port; + 'DEFAULT/meter_dispatcher': value => join(any2array($meter_dispatcher), ','); + 'DEFAULT/event_dispatcher': value => join(any2array($event_dispatcher), ','); } Package[$::ceilometer::params::collector_package_name] -> Service['ceilometer-collector'] diff --git a/ceilometer/manifests/db.pp b/ceilometer/manifests/db.pp index 6ac0218f5..9a139790c 100644 --- a/ceilometer/manifests/db.pp +++ b/ceilometer/manifests/db.pp @@ -12,28 +12,28 @@ # # [*database_idle_timeout*] # Timeout when db connections should be reaped. -# (Optional) Defaults to 3600. +# (Optional) Defaults to $::os_service_default # # [*database_min_pool_size*] # Minimum number of SQL connections to keep open in a pool. -# (Optional) Defaults to 1. +# (Optional) Defaults to $::os_service_default # # [*database_max_pool_size*] # Maximum number of SQL connections to keep open in a pool. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default # # [*database_max_retries*] # Maximum db connection retries during startup. # Setting -1 implies an infinite retry count. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default # # [*database_retry_interval*] # Interval between retries of opening a sql connection. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default # # [*database_max_overflow*] # If set, use this value for max_overflow with sqlalchemy. -# (Optional) Defaults to 20. 
+# (Optional) Defaults to $::os_service_default # # [*mongodb_replica_set*] # The name of the replica set which is used to connect to MongoDB @@ -46,12 +46,12 @@ # class ceilometer::db ( $database_connection = 'mysql://ceilometer:ceilometer@localhost/ceilometer', - $database_idle_timeout = 3600, - $database_min_pool_size = 1, - $database_max_pool_size = 10, - $database_max_retries = 10, - $database_retry_interval = 10, - $database_max_overflow = 20, + $database_idle_timeout = $::os_service_default, + $database_min_pool_size = $::os_service_default, + $database_max_pool_size = $::os_service_default, + $database_max_retries = $::os_service_default, + $database_retry_interval = $::os_service_default, + $database_max_overflow = $::os_service_default, $sync_db = true, $mongodb_replica_set = undef, ) { @@ -61,13 +61,17 @@ Package<| title == 'ceilometer-common' |> -> Class['ceilometer::db'] validate_re($database_connection, - '(sqlite|mysql|postgresql|mongodb):\/\/(\S+:\S+@\S+\/\S+)?') + '^(sqlite|mysql(\+pymysql)?|postgresql|mongodb):\/\/(\S+:\S+@\S+\/\S+)?') case $database_connection { - /^mysql:\/\//: { - $backend_package = false + /^mysql(\+pymysql)?:\/\//: { require 'mysql::bindings' require 'mysql::bindings::python' + if $database_connection =~ /^mysql\+pymysql/ { + $backend_package = $::ceilometer::params::pymysql_package_name + } else { + $backend_package = false + } } /^postgresql:\/\//: { $backend_package = false diff --git a/ceilometer/manifests/dispatcher/gnocchi.pp b/ceilometer/manifests/dispatcher/gnocchi.pp new file mode 100644 index 000000000..bc5621799 --- /dev/null +++ b/ceilometer/manifests/dispatcher/gnocchi.pp @@ -0,0 +1,43 @@ +# Configure Gnocchi dispatcher for Ceilometer +# +# == Params +# +# [*filter_service_activity*] +# (optional) Filter out samples generated by Gnocchi service activity. +# Defaults to $::os_service_default. +# +# [*filter_project*] +# (optional) Gnocchi project used to filter out samples +# generated by Gnocchi service activity +# Defaults to $::os_service_default. +# +# [*url*] +# (optional) Gnocchi URL +# Defaults to $::os_service_default. +# +# [*archive_policy*] +# (optional) The archive policy to use when the dispatcher +# Defaults to $::os_service_default. +# +# [*resources_definition_file*] +# (optional) The Yaml file that defines mapping between samples +# and gnocchi resources/metrics. +# Defaults to $::os_service_default. +# +class ceilometer::dispatcher::gnocchi ( + $filter_service_activity = $::os_service_default, + $filter_project = $::os_service_default, + $url = $::os_service_default, + $archive_policy = $::os_service_default, + $resources_definition_file = $::os_service_default, +) { + + ceilometer_config { + 'dispatcher_gnocchi/filter_service_activity': value => $filter_service_activity; + 'dispatcher_gnocchi/filter_project': value => $filter_project; + 'dispatcher_gnocchi/url': value => $url; + 'dispatcher_gnocchi/archive_policy': value => $archive_policy; + 'dispatcher_gnocchi/resources_definition_file': value => $resources_definition_file; + } + +} diff --git a/ceilometer/manifests/init.pp b/ceilometer/manifests/init.pp index 2cfdf0910..1c8ca9436 100644 --- a/ceilometer/manifests/init.pp +++ b/ceilometer/manifests/init.pp @@ -96,6 +96,8 @@ # (optional) A list of memcached server(s) to use for caching. 
# Defaults to undef # +# DEPRECATED PARAMETERS +# # [*qpid_hostname*] # [*qpid_port*] # [*qpid_username*] @@ -109,7 +111,6 @@ # [*qpid_reconnect_interval*] # [*qpid_reconnect_interval_min*] # [*qpid_reconnect_interval_max*] -# (optional) various QPID options # class ceilometer( $http_timeout = '600', @@ -140,19 +141,20 @@ $kombu_ssl_keyfile = undef, $kombu_ssl_version = 'TLSv1', $memcached_servers = undef, - $qpid_hostname = 'localhost', - $qpid_port = 5672, - $qpid_username = 'guest', - $qpid_password = 'guest', - $qpid_heartbeat = 60, - $qpid_protocol = 'tcp', - $qpid_tcp_nodelay = true, - $qpid_reconnect = true, - $qpid_reconnect_timeout = 0, - $qpid_reconnect_limit = 0, - $qpid_reconnect_interval_min = 0, - $qpid_reconnect_interval_max = 0, - $qpid_reconnect_interval = 0, + # DEPRECATED PARAMETERS + $qpid_hostname = undef, + $qpid_port = undef, + $qpid_username = undef, + $qpid_password = undef, + $qpid_heartbeat = undef, + $qpid_protocol = undef, + $qpid_tcp_nodelay = undef, + $qpid_reconnect = undef, + $qpid_reconnect_timeout = undef, + $qpid_reconnect_limit = undef, + $qpid_reconnect_interval_min = undef, + $qpid_reconnect_interval_max = undef, + $qpid_reconnect_interval = undef, ) { validate_string($metering_secret) @@ -262,23 +264,7 @@ # we keep "ceilometer.openstack.common.rpc.impl_qpid" for backward compatibility if $rpc_backend == 'ceilometer.openstack.common.rpc.impl_qpid' or $rpc_backend == 'qpid' { - - ceilometer_config { - 'oslo_messaging_qpid/qpid_hostname' : value => $qpid_hostname; - 'oslo_messaging_qpid/qpid_port' : value => $qpid_port; - 'oslo_messaging_qpid/qpid_username' : value => $qpid_username; - 'oslo_messaging_qpid/qpid_password' : value => $qpid_password, secret => true; - 'oslo_messaging_qpid/qpid_heartbeat' : value => $qpid_heartbeat; - 'oslo_messaging_qpid/qpid_protocol' : value => $qpid_protocol; - 'oslo_messaging_qpid/qpid_tcp_nodelay' : value => $qpid_tcp_nodelay; - 'oslo_messaging_qpid/qpid_reconnect' : value => $qpid_reconnect; - 'oslo_messaging_qpid/qpid_reconnect_timeout' : value => $qpid_reconnect_timeout; - 'oslo_messaging_qpid/qpid_reconnect_limit' : value => $qpid_reconnect_limit; - 'oslo_messaging_qpid/qpid_reconnect_interval_min': value => $qpid_reconnect_interval_min; - 'oslo_messaging_qpid/qpid_reconnect_interval_max': value => $qpid_reconnect_interval_max; - 'oslo_messaging_qpid/qpid_reconnect_interval' : value => $qpid_reconnect_interval; - } - + warning('Qpid driver is removed from Oslo.messaging in the Mitaka release') } # Once we got here, we can act as an honey badger on the rpc used. diff --git a/ceilometer/manifests/logging.pp b/ceilometer/manifests/logging.pp index 4121e8fa3..660729169 100644 --- a/ceilometer/manifests/logging.pp +++ b/ceilometer/manifests/logging.pp @@ -6,23 +6,23 @@ # # [*verbose*] # (Optional) Should the daemons log verbose messages -# Defaults to 'false' +# Defaults to $::os_service_default # # [*debug*] # (Optional) Should the daemons log debug messages -# Defaults to 'false' +# Defaults to $::os_service_default # # [*use_syslog*] # (Optional) Use syslog for logging. -# Defaults to 'false' +# Defaults to $::os_service_default # # [*use_stderr*] # (optional) Use stderr for logging -# Defaults to 'true' +# Defaults to $::os_service_default # # [*log_facility*] # (Optional) Syslog facility to receive log lines. -# Defaults to 'LOG_USER' +# Defaults to $::os_service_default # # [*log_dir*] # (optional) Directory where logs should be stored. 
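The logging hunks that continue below switch these defaults from hard-coded values to $::os_service_default, so unset options fall back to the service's own defaults. A hedged sketch of declaring the class under the new defaults follows; the logger hash is a placeholder, not an upstream value.

```puppet
# Illustrative sketch only: a hash passed as default_log_levels is flattened by
# ceilometer::logging into a sorted, comma-separated 'logger=LEVEL' string
# before it is written to ceilometer.conf.
class { '::ceilometer::logging':
  debug              => true,
  use_syslog         => false,
  log_dir            => '/var/log/ceilometer',
  default_log_levels => {
    'amqp'       => 'WARN',
    'sqlalchemy' => 'WARN',
    'iso8601'    => 'WARN',
  },
}
```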
@@ -31,34 +31,34 @@ # # [*logging_context_format_string*] # (optional) Format string to use for log messages with context. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [%(request_id)s %(user_identity)s] %(instance)s%(message)s' # # [*logging_default_format_string*] # (optional) Format string to use for log messages without context. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [-] %(instance)s%(message)s' # # [*logging_debug_format_suffix*] # (optional) Formatted data to append to log format when level is DEBUG. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(funcName)s %(pathname)s:%(lineno)d' # # [*logging_exception_prefix*] # (optional) Prefix each line of exception output with this format. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s' # # [*log_config_append*] # The name of an additional logging configuration file. -# Defaults to undef. +# Defaults to $::os_service_default # See https://docs.python.org/2/howto/logging.html # # [*default_log_levels*] # (optional) Hash of logger (keys) and level (values) pairs. -# Defaults to undef. +# Defaults to $::os_service_default # Example: # { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', # 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', @@ -67,47 +67,47 @@ # # [*publish_errors*] # (optional) Publish error events (boolean value). -# Defaults to undef (false if unconfigured). +# Defaults to $::os_service_default # # [*fatal_deprecations*] # (optional) Make deprecations fatal (boolean value) -# Defaults to undef (false if unconfigured). +# Defaults to $::os_service_default # # [*instance_format*] # (optional) If an instance is passed with the log message, format it # like this (string value). -# Defaults to undef. +# Defaults to $::os_service_default # Example: '[instance: %(uuid)s] ' # # [*instance_uuid_format*] # (optional) If an instance UUID is passed with the log message, format # it like this (string value). -# Defaults to undef. +# Defaults to $::os_service_default # Example: instance_uuid_format='[instance: %(uuid)s] ' - +# # [*log_date_format*] # (optional) Format string for %%(asctime)s in log records. -# Defaults to undef. 
+# Defaults to $::os_service_default # Example: 'Y-%m-%d %H:%M:%S' class ceilometer::logging( - $use_syslog = false, - $use_stderr = true, - $log_facility = 'LOG_USER', + $use_syslog = $::os_service_default, + $use_stderr = $::os_service_default, + $log_facility = $::os_service_default, $log_dir = '/var/log/ceilometer', - $verbose = false, - $debug = false, - $logging_context_format_string = undef, - $logging_default_format_string = undef, - $logging_debug_format_suffix = undef, - $logging_exception_prefix = undef, - $log_config_append = undef, - $default_log_levels = undef, - $publish_errors = undef, - $fatal_deprecations = undef, - $instance_format = undef, - $instance_uuid_format = undef, - $log_date_format = undef, + $verbose = $::os_service_default, + $debug = $::os_service_default, + $logging_context_format_string = $::os_service_default, + $logging_default_format_string = $::os_service_default, + $logging_debug_format_suffix = $::os_service_default, + $logging_exception_prefix = $::os_service_default, + $log_config_append = $::os_service_default, + $default_log_levels = $::os_service_default, + $publish_errors = $::os_service_default, + $fatal_deprecations = $::os_service_default, + $instance_format = $::os_service_default, + $instance_uuid_format = $::os_service_default, + $log_date_format = $::os_service_default, ) { # NOTE(spredzy): In order to keep backward compatibility we rely on the pick function @@ -119,139 +119,29 @@ $verbose_real = pick($::ceilometer::verbose,$verbose) $debug_real = pick($::ceilometer::debug,$debug) - ceilometer_config { - 'DEFAULT/debug' : value => $debug_real; - 'DEFAULT/verbose' : value => $verbose_real; - 'DEFAULT/use_stderr' : value => $use_stderr_real; - 'DEFAULT/use_syslog' : value => $use_syslog_real; - 'DEFAULT/log_dir' : value => $log_dir_real; - 'DEFAULT/syslog_log_facility': value => $log_facility_real; + if is_service_default($default_log_levels) { + $default_log_levels_real = $default_log_levels + } else { + $default_log_levels_real = join(sort(join_keys_to_values($default_log_levels, '=')), ',') } - if $logging_context_format_string { - ceilometer_config { - 'DEFAULT/logging_context_format_string' : - value => $logging_context_format_string; - } - } - else { - ceilometer_config { - 'DEFAULT/logging_context_format_string' : ensure => absent; - } - } - - if $logging_default_format_string { - ceilometer_config { - 'DEFAULT/logging_default_format_string' : - value => $logging_default_format_string; - } - } - else { - ceilometer_config { - 'DEFAULT/logging_default_format_string' : ensure => absent; - } - } - - if $logging_debug_format_suffix { - ceilometer_config { - 'DEFAULT/logging_debug_format_suffix' : - value => $logging_debug_format_suffix; - } - } - else { - ceilometer_config { - 'DEFAULT/logging_debug_format_suffix' : ensure => absent; - } - } - - if $logging_exception_prefix { - ceilometer_config { - 'DEFAULT/logging_exception_prefix' : value => $logging_exception_prefix; - } - } - else { - ceilometer_config { - 'DEFAULT/logging_exception_prefix' : ensure => absent; - } - } - - if $log_config_append { - ceilometer_config { - 'DEFAULT/log_config_append' : value => $log_config_append; - } - } - else { - ceilometer_config { - 'DEFAULT/log_config_append' : ensure => absent; - } - } - - if $default_log_levels { - ceilometer_config { - 'DEFAULT/default_log_levels' : - value => join(sort(join_keys_to_values($default_log_levels, '=')), ','); - } - } - else { - ceilometer_config { - 'DEFAULT/default_log_levels' : ensure => absent; - } - } - - if 
$publish_errors { - ceilometer_config { - 'DEFAULT/publish_errors' : value => $publish_errors; - } - } - else { - ceilometer_config { - 'DEFAULT/publish_errors' : ensure => absent; - } - } - - if $fatal_deprecations { - ceilometer_config { - 'DEFAULT/fatal_deprecations' : value => $fatal_deprecations; - } - } - else { - ceilometer_config { - 'DEFAULT/fatal_deprecations' : ensure => absent; - } - } - - if $instance_format { - ceilometer_config { - 'DEFAULT/instance_format' : value => $instance_format; - } - } - else { - ceilometer_config { - 'DEFAULT/instance_format' : ensure => absent; - } - } - - if $instance_uuid_format { - ceilometer_config { - 'DEFAULT/instance_uuid_format' : value => $instance_uuid_format; - } - } - else { - ceilometer_config { - 'DEFAULT/instance_uuid_format' : ensure => absent; - } - } - - if $log_date_format { - ceilometer_config { - 'DEFAULT/log_date_format' : value => $log_date_format; - } - } - else { - ceilometer_config { - 'DEFAULT/log_date_format' : ensure => absent; - } - } - - + ceilometer_config { + 'DEFAULT/debug': value => $debug_real; + 'DEFAULT/verbose': value => $verbose_real; + 'DEFAULT/use_stderr': value => $use_stderr_real; + 'DEFAULT/use_syslog': value => $use_syslog_real; + 'DEFAULT/log_dir': value => $log_dir_real; + 'DEFAULT/syslog_log_facility': value => $log_facility_real; + 'DEFAULT/logging_context_format_string': value => $logging_context_format_string; + 'DEFAULT/logging_default_format_string': value => $logging_default_format_string; + 'DEFAULT/logging_debug_format_suffix': value => $logging_debug_format_suffix; + 'DEFAULT/logging_exception_prefix': value => $logging_exception_prefix; + 'DEFAULT/log_config_append': value => $log_config_append; + 'DEFAULT/default_log_levels': value => $default_log_levels_real; + 'DEFAULT/publish_errors': value => $publish_errors; + 'DEFAULT/fatal_deprecations': value => $fatal_deprecations; + 'DEFAULT/instance_format': value => $instance_format; + 'DEFAULT/instance_uuid_format': value => $instance_uuid_format; + 'DEFAULT/log_date_format': value => $log_date_format; + } } diff --git a/ceilometer/manifests/params.pp b/ceilometer/manifests/params.pp index 61bba1229..b209a5e46 100644 --- a/ceilometer/manifests/params.pp +++ b/ceilometer/manifests/params.pp @@ -31,6 +31,7 @@ $ceilometer_wsgi_script_path = '/var/www/cgi-bin/ceilometer' $ceilometer_wsgi_script_source = '/usr/lib/python2.7/site-packages/ceilometer/api/app.wsgi' $sqlite_package_name = undef + $pymysql_package_name = undef } 'Debian': { # package names @@ -55,6 +56,7 @@ # db packages $pymongo_package_name = 'python-pymongo' $sqlite_package_name = 'python-pysqlite2' + $pymysql_package_name = 'python-pymysql' # Operating system specific case $::operatingsystem { diff --git a/ceilometer/metadata.json b/ceilometer/metadata.json index 207c7d5a7..c7fc20a4b 100644 --- a/ceilometer/metadata.json +++ b/ceilometer/metadata.json @@ -1,6 +1,6 @@ { "name": "openstack-ceilometer", - "version": "6.1.0", + "version": "7.0.0", "author": "eNovance and OpenStack Contributors", "summary": "Puppet module for OpenStack Ceilometer", "license": "Apache-2.0", @@ -33,8 +33,8 @@ "dependencies": [ { "name": "puppetlabs/apache", "version_requirement": ">=1.0.0 <2.0.0" }, { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=7.0.0 <8.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { 
"name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=7.0.0 <8.0.0" } ] } diff --git a/ceilometer/spec/acceptance/ceilometer_with_mysql_spec.rb b/ceilometer/spec/acceptance/ceilometer_with_mysql_spec.rb index 5ef941cf3..70fa9ce37 100644 --- a/ceilometer/spec/acceptance/ceilometer_with_mysql_spec.rb +++ b/ceilometer/spec/acceptance/ceilometer_with_mysql_spec.rb @@ -40,7 +40,7 @@ class { '::keystone::db::mysql': class { '::keystone': verbose => true, debug => true, - database_connection => 'mysql://keystone:keystone@127.0.0.1/keystone', + database_connection => 'mysql+pymysql://keystone:keystone@127.0.0.1/keystone', admin_token => 'admin_token', enabled => true, } @@ -66,7 +66,7 @@ class { '::ceilometer::db::mysql': password => 'a_big_secret', } class { '::ceilometer::db': - database_connection => 'mysql://ceilometer:a_big_secret@127.0.0.1/ceilometer?charset=utf8', + database_connection => 'mysql+pymysql://ceilometer:a_big_secret@127.0.0.1/ceilometer?charset=utf8', } class { '::ceilometer::keystone::auth': password => 'a_big_secret', @@ -83,6 +83,7 @@ class { '::ceilometer::api': keystone_password => 'a_big_secret', keystone_identity_uri => 'http://127.0.0.1:35357/', } + class { '::ceilometer::dispatcher::gnocchi': } EOS diff --git a/ceilometer/spec/acceptance/ceilometer_wsgi_apache_spec.rb b/ceilometer/spec/acceptance/ceilometer_wsgi_apache_spec.rb index 998fb914a..d2ba7c268 100644 --- a/ceilometer/spec/acceptance/ceilometer_wsgi_apache_spec.rb +++ b/ceilometer/spec/acceptance/ceilometer_wsgi_apache_spec.rb @@ -38,7 +38,7 @@ class { '::ceilometer::db::mysql': password => 'a_big_secret', } class { '::ceilometer::db': - database_connection => 'mysql://ceilometer:a_big_secret@127.0.0.1/ceilometer?charset=utf8', + database_connection => 'mysql+pymysql://ceilometer:a_big_secret@127.0.0.1/ceilometer?charset=utf8', } class { '::ceilometer::keystone::auth': password => 'a_big_secret', @@ -60,6 +60,7 @@ class { '::ceilometer::api': class { '::ceilometer::wsgi::apache': ssl => false, } + class { '::ceilometer::dispatcher::gnocchi': } EOS diff --git a/ceilometer/spec/classes/ceilometer_agent_auth_spec.rb b/ceilometer/spec/classes/ceilometer_agent_auth_spec.rb index 7e7c1723e..fa8032117 100644 --- a/ceilometer/spec/classes/ceilometer_agent_auth_spec.rb +++ b/ceilometer/spec/classes/ceilometer_agent_auth_spec.rb @@ -42,7 +42,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'ceilometer-agent-auth' @@ -50,7 +50,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'ceilometer-agent-auth' diff --git a/ceilometer/spec/classes/ceilometer_agent_central_spec.rb b/ceilometer/spec/classes/ceilometer_agent_central_spec.rb index b8911d4a8..b165bc853 100644 --- a/ceilometer/spec/classes/ceilometer_agent_central_spec.rb +++ b/ceilometer/spec/classes/ceilometer_agent_central_spec.rb @@ -79,7 +79,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -92,7 +92,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/ceilometer/spec/classes/ceilometer_agent_compute_spec.rb 
b/ceilometer/spec/classes/ceilometer_agent_compute_spec.rb index dddafb0ef..bf51310f6 100644 --- a/ceilometer/spec/classes/ceilometer_agent_compute_spec.rb +++ b/ceilometer/spec/classes/ceilometer_agent_compute_spec.rb @@ -90,7 +90,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -118,7 +118,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/ceilometer/spec/classes/ceilometer_agent_notification_spec.rb b/ceilometer/spec/classes/ceilometer_agent_notification_spec.rb index 550a7559a..eb7e36415 100644 --- a/ceilometer/spec/classes/ceilometer_agent_notification_spec.rb +++ b/ceilometer/spec/classes/ceilometer_agent_notification_spec.rb @@ -47,6 +47,16 @@ it 'configures notifications parameters in ceilometer.conf' do is_expected.to contain_ceilometer_config('notification/ack_on_event_error').with_value( params[:ack_on_event_error] ) is_expected.to contain_ceilometer_config('notification/store_events').with_value( params[:store_events] ) + is_expected.to contain_ceilometer_config('notification/disable_non_metric_meters').with_value('') + end + + context 'with disabled non-metric meters' do + before do + params.merge!({ :disable_non_metric_meters => true }) + end + it 'disables non-metric meters' do + is_expected.to contain_ceilometer_config('notification/disable_non_metric_meters').with_value(params[:disable_non_metric_meters]) + end end [{:enabled => true}, {:enabled => false}].each do |param_hash| @@ -89,7 +99,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -102,7 +112,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do @@ -115,10 +125,10 @@ context 'on RHEL 7' do let :facts do - { :osfamily => 'RedHat', + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystem => 'RedHat', :operatingsystemmajrelease => 7 - } + }) end let :platform_params do @@ -131,10 +141,10 @@ context 'on CentOS 7' do let :facts do - { :osfamily => 'RedHat', + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystem => 'CentOS', :operatingsystemmajrelease => 7 - } + }) end let :platform_params do @@ -147,10 +157,10 @@ context 'on Scientific 7' do let :facts do - { :osfamily => 'RedHat', + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystem => 'Scientific', :operatingsystemmajrelease => 7 - } + }) end let :platform_params do @@ -163,10 +173,10 @@ context 'on Fedora 20' do let :facts do - { :osfamily => 'RedHat', + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystem => 'Fedora', :operatingsystemrelease => 20 - } + }) end let :platform_params do diff --git a/ceilometer/spec/classes/ceilometer_agent_polling_spec.rb b/ceilometer/spec/classes/ceilometer_agent_polling_spec.rb index 43eea81e7..52bb60fab 100644 --- a/ceilometer/spec/classes/ceilometer_agent_polling_spec.rb +++ b/ceilometer/spec/classes/ceilometer_agent_polling_spec.rb @@ -104,7 +104,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -133,7 +133,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let 
:platform_params do diff --git a/ceilometer/spec/classes/ceilometer_alarm_evaluator_spec.rb b/ceilometer/spec/classes/ceilometer_alarm_evaluator_spec.rb index 599bcf444..09557d97f 100644 --- a/ceilometer/spec/classes/ceilometer_alarm_evaluator_spec.rb +++ b/ceilometer/spec/classes/ceilometer_alarm_evaluator_spec.rb @@ -105,7 +105,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -118,7 +118,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/ceilometer/spec/classes/ceilometer_alarm_notifier_spec.rb b/ceilometer/spec/classes/ceilometer_alarm_notifier_spec.rb index e0f250a64..d0f192f72 100644 --- a/ceilometer/spec/classes/ceilometer_alarm_notifier_spec.rb +++ b/ceilometer/spec/classes/ceilometer_alarm_notifier_spec.rb @@ -95,7 +95,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -108,7 +108,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/ceilometer/spec/classes/ceilometer_api_spec.rb b/ceilometer/spec/classes/ceilometer_api_spec.rb index 094e2181b..8676c5a0a 100644 --- a/ceilometer/spec/classes/ceilometer_api_spec.rb +++ b/ceilometer/spec/classes/ceilometer_api_spec.rb @@ -154,12 +154,12 @@ class { 'ceilometer': metering_secret => 's3cr3t' }" context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian', + @default_facts.merge({ :osfamily => 'Debian', :operatingsystem => 'Debian', :operatingsystemrelease => '8.0', :concat_basedir => '/var/lib/puppet/concat', :fqdn => 'some.host.tld', - :processorcount => 2 } + :processorcount => 2 }) end let :platform_params do @@ -172,12 +172,13 @@ class { 'ceilometer': metering_secret => 's3cr3t' }" context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat', + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystem => 'RedHat', :operatingsystemrelease => '7.1', + :operatingsystemmajrelease => '7', :fqdn => 'some.host.tld', :concat_basedir => '/var/lib/puppet/concat', - :processorcount => 2 } + :processorcount => 2 }) end let :platform_params do @@ -190,7 +191,7 @@ class { 'ceilometer': metering_secret => 's3cr3t' }" describe 'with custom auth_uri' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end before do params.merge!({ @@ -204,10 +205,10 @@ class { 'ceilometer': metering_secret => 's3cr3t' }" describe "with custom keystone identity_uri" do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end before do - params.merge!({ + params.merge!({ :keystone_identity_uri => 'https://foo.bar:1234/', }) end @@ -223,7 +224,7 @@ class { 'ceilometer': metering_secret => 's3cr3t' }" describe "with custom keystone identity_uri and auth_uri" do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end before do params.merge!({ diff --git a/ceilometer/spec/classes/ceilometer_client_spec.rb b/ceilometer/spec/classes/ceilometer_client_spec.rb index 0d0e01786..99f2e2915 100644 --- a/ceilometer/spec/classes/ceilometer_client_spec.rb +++ b/ceilometer/spec/classes/ceilometer_client_spec.rb @@ -17,7 +17,7 @@ context 'on Debian platforms' do let :facts do - { 
:osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -29,7 +29,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/ceilometer/spec/classes/ceilometer_collector_spec.rb b/ceilometer/spec/classes/ceilometer_collector_spec.rb index 00c83d4ff..91de8a8f1 100644 --- a/ceilometer/spec/classes/ceilometer_collector_spec.rb +++ b/ceilometer/spec/classes/ceilometer_collector_spec.rb @@ -53,6 +53,8 @@ it 'configures ceilometer-collector server' do is_expected.to contain_ceilometer_config('collector/udp_address').with_value( '0.0.0.0' ) is_expected.to contain_ceilometer_config('collector/udp_port').with_value( '4952' ) + is_expected.to contain_ceilometer_config('DEFAULT/meter_dispatcher').with_value( 'database' ) + is_expected.to contain_ceilometer_config('DEFAULT/event_dispatcher').with_value( 'database' ) end it 'installs ceilometer-collector package' do @@ -111,7 +113,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -124,7 +126,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/ceilometer/spec/classes/ceilometer_db_mysql_spec.rb b/ceilometer/spec/classes/ceilometer_db_mysql_spec.rb index 64a52b952..610f4fc53 100644 --- a/ceilometer/spec/classes/ceilometer_db_mysql_spec.rb +++ b/ceilometer/spec/classes/ceilometer_db_mysql_spec.rb @@ -35,7 +35,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'ceilometer mysql database' @@ -43,7 +43,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'ceilometer mysql database' @@ -51,7 +51,7 @@ describe "overriding allowed_hosts param to array" do let :facts do - { :osfamily => "Debian" } + @default_facts.merge({ :osfamily => "Debian" }) end let :params do { @@ -64,7 +64,7 @@ describe "overriding allowed_hosts param to string" do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :params do { @@ -77,7 +77,7 @@ describe "overriding allowed_hosts param equals to host param " do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :params do { diff --git a/ceilometer/spec/classes/ceilometer_db_postgresql_spec.rb b/ceilometer/spec/classes/ceilometer_db_postgresql_spec.rb index dd84edbce..bec9dad0f 100644 --- a/ceilometer/spec/classes/ceilometer_db_postgresql_spec.rb +++ b/ceilometer/spec/classes/ceilometer_db_postgresql_spec.rb @@ -12,11 +12,11 @@ context 'on a RedHat osfamily' do let :facts do - { + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.0', :concat_basedir => '/var/lib/puppet/concat' - } + }) end context 'with only required parameters' do @@ -34,12 +34,12 @@ context 'on a Debian osfamily' do let :facts do - { + @default_facts.merge({ :operatingsystemrelease => '7.8', :operatingsystem => 'Debian', :osfamily => 'Debian', :concat_basedir => '/var/lib/puppet/concat' - } + }) end context 'with only required parameters' do diff --git a/ceilometer/spec/classes/ceilometer_db_spec.rb b/ceilometer/spec/classes/ceilometer_db_spec.rb index ab352032c..58dee2e40 
100644 --- a/ceilometer/spec/classes/ceilometer_db_spec.rb +++ b/ceilometer/spec/classes/ceilometer_db_spec.rb @@ -9,10 +9,10 @@ it { is_expected.to contain_class('ceilometer::params') } it { is_expected.to contain_class('ceilometer::db::sync') } it { is_expected.to contain_ceilometer_config('database/connection').with_value('mysql://ceilometer:ceilometer@localhost/ceilometer').with_secret(true) } - it { is_expected.to contain_ceilometer_config('database/idle_timeout').with_value('3600') } - it { is_expected.to contain_ceilometer_config('database/min_pool_size').with_value('1') } - it { is_expected.to contain_ceilometer_config('database/max_retries').with_value('10') } - it { is_expected.to contain_ceilometer_config('database/retry_interval').with_value('10') } + it { is_expected.to contain_ceilometer_config('database/idle_timeout').with_value('') } + it { is_expected.to contain_ceilometer_config('database/min_pool_size').with_value('') } + it { is_expected.to contain_ceilometer_config('database/max_retries').with_value('') } + it { is_expected.to contain_ceilometer_config('database/retry_interval').with_value('') } it { is_expected.not_to contain_ceilometer_config('database/mongodb_replica_set') } end @@ -38,6 +38,16 @@ end + context 'with pymysql connection' do + let :params do + { :database_connection => 'mysql+pymysql://ceilometer:ceilometer@localhost/ceilometer' } + end + + it { is_expected.to contain_class('ceilometer::params') } + it { is_expected.to contain_class('ceilometer::db::sync') } + it { is_expected.to contain_ceilometer_config('database/connection').with_value('mysql+pymysql://ceilometer:ceilometer@localhost/ceilometer').with_secret(true) } + end + context 'with mongodb backend and replica set' do let :params do { :database_connection => 'mongodb://localhost:1234/ceilometer', @@ -75,18 +85,40 @@ end end + context 'with incorrect pymysql database_connection string' do + let :params do + { :database_connection => 'foo+pymysql://ceilometer:ceilometer@localhost/ceilometer', } + end + + it_raises 'a Puppet::Error', /validate_re/ + end + end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian', + @default_facts.merge({ :osfamily => 'Debian', :operatingsystem => 'Debian', :operatingsystemrelease => 'jessie', - } + }) end it_configures 'ceilometer::db' + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql:///ceilometer:ceilometer@localhost/ceilometer', } + end + + it 'install the proper backend package' do + is_expected.to contain_package('ceilometer-backend-package').with( + :ensure => 'present', + :name => 'python-pymysql', + :tag => 'openstack' + ) + end + end + context 'with sqlite backend' do let :params do { :database_connection => 'sqlite:///var/lib/ceilometer.db', } @@ -105,12 +137,20 @@ context 'on Redhat platforms' do let :facts do - { :osfamily => 'RedHat', + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.1', - } + }) end it_configures 'ceilometer::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql:///ceilometer:ceilometer@localhost/ceilometer', } + end + + it { is_expected.not_to contain_package('ceilometer-backend-package') } + end end end diff --git a/ceilometer/spec/classes/ceilometer_db_sync_spec.rb b/ceilometer/spec/classes/ceilometer_db_sync_spec.rb index 139d4a31e..79f3186d0 100644 --- a/ceilometer/spec/classes/ceilometer_db_sync_spec.rb +++ b/ceilometer/spec/classes/ceilometer_db_sync_spec.rb @@ -36,11 +36,11 @@ context 
'on a RedHat osfamily' do let :facts do - { + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.0', :concat_basedir => '/var/lib/puppet/concat' - } + }) end it_configures 'ceilometer-dbsync' diff --git a/ceilometer/spec/classes/ceilometer_dispatcher_gnocchi_spec.rb b/ceilometer/spec/classes/ceilometer_dispatcher_gnocchi_spec.rb new file mode 100644 index 000000000..d9e71c9a6 --- /dev/null +++ b/ceilometer/spec/classes/ceilometer_dispatcher_gnocchi_spec.rb @@ -0,0 +1,54 @@ +require 'spec_helper' + +describe 'ceilometer::dispatcher::gnocchi' do + + let :pre_condition do + "class { 'ceilometer': metering_secret => 's3cr3t' }" + end + + let :params do + {} + end + + shared_examples_for 'ceilometer-gnocchi-dispatcher' do + it 'configures gnocchi dispatcher' do + is_expected.to contain_ceilometer_config('dispatcher_gnocchi/filter_service_activity').with_value('') + is_expected.to contain_ceilometer_config('dispatcher_gnocchi/filter_project').with_value('') + is_expected.to contain_ceilometer_config('dispatcher_gnocchi/url').with_value('') + is_expected.to contain_ceilometer_config('dispatcher_gnocchi/archive_policy').with_value('') + is_expected.to contain_ceilometer_config('dispatcher_gnocchi/resources_definition_file').with_value('') + end + + context 'when overriding parameters' do + before do + params.merge!(:filter_service_activity => false, + :filter_project => true, + :url => 'http://foo', + :archive_policy => 'high', + :resources_definition_file => 'foo') + end + it { is_expected.to contain_ceilometer_config('dispatcher_gnocchi/filter_service_activity').with_value('false') } + it { is_expected.to contain_ceilometer_config('dispatcher_gnocchi/filter_project').with_value('true') } + it { is_expected.to contain_ceilometer_config('dispatcher_gnocchi/url').with_value('http://foo') } + it { is_expected.to contain_ceilometer_config('dispatcher_gnocchi/archive_policy').with_value('high') } + it { is_expected.to contain_ceilometer_config('dispatcher_gnocchi/resources_definition_file').with_value('foo') } + end + end + + context 'on Debian platforms' do + let :facts do + @default_facts.merge({ :osfamily => 'Debian' }) + end + + it_configures 'ceilometer-gnocchi-dispatcher' + end + + context 'on RedHat platforms' do + let :facts do + @default_facts.merge({ :osfamily => 'RedHat' }) + end + + it_configures 'ceilometer-gnocchi-dispatcher' + end + +end diff --git a/ceilometer/spec/classes/ceilometer_expirer_spec.rb b/ceilometer/spec/classes/ceilometer_expirer_spec.rb index 89ba0a301..28f5525d2 100644 --- a/ceilometer/spec/classes/ceilometer_expirer_spec.rb +++ b/ceilometer/spec/classes/ceilometer_expirer_spec.rb @@ -70,7 +70,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -82,7 +82,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/ceilometer/spec/classes/ceilometer_init_spec.rb b/ceilometer/spec/classes/ceilometer_init_spec.rb index 948294987..eb8672a93 100644 --- a/ceilometer/spec/classes/ceilometer_init_spec.rb +++ b/ceilometer/spec/classes/ceilometer_init_spec.rb @@ -27,16 +27,6 @@ } end - let :qpid_params do - { - :rpc_backend => 'qpid', - :qpid_hostname => 'localhost', - :qpid_port => 5672, - :qpid_username => 'guest', - :qpid_password => 'guest', - } - end - shared_examples_for 'ceilometer' do it 'configures time to live for events, meters 
and alarm histories' do @@ -78,12 +68,6 @@ end end - context 'with qpid' do - before {params.merge!( qpid_params ) } - it_configures 'a ceilometer base installation' - it_configures 'qpid support' - end - end shared_examples_for 'a ceilometer base installation' do @@ -260,39 +244,6 @@ end end - shared_examples_for 'qpid support' do - context("with default parameters") do - it { is_expected.to contain_ceilometer_config('oslo_messaging_qpid/qpid_reconnect').with_value(true) } - it { is_expected.to contain_ceilometer_config('oslo_messaging_qpid/qpid_reconnect_timeout').with_value('0') } - it { is_expected.to contain_ceilometer_config('oslo_messaging_qpid/qpid_reconnect_limit').with_value('0') } - it { is_expected.to contain_ceilometer_config('oslo_messaging_qpid/qpid_reconnect_interval_min').with_value('0') } - it { is_expected.to contain_ceilometer_config('oslo_messaging_qpid/qpid_reconnect_interval_max').with_value('0') } - it { is_expected.to contain_ceilometer_config('oslo_messaging_qpid/qpid_reconnect_interval').with_value('0') } - it { is_expected.to contain_ceilometer_config('oslo_messaging_qpid/qpid_heartbeat').with_value('60') } - it { is_expected.to contain_ceilometer_config('oslo_messaging_qpid/qpid_protocol').with_value('tcp') } - it { is_expected.to contain_ceilometer_config('oslo_messaging_qpid/qpid_tcp_nodelay').with_value(true) } - end - - context("with mandatory parameters set") do - it { is_expected.to contain_ceilometer_config('DEFAULT/rpc_backend').with_value('qpid') } - it { is_expected.to contain_ceilometer_config('oslo_messaging_qpid/qpid_hostname').with_value( params[:qpid_hostname] ) } - it { is_expected.to contain_ceilometer_config('oslo_messaging_qpid/qpid_port').with_value( params[:qpid_port] ) } - it { is_expected.to contain_ceilometer_config('oslo_messaging_qpid/qpid_username').with_value( params[:qpid_username]) } - it { is_expected.to contain_ceilometer_config('oslo_messaging_qpid/qpid_password').with_value(params[:qpid_password]) } - it { is_expected.to contain_ceilometer_config('oslo_messaging_qpid/qpid_password').with_value( params[:qpid_password] ).with_secret(true) } - end - - context("with legacy rpc_backend value") do - before { params.merge!( qpid_params ).merge!(:rpc_backend => 'ceilometer.openstack.common.rpc.impl_qpid') } - it { is_expected.to contain_ceilometer_config('DEFAULT/rpc_backend').with_value('ceilometer.openstack.common.rpc.impl_qpid') } - end - - context("failing if the rpc_backend is not present") do - before { params.delete( :rpc_backend) } - it { expect { is_expected.to raise_error(Puppet::Error) } } - end - end - shared_examples_for 'memcached support' do context "with memcached enabled" do before { params.merge!( @@ -305,7 +256,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -317,7 +268,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/ceilometer/spec/classes/ceilometer_keystone_auth_spec.rb b/ceilometer/spec/classes/ceilometer_keystone_auth_spec.rb index 649b55a24..e6e700281 100644 --- a/ceilometer/spec/classes/ceilometer_keystone_auth_spec.rb +++ b/ceilometer/spec/classes/ceilometer_keystone_auth_spec.rb @@ -198,7 +198,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'ceilometer keystone auth' @@ -206,7 
+206,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'ceilometer keystone auth' diff --git a/ceilometer/spec/classes/ceilometer_logging_spec.rb b/ceilometer/spec/classes/ceilometer_logging_spec.rb index 21033526e..94e37b482 100644 --- a/ceilometer/spec/classes/ceilometer_logging_spec.rb +++ b/ceilometer/spec/classes/ceilometer_logging_spec.rb @@ -15,7 +15,7 @@ :logging_exception_prefix => '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s', :log_config_append => '/etc/ceilometer/logging.conf', :publish_errors => true, - :default_log_levels => { + :default_log_levels => { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', 'iso8601' => 'WARN', @@ -57,11 +57,11 @@ shared_examples 'basic default logging settings' do it 'configures ceilometer logging settins with default values' do - is_expected.to contain_ceilometer_config('DEFAULT/use_syslog').with(:value => 'false') - is_expected.to contain_ceilometer_config('DEFAULT/use_stderr').with(:value => 'true') + is_expected.to contain_ceilometer_config('DEFAULT/use_syslog').with(:value => '') + is_expected.to contain_ceilometer_config('DEFAULT/use_stderr').with(:value => '') is_expected.to contain_ceilometer_config('DEFAULT/log_dir').with(:value => '/var/log/ceilometer') - is_expected.to contain_ceilometer_config('DEFAULT/verbose').with(:value => 'false') - is_expected.to contain_ceilometer_config('DEFAULT/debug').with(:value => 'false') + is_expected.to contain_ceilometer_config('DEFAULT/verbose').with(:value => '') + is_expected.to contain_ceilometer_config('DEFAULT/debug').with(:value => '') end end @@ -120,13 +120,13 @@ :default_log_levels, :fatal_deprecations, :instance_format, :instance_uuid_format, :log_date_format, ].each { |param| - it { is_expected.to contain_ceilometer_config("DEFAULT/#{param}").with_ensure('absent') } + it { is_expected.to contain_ceilometer_config("DEFAULT/#{param}").with_value('') } } end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'ceilometer-logging' @@ -134,7 +134,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'ceilometer-logging' diff --git a/ceilometer/spec/classes/ceilometer_policy_spec.rb b/ceilometer/spec/classes/ceilometer_policy_spec.rb index 46c0b1890..7c1b5b0ef 100644 --- a/ceilometer/spec/classes/ceilometer_policy_spec.rb +++ b/ceilometer/spec/classes/ceilometer_policy_spec.rb @@ -25,7 +25,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'ceilometer policies' @@ -33,7 +33,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'ceilometer policies' diff --git a/ceilometer/spec/classes/ceilometer_wsgi_apache_spec.rb b/ceilometer/spec/classes/ceilometer_wsgi_apache_spec.rb index ad4c07654..54fcfb3a5 100644 --- a/ceilometer/spec/classes/ceilometer_wsgi_apache_spec.rb +++ b/ceilometer/spec/classes/ceilometer_wsgi_apache_spec.rb @@ -87,10 +87,10 @@ context 'on RedHat platforms' do let :facts do - global_facts.merge({ + @default_facts.merge(global_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.0' - }) + })) end let 
:platform_parameters do @@ -107,11 +107,11 @@ context 'on Debian platforms' do let :facts do - global_facts.merge({ + @default_facts.merge(global_facts.merge({ :osfamily => 'Debian', :operatingsystem => 'Debian', :operatingsystemrelease => '7.0' - }) + })) end let :platform_parameters do diff --git a/ceilometer/spec/spec_helper.rb b/ceilometer/spec/spec_helper.rb index 3df4cede1..9bc7bcf96 100644 --- a/ceilometer/spec/spec_helper.rb +++ b/ceilometer/spec/spec_helper.rb @@ -5,6 +5,9 @@ RSpec.configure do |c| c.alias_it_should_behave_like_to :it_configures, 'configures' c.alias_it_should_behave_like_to :it_raises, 'raises' + c.before :each do + @default_facts = { :os_service_default => '' } + end end at_exit { RSpec::Puppet::Coverage.report! } diff --git a/ceph/.gitreview b/ceph/.gitreview index 206cfb9e0..9c7dba51a 100644 --- a/ceph/.gitreview +++ b/ceph/.gitreview @@ -1,4 +1,4 @@ [gerrit] host=review.openstack.org port=29418 -project=stackforge/puppet-ceph.git +project=openstack/puppet-ceph.git diff --git a/ceph/Puppetfile b/ceph/Puppetfile index f84861ad4..38e09fc04 100644 --- a/ceph/Puppetfile +++ b/ceph/Puppetfile @@ -2,7 +2,7 @@ forge "http://forge.puppetlabs.com" mod 'puppetlabs/stdlib', :git => 'git://github.com/puppetlabs/puppetlabs-stdlib', - :ref => 'origin/4.x' + :ref => 'origin/4.9.x' mod 'puppetlabs/apt', :git => 'git://github.com/puppetlabs/puppetlabs-apt', diff --git a/ceph/README.md b/ceph/README.md index 1d711e6df..a10d3ef71 100644 --- a/ceph/README.md +++ b/ceph/README.md @@ -45,7 +45,7 @@ Development ----------- ``` -git clone https://github.com/stackforge/puppet-ceph.git +git clone https://github.com/openstack/puppet-ceph.git cd puppet-ceph sudo gem install bundler bundle install @@ -188,13 +188,13 @@ chmod +x ./ci.sh GEM_HOME=~/.gems screen -dmS puppet-ceph gerritexec \ --timeout 14400 --hostname review.openstack.org \ --verbose --username puppetceph --script "../ci.sh > /tmp/out$$ 2>&1 ; r=$? 
; pastebinit /tmp/out$$ ; exit $r #" \ - --project stackforge/puppet-ceph + --project openstack/puppet-ceph ``` Contributors ------------ -* https://github.com/stackforge/puppet-ceph/graphs/contributors +* https://github.com/openstack/puppet-ceph/graphs/contributors Release Notes ------------- diff --git a/ceph/Rakefile b/ceph/Rakefile index 8cac3021d..69a1e2ae5 100644 --- a/ceph/Rakefile +++ b/ceph/Rakefile @@ -10,4 +10,3 @@ end PuppetLint.configuration.fail_on_warnings = true PuppetLint.configuration.send('disable_80chars') -PuppetLint.configuration.send('disable_class_parameter_defaults') diff --git a/ceph/examples/common.yaml b/ceph/examples/common.yaml index 6ef85e388..4244dc063 100644 --- a/ceph/examples/common.yaml +++ b/ceph/examples/common.yaml @@ -44,6 +44,6 @@ ceph::profile::params::client_keys: ceph::profile::params::osds: '/dev/sdc': - journal: '/dev/sdb1' + journal: '/dev/sdb' '/dev/sdd': - journal: '/dev/sdb2' + journal: '/dev/sdb' diff --git a/ceph/examples/nodes/first.yaml b/ceph/examples/nodes/first.yaml index a57312071..02ea11dbb 100644 --- a/ceph/examples/nodes/first.yaml +++ b/ceph/examples/nodes/first.yaml @@ -2,5 +2,5 @@ ######## OSD ceph::profile::params::osds: '/dev/sdb': - journal: '/tmp/journal' + journal: '/srv/journal' diff --git a/ceph/examples/nodes/second.yaml b/ceph/examples/nodes/second.yaml index a57312071..02ea11dbb 100644 --- a/ceph/examples/nodes/second.yaml +++ b/ceph/examples/nodes/second.yaml @@ -2,5 +2,5 @@ ######## OSD ceph::profile::params::osds: '/dev/sdb': - journal: '/tmp/journal' + journal: '/srv/journal' diff --git a/ceph/manifests/repo.pp b/ceph/manifests/repo.pp index f3a4e1a83..5f791efa4 100644 --- a/ceph/manifests/repo.pp +++ b/ceph/manifests/repo.pp @@ -180,7 +180,7 @@ name => 'ext-ceph-fastcgi', baseurl => "http://gitbuilder.ceph.com/mod_fastcgi-rpm-rhel${el}-x86_64-basic/ref/master", gpgcheck => '1', - gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc', + gpgkey => 'https://git.ceph.com/autobuild.asc', mirrorlist => absent, priority => '20', # prefer ceph repos over EPEL tag => 'ceph', diff --git a/ceph/metadata.json b/ceph/metadata.json index 8fee7ef4b..fd79dc1c4 100644 --- a/ceph/metadata.json +++ b/ceph/metadata.json @@ -1,10 +1,10 @@ { - "name": "stackforge-ceph", + "name": "openstack-ceph", "version": "1.0.0", - "author": "Puppet Ceph community and StackForge Contributors", + "author": "Puppet Ceph community and OpenStack (StackForge) Contributors", "summary": "Community Developed Ceph Module", "license": "Apache-2.0", - "source": "git://github.com/stackforge/puppet-ceph.git", + "source": "git://github.com/openstack/puppet-ceph.git", "project_page": "https://launchpad.net/puppet-ceph", "issues_url": "https://bugs.launchpad.net/puppet-ceph", "requirements": [ diff --git a/ceph/spec/acceptance/ceph_rgw_spec.rb b/ceph/spec/acceptance/ceph_rgw_spec.rb index 1f242ab7c..590026792 100644 --- a/ceph/spec/acceptance/ceph_rgw_spec.rb +++ b/ceph/spec/acceptance/ceph_rgw_spec.rb @@ -102,6 +102,7 @@ class { 'ceph::repo': class { 'ceph': fsid => '#{fsid}', mon_host => $::ipaddress, + mon_initial_members => 'a', osd_pool_default_size => '1', osd_pool_default_min_size => '1', } @@ -201,10 +202,6 @@ class { 'ceph': Keystone_user_role<||> -> Ceph::Rgw::Keystone['radosgw.gateway'] EOS - apply_manifest(pp, :catch_failures => true) - # Enable as soon as remaining changes are fixed - #apply_manifest(pp, :catch_changes => true) - osfamily = fact 'osfamily' servicequery = { @@ -212,14 +209,21 @@ class { 'ceph': 'RedHat' => 
'service ceph-radosgw status id=radosgw.gateway', } - shell servicequery[osfamily] do |r| - expect(r.exit_code).to be_zero - end + # RGW on CentOS is currently broken, so lets disable tests for now. + if osfamily != 'RedHat' + apply_manifest(pp, :catch_failures => true) + # Enable as soon as remaining changes are fixed + #apply_manifest(pp, :catch_changes => true) + + shell servicequery[osfamily] do |r| + expect(r.exit_code).to be_zero + end - shell "swift -V 2.0 -A http://127.0.0.1:5000/v2.0 -U #{test_tenant}:#{test_user} -K #{test_password} stat" do |r| - expect(r.exit_code).to be_zero - expect(r.stdout).to match(/Content-Type: text\/plain; charset=utf-8/) - expect(r.stdout).not_to match(/401 Unauthorized/) + shell "swift -V 2.0 -A http://127.0.0.1:5000/v2.0 -U #{test_tenant}:#{test_user} -K #{test_password} stat" do |r| + expect(r.exit_code).to be_zero + expect(r.stdout).to match(/Content-Type: text\/plain; charset=utf-8/) + expect(r.stdout).not_to match(/401 Unauthorized/) + end end end @@ -275,7 +279,12 @@ class { 'apache': } EOS - apply_manifest(purge, :catch_failures => true) + osfamily = fact 'osfamily' + + # RGW on CentOS is currently broken, so lets disable tests for now. + if osfamily != 'RedHat' + apply_manifest(purge, :catch_failures => true) + end end end end diff --git a/ceph/spec/classes/ceph_osds_spec.rb b/ceph/spec/classes/ceph_osds_spec.rb index 7721e8d40..33ba80282 100644 --- a/ceph/spec/classes/ceph_osds_spec.rb +++ b/ceph/spec/classes/ceph_osds_spec.rb @@ -24,7 +24,7 @@ { :args => { '/dev/sdb' => { - 'journal' => '/tmp/journal', + 'journal' => '/srv/journal', }, '/srv/data' => { }, @@ -38,7 +38,7 @@ it { is_expected.to contain_ceph__osd('/dev/sdb').with( :ensure => 'present', - :journal => '/tmp/journal', + :journal => '/srv/journal', :cluster => 'CLUSTER' ) is_expected.to contain_ceph__osd('/srv/data').with( diff --git a/ceph/spec/classes/ceph_profile_osd_spec.rb b/ceph/spec/classes/ceph_profile_osd_spec.rb index 0b309bdd3..aaa175655 100644 --- a/ceph/spec/classes/ceph_profile_osd_spec.rb +++ b/ceph/spec/classes/ceph_profile_osd_spec.rb @@ -27,8 +27,8 @@ end it { is_expected.to contain_class('ceph::profile::client') } - it { is_expected.to contain_ceph__osd('/dev/sdc').with(:journal => '/dev/sdb1') } - it { is_expected.to contain_ceph__osd('/dev/sdd').with(:journal => '/dev/sdb2') } + it { is_expected.to contain_ceph__osd('/dev/sdc').with(:journal => '/dev/sdb') } + it { is_expected.to contain_ceph__osd('/dev/sdd').with(:journal => '/dev/sdb') } end context 'with the host specific first.yaml' do @@ -38,7 +38,7 @@ end it { is_expected.to contain_class('ceph::profile::client') } - it { is_expected.to contain_ceph__osd('/dev/sdb').with( :journal => '/tmp/journal') } + it { is_expected.to contain_ceph__osd('/dev/sdb').with( :journal => '/srv/journal') } end end diff --git a/ceph/spec/classes/ceph_repo_spec.rb b/ceph/spec/classes/ceph_repo_spec.rb index b6678f64e..4ff9dd820 100644 --- a/ceph/spec/classes/ceph_repo_spec.rb +++ b/ceph/spec/classes/ceph_repo_spec.rb @@ -332,7 +332,7 @@ :name => 'ext-ceph-fastcgi', :baseurl => 'http://gitbuilder.ceph.com/mod_fastcgi-rpm-rhel6-x86_64-basic/ref/master', :gpgcheck => '1', - :gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc', + :gpgkey => 'https://git.ceph.com/autobuild.asc', :mirrorlist => 'absent', :priority => '20' ) } @@ -440,7 +440,7 @@ :name => 'ext-ceph-fastcgi', :baseurl => 'http://gitbuilder.ceph.com/mod_fastcgi-rpm-rhel6-x86_64-basic/ref/master', :gpgcheck => '1', - :gpgkey => 
'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc', + :gpgkey => 'https://git.ceph.com/autobuild.asc', :mirrorlist => 'absent', :priority => '20' ) } @@ -588,7 +588,7 @@ :name => 'ext-ceph-fastcgi', :baseurl => 'http://gitbuilder.ceph.com/mod_fastcgi-rpm-rhel7-x86_64-basic/ref/master', :gpgcheck => '1', - :gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc', + :gpgkey => 'https://git.ceph.com/autobuild.asc', :mirrorlist => 'absent', :priority => '20' ) } @@ -686,7 +686,7 @@ :name => 'ext-ceph-fastcgi', :baseurl => 'http://gitbuilder.ceph.com/mod_fastcgi-rpm-rhel7-x86_64-basic/ref/master', :gpgcheck => '1', - :gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc', + :gpgkey => 'https://git.ceph.com/autobuild.asc', :mirrorlist => 'absent', :priority => '20' ) } diff --git a/ceph/spec/defines/ceph_osd_spec.rb b/ceph/spec/defines/ceph_osd_spec.rb index cf57ab8f1..01ff1e509 100644 --- a/ceph/spec/defines/ceph_osd_spec.rb +++ b/ceph/spec/defines/ceph_osd_spec.rb @@ -26,39 +26,39 @@ describe "with default params" do let :title do - '/tmp' + '/srv' end - it { is_expected.to contain_exec('ceph-osd-prepare-/tmp').with( + it { is_expected.to contain_exec('ceph-osd-prepare-/srv').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements set -ex -if ! test -b /tmp ; then - mkdir -p /tmp +if ! test -b /srv ; then + mkdir -p /srv fi -ceph-disk prepare /tmp +ceph-disk prepare /srv ", 'unless' => "/bin/true # comment to satisfy puppet syntax requirements set -ex -ceph-disk list | grep -E ' */tmp1? .*ceph data, (prepared|active)' || -ls -l /var/lib/ceph/osd/ceph-* | grep ' /tmp\$' +ceph-disk list | grep -E ' */srv1? .*ceph data, (prepared|active)' || +ls -l /var/lib/ceph/osd/ceph-* | grep ' /srv\$' ", 'logoutput' => true ) } - it { is_expected.to contain_exec('ceph-osd-activate-/tmp').with( + it { is_expected.to contain_exec('ceph-osd-activate-/srv').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements set -ex -if ! test -b /tmp ; then - mkdir -p /tmp +if ! test -b /srv ; then + mkdir -p /srv fi # activate happens via udev when using the entire device -if ! test -b /tmp || ! test -b /tmp1 ; then - ceph-disk activate /tmp || true +if ! test -b /srv || ! test -b /srv1 ; then + ceph-disk activate /srv || true fi ", 'unless' => "/bin/true # comment to satisfy puppet syntax requirements set -ex -ceph-disk list | grep -E ' */tmp1? .*ceph data, active' || -ls -ld /var/lib/ceph/osd/ceph-* | grep ' /tmp\$' +ceph-disk list | grep -E ' */srv1? .*ceph data, active' || +ls -ld /var/lib/ceph/osd/ceph-* | grep ' /srv\$' ", 'logoutput' => true ) } @@ -67,46 +67,46 @@ describe "with custom params" do let :title do - '/tmp/data' + '/srv/data' end let :params do { :cluster => 'testcluster', - :journal => '/tmp/journal', + :journal => '/srv/journal', } end - it { is_expected.to contain_exec('ceph-osd-prepare-/tmp/data').with( + it { is_expected.to contain_exec('ceph-osd-prepare-/srv/data').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements set -ex -if ! test -b /tmp/data ; then - mkdir -p /tmp/data +if ! test -b /srv/data ; then + mkdir -p /srv/data fi -ceph-disk prepare --cluster testcluster /tmp/data /tmp/journal +ceph-disk prepare --cluster testcluster /srv/data /srv/journal ", 'unless' => "/bin/true # comment to satisfy puppet syntax requirements set -ex -ceph-disk list | grep -E ' */tmp/data1? 
.*ceph data, (prepared|active)' || -ls -l /var/lib/ceph/osd/testcluster-* | grep ' /tmp/data\$' +ceph-disk list | grep -E ' */srv/data1? .*ceph data, (prepared|active)' || +ls -l /var/lib/ceph/osd/testcluster-* | grep ' /srv/data\$' ", 'logoutput' => true ) } - it { is_expected.to contain_exec('ceph-osd-activate-/tmp/data').with( + it { is_expected.to contain_exec('ceph-osd-activate-/srv/data').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements set -ex -if ! test -b /tmp/data ; then - mkdir -p /tmp/data +if ! test -b /srv/data ; then + mkdir -p /srv/data fi # activate happens via udev when using the entire device -if ! test -b /tmp/data || ! test -b /tmp/data1 ; then - ceph-disk activate /tmp/data || true +if ! test -b /srv/data || ! test -b /srv/data1 ; then + ceph-disk activate /srv/data || true fi ", 'unless' => "/bin/true # comment to satisfy puppet syntax requirements set -ex -ceph-disk list | grep -E ' */tmp/data1? .*ceph data, active' || -ls -ld /var/lib/ceph/osd/testcluster-* | grep ' /tmp/data\$' +ceph-disk list | grep -E ' */srv/data1? .*ceph data, active' || +ls -ld /var/lib/ceph/osd/testcluster-* | grep ' /srv/data\$' ", 'logoutput' => true ) } @@ -115,7 +115,7 @@ describe "with ensure absent" do let :title do - '/tmp' + '/srv' end let :params do @@ -124,14 +124,14 @@ } end - it { is_expected.to contain_exec('remove-osd-/tmp').with( + it { is_expected.to contain_exec('remove-osd-/srv').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements set -ex if [ -z \"\$id\" ] ; then - id=\$(ceph-disk list | sed -nEe 's:^ */tmp1? .*(ceph data|mounted on).*osd\\.([0-9]+).*:\\2:p') + id=\$(ceph-disk list | sed -nEe 's:^ */srv1? .*(ceph data|mounted on).*osd\\.([0-9]+).*:\\2:p') fi if [ -z \"\$id\" ] ; then - id=\$(ls -ld /var/lib/ceph/osd/ceph-* | sed -nEe 's:.*/ceph-([0-9]+) *-> */tmp\$:\\1:p' || true) + id=\$(ls -ld /var/lib/ceph/osd/ceph-* | sed -nEe 's:.*/ceph-([0-9]+) *-> */srv\$:\\1:p' || true) fi if [ \"\$id\" ] ; then stop ceph-osd cluster=ceph id=\$id || true @@ -146,10 +146,10 @@ 'unless' => "/bin/true # comment to satisfy puppet syntax requirements set -ex if [ -z \"\$id\" ] ; then - id=\$(ceph-disk list | sed -nEe 's:^ */tmp1? .*(ceph data|mounted on).*osd\\.([0-9]+).*:\\2:p') + id=\$(ceph-disk list | sed -nEe 's:^ */srv1? .*(ceph data|mounted on).*osd\\.([0-9]+).*:\\2:p') fi if [ -z \"\$id\" ] ; then - id=\$(ls -ld /var/lib/ceph/osd/ceph-* | sed -nEe 's:.*/ceph-([0-9]+) *-> */tmp\$:\\1:p' || true) + id=\$(ls -ld /var/lib/ceph/osd/ceph-* | sed -nEe 's:.*/ceph-([0-9]+) *-> */srv\$:\\1:p' || true) fi if [ \"\$id\" ] ; then test ! 
-d /var/lib/ceph/osd/ceph-\$id diff --git a/ceph/spec/defines/ceph_rgw_apache_spec.rb b/ceph/spec/defines/ceph_rgw_apache_spec.rb index 28f93c626..8e4c0c754 100644 --- a/ceph/spec/defines/ceph_rgw_apache_spec.rb +++ b/ceph/spec/defines/ceph_rgw_apache_spec.rb @@ -126,13 +126,14 @@ class { 'ceph::repo': let :facts do { - :concat_basedir => '/var/lib/puppet/concat', - :fqdn => 'myhost.domain', - :hostname => 'myhost', - :osfamily => 'RedHat', - :operatingsystem => 'RedHat', - :operatingsystemrelease => '6', - :pkg_fastcgi => 'libapache2-mod-fastcgi', + :concat_basedir => '/var/lib/puppet/concat', + :fqdn => 'myhost.domain', + :hostname => 'myhost', + :osfamily => 'RedHat', + :operatingsystem => 'RedHat', + :operatingsystemrelease => '6', + :operatingsystemmajrelease => '6', + :pkg_fastcgi => 'libapache2-mod-fastcgi', } end diff --git a/ceph/spec/defines/ceph_rgw_keystone_spec.rb b/ceph/spec/defines/ceph_rgw_keystone_spec.rb index 303a4096e..0e8f1d474 100644 --- a/ceph/spec/defines/ceph_rgw_keystone_spec.rb +++ b/ceph/spec/defines/ceph_rgw_keystone_spec.rb @@ -141,13 +141,14 @@ class { 'ceph::repo': extras => true, fastcgi => true, } let :facts do { - :concat_basedir => '/var/lib/puppet/concat', - :fqdn => 'myhost.domain', - :hostname => 'myhost', - :lsbdistcodename => 'Final', - :osfamily => 'RedHat', - :operatingsystem => 'RedHat', - :operatingsystemrelease => '6', + :concat_basedir => '/var/lib/puppet/concat', + :fqdn => 'myhost.domain', + :hostname => 'myhost', + :lsbdistcodename => 'Final', + :osfamily => 'RedHat', + :operatingsystem => 'RedHat', + :operatingsystemrelease => '6', + :operatingsystemmajrelease => '6', } end diff --git a/ceph/spec/fixtures/hieradata/common.yaml b/ceph/spec/fixtures/hieradata/common.yaml index b20c9e6f9..784591418 100644 --- a/ceph/spec/fixtures/hieradata/common.yaml +++ b/ceph/spec/fixtures/hieradata/common.yaml @@ -41,6 +41,6 @@ ceph::profile::params::client_keys: cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=volumes' ceph::profile::params::osds: '/dev/sdc': - journal: '/dev/sdb1' + journal: '/dev/sdb' '/dev/sdd': - journal: '/dev/sdb2' + journal: '/dev/sdb' diff --git a/ceph/spec/fixtures/hieradata/nodes/first.yaml b/ceph/spec/fixtures/hieradata/nodes/first.yaml index a57312071..02ea11dbb 100644 --- a/ceph/spec/fixtures/hieradata/nodes/first.yaml +++ b/ceph/spec/fixtures/hieradata/nodes/first.yaml @@ -2,5 +2,5 @@ ######## OSD ceph::profile::params::osds: '/dev/sdb': - journal: '/tmp/journal' + journal: '/srv/journal' diff --git a/ceph/spec/spec_helper_acceptance.rb b/ceph/spec/spec_helper_acceptance.rb index 8c7158fc7..1639f3706 100644 --- a/ceph/spec/spec_helper_acceptance.rb +++ b/ceph/spec/spec_helper_acceptance.rb @@ -45,7 +45,7 @@ on host, puppet('module install puppetlabs/apt --version ">=1.4.0 <2.0.0"'), { :acceptable_exit_codes => [0,1] } on host, puppet('module install puppetlabs/concat --version ">=1.2.1 <2.0.0"'), { :acceptable_exit_codes => [0,1] } on host, puppet('module install puppetlabs/apache --version ">=1.4.1 <2.0.0"'), { :acceptable_exit_codes => [0,1] } - on host, puppet('module install stackforge/keystone --version ">=5.1.0 <6.0.0"'), { :acceptable_exit_codes => [0,1] } + on host, puppet('module install stackforge/keystone --version ">=5.1.0 <6.0.0"'), { :acceptable_exit_codes => [0,1] } # keystone >=5.1.0 <6.0.0 is not present in openstack/keystone puppet_module_install(:source => proj_root, :module_name => 'ceph') # Flush the firewall flushfw = <<-EOS diff --git a/ceph/spec/spec_helper_system.rb 
b/ceph/spec/spec_helper_system.rb index 695d12cd8..a64826b88 100644 --- a/ceph/spec/spec_helper_system.rb +++ b/ceph/spec/spec_helper_system.rb @@ -55,7 +55,7 @@ shell(:command => 'puppet module install --version ">=1.4.1 <2.0.0" puppetlabs/apache', :node => vm) shell(:command => 'puppet module install --version ">=5.1.0 <6.0.0" stackforge/keystone', - :node => vm) + :node => vm) # keystone >=5.1.0 <6.0.0 is not present in openstack/keystone rcp(:sp => File.join(proj_root, 'spec/fixtures/hieradata/hiera.yaml'), :dp => '/etc/puppet/hiera.yaml', :d => node(:name => vm)) diff --git a/ceph/spec/system/ceph_osd_spec.rb b/ceph/spec/system/ceph_osd_spec.rb index c839c4598..b613abdf6 100644 --- a/ceph/spec/system/ceph_osd_spec.rb +++ b/ceph/spec/system/ceph_osd_spec.rb @@ -228,7 +228,7 @@ class { 'ceph': authentication_type => 'none', } ceph::osd { '#{data}': - journal => '/tmp/journal' + journal => '/srv/journal' } EOS diff --git a/ceph/spec/system/ceph_profile_osd_spec.rb b/ceph/spec/system/ceph_profile_osd_spec.rb index b9fea63a2..75906b97f 100644 --- a/ceph/spec/system/ceph_profile_osd_spec.rb +++ b/ceph/spec/system/ceph_profile_osd_spec.rb @@ -135,7 +135,7 @@ class { 'ceph::repo': ceph::profile::params::authentication_type: 'none' ceph::profile::params::osds: '/dev/sdb': - journal: '/tmp/journal' + journal: '/srv/journal' EOS file = Tempfile.new('hieradata') diff --git a/ceph/spec/unit/provider/ceph_config/ini_setting_spec.rb b/ceph/spec/unit/provider/ceph_config/ini_setting_spec.rb index 8a944c691..e2b84cb23 100644 --- a/ceph/spec/unit/provider/ceph_config/ini_setting_spec.rb +++ b/ceph/spec/unit/provider/ceph_config/ini_setting_spec.rb @@ -15,7 +15,7 @@ # Author: Andrew Woodward # This is aparently one of the few ways to do this load -# see https://github.com/stackforge/puppet-nova/blob/master/spec/unit/provider/nova_config/ini_setting_spec.rb +# see https://github.com/openstack/puppet-nova/blob/master/spec/unit/provider/nova_config/ini_setting_spec.rb $LOAD_PATH.push( File.join( File.dirname(__FILE__), diff --git a/cinder/CHANGELOG.md b/cinder/CHANGELOG.md index a741ab0bb..da520ebbb 100644 --- a/cinder/CHANGELOG.md +++ b/cinder/CHANGELOG.md @@ -1,3 +1,55 @@ +##2015-11-25 - 7.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Liberty. 
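(Editor's note: the change list below is easier to follow next to a concrete manifest. The sketch here is lifted from the acceptance test updated later in this same patch, cinder/spec/acceptance/basic_cinder_spec.rb; the `mysql+pymysql://` scheme reflects the pymysql handling added to `cinder::db` in this patch, and the credentials and host are the test's placeholder values, not recommendations.)

```
# Minimal sketch of a cinder declaration against this release, as exercised by
# the acceptance test in this patch. Values are the test's placeholders.
class { '::cinder':
  database_connection => 'mysql+pymysql://cinder:a_big_secret@127.0.0.1/cinder?charset=utf8',
  rabbit_userid       => 'cinder',
  rabbit_password     => 'an_even_bigger_secret',
  rabbit_host         => '127.0.0.1',
}
```

Note that unset options now fall back to `$::os_service_default` rather than hard-coded defaults, per the "switch cinder to use os_service_default" item below.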
+ +####Backwards-incompatible changes +- rabbitmq: do not manage rabbitmq service anymore +- remove deprecated mysql_module +- change section name for AMQP qpid parameters +- change section name for AMQP rabbit parameters + +####Features +- make it possible to have multiple type_sets with the same value +- keystone/auth: make service description configurable +- add support for RabbitMQ connection heartbeat +- add tag to package and service resources +- sync cinder::db::sync with new standard +- set parameter host in vmdk backend +- add lock_path to cinder config +- add 'host' parameter to cinder.conf +- add an ability to manage use_stderr parameter +- reflect provider change in puppet-openstacklib +- introduce cinder::db class +- add support for Pure Volume Drivers +- implement class and defined type for cinder GPFS driver +- allow RBD host to be configurable +- add posibility to specify privileged user for Cinder +- switch cinder to use os_service_default +- simplify rpc_backend parameter +- put all the logging related parameters to the logging class +- specify a dedicated keystone user for cinderv2 +- db: Use postgresql lib class for psycopg package +- add kombu_reconnect_delay parameter to cinder +- allow customization of db sync command line +- add possibility to use list of qpid hosts in cinder + +####Bugfixes +- rely on autorequire for config resource ordering +- dont run losetup if it's been done already +- don't run vgcreate if vg exists +- make sure cinder user is created before creating Cinder::Type +- type: Add retry to Exec resource + +####Maintenance +- fix rspec 3.x syntax +- initial msync run for all Puppet OpenStack modules +- acceptance: enable debug & verbosity for OpenStack logs +- try to use zuul-cloner to prepare fixtures +- remove class_parameter_defaults puppet-lint check +- add test coverage for cinder::db::mysql +- acceptance: use common bits from puppet-openstack-integration + ##2015-10-10 - 6.1.0 ###Summary diff --git a/cinder/README.md b/cinder/README.md index af609d1f3..a28288682 100644 --- a/cinder/README.md +++ b/cinder/README.md @@ -1,7 +1,7 @@ cinder ======= -6.1.0 - 2015.1 - Kilo +7.0.0 - 2015.2 - Liberty #### Table of Contents @@ -23,14 +23,14 @@ Module Description The cinder module is a thorough attempt to make Puppet capable of managing the entirety of cinder. This includes manifests to provision such things as keystone endpoints, RPC configurations specific to cinder, and database connections. Types are shipped as part of the cinder module to assist in manipulation of configuration files. -This module is tested in combination with other modules needed to build and leverage an entire Openstack software stack. These modules can be found, all pulled together in the [openstack module](https://github.com/stackforge/puppet-openstack). +This module is tested in combination with other modules needed to build and leverage an entire Openstack software stack. Setup ----- **What the cinder module affects** -* cinder, the block storage service for Openstack. +* [Cinder](https://wiki.openstack.org/wiki/Cinder), the block storage service for Openstack. ### Installing cinder @@ -38,7 +38,7 @@ Setup ### Beginning with cinder -To utilize the cinder module's functionality you will need to declare multiple resources. The following is a modified excerpt from the [openstack module](https://github.com/stackforge/puppet-openstack). 
This is not an exhaustive list of all the components needed, we recommend you consult and understand the [openstack module](https://github.com/stackforge/puppet-openstack) and the [core openstack](http://docs.openstack.org) documentation. +To utilize the cinder module's functionality you will need to declare multiple resources. This is not an exhaustive list of all the components needed, we recommend you consult and understand the [core openstack](http://docs.openstack.org) documentation. **Define a cinder control node** diff --git a/cinder/manifests/api.pp b/cinder/manifests/api.pp index 6ffd8a6f3..0155dccb5 100644 --- a/cinder/manifests/api.pp +++ b/cinder/manifests/api.pp @@ -56,6 +56,11 @@ # (optional) Auth URL associated with the OpenStack privileged account. # Defaults to $::os_service_default. # +# [*keymgr_encryption_auth_url*] +# (optional) Auth URL for keymgr authentication. Should be in format +# http://auth_url:5000/v3 +# Defaults to $::os_service_default. +# # [*os_region_name*] # (optional) Some operations require cinder to make API requests # to Nova. This sets the keystone region to be used for these @@ -169,6 +174,7 @@ $os_privileged_user_password = $::os_service_default, $os_privileged_user_tenant = $::os_service_default, $os_privileged_user_auth_url = $::os_service_default, + $keymgr_encryption_auth_url = $::os_service_default, $service_workers = $::processorcount, $package_ensure = 'present', $bind_host = '0.0.0.0', @@ -277,70 +283,56 @@ else { $auth_uri_real = $auth_uri } - cinder_api_paste_ini { 'filter:authtoken/auth_uri': value => $auth_uri_real; } + + cinder_config { + 'keystone_authtoken/auth_uri': value => $auth_uri_real; + 'keymgr/encryption_auth_url' : value => $keymgr_encryption_auth_url; + } if $keystone_enabled { cinder_config { - 'DEFAULT/auth_strategy': value => 'keystone' ; + 'DEFAULT/auth_strategy': value => 'keystone' ; + 'keystone_authtoken/admin_tenant_name': value => $keystone_tenant; + 'keystone_authtoken/admin_user': value => $keystone_user; + 'keystone_authtoken/admin_password': value => $keystone_password, secret => true; } - cinder_api_paste_ini { - 'filter:authtoken/admin_tenant_name': value => $keystone_tenant; - 'filter:authtoken/admin_user': value => $keystone_user; - 'filter:authtoken/admin_password': value => $keystone_password, secret => true; - } # if both auth_uri and identity_uri are set we skip these deprecated settings entirely if !$auth_uri or !$identity_uri { if $keystone_auth_host { warning('The keystone_auth_host parameter is deprecated. Please use auth_uri and identity_uri instead.') - cinder_api_paste_ini { - 'filter:authtoken/service_host': value => $keystone_auth_host; - 'filter:authtoken/auth_host': value => $keystone_auth_host; + cinder_config { + 'keystone_authtoken/auth_host': value => $keystone_auth_host; } } else { - cinder_api_paste_ini { - 'filter:authtoken/service_host': ensure => absent; - 'filter:authtoken/auth_host': ensure => absent; + cinder_config { + 'keystone_authtoken/auth_host': ensure => absent; } } if $keystone_auth_protocol { warning('The keystone_auth_protocol parameter is deprecated. 
Please use auth_uri and identity_uri instead.') - cinder_api_paste_ini { - 'filter:authtoken/service_protocol': value => $keystone_auth_protocol; - 'filter:authtoken/auth_protocol': value => $keystone_auth_protocol; + cinder_config { + 'keystone_authtoken/auth_protocol': value => $keystone_auth_protocol; } } else { - cinder_api_paste_ini { - 'filter:authtoken/service_protocol': ensure => absent; - 'filter:authtoken/auth_protocol': ensure => absent; + cinder_config { + 'keystone_authtoken/auth_protocol': ensure => absent; } } if $keystone_auth_port { warning('The keystone_auth_port parameter is deprecated. Please use auth_uri and identity_uri instead.') - cinder_api_paste_ini { - 'filter:authtoken/auth_port': value => $keystone_auth_port; + cinder_config { + 'keystone_authtoken/auth_port': value => $keystone_auth_port; } } else { - cinder_api_paste_ini { - 'filter:authtoken/auth_port': ensure => absent; + cinder_config { + 'keystone_authtoken/auth_port': ensure => absent; } } - if $service_port { - warning('The service_port parameter is deprecated. Please use auth_uri and identity_uri instead.') - cinder_api_paste_ini { - 'filter:authtoken/service_port': value => $service_port; - } - } else { - cinder_api_paste_ini { - 'filter:authtoken/service_port': ensure => absent; - } - } - - if $keystone_auth_admin_prefix { warning('The keystone_auth_admin_prefix parameter is deprecated. Please use auth_uri and identity_uri instead.') validate_re($keystone_auth_admin_prefix, '^(/.+[^/])?$') @@ -357,24 +349,21 @@ cinder_api_paste_ini { 'filter:authtoken/auth_admin_prefix': ensure => absent; } - cinder_api_paste_ini { - 'filter:authtoken/service_port': ensure => absent; - 'filter:authtoken/auth_port': ensure => absent; - 'filter:authtoken/service_host': ensure => absent; - 'filter:authtoken/auth_host': ensure => absent; - 'filter:authtoken/service_protocol': ensure => absent; - 'filter:authtoken/auth_protocol': ensure => absent; + cinder_config { + 'keystone_authtoken/auth_port': ensure => absent; + 'keystone_authtoken/auth_host': ensure => absent; + 'keystone_authtoken/auth_protocol': ensure => absent; } } + } - if $identity_uri { - cinder_api_paste_ini { - 'filter:authtoken/identity_uri': value => $identity_uri; - } - } else { - cinder_api_paste_ini { - 'filter:authtoken/identity_uri': ensure => absent; - } + if $identity_uri { + cinder_config { + 'keystone_authtoken/identity_uri': value => $identity_uri; + } + } else { + cinder_config { + 'keystone_authtoken/identity_uri': ensure => absent; } } diff --git a/cinder/manifests/backup/swift.pp b/cinder/manifests/backup/swift.pp index b17a31204..94e66d757 100644 --- a/cinder/manifests/backup/swift.pp +++ b/cinder/manifests/backup/swift.pp @@ -13,6 +13,10 @@ # Should be a valid Swift URL # Defaults to 'http://localhost:8080/v1/AUTH_' # +# [*backup_swift_auth_url*] +# (optional) The URL of the Keystone endpoint for authentication. +# Defaults to 'http://127.0.0.1:5000/v2.0/' +# # [*backup_swift_container*] # (optional) The default Swift container to use. 
# Defaults to 'volumes_backup' @@ -58,6 +62,7 @@ class cinder::backup::swift ( $backup_driver = 'cinder.backup.drivers.swift', $backup_swift_url = 'http://localhost:8080/v1/AUTH_', + $backup_swift_auth_url = 'http://127.0.0.1:5000/v2.0/', $backup_swift_container = 'volumes_backup', $backup_swift_object_size = $::os_service_default, $backup_swift_retry_attempts = $::os_service_default, @@ -72,6 +77,7 @@ cinder_config { 'DEFAULT/backup_driver': value => $backup_driver; 'DEFAULT/backup_swift_url': value => $backup_swift_url; + 'DEFAULT/backup_swift_auth_url': value => $backup_swift_auth_url; 'DEFAULT/backup_swift_container': value => $backup_swift_container; 'DEFAULT/backup_swift_object_size': value => $backup_swift_object_size; 'DEFAULT/backup_swift_retry_attempts': value => $backup_swift_retry_attempts; diff --git a/cinder/manifests/db.pp b/cinder/manifests/db.pp index 1e2e9231b..0c35d6b40 100644 --- a/cinder/manifests/db.pp +++ b/cinder/manifests/db.pp @@ -56,13 +56,17 @@ $database_max_overflow_real = pick($::cinder::database_max_overflow,$database_max_overflow) validate_re($database_connection_real, - '(sqlite|mysql|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') + '^(sqlite|mysql(\+pymysql)?|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') case $database_connection_real { - /^mysql:\/\//: { - $backend_package = false + /^mysql(\+pymysql)?:\/\//: { require 'mysql::bindings' require 'mysql::bindings::python' + if $database_connection_real =~ /^mysql\+pymysql/ { + $backend_package = $::cinder::params::pymysql_package_name + } else { + $backend_package = false + } } /^postgresql:\/\//: { $backend_package = false diff --git a/cinder/manifests/db/mysql.pp b/cinder/manifests/db/mysql.pp index aac2c658a..2bc5ec755 100644 --- a/cinder/manifests/db/mysql.pp +++ b/cinder/manifests/db/mysql.pp @@ -28,12 +28,11 @@ # [*collate*] # the database collation. Optional. Defaults to 'utf8_general_ci' # -# [*cluster_id*] -# (Optional) TODO -# Defaults to 'localzone'. -# # === Deprecated Parameters # +# [*cluster_id*] +# Unused. No effect. +# class cinder::db::mysql ( $password, $dbname = 'cinder', @@ -47,6 +46,10 @@ validate_string($password) + if $cluster_id != 'localzone' { + warning('The cluster_id parameter is deprecated and has no affect. It will be removed in the next release') + } + ::openstacklib::db::mysql { 'cinder': user => $user, password_hash => mysql_password($password), diff --git a/cinder/manifests/init.pp b/cinder/manifests/init.pp index 863056643..e8edf9367 100644 --- a/cinder/manifests/init.pp +++ b/cinder/manifests/init.pp @@ -96,54 +96,6 @@ # Use durable queues in amqp. # (Optional) Defaults to false. # -# [*qpid_hostname*] -# (Optional) Location of qpid server -# Defaults to 'localhost'. -# -# [*qpid_port*] -# (Optional) Port for qpid server. -# Defaults to '5672'. -# -# [*qpid_hosts*] -# (Optional) Qpid HA cluster host:port pairs. (list value) -# Defaults to false -# -# [*qpid_username*] -# (Optional) Username to use when connecting to qpid. -# Defaults to 'guest'. -# -# [*qpid_password*] -# (Optional) Password to use when connecting to qpid. -# Defaults to 'false'. -# -# [*qpid_sasl_mechanisms*] -# (Optional) ENable one or more SASL mechanisms. -# Defaults to 'false'. -# -# [*qpid_heartbeat*] -# (Optional) Seconds between connection keepalive heartbeats. -# Defaults to '60'. -# -# [*qpid_protocol*] -# (Optional) Transport to use, either 'tcp' or 'ssl'. -# Defaults to 'tcp'. -# -# [*qpid_tcp_nodelay*] -# (Optional) Disable Nagle Algorithm. -# Defaults to 'true'. 
-# -# [*qpid_reconnect*] -# -# [*qpid_reconnect_timeout*] -# -# [*qpid_reconnect_limit*] -# -# [*qpid_reconnect_interval*] -# -# [*qpid_reconnect_interval_min*] -# -# [*qpid_reconnect_interval_max*] -# # [*use_syslog*] # (Optional) Use syslog for logging. # Defaults to undef. @@ -236,6 +188,55 @@ # Defaults to: $::cinder::params::lock_path # # === Deprecated Parameters +# +# [*qpid_hostname*] +# (Optional) Location of qpid server +# Defaults to undef. +# +# [*qpid_port*] +# (Optional) Port for qpid server. +# Defaults to undef. +# +# [*qpid_hosts*] +# (Optional) Qpid HA cluster host:port pairs. (list value) +# Defaults to undef. +# +# [*qpid_username*] +# (Optional) Username to use when connecting to qpid. +# Defaults to undef. +# +# [*qpid_password*] +# (Optional) Password to use when connecting to qpid. +# Defaults to undef. +# +# [*qpid_sasl_mechanisms*] +# (Optional) ENable one or more SASL mechanisms. +# Defaults to undef. +# +# [*qpid_heartbeat*] +# (Optional) Seconds between connection keepalive heartbeats. +# Defaults to undef. +# +# [*qpid_protocol*] +# (Optional) Transport to use, either 'tcp' or 'ssl'. +# Defaults to undef. +# +# [*qpid_tcp_nodelay*] +# (Optional) Disable Nagle Algorithm. +# Defaults to undef. +# +# [*qpid_reconnect*] +# +# [*qpid_reconnect_timeout*] +# +# [*qpid_reconnect_limit*] +# +# [*qpid_reconnect_interval*] +# +# [*qpid_reconnect_interval_min*] +# +# [*qpid_reconnect_interval_max*] +# class cinder ( $database_connection = undef, $database_idle_timeout = undef, @@ -261,21 +262,6 @@ $kombu_ssl_version = $::os_service_default, $kombu_reconnect_delay = $::os_service_default, $amqp_durable_queues = false, - $qpid_hostname = 'localhost', - $qpid_port = '5672', - $qpid_hosts = false, - $qpid_username = 'guest', - $qpid_password = false, - $qpid_sasl_mechanisms = false, - $qpid_reconnect = true, - $qpid_reconnect_timeout = 0, - $qpid_reconnect_limit = 0, - $qpid_reconnect_interval_min = 0, - $qpid_reconnect_interval_max = 0, - $qpid_reconnect_interval = 0, - $qpid_heartbeat = 60, - $qpid_protocol = 'tcp', - $qpid_tcp_nodelay = true, $package_ensure = 'present', $use_ssl = false, $ca_file = $::os_service_default, @@ -293,11 +279,27 @@ $enable_v1_api = true, $enable_v2_api = true, $lock_path = $::cinder::params::lock_path, -) { + # DEPRECATED PARAMETERS + $qpid_hostname = undef, + $qpid_port = undef, + $qpid_hosts = undef, + $qpid_username = undef, + $qpid_password = undef, + $qpid_sasl_mechanisms = undef, + $qpid_reconnect = undef, + $qpid_reconnect_timeout = undef, + $qpid_reconnect_limit = undef, + $qpid_reconnect_interval_min = undef, + $qpid_reconnect_interval_max = undef, + $qpid_reconnect_interval = undef, + $qpid_heartbeat = undef, + $qpid_protocol = undef, + $qpid_tcp_nodelay = undef, + +) inherits cinder::params { include ::cinder::db include ::cinder::logging - include ::cinder::params if $use_ssl { if !$cert_file { @@ -356,47 +358,7 @@ } if $rpc_backend == 'cinder.openstack.common.rpc.impl_qpid' or $rpc_backend == 'qpid' { - - if ! 
$qpid_password { - fail('Please specify a qpid_password parameter.') - } - - cinder_config { - 'oslo_messaging_qpid/qpid_username': value => $qpid_username; - 'oslo_messaging_qpid/qpid_password': value => $qpid_password, secret => true; - 'oslo_messaging_qpid/qpid_reconnect': value => $qpid_reconnect; - 'oslo_messaging_qpid/qpid_reconnect_timeout': value => $qpid_reconnect_timeout; - 'oslo_messaging_qpid/qpid_reconnect_limit': value => $qpid_reconnect_limit; - 'oslo_messaging_qpid/qpid_reconnect_interval_min': value => $qpid_reconnect_interval_min; - 'oslo_messaging_qpid/qpid_reconnect_interval_max': value => $qpid_reconnect_interval_max; - 'oslo_messaging_qpid/qpid_reconnect_interval': value => $qpid_reconnect_interval; - 'oslo_messaging_qpid/qpid_heartbeat': value => $qpid_heartbeat; - 'oslo_messaging_qpid/qpid_protocol': value => $qpid_protocol; - 'oslo_messaging_qpid/qpid_tcp_nodelay': value => $qpid_tcp_nodelay; - 'oslo_messaging_qpid/amqp_durable_queues': value => $amqp_durable_queues; - } - - if $qpid_hosts { - cinder_config { 'oslo_messaging_qpid/qpid_hosts': value => join(any2array($qpid_hosts), ',') } - } else { - cinder_config { 'oslo_messaging_qpid/qpid_hosts': value => "${qpid_hostname}:${qpid_port}" } - cinder_config { 'oslo_messaging_qpid/qpid_hostname': value => $qpid_hostname } - cinder_config { 'oslo_messaging_qpid/qpid_port': value => $qpid_port } - } - - if is_array($qpid_sasl_mechanisms) { - cinder_config { - 'DEFAULT/qpid_sasl_mechanisms': value => join($qpid_sasl_mechanisms, ' '); - } - } elsif $qpid_sasl_mechanisms { - cinder_config { - 'DEFAULT/qpid_sasl_mechanisms': value => $qpid_sasl_mechanisms; - } - } else { - cinder_config { - 'DEFAULT/qpid_sasl_mechanisms': ensure => absent; - } - } + warning('Qpid driver is removed from Oslo.messaging in the Mitaka release') } if ! $default_availability_zone { diff --git a/cinder/manifests/keystone/auth.pp b/cinder/manifests/keystone/auth.pp index df62f7fd0..7a5499946 100644 --- a/cinder/manifests/keystone/auth.pp +++ b/cinder/manifests/keystone/auth.pp @@ -239,6 +239,14 @@ $real_service_name = pick($service_name, $auth_name) $real_service_name_v2 = pick($service_name_v2, $auth_name_v2) + # TODO(mmagr): change default service names according to default_catalog in next (M) cycle + if !$service_name { + warning('Note that service_name parameter default value will be changed to "Volume Service" (according to Keystone default catalog) in a future release. In case you use different value, please update your manifests accordingly.') + } + if !$service_name_v2 { + warning('Note that service_name_v2 parameter default value will be changed to "Volume Service v2" in a future release. 
In case you use different value, please update your manifests accordingly.') + } + if $real_service_name == $real_service_name_v2 { fail('cinder::keystone::auth parameters service_name and service_name_v2 must be different.') } diff --git a/cinder/manifests/params.pp b/cinder/manifests/params.pp index adfa87858..8d2c6db09 100644 --- a/cinder/manifests/params.pp +++ b/cinder/manifests/params.pp @@ -21,6 +21,7 @@ $lio_package_name = 'targetcli' $lock_path = '/var/lock/cinder' $sqlite_package_name = 'python-pysqlite2' + $pymysql_package_name = 'python-pymysql' } elsif($::osfamily == 'RedHat') { @@ -41,6 +42,7 @@ $lio_package_name = 'targetcli' $lock_path = '/var/lib/cinder/tmp' $sqlite_package_name = undef + $pymysql_package_name = undef case $::operatingsystem { 'RedHat', 'CentOS', 'Scientific', 'OracleLinux': { diff --git a/cinder/manifests/qpid.pp b/cinder/manifests/qpid.pp index b5eab2a2b..6c9e658ad 100644 --- a/cinder/manifests/qpid.pp +++ b/cinder/manifests/qpid.pp @@ -1,58 +1,36 @@ # == Class: cinder::qpid # -# class for installing qpid server for cinder +# Deprecated class for installing qpid server for cinder # # === Parameters # # [*enabled*] # (Optional) Whether to enable the qpid service. -# Defaults to 'true'. +# Defaults to undef. # # [*user*] # (Optional) The username to use when connecting to qpid. -# Defaults to 'guest'. +# Defaults to undef. # # [*password*] # (Optional) The password to use when connecting to qpid -# Defaults to 'guest'. +# Defaults to undef. # # [*file*] # (Optional) The SASL database. -# Defaults to '/var/lib/qpidd/qpidd.sasldb'. +# Defaults to undef. # # [*realm*] # (Optional) The Realm for qpid. -# Defaults to 'OPENSTACK'. -# +# Defaults to undef. # class cinder::qpid ( - $enabled = true, - $user ='guest', - $password ='guest', - $file ='/var/lib/qpidd/qpidd.sasldb', - $realm ='OPENSTACK' + $enabled = undef, + $user = undef, + $password = undef, + $file = undef, + $realm = undef ) { - # only configure cinder after the queue is up - Class['qpid::server'] -> Package<| title == 'cinder' |> - - if ($enabled) { - $service_ensure = 'running' - - qpid_user { $user: - password => $password, - file => $file, - realm => $realm, - provider => 'saslpasswd2', - require => Class['qpid::server'], - } - - } else { - $service_ensure = 'stopped' - } - - class { '::qpid::server': - service_ensure => $service_ensure - } - + warning('Qpid driver is removed from Oslo.messaging in the Mitaka release') } diff --git a/cinder/manifests/type.pp b/cinder/manifests/type.pp index 612efeca4..42d79346b 100644 --- a/cinder/manifests/type.pp +++ b/cinder/manifests/type.pp @@ -66,7 +66,7 @@ exec {"cinder type-create ${volume_name}": command => "cinder type-create ${volume_name}", - unless => "cinder type-list | grep -qP '\\b${volume_name}\\b'", + unless => "cinder type-list | grep -qP '\\s${volume_name}\\s'", environment => concat($cinder_env, $region_env), require => Package['python-cinderclient'], path => ['/usr/bin', '/bin'], diff --git a/cinder/metadata.json b/cinder/metadata.json index 5842eca84..3253b2e3c 100644 --- a/cinder/metadata.json +++ b/cinder/metadata.json @@ -1,6 +1,6 @@ { "name": "openstack-cinder", - "version": "6.1.0", + "version": "7.0.0", "author": "Puppet Labs and OpenStack Contributors", "summary": "Puppet module for OpenStack Cinder", "license": "Apache-2.0", @@ -33,9 +33,9 @@ "dependencies": [ { "name": "dprince/qpid", "version_requirement": ">=1.0.0 <2.0.0" }, { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": 
"openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=7.0.0 <8.0.0" }, { "name": "puppetlabs/rabbitmq", "version_requirement": ">=2.0.2 <6.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=7.0.0 <8.0.0" } ] } diff --git a/cinder/spec/acceptance/basic_cinder_spec.rb b/cinder/spec/acceptance/basic_cinder_spec.rb index 597e1f4fb..eeaf6ebde 100644 --- a/cinder/spec/acceptance/basic_cinder_spec.rb +++ b/cinder/spec/acceptance/basic_cinder_spec.rb @@ -29,7 +29,7 @@ # Cinder resources class { '::cinder': - database_connection => 'mysql://cinder:a_big_secret@127.0.0.1/cinder?charset=utf8', + database_connection => 'mysql+pymysql://cinder:a_big_secret@127.0.0.1/cinder?charset=utf8', rabbit_userid => 'cinder', rabbit_password => 'an_even_bigger_secret', rabbit_host => '127.0.0.1', diff --git a/cinder/spec/classes/cinder_api_spec.rb b/cinder/spec/classes/cinder_api_spec.rb index 15bcba38a..20ba39640 100644 --- a/cinder/spec/classes/cinder_api_spec.rb +++ b/cinder/spec/classes/cinder_api_spec.rb @@ -45,45 +45,45 @@ is_expected.to contain_cinder_config('DEFAULT/os_region_name').with( :value => '' ) - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/service_protocol').with( - :value => 'http' + is_expected.to contain_cinder_config('keystone_authtoken/auth_uri').with( + :value => 'http://localhost:5000/' ) - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/service_host').with( - :value => 'localhost' + is_expected.to contain_cinder_config('keystone_authtoken/admin_tenant_name').with( + :value => 'services' ) - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/service_port').with( - :value => '5000' + is_expected.to contain_cinder_config('keystone_authtoken/admin_user').with( + :value => 'cinder' ) - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/auth_protocol').with( + is_expected.to contain_cinder_config('keystone_authtoken/admin_password').with( + :value => 'foo' + ) + is_expected.to contain_cinder_config('keystone_authtoken/auth_protocol').with( :value => 'http' ) - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/auth_host').with( + is_expected.to contain_cinder_config('keystone_authtoken/auth_host').with( :value => 'localhost' ) - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/auth_port').with( + is_expected.to contain_cinder_config('keystone_authtoken/auth_port').with( :value => '35357' ) - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/auth_admin_prefix').with( - :ensure => 'absent' - ) - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/admin_tenant_name').with( - :value => 'services' + is_expected.not_to contain_cinder_config('keystone_authtoken/service_protocol').with( + :value => 'http' ) - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/admin_user').with( - :value => 'cinder' + is_expected.not_to contain_cinder_config('keystone_authtoken/service_host').with( + :value => 'localhost' ) - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/admin_password').with( - :value => 'foo', - :secret => true + is_expected.not_to contain_cinder_config('keystone_authtoken/service_port').with( + :value => '5000' ) - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/auth_uri').with( - :value => 'http://localhost:5000/' + 
is_expected.to contain_cinder_api_paste_ini('filter:authtoken/auth_admin_prefix').with( + :ensure => 'absent' ) is_expected.to contain_cinder_config('DEFAULT/os_privileged_user_name').with_value('') is_expected.to contain_cinder_config('DEFAULT/os_privileged_user_password').with_value('') is_expected.to contain_cinder_config('DEFAULT/os_privileged_user_tenant').with_value('') is_expected.to contain_cinder_config('DEFAULT/os_privileged_user_auth_url').with_value('') + is_expected.to contain_cinder_config('keymgr/encryption_auth_url').with_value('') end end @@ -194,7 +194,7 @@ req_params.merge({'keystone_auth_uri' => 'http://localhost:8080/v2.0/'}) end it 'should configure cinder auth_uri correctly' do - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/auth_uri').with( + is_expected.to contain_cinder_config('keystone_authtoken/auth_uri').with( :value => 'http://localhost:8080/v2.0/' ) end @@ -303,6 +303,16 @@ )} end + describe 'with encryption_auth_url' do + let :params do + req_params.merge({ :keymgr_encryption_auth_url => 'http://localhost:5000/v3' }) + end + + it { is_expected.to contain_cinder_config('keymgr/encryption_auth_url').with( + :value => 'http://localhost:5000/v3' + )} + end + describe 'while validating the service with default command' do let :params do req_params.merge({ @@ -350,15 +360,15 @@ }) end it 'configures identity_uri and auth_uri but deprecates old auth settings' do - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/identity_uri').with_value("https://localhost:35357/"); - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/auth_uri').with_value("https://localhost:5000/v2.0/"); + is_expected.to contain_cinder_config('keystone_authtoken/identity_uri').with_value("https://localhost:35357/") + is_expected.to contain_cinder_config('keystone_authtoken/auth_uri').with_value("https://localhost:5000/v2.0/") is_expected.to contain_cinder_api_paste_ini('filter:authtoken/auth_admin_prefix').with(:ensure => 'absent') - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/auth_port').with(:ensure => 'absent') - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/service_port').with(:ensure => 'absent') - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/auth_protocol').with(:ensure => 'absent') - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/service_protocol').with(:ensure => 'absent') - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/auth_host').with(:ensure => 'absent') - is_expected.to contain_cinder_api_paste_ini('filter:authtoken/service_host').with(:ensure => 'absent') + is_expected.to contain_cinder_config('keystone_authtoken/auth_port').with(:ensure => 'absent') + is_expected.not_to contain_cinder_config('keystone_authtoken/service_port').with(:ensure => 'absent') + is_expected.to contain_cinder_config('keystone_authtoken/auth_protocol').with(:ensure => 'absent') + is_expected.not_to contain_cinder_config('keystone_authtoken/service_protocol').with(:ensure => 'absent') + is_expected.to contain_cinder_config('keystone_authtoken/auth_host').with(:ensure => 'absent') + is_expected.not_to contain_cinder_config('keystone_authtoken/service_host').with(:ensure => 'absent') end end diff --git a/cinder/spec/classes/cinder_backup_swift_spec.rb b/cinder/spec/classes/cinder_backup_swift_spec.rb index f7ba02fa7..36b3b95aa 100644 --- a/cinder/spec/classes/cinder_backup_swift_spec.rb +++ b/cinder/spec/classes/cinder_backup_swift_spec.rb @@ -24,6 +24,7 @@ let :default_params do { 
:backup_swift_url => 'http://localhost:8080/v1/AUTH_', + :backup_swift_auth_url => 'http://127.0.0.1:5000/v2.0/', :backup_swift_container => 'volumes_backup', :backup_swift_object_size => '', :backup_swift_retry_attempts => '', @@ -43,6 +44,7 @@ it 'configures cinder.conf' do is_expected.to contain_cinder_config('DEFAULT/backup_driver').with_value('cinder.backup.drivers.swift') is_expected.to contain_cinder_config('DEFAULT/backup_swift_url').with_value(p[:backup_swift_url]) + is_expected.to contain_cinder_config('DEFAULT/backup_swift_auth_url').with_value(p[:backup_swift_auth_url]) is_expected.to contain_cinder_config('DEFAULT/backup_swift_container').with_value(p[:backup_swift_container]) is_expected.to contain_cinder_config('DEFAULT/backup_swift_object_size').with_value(p[:backup_swift_object_size]) is_expected.to contain_cinder_config('DEFAULT/backup_swift_retry_attempts').with_value(p[:backup_swift_retry_attempts]) @@ -53,6 +55,7 @@ context 'when overriding default parameters' do before :each do params.merge!(:backup_swift_url => 'https://controller2:8080/v1/AUTH_') + params.merge!(:backup_swift_auth_url => 'https://controller2:35357') params.merge!(:backup_swift_container => 'toto') params.merge!(:backup_swift_object_size => '123') params.merge!(:backup_swift_retry_attempts => '99') @@ -61,6 +64,7 @@ end it 'should replace default parameters with new values' do is_expected.to contain_cinder_config('DEFAULT/backup_swift_url').with_value(p[:backup_swift_url]) + is_expected.to contain_cinder_config('DEFAULT/backup_swift_auth_url').with_value(p[:backup_swift_auth_url]) is_expected.to contain_cinder_config('DEFAULT/backup_swift_container').with_value(p[:backup_swift_container]) is_expected.to contain_cinder_config('DEFAULT/backup_swift_object_size').with_value(p[:backup_swift_object_size]) is_expected.to contain_cinder_config('DEFAULT/backup_swift_retry_attempts').with_value(p[:backup_swift_retry_attempts]) diff --git a/cinder/spec/classes/cinder_db_spec.rb b/cinder/spec/classes/cinder_db_spec.rb index 5e1fa09cb..bbf53ec02 100644 --- a/cinder/spec/classes/cinder_db_spec.rb +++ b/cinder/spec/classes/cinder_db_spec.rb @@ -16,19 +16,18 @@ context 'with specific parameters' do let :params do - { :database_connection => 'mysql://cinder:cinder@localhost/cinder', + { :database_connection => 'mysql+pymysql://cinder:cinder@localhost/cinder', :database_idle_timeout => '3601', :database_min_pool_size => '2', :database_max_retries => '11', :database_retry_interval => '11', } end - it { is_expected.to contain_cinder_config('database/connection').with_value('mysql://cinder:cinder@localhost/cinder').with_secret(true) } + it { is_expected.to contain_cinder_config('database/connection').with_value('mysql+pymysql://cinder:cinder@localhost/cinder').with_secret(true) } it { is_expected.to contain_cinder_config('database/idle_timeout').with_value('3601') } it { is_expected.to contain_cinder_config('database/min_pool_size').with_value('2') } it { is_expected.to contain_cinder_config('database/max_retries').with_value('11') } it { is_expected.to contain_cinder_config('database/retry_interval').with_value('11') } - end context 'with postgresql backend' do @@ -42,6 +41,14 @@ end + context 'with MySQL-python library as backend package' do + let :params do + { :database_connection => 'mysql://cinder:cinder@localhost/cinder', } + end + + it { is_expected.to contain_package('python-mysqldb').with(:ensure => 'present') } + end + context 'with incorrect database_connection string' do let :params do { 
:database_connection => 'redis://cinder:cinder@localhost/cinder', } @@ -50,6 +57,14 @@ it_raises 'a Puppet::Error', /validate_re/ end + context 'with incorrect pymysql database_connection string' do + let :params do + { :database_connection => 'foo+pymysql://cinder:cinder@localhost/cinder', } + end + + it_raises 'a Puppet::Error', /validate_re/ + end + end context 'on Debian platforms' do @@ -62,6 +77,20 @@ end it_configures 'cinder::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://cinder:cinder@localhost/cinder', } + end + + it 'install the proper backend package' do + is_expected.to contain_package('cinder-backend-package').with( + :ensure => 'present', + :name => 'python-pymysql', + :tag => 'openstack' + ) + end + end end context 'on Redhat platforms' do @@ -73,6 +102,14 @@ end it_configures 'cinder::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://cinder:cinder@localhost/cinder', } + end + + it { is_expected.not_to contain_package('cinder-backend-package') } + end end end diff --git a/cinder/spec/classes/cinder_qpid_spec.rb b/cinder/spec/classes/cinder_qpid_spec.rb deleted file mode 100644 index 59dae1011..000000000 --- a/cinder/spec/classes/cinder_qpid_spec.rb +++ /dev/null @@ -1,51 +0,0 @@ -require 'spec_helper' - -describe 'cinder::qpid' do - - let :facts do - {:puppetversion => '2.7', - :osfamily => 'RedHat'} - end - - describe 'with defaults' do - - it 'should contain all of the default resources' do - - is_expected.to contain_class('qpid::server').with( - :service_ensure => 'running', - :port => '5672' - ) - - end - - it 'should contain user' do - - is_expected.to contain_qpid_user('guest').with( - :password => 'guest', - :file => '/var/lib/qpidd/qpidd.sasldb', - :realm => 'OPENSTACK', - :provider => 'saslpasswd2' - ) - - end - - end - - describe 'when disabled' do - let :params do - { - :enabled => false - } - end - - it 'should be disabled' do - - is_expected.to_not contain_qpid_user('guest') - is_expected.to contain_class('qpid::server').with( - :service_ensure => 'stopped' - ) - - end - end - -end diff --git a/cinder/spec/classes/cinder_spec.rb b/cinder/spec/classes/cinder_spec.rb index e4d52abba..ba7aaacc2 100644 --- a/cinder/spec/classes/cinder_spec.rb +++ b/cinder/spec/classes/cinder_spec.rb @@ -82,101 +82,6 @@ end end - describe 'with qpid rpc supplied' do - - let :params do - { - :database_connection => 'mysql://user:password@host/database', - :qpid_password => 'guest', - :rpc_backend => 'qpid' - } - end - - it { is_expected.to contain_cinder_config('DEFAULT/rpc_backend').with_value('qpid') } - it { is_expected.to contain_cinder_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost') } - it { is_expected.to contain_cinder_config('oslo_messaging_qpid/qpid_port').with_value('5672') } - it { is_expected.to contain_cinder_config('oslo_messaging_qpid/qpid_hosts').with_value('localhost:5672') } - it { is_expected.to contain_cinder_config('oslo_messaging_qpid/qpid_username').with_value('guest') } - it { is_expected.to contain_cinder_config('oslo_messaging_qpid/qpid_password').with_value('guest').with_secret(true) } - it { is_expected.to contain_cinder_config('oslo_messaging_qpid/qpid_reconnect').with_value(true) } - it { is_expected.to contain_cinder_config('oslo_messaging_qpid/qpid_reconnect_timeout').with_value('0') } - it { is_expected.to contain_cinder_config('oslo_messaging_qpid/qpid_reconnect_limit').with_value('0') } - it { is_expected.to 
contain_cinder_config('oslo_messaging_qpid/qpid_reconnect_interval_min').with_value('0') } - it { is_expected.to contain_cinder_config('oslo_messaging_qpid/qpid_reconnect_interval_max').with_value('0') } - it { is_expected.to contain_cinder_config('oslo_messaging_qpid/qpid_reconnect_interval').with_value('0') } - it { is_expected.to contain_cinder_config('oslo_messaging_qpid/qpid_heartbeat').with_value('60') } - it { is_expected.to contain_cinder_config('oslo_messaging_qpid/qpid_protocol').with_value('tcp') } - it { is_expected.to contain_cinder_config('oslo_messaging_qpid/qpid_tcp_nodelay').with_value(true) } - end - - describe 'with modified qpid_hosts' do - let :params do - { - :database_connection => 'mysql://user:password@host/database', - :qpid_password => 'guest', - :rpc_backend => 'qpid', - :qpid_hosts => ['qpid1:5672', 'qpid2:5672'] - } - end - - it 'should contain many' do - is_expected.to contain_cinder_config('oslo_messaging_qpid/qpid_hosts').with(:value => 'qpid1:5672,qpid2:5672') - end - end - - describe 'with a single qpid_hosts entry' do - let :params do - { - :database_connection => 'mysql://user:password@host/database', - :qpid_password => 'guest', - :rpc_backend => 'qpid', - :qpid_hosts => ['qpid1:5672'] - } - end - - it 'should contain one' do - is_expected.to contain_cinder_config('oslo_messaging_qpid/qpid_hosts').with(:value => 'qpid1:5672') - end - end - - describe 'with qpid rpc and no qpid_sasl_mechanisms' do - let :params do - { - :database_connection => 'mysql://user:password@host/database', - :qpid_password => 'guest', - :rpc_backend => 'qpid' - } - end - - it { is_expected.to contain_cinder_config('DEFAULT/qpid_sasl_mechanisms').with_ensure('absent') } - end - - describe 'with qpid rpc and qpid_sasl_mechanisms string' do - let :params do - { - :database_connection => 'mysql://user:password@host/database', - :qpid_password => 'guest', - :qpid_sasl_mechanisms => 'PLAIN', - :rpc_backend => 'qpid' - } - end - - it { is_expected.to contain_cinder_config('DEFAULT/qpid_sasl_mechanisms').with_value('PLAIN') } - end - - describe 'with qpid rpc and qpid_sasl_mechanisms array' do - let :params do - { - :database_connection => 'mysql://user:password@host/database', - :qpid_password => 'guest', - :qpid_sasl_mechanisms => [ 'DIGEST-MD5', 'GSSAPI', 'PLAIN' ], - :rpc_backend => 'qpid' - } - end - - it { is_expected.to contain_cinder_config('DEFAULT/qpid_sasl_mechanisms').with_value('DIGEST-MD5 GSSAPI PLAIN') } - end - describe 'with SSL enabled with kombu' do let :params do req_params.merge!({ diff --git a/cinder/spec/defines/cinder_type_spec.rb b/cinder/spec/defines/cinder_type_spec.rb index 6fb848459..67388a82e 100644 --- a/cinder/spec/defines/cinder_type_spec.rb +++ b/cinder/spec/defines/cinder_type_spec.rb @@ -24,7 +24,7 @@ 'OS_USERNAME=admin', 'OS_PASSWORD=asdf', 'OS_AUTH_URL=http://127.127.127.1:5000/v2.0/'], - :unless => "cinder type-list | grep -qP '\\bhippo\\b'", + :unless => "cinder type-list | grep -qP '\\shippo\\s'", :tries => '2', :try_sleep => '5', :require => 'Package[python-cinderclient]') diff --git a/contrail/README.md b/contrail/README.md index e56867435..b3da49dba 100644 --- a/contrail/README.md +++ b/contrail/README.md @@ -1,6 +1,6 @@ # Puppet-contrail -[![Build Status](https://travis-ci.org/enovance/puppet-contrail.png?branch=master)](https://travis-ci.org/enovance/puppet-contrail) +[![Build Status](https://travis-ci.org/redhat-cip/puppet-contrail.png?branch=master)](https://travis-ci.org/redhat-cip/puppet-contrail) [![Puppet 
Forge](http://img.shields.io/puppetforge/v/eNovance/contrail.svg)](https://forge.puppetlabs.com/eNovance/contrail) [![License](http://img.shields.io/:license-apache2-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0.html) diff --git a/contrail/manifests/control/config.pp b/contrail/manifests/control/config.pp index c49b934d4..22013d28b 100644 --- a/contrail/manifests/control/config.pp +++ b/contrail/manifests/control/config.pp @@ -41,6 +41,16 @@ create_resources('contrail_control_config', $control_config) create_resources('contrail_control_nodemgr_config', $control_nodemgr_config) + if $forwarder { + if is_array($forwarder) { + $forwarders_option = join([join($forwarder, ';'),';'], '') + } else { + $forwarders_option = "${forwarder};" + } + } else { + $forwarders_option = '' + } + file { '/etc/contrail/dns/contrail-named.conf' : ensure => file, content => template('contrail/contrail-named.conf.erb'), diff --git a/contrail/metadata.json b/contrail/metadata.json index 5084ad44f..b43de8378 100644 --- a/contrail/metadata.json +++ b/contrail/metadata.json @@ -4,9 +4,9 @@ "author": "Yanis Guenane, Sebastien Badia, and eNovance Contributors", "summary": "Puppet module for Juniper OpenContrail", "license": "Apache-2.0", - "source": "git://github.com/enovance/puppet-{{cookiecutter.project_name}}.git", - "project_page": "https://github.com/enovance/puppet-contrail", - "issues_url": "https://github.com/enovance/puppet-contrail/issues", + "source": "git://github.com/redhat-cip/puppet-contrail.git", + "project_page": "https://github.com/redhat-cip/puppet-contrail", + "issues_url": "https://github.com/redhat-cip/puppet-contrail/issues", "description": "Installs and configures OpenContrail.", "operatingsystem_support": [ { diff --git a/contrail/templates/contrail-named.conf.erb b/contrail/templates/contrail-named.conf.erb index 4f065c553..bf6d70c08 100644 --- a/contrail/templates/contrail-named.conf.erb +++ b/contrail/templates/contrail-named.conf.erb @@ -35,5 +35,5 @@ view "_default_view_" { match-clients {any;}; match-destinations {any;}; match-recursive-only no; - forwarders {<%= @forwarder -%>; }; + forwarders { <%= @forwarders_option -%> }; }; diff --git a/elasticsearch/.fixtures.yml b/elasticsearch/.fixtures.yml index abc7962dc..d750f45cd 100644 --- a/elasticsearch/.fixtures.yml +++ b/elasticsearch/.fixtures.yml @@ -5,7 +5,7 @@ fixtures: branch: '4.7.x' apt: repo: 'https://github.com/puppetlabs/puppetlabs-apt.git' - branch: '1.8.x' + branch: '2.2.x' zypprepo: https://github.com/deadpoint/puppet-zypprepo.git yum: https://github.com/CERIT-SC/puppet-yum.git datacat: https://github.com/richardc/puppet-datacat.git diff --git a/elasticsearch/.pmtignore b/elasticsearch/.pmtignore index cc245cafa..b5e79edf0 100644 --- a/elasticsearch/.pmtignore +++ b/elasticsearch/.pmtignore @@ -1,2 +1,6 @@ spec/ Rakefile +junit/ +logs/ +Gemfile +Gemfile.lock diff --git a/elasticsearch/CHANGELOG.md b/elasticsearch/CHANGELOG.md index 3914fa878..3e1bcdb30 100644 --- a/elasticsearch/CHANGELOG.md +++ b/elasticsearch/CHANGELOG.md @@ -1,3 +1,30 @@ +##0.10.0 ( Dec 14, 2015 ) + +###Summary +Module now works with ES 2.x completely + +####Features +* Work with ES 2.x new plugin system and remain to work with 1.x +* Implemented datacat module from Richard Clamp so other modules can hook into it for adding configuration options +* Fixed init and systemd files to work with 1.x and 2.x +* Made the module work with newer pl-apt module versions +* Export es_include so it is passed on to ES +* Ability to supply long gpg key for apt 
repo + +####Bugfixes +* Documentation and typographical fixes +* Do not force puppet:/// schema resource +* Use package resource defaults rather than setting provider and source + +####Changes + +####Testing changes +* Improve unit testing and shorten the runtime + +####Known bugs +* Possible package conflicts when using ruby/python defines with main package name + + ##0.9.9 ( Sep 01, 2015 ) ###Summary diff --git a/elasticsearch/Gemfile b/elasticsearch/Gemfile index fdcc1a1d9..68c056e84 100644 --- a/elasticsearch/Gemfile +++ b/elasticsearch/Gemfile @@ -3,7 +3,7 @@ source 'https://rubygems.org' puppetversion = ENV['PUPPET_VERSION'] || '~> 3.8.0' gem 'puppet', puppetversion, :require => false -gem 'beaker', '2.27.0' +gem 'beaker' gem 'beaker-rspec' gem 'metadata-json-lint' gem 'rspec-puppet', '2.2.0' diff --git a/elasticsearch/README.md b/elasticsearch/README.md index 4f861f935..80737b620 100644 --- a/elasticsearch/README.md +++ b/elasticsearch/README.md @@ -1,7 +1,5 @@ #Elasticsearch Puppet module -[![Build Status](https://travis-ci.org/elastic/puppet-elasticsearch.png?branch=master)](https://travis-ci.org/elastic/puppet-elasticsearch) - ####Table of Contents 1. [Overview](#overview) @@ -25,7 +23,7 @@ This module manages Elasticsearch (http://www.elasticsearch.org/overview/elastic The elasticsearch module sets up Elasticsearch instances and can manage plugins and templates. -This module has been tested against all versions of ES 1.x (ES 1.0 - 1.7). **The module is not yet ready for ES 2.x, but it should be soon!** +This module has been tested against all versions of ES 1.x and 2.x ##Setup @@ -48,7 +46,7 @@ This module has been tested against all versions of ES 1.x (ES 1.0 - 1.7). **The #### Repository management When using the repository management you will need the following dependency modules: -* Debian/Ubuntu: [Puppetlabs/apt](http://forge.puppetlabs.com/puppetlabs/apt) Version 1.8.x or lower. +* Debian/Ubuntu: [Puppetlabs/apt](http://forge.puppetlabs.com/puppetlabs/apt) * OpenSuSE: [Darin/zypprepo](https://forge.puppetlabs.com/darin/zypprepo) ##Usage @@ -162,6 +160,11 @@ elasticsearch::plugin { 'elasticsearch/elasticsearch-cloud-aws/2.4.1': Please note that this does not work when you specify 'latest' as a version number. +####ES 2.x official plugins +For the Elasticsearch commercial plugins you can refer them to the simple name. + +See the [Plugin installation](https://www.elastic.co/guide/en/elasticsearch/plugins/current/installation.html) for more details. + ###Scripts Install [scripts](http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html) to be used by Elasticsearch. diff --git a/elasticsearch/lib/puppet/provider/elasticsearch_plugin/plugin.rb b/elasticsearch/lib/puppet/provider/elasticsearch_plugin/plugin.rb new file mode 100644 index 000000000..4c8bb2913 --- /dev/null +++ b/elasticsearch/lib/puppet/provider/elasticsearch_plugin/plugin.rb @@ -0,0 +1,130 @@ +$LOAD_PATH.unshift(File.join(File.dirname(__FILE__),"..","..","..")) + +Puppet::Type.type(:elasticsearch_plugin).provide(:plugin) do + desc "A provider for the resource type `elasticsearch_plugin`, + which handles plugin installation" + + commands :plugin => '/usr/share/elasticsearch/bin/plugin' + commands :es => '/usr/share/elasticsearch/bin/elasticsearch' + + def exists? 
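+    # Work out the running Elasticsearch version first, then check the plugin
+    # marker file (<plugin_dir>/<plugin name>/.name): a missing file means the
+    # plugin is absent, content that differs from the expected value removes the
+    # plugin so it can be reinstalled, and a match reports it as already present.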
+ es_version + if !File.exists?(pluginfile) + debug "Plugin file #{pluginfile} does not exist" + return false + elsif File.exists?(pluginfile) && readpluginfile != pluginfile_content + debug "Got #{readpluginfile} Expected #{pluginfile_content}. Removing for reinstall" + self.destroy + return false + else + debug "Plugin exists" + return true + end + end + + def pluginfile_content + return @resource[:name] if is1x? + + if @resource[:name].split("/").count == 1 # Official plugin + version = plugin_version(@resource[:name]) + return "#{@resource[:name]}/#{version}" + else + return @resource[:name] + end + end + + def pluginfile + File.join(@resource[:plugin_dir], plugin_name(@resource[:name]), '.name') + end + + def writepluginfile + File.open(pluginfile, 'w') do |file| + file.write pluginfile_content + end + end + + def readpluginfile + f = File.open(pluginfile) + f.readline + end + + def install1x + if !@resource[:url].nil? + commands = [ plugin_name(@resource[:name]), '--url', @resource[:url] ] + elsif !@resource[:source].nil? + commands = [ plugin_name(@resource[:name]), '--url', "file://#{@resource[:source]}" ] + else + commands = [ @resource[:name] ] + end + commands + end + + def install2x + if !@resource[:url].nil? + commands = [ @resource[:url] ] + elsif !@resource[:source].nil? + commands = [ "file://#{@resource[:source]}" ] + else + commands = [ @resource[:name] ] + end + commands + end + + def create + es_version + commands = [] + commands << @resource[:proxy_args] if @resource[:proxy_args] + commands << 'install' + commands << install1x if is1x? + commands << install2x if is2x? + + plugin(commands) + writepluginfile + end + + def destroy + plugin(['remove', @resource[:name]]) + end + + def es_version + return @es_version if @es_version + begin + version = es('-v') # ES 1.x + rescue + version = es('--version') # ES 2.x + rescue + raise "Unknown ES version. Got #{version.inspect}" + ensure + @es_version = version.scan(/\d+\.\d+\.\d+(?:\-\S+)?/).first + debug "Found ES version #{@es_version}" + end + end + + def is1x? + Puppet::Util::Package.versioncmp(@es_version, '2.0.0') < 0 + end + + def is2x? + (Puppet::Util::Package.versioncmp(@es_version, '2.0.0') >= 0) && (Puppet::Util::Package.versioncmp(@es_version, '3.0.0') < 0) + end + + def plugin_version(plugin_name) + vendor, plugin, version = plugin_name.split('/') + return @es_version if is2x? && version.nil? + return version.scan(/\d+\.\d+\.\d+(?:\-\S+)?/).first unless version.nil? + return false + end + + def plugin_name(plugin_name) + + vendor, plugin, version = plugin_name.split('/') + + endname = vendor if plugin.nil? # If its a single name plugin like the ES 2.x official plugins + endname = plugin.gsub(/(elasticsearch-|es-)/, '') unless plugin.nil? + + return endname.downcase if is2x? + return endname + + end + +end diff --git a/elasticsearch/lib/puppet/type/elasticsearch_plugin.rb b/elasticsearch/lib/puppet/type/elasticsearch_plugin.rb new file mode 100644 index 000000000..145880f28 --- /dev/null +++ b/elasticsearch/lib/puppet/type/elasticsearch_plugin.rb @@ -0,0 +1,31 @@ +Puppet::Type.newtype(:elasticsearch_plugin) do + + @doc = "Plugin installation type" + + ensurable do + defaultvalues + defaultto :present + end + + newparam(:name, :namevar => true) do + desc 'An arbitrary name used as the identity of the resource.' + end + + newparam(:url) do + desc 'Url of the package' + end + + newparam(:source) do + desc 'Source of the package. 
puppet:// or file:// resource' + end + + newparam(:proxy_args) do + desc 'Proxy Host' + end + + newparam(:plugin_dir) do + desc 'Plugin directory' + defaultto '/usr/share/elasticsearch/plugins' + end + +end diff --git a/elasticsearch/manifests/config.pp b/elasticsearch/manifests/config.pp index 405c65341..a7b5d830b 100644 --- a/elasticsearch/manifests/config.pp +++ b/elasticsearch/manifests/config.pp @@ -65,11 +65,6 @@ mode => '0755', } - file { $elasticsearch::plugindir: - ensure => 'directory', - recurse => true, - } - file { $elasticsearch::datadir: ensure => 'directory', } @@ -100,6 +95,7 @@ } } + file { "${elasticsearch::params::homedir}/templates_import": ensure => 'directory', mode => '0644', @@ -133,7 +129,13 @@ } } elsif ( $elasticsearch::ensure == 'absent' ) { - # don't remove anything for now + + file { $elasticsearch::plugindir: + ensure => 'absent', + force => true, + backup => false, + } + } } diff --git a/elasticsearch/manifests/init.pp b/elasticsearch/manifests/init.pp index 724386144..768cf4c6f 100644 --- a/elasticsearch/manifests/init.pp +++ b/elasticsearch/manifests/init.pp @@ -93,7 +93,7 @@ # # [*proxy_url*] # For http and https downloads you can set a proxy server to use -# Format: proto://[user:pass@]server[:port]/ +# Format: proto://[user:pass@]server[:port]/ # Defaults to: undef (proxy disabled) # # [*elasticsearch_user*] @@ -136,6 +136,14 @@ # [*repo_version*] # Our repositories are versioned per major version (0.90, 1.0) select here which version you want # +# [*repo_key_id*] +# String. The apt GPG key id +# Default: D88E42B4 +# +# [*repo_key_source*] +# String. URL of the apt GPG key +# Default: http://packages.elastic.co/GPG-KEY-elasticsearch +# # [*logging_config*] # Hash representation of information you want in the logging.yml file # @@ -228,6 +236,8 @@ $java_package = undef, $manage_repo = false, $repo_version = undef, + $repo_key_id = 'D88E42B4', + $repo_key_source = 'http://packages.elastic.co/GPG-KEY-elasticsearch', $logging_file = undef, $logging_config = undef, $logging_template = undef, @@ -302,13 +312,13 @@ case $::osfamily { 'RedHat', 'Linux', 'Suse': { if ($version =~ /.+-\d/) { - $real_version = $version + $pkg_version = $version } else { - $real_version = "${version}-1" + $pkg_version = "${version}-1" } } default: { - $real_version = $version + $pkg_version = $version } } } @@ -398,6 +408,7 @@ Anchor['elasticsearch::begin'] -> Class['elasticsearch::package'] -> Class['elasticsearch::config'] + -> Elasticsearch::Plugin <| |> -> Elasticsearch::Instance <| |> -> Elasticsearch::Template <| |> diff --git a/elasticsearch/manifests/instance.pp b/elasticsearch/manifests/instance.pp index 75380a1d5..f1e14fcdc 100644 --- a/elasticsearch/manifests/instance.pp +++ b/elasticsearch/manifests/instance.pp @@ -211,6 +211,7 @@ owner => $elasticsearch::elasticsearch_user, group => undef, mode => '0644', + recurse => true, require => [ Exec["mkdir_datadir_elasticsearch_${name}"], Class['elasticsearch::package'] ], before => Elasticsearch::Service[$name], } diff --git a/elasticsearch/manifests/package.pp b/elasticsearch/manifests/package.pp index f53d9ef3a..5e3caa9f5 100644 --- a/elasticsearch/manifests/package.pp +++ b/elasticsearch/manifests/package.pp @@ -33,9 +33,27 @@ #### Package management + # set params: in operation if $elasticsearch::ensure == 'present' { + # Create directory to place the package file + exec { 'create_package_dir_elasticsearch': + cwd => '/', + path => ['/usr/bin', '/bin'], + command => "mkdir -p ${elasticsearch::package_dir}", + creates => 
$elasticsearch::package_dir, + } + + file { $elasticsearch::package_dir: + ensure => 'directory', + purge => $elasticsearch::purge_package_dir, + force => $elasticsearch::purge_package_dir, + backup => false, + require => Exec['create_package_dir_elasticsearch'], + } + + # Check if we want to install a specific version or not if $elasticsearch::version == false { @@ -47,7 +65,7 @@ } else { # install specific version - $package_ensure = $elasticsearch::real_version + $package_ensure = $elasticsearch::pkg_version } @@ -61,22 +79,6 @@ $package_dir = $elasticsearch::package_dir - # Create directory to place the package file - exec { 'create_package_dir_elasticsearch': - cwd => '/', - path => ['/usr/bin', '/bin'], - command => "mkdir -p ${elasticsearch::package_dir}", - creates => $elasticsearch::package_dir, - } - - file { $package_dir: - ensure => 'directory', - purge => $elasticsearch::purge_package_dir, - force => $elasticsearch::purge_package_dir, - backup => false, - require => Exec['create_package_dir_elasticsearch'], - } - $filenameArray = split($elasticsearch::package_url, '/') $basefilename = $filenameArray[-1] @@ -160,15 +162,6 @@ } $package_ensure = 'purged' - $package_dir = $elasticsearch::package_dir - - file { $package_dir: - ensure => 'absent', - purge => true, - force => true, - backup => false, - } - } if ($elasticsearch::package_provider == 'package') { diff --git a/elasticsearch/manifests/plugin.pp b/elasticsearch/manifests/plugin.pp index 47ce52419..b515a4906 100644 --- a/elasticsearch/manifests/plugin.pp +++ b/elasticsearch/manifests/plugin.pp @@ -79,25 +79,13 @@ include elasticsearch - Exec { - path => [ '/bin', '/usr/bin', '/usr/sbin', '/usr/local/bin', '/usr/local/sbin' ], - cwd => '/', - user => $elasticsearch::elasticsearch_user, - tries => 6, - try_sleep => 10, - timeout => 600, - } - $notify_service = $elasticsearch::restart_on_change ? { false => undef, default => Elasticsearch::Service[$instances], } if ($module_dir != undef) { - warning("module_dir settings is deprecated for plugin ${name}. The directory is now auto detected.") - $plugin_dir = $module_dir - } else { - $plugin_dir = plugin_dir($name) + warning("module_dir settings is deprecated for plugin ${name}. 
The directory is now auto detected.") } # set proxy by override or parse and use proxy_url from @@ -116,7 +104,7 @@ } } else { - $proxy = '' # lint:ignore:empty_string_assignment + $proxy = undef } if ($source != undef) { @@ -124,53 +112,32 @@ $filenameArray = split($source, '/') $basefilename = $filenameArray[-1] - file { "/tmp/${basefilename}": + $file_source = "${elasticsearch::package_dir}/${basefilename}" + + file { $file_source: ensure => 'file', source => $source, } - $real_url = "file:///tmp/${basefilename}" } elsif ($url != undef) { validate_string($url) - $real_url = $url - } else { - $real_url = undef - } - - if ($real_url == undef) { - $install_cmd = "${elasticsearch::plugintool}${proxy} install ${name}" - $exec_rets = [0,] - } else { - $install_cmd = "${elasticsearch::plugintool}${proxy} install ${name} --url ${real_url}" - $exec_rets = [0,1] } case $ensure { 'installed', 'present': { - $name_file_path = "${elasticsearch::plugindir}/${plugin_dir}/.name" - exec {"purge_plugin_${plugin_dir}_old": - command => "${elasticsearch::plugintool} --remove ${plugin_dir}", - onlyif => "test -e ${elasticsearch::plugindir}/${plugin_dir} && test \"$(cat ${name_file_path})\" != '${name}'", - before => Exec["install_plugin_${name}"], - } - exec {"install_plugin_${name}": - command => $install_cmd, - creates => "${elasticsearch::plugindir}/${plugin_dir}", - returns => $exec_rets, - notify => $notify_service, - require => File[$elasticsearch::plugindir], - } - file {$name_file_path: - ensure => file, - content => "${name}", # lint:ignore:only_variable_string - require => Exec["install_plugin_${name}"], + + elasticsearch_plugin { $name: + ensure => 'present', + source => $file_source, + url => $url, + proxy_args => $proxy, + notify => $notify_service, } + } 'absent': { - exec {"remove_plugin_${name}": - command => "${elasticsearch::plugintool} --remove ${plugin_dir}", - onlyif => "test -d ${elasticsearch::plugindir}/${plugin_dir}", - notify => $notify_service, + elasticsearch_plugin { $name: + ensure => absent, } } default: { diff --git a/elasticsearch/manifests/repo.pp b/elasticsearch/manifests/repo.pp index e2ab6ccb3..36cd549ef 100644 --- a/elasticsearch/manifests/repo.pp +++ b/elasticsearch/manifests/repo.pp @@ -39,8 +39,8 @@ location => "http://packages.elastic.co/elasticsearch/${elasticsearch::repo_version}/debian", release => 'stable', repos => 'main', - key => 'D88E42B4', - key_source => 'http://packages.elastic.co/GPG-KEY-elasticsearch', + key => $::elasticsearch::repo_key_id, + key_source => $::elasticsearch::repo_key_source, include_src => false, } } @@ -49,14 +49,14 @@ descr => 'elasticsearch repo', baseurl => "http://packages.elastic.co/elasticsearch/${elasticsearch::repo_version}/centos", gpgcheck => 1, - gpgkey => 'http://packages.elastic.co/GPG-KEY-elasticsearch', + gpgkey => $::elasticsearch::repo_key_source, enabled => 1, } } 'Suse': { exec { 'elasticsearch_suse_import_gpg': - command => 'rpmkeys --import http://packages.elastic.co/GPG-KEY-elasticsearch', - unless => 'test $(rpm -qa gpg-pubkey | grep -i "D88E42B4" | wc -l) -eq 1 ', + command => "rpmkeys --import ${::elasticsearch::repo_key_source}", + unless => "test $(rpm -qa gpg-pubkey | grep -i '${::elasticsearch::repo_key_id}' | wc -l) -eq 1 ", notify => [ Zypprepo['elasticsearch'] ], } @@ -66,7 +66,7 @@ autorefresh => 1, name => 'elasticsearch', gpgcheck => 1, - gpgkey => 'http://packages.elastic.co/GPG-KEY-elasticsearch', + gpgkey => $::elasticsearch::repo_key_source, type => 'yum', } } @@ -76,30 +76,31 @@ } # Package 
pinning - if ($elasticsearch::package_pin == true and $elasticsearch::version != false) { + case $::osfamily { 'Debian': { - if !defined(Class['apt']) { - class { 'apt': } - } + include ::apt - apt::pin { $elasticsearch::package_name: - ensure => 'present', - packages => $elasticsearch::package_name, - version => $elasticsearch::real_version, - priority => 1000, + if ($elasticsearch::package_pin == true and $elasticsearch::version != false) { + apt::pin { $elasticsearch::package_name: + ensure => 'present', + packages => $elasticsearch::package_name, + version => $elasticsearch::version, + priority => 1000, + } } + } 'RedHat', 'Linux': { - yum::versionlock { "0:elasticsearch-${elasticsearch::real_version}.noarch": - ensure => 'present', + if ($elasticsearch::package_pin == true and $elasticsearch::version != false) { + yum::versionlock { "0:elasticsearch-${elasticsearch::pkg_version}.noarch": + ensure => 'present', + } } } default: { warning("Unable to pin package for OSfamily \"${::osfamily}\".") } } - } - } diff --git a/elasticsearch/metadata.json b/elasticsearch/metadata.json index a13025ff4..0b76368e4 100644 --- a/elasticsearch/metadata.json +++ b/elasticsearch/metadata.json @@ -1,6 +1,6 @@ { "name": "elasticsearch-elasticsearch", - "version": "0.9.9", + "version": "0.10.0", "source": "https://github.com/elastic/puppet-elasticsearch", "author": "elasticsearch", "license": "Apache-2.0", diff --git a/elasticsearch/spec/acceptance/004_plugin_spec.rb b/elasticsearch/spec/acceptance/004_plugin_spec.rb index 19d79ab3d..be5f33387 100644 --- a/elasticsearch/spec/acceptance/004_plugin_spec.rb +++ b/elasticsearch/spec/acceptance/004_plugin_spec.rb @@ -251,4 +251,49 @@ end + describe "install via url" do + it 'Should run succesful' do + pp = "class { 'elasticsearch': config => { 'node.name' => 'elasticsearch001', 'cluster.name' => '#{test_settings['cluster_name']}' }, manage_repo => true, repo_version => '#{test_settings['repo_version']}', java_install => true } + elasticsearch::instance { 'es-01': config => { 'node.name' => 'elasticsearch001', 'http.port' => '#{test_settings['port_a']}' } } + elasticsearch::plugin{'HQ': url => 'https://github.com/royrusso/elasticsearch-HQ/archive/v2.0.3.zip', instances => 'es-01' } + " + + # Run it twice and test for idempotency + apply_manifest(pp, :catch_failures => true) + expect(apply_manifest(pp, :catch_failures => true).exit_code).to be_zero + + end + + it 'make sure elasticsearch reports it as existing' do + curl_with_retries('validated plugin as installed', default, "http://localhost:#{test_settings['port_a']}/_nodes/?plugin | grep HQ", 0) + end + + end + + describe "module removal" do + + it 'should run successfully' do + pp = "class { 'elasticsearch': ensure => 'absent' } + elasticsearch::instance{ 'es-01': ensure => 'absent' } + " + + apply_manifest(pp, :catch_failures => true) + end + + describe file('/etc/elasticsearch/es-01') do + it { should_not be_directory } + end + + describe package(test_settings['package_name']) do + it { should_not be_installed } + end + + describe service(test_settings['service_name_a']) do + it { should_not be_enabled } + it { should_not be_running } + end + + end + + end diff --git a/elasticsearch/spec/acceptance/005_datapath_spec.rb b/elasticsearch/spec/acceptance/009_datapath_spec.rb similarity index 100% rename from elasticsearch/spec/acceptance/005_datapath_spec.rb rename to elasticsearch/spec/acceptance/009_datapath_spec.rb diff --git a/elasticsearch/spec/acceptance/021_es2x_spec.rb 
b/elasticsearch/spec/acceptance/021_es2x_spec.rb new file mode 100644 index 000000000..f119facaf --- /dev/null +++ b/elasticsearch/spec/acceptance/021_es2x_spec.rb @@ -0,0 +1,351 @@ +require 'spec_helper_acceptance' + +describe "elasticsearch 2x:" do + + shell("mkdir -p #{default['distmoduledir']}/another/files") + shell("cp /tmp/elasticsearch-kopf.zip #{default['distmoduledir']}/another/files/elasticsearch-kopf.zip") + + describe "Install a plugin from official repository" do + + it 'should run successfully' do + pp = "class { 'elasticsearch': config => { 'node.name' => 'elasticsearch001', 'cluster.name' => '#{test_settings['cluster_name']}' }, manage_repo => true, repo_version => '#{test_settings['repo_version2x']}', java_install => true, version => '2.0.0' } + elasticsearch::instance { 'es-01': config => { 'node.name' => 'elasticsearch001', 'http.port' => '#{test_settings['port_a']}' } } + elasticsearch::plugin{'lmenezes/elasticsearch-kopf': instances => 'es-01' } + " + + # Run it twice and test for idempotency + apply_manifest(pp, :catch_failures => true) + expect(apply_manifest(pp, :catch_failures => true).exit_code).to be_zero + end + + describe service(test_settings['service_name_a']) do + it { should be_enabled } + it { should be_running } + end + + describe package(test_settings['package_name']) do + it { should be_installed } + end + + describe file(test_settings['pid_file_a']) do + it { should be_file } + its(:content) { should match /[0-9]+/ } + end + + it 'make sure the directory exists' do + shell('ls /usr/share/elasticsearch/plugins/kopf/', {:acceptable_exit_codes => 0}) + end + + it 'make sure elasticsearch reports it as existing' do + curl_with_retries('validated plugin as installed', default, "http://localhost:#{test_settings['port_a']}/_nodes/?plugin | grep kopf", 0) + end + + end + describe "Install a plugin from custom git repo" do + it 'should run successfully' do + end + + it 'make sure the directory exists' do + end + + it 'make sure elasticsearch reports it as existing' do + end + + end + + if fact('puppetversion') =~ /3\.[2-9]\./ + + describe "Install a non existing plugin" do + + it 'should run successfully' do + pp = "class { 'elasticsearch': config => { 'node.name' => 'elasticearch001', 'cluster.name' => '#{test_settings['cluster_name']}' }, manage_repo => true, repo_version => '#{test_settings['repo_version2x']}', java_install => true, version => '2.0.0' } + elasticsearch::instance { 'es-01': config => { 'node.name' => 'elasticsearch001', 'http.port' => '#{test_settings['port_a']}' } } + elasticsearch::plugin{'elasticsearch/non-existing': module_dir => 'non-existing', instances => 'es-01' } + " + # Run it twice and test for idempotency + apply_manifest(pp, :expect_failures => true) + end + + end + + else + # The exit codes have changes since Puppet 3.2x + # Since beaker expectations are based on the most recent puppet code All runs on previous versions fails. 
+ end + + describe "module removal" do + + it 'should run successfully' do + pp = "class { 'elasticsearch': ensure => 'absent' } + elasticsearch::instance{ 'es-01': ensure => 'absent' } + " + + apply_manifest(pp, :catch_failures => true) + end + + describe file('/etc/elasticsearch/es-01') do + it { should_not be_directory } + end + + describe package(test_settings['package_name']) do + it { should_not be_installed } + end + + describe service(test_settings['service_name_a']) do + it { should_not be_enabled } + it { should_not be_running } + end + + end + + + describe "install plugin while running ES under user 'elasticsearch'" do + + it 'should run successfully' do + pp = "class { 'elasticsearch': config => { 'node.name' => 'elasticsearch001', 'cluster.name' => '#{test_settings['cluster_name']}' }, manage_repo => true, repo_version => '#{test_settings['repo_version2x']}', java_install => true, elasticsearch_user => 'elasticsearch', elasticsearch_group => 'elasticsearch', version => '2.0.0' } + elasticsearch::instance { 'es-01': config => { 'node.name' => 'elasticsearch001', 'http.port' => '#{test_settings['port_a']}' } } + elasticsearch::plugin{'lmenezes/elasticsearch-kopf': module_dir => 'kopf', instances => 'es-01' } + " + + # Run it twice and test for idempotency + apply_manifest(pp, :catch_failures => true) + expect(apply_manifest(pp, :catch_failures => true).exit_code).to be_zero + end + + describe service(test_settings['service_name_a']) do + it { should be_enabled } + it { should be_running } + end + + describe package(test_settings['package_name']) do + it { should be_installed } + end + + describe file(test_settings['pid_file_a']) do + it { should be_file } + its(:content) { should match /[0-9]+/ } + end + + it 'make sure the directory exists' do + shell('ls /usr/share/elasticsearch/plugins/kopf/', {:acceptable_exit_codes => 0}) + end + + it 'make sure elasticsearch reports it as existing' do + curl_with_retries('validated plugin as installed', default, "http://localhost:#{test_settings['port_a']}/_nodes/?plugin | grep kopf", 0) + end + + end + + + describe "module removal" do + + it 'should run successfully' do + pp = "class { 'elasticsearch': ensure => 'absent' } + elasticsearch::instance{ 'es-01': ensure => 'absent' } + " + + apply_manifest(pp, :catch_failures => true) + end + + describe file('/etc/elasticsearch/es-01') do + it { should_not be_directory } + end + + describe package(test_settings['package_name']) do + it { should_not be_installed } + end + + describe service(test_settings['service_name_a']) do + it { should_not be_enabled } + it { should_not be_running } + end + + end + + describe 'upgrading', :upgrade => true do + + describe 'Setup 2.0.0' do + it 'should run successful' do + pp = "class { 'elasticsearch': config => { 'node.name' => 'elasticsearch001', 'cluster.name' => '#{test_settings['cluster_name']}' }, manage_repo => true, repo_version => '#{test_settings['repo_version2x']}', java_install => true, version => '2.0.0' } + elasticsearch::instance { 'es-01': config => { 'node.name' => 'elasticsearch001', 'http.port' => '#{test_settings['port_a']}' } } + elasticsearch::plugin{'cloud-aws': instances => 'es-01' } + " + + # Run it twice and test for idempotency + apply_manifest(pp, :catch_failures => true) + expect(apply_manifest(pp, :catch_failures => true).exit_code).to be_zero + + end + + it 'make sure the directory exists' do + shell('ls /usr/share/elasticsearch/plugins/cloud-aws/', {:acceptable_exit_codes => 0}) + end + + it 'make sure elasticsearch reports it 
as existing' do + curl_with_retries('validated plugin as installed', default, "http://localhost:#{test_settings['port_a']}/_nodes/?plugin | grep cloud-aws | grep 2.0.0", 0) + end + + end + + describe "Upgrade to 2.0.1" do + it 'Should run succesful' do + pp = "class { 'elasticsearch': config => { 'node.name' => 'elasticsearch001', 'cluster.name' => '#{test_settings['cluster_name']}' }, manage_repo => true, repo_version => '#{test_settings['repo_version2x']}', java_install => true, version => '2.0.1' } + elasticsearch::instance { 'es-01': config => { 'node.name' => 'elasticsearch001', 'http.port' => '#{test_settings['port_a']}' } } + elasticsearch::plugin{'cloud-aws': instances => 'es-01' } + " + + # Run it twice and test for idempotency + apply_manifest(pp, :catch_failures => true) + expect(apply_manifest(pp, :catch_failures => true).exit_code).to be_zero + + end + + it 'make sure the directory exists' do + shell('ls /usr/share/elasticsearch/plugins/cloud-aws/', {:acceptable_exit_codes => 0}) + end + + it 'make sure elasticsearch reports it as existing' do + curl_with_retries('validated plugin as installed', default, "http://localhost:#{test_settings['port_a']}/_nodes/?plugin | grep cloud-aws | grep 2.0.1", 0) + end + end + + end + + describe "offline install via puppet resource" do + it 'Should run succesful' do + pp = "class { 'elasticsearch': config => { 'node.name' => 'elasticsearch001', 'cluster.name' => '#{test_settings['cluster_name']}' }, manage_repo => true, repo_version => '#{test_settings['repo_version2x']}', java_install => true } + elasticsearch::instance { 'es-01': config => { 'node.name' => 'elasticsearch001', 'http.port' => '#{test_settings['port_a']}' } } + elasticsearch::plugin{'kopf': source => 'puppet:///modules/another/elasticsearch-kopf.zip', instances => 'es-01' } + " + + # Run it twice and test for idempotency + apply_manifest(pp, :catch_failures => true) + expect(apply_manifest(pp, :catch_failures => true).exit_code).to be_zero + + end + + it 'make sure elasticsearch reports it as existing' do + curl_with_retries('validated plugin as installed', default, "http://localhost:#{test_settings['port_a']}/_nodes/?plugin | grep kopf", 0) + end + + end + + describe "module removal" do + + it 'should run successfully' do + pp = "class { 'elasticsearch': ensure => 'absent' } + elasticsearch::instance{ 'es-01': ensure => 'absent' } + file { '/usr/share/elasticsearch/plugin': + ensure => 'absent', + force => true, + recurse => true, + } + " + + apply_manifest(pp, :catch_failures => true) + end + + describe file('/etc/elasticsearch/es-01') do + it { should_not be_directory } + end + + describe package(test_settings['package_name']) do + it { should_not be_installed } + end + + describe service(test_settings['service_name_a']) do + it { should_not be_enabled } + it { should_not be_running } + end + + end + + describe "offline install via file url" do + it 'Should run succesful' do + pp = "class { 'elasticsearch': config => { 'node.name' => 'elasticsearch001', 'cluster.name' => '#{test_settings['cluster_name']}' }, manage_repo => true, repo_version => '#{test_settings['repo_version2x']}', java_install => true } + elasticsearch::instance { 'es-01': config => { 'node.name' => 'elasticsearch001', 'http.port' => '#{test_settings['port_a']}' } } + elasticsearch::plugin{'kopf': url => 'file:///tmp/elasticsearch-kopf.zip', instances => 'es-01' } + " + + # Run it twice and test for idempotency + apply_manifest(pp, :catch_failures => true) + expect(apply_manifest(pp, :catch_failures => 
true).exit_code).to be_zero + + end + + it 'make sure elasticsearch reports it as existing' do + curl_with_retries('validated plugin as installed', default, "http://localhost:#{test_settings['port_a']}/_nodes/?plugin | grep kopf", 0) + end + + end + + describe "module removal" do + + it 'should run successfully' do + pp = "class { 'elasticsearch': ensure => 'absent' } + elasticsearch::instance{ 'es-01': ensure => 'absent' } + " + + apply_manifest(pp, :catch_failures => true) + end + + describe file('/etc/elasticsearch/es-01') do + it { should_not be_directory } + end + + describe package(test_settings['package_name']) do + it { should_not be_installed } + end + + describe service(test_settings['service_name_a']) do + it { should_not be_enabled } + it { should_not be_running } + end + + end + + describe "install via url" do + it 'Should run succesful' do + pp = "class { 'elasticsearch': config => { 'node.name' => 'elasticsearch001', 'cluster.name' => '#{test_settings['cluster_name']}' }, manage_repo => true, repo_version => '#{test_settings['repo_version2x']}', java_install => true } + elasticsearch::instance { 'es-01': config => { 'node.name' => 'elasticsearch001', 'http.port' => '#{test_settings['port_a']}' } } + elasticsearch::plugin{'HQ': url => 'https://github.com/royrusso/elasticsearch-HQ/archive/v2.0.3.zip', instances => 'es-01' } + " + + # Run it twice and test for idempotency + apply_manifest(pp, :catch_failures => true) + expect(apply_manifest(pp, :catch_failures => true).exit_code).to be_zero + + end + + it 'make sure elasticsearch reports it as existing' do + curl_with_retries('validated plugin as installed', default, "http://localhost:#{test_settings['port_a']}/_nodes/?plugin | grep hq", 0) + end + + end + + describe "module removal" do + + it 'should run successfully' do + pp = "class { 'elasticsearch': ensure => 'absent' } + elasticsearch::instance{ 'es-01': ensure => 'absent' } + " + + apply_manifest(pp, :catch_failures => true) + end + + describe file('/etc/elasticsearch/es-01') do + it { should_not be_directory } + end + + describe package(test_settings['package_name']) do + it { should_not be_installed } + end + + describe service(test_settings['service_name_a']) do + it { should_not be_enabled } + it { should_not be_running } + end + + end + +end diff --git a/elasticsearch/spec/acceptance/xplugins001.rb b/elasticsearch/spec/acceptance/xplugins001.rb new file mode 100644 index 000000000..8618101b3 --- /dev/null +++ b/elasticsearch/spec/acceptance/xplugins001.rb @@ -0,0 +1,91 @@ +require 'spec_helper_acceptance' + +describe "Integration testing" do + + describe "Setup Elasticsearch" do + + it 'should run successfully' do + pp = "class { 'elasticsearch': config => { 'cluster.name' => '#{test_settings['cluster_name']}'}, java_install => true, package_url => '#{test_settings['snapshot_package']}' } + elasticsearch::instance { 'es-01': config => { 'node.name' => 'elasticsearch001', 'http.port' => '#{test_settings['port_a']}' } } + " + + # Run it twice and test for idempotency + apply_manifest(pp, :catch_failures => true) + expect(apply_manifest(pp, :catch_failures => true).exit_code).to be_zero + end + + + describe service(test_settings['service_name_a']) do + it { should be_enabled } + it { should be_running } + end + + describe package(test_settings['package_name']) do + it { should be_installed } + end + + describe file(test_settings['pid_file_a']) do + it { should be_file } + its(:content) { should match /[0-9]+/ } + end + + describe "Elasticsearch serves requests on" do + 
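+      # curl_with_retries (provided by the acceptance spec helpers) keeps retrying
+      # the HTTP check on the instance port until it succeeds, so a slow startup
+      # does not fail this example outright.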
it { + curl_with_retries("check ES on #{test_settings['port_a']}", default, "http://localhost:#{test_settings['port_a']}/?pretty=true", 0) + } + end + + describe file('/etc/elasticsearch/es-01/elasticsearch.yml') do + it { should be_file } + it { should contain 'name: elasticsearch001' } + end + + describe file('/usr/share/elasticsearch/templates_import') do + it { should be_directory } + end + + end + + describe "Plugin tests" do + + describe "Install a plugin from official repository" do + + it 'should run successfully' do + pp = "class { 'elasticsearch': config => { 'cluster.name' => '#{test_settings['cluster_name']}'}, java_install => true, package_url => '#{test_settings['snapshot_package']}' } + elasticsearch::instance { 'es-01': config => { 'node.name' => 'elasticsearch001', 'http.port' => '#{test_settings['port_a']}' } } + elasticsearch::plugin{'#{ENV['LICENSE_PLUGIN_NAME']}': instances => 'es-01', url => '#{ENV['LICENSE_PLUGIN_URL']}' } + elasticsearch::plugin{'#{ENV['PLUGIN_NAME']}': instances => 'es-01', url => '#{ENV['PLUGIN_URL']}' } + " + + # Run it twice and test for idempotency + apply_manifest(pp, :catch_failures => true) + expect(apply_manifest(pp, :catch_failures => true).exit_code).to be_zero + end + + describe service(test_settings['service_name_a']) do + it { should be_enabled } + it { should be_running } + end + + describe package(test_settings['package_name']) do + it { should be_installed } + end + + describe file(test_settings['pid_file_a']) do + it { should be_file } + its(:content) { should match /[0-9]+/ } + end + + it 'make sure the directory exists' do + shell("ls /usr/share/elasticsearch/plugins/#{ENV['PLUGIN_NAME']}", {:acceptable_exit_codes => 0}) + end + + it 'make sure elasticsearch reports it as existing' do + curl_with_retries('validated plugin as installed', default, "http://localhost:#{test_settings['port_a']}/_nodes/?plugin | grep #{ENV['PLUGIN_NAME']}", 0) + end + + end + + end + +end diff --git a/elasticsearch/spec/classes/000_elasticsearch_init_spec.rb b/elasticsearch/spec/classes/000_elasticsearch_init_spec.rb index a0c16492c..019ebc18d 100644 --- a/elasticsearch/spec/classes/000_elasticsearch_init_spec.rb +++ b/elasticsearch/spec/classes/000_elasticsearch_init_spec.rb @@ -62,7 +62,7 @@ it { should contain_file('/usr/share/elasticsearch/scripts') } it { should contain_file('/usr/share/elasticsearch') } it { should contain_file('/usr/share/elasticsearch/lib') } - it { should contain_file('/usr/share/elasticsearch/plugins') } + # it { should contain_file('/usr/share/elasticsearch/plugins') } it { should contain_file('/usr/share/elasticsearch/bin').with(:mode => '0755') } it { should contain_augeas("#{defaults_path}/elasticsearch") } @@ -245,6 +245,7 @@ } it { should contain_package('elasticsearch').with(:ensure => 'purged') } + it { should contain_file('/usr/share/elasticsearch/plugins').with(:ensure => 'absent') } end @@ -293,7 +294,7 @@ it { should contain_file('/etc/elasticsearch').with(:owner => 'myesuser', :group => 'myesgroup') } it { should contain_file('/var/log/elasticsearch').with(:owner => 'myesuser') } it { should contain_file('/usr/share/elasticsearch').with(:owner => 'myesuser', :group => 'myesgroup') } - it { should contain_file('/usr/share/elasticsearch/plugins').with(:owner => 'myesuser', :group => 'myesgroup') } + # it { should contain_file('/usr/share/elasticsearch/plugins').with(:owner => 'myesuser', :group => 'myesgroup') } it { should contain_file('/usr/share/elasticsearch/data').with(:owner => 'myesuser', :group => 
'myesgroup') } it { should contain_file('/var/run/elasticsearch').with(:owner => 'myesuser') } if facts[:osfamily] == 'RedHat' end diff --git a/elasticsearch/spec/classes/001_hiera_spec.rb b/elasticsearch/spec/classes/001_hiera_spec.rb index 181907cb6..00025c9af 100644 --- a/elasticsearch/spec/classes/001_hiera_spec.rb +++ b/elasticsearch/spec/classes/001_hiera_spec.rb @@ -119,10 +119,7 @@ } it { should contain_elasticsearch__plugin('mobz/elasticsearch-head/1.0.0').with(:ensure => 'present', :module_dir => 'head', :instances => ['es-01'] ) } - it { should contain_exec('install_plugin_mobz/elasticsearch-head/1.0.0').with(:command => '/usr/share/elasticsearch/bin/plugin install mobz/elasticsearch-head/1.0.0', :creates => '/usr/share/elasticsearch/plugins/head').that_notifies('Elasticsearch::Service[es-01]') } - it { should contain_file('/usr/share/elasticsearch/plugins/head/.name').with(:content => 'mobz/elasticsearch-head/1.0.0') } - it { should contain_exec('purge_plugin_head_old').with(:onlyif => "test -e /usr/share/elasticsearch/plugins/head && test \"$(cat /usr/share/elasticsearch/plugins/head/.name)\" != 'mobz/elasticsearch-head/1.0.0'", :command => '/usr/share/elasticsearch/bin/plugin --remove head').that_comes_before('Exec[install_plugin_mobz/elasticsearch-head/1.0.0]') } - + it { should contain_elasticsearch_plugin('mobz/elasticsearch-head/1.0.0') } end diff --git a/elasticsearch/spec/classes/005_elasticsearch_repo_spec.rb b/elasticsearch/spec/classes/005_elasticsearch_repo_spec.rb index 9cf543776..1b34da534 100644 --- a/elasticsearch/spec/classes/005_elasticsearch_repo_spec.rb +++ b/elasticsearch/spec/classes/005_elasticsearch_repo_spec.rb @@ -85,7 +85,59 @@ end end - + + context "Override repo key ID" do + + let :params do + default_params.merge({ + :repo_key_id => '46095ACC8548582C1A2699A9D27D666CD88E42B4' + }) + end + + case facts[:osfamily] + when 'Debian' + context 'has override apt key' do + it { is_expected.to contain_apt__source('elasticsearch').with({ + :key => '46095ACC8548582C1A2699A9D27D666CD88E42B4', + })} + end + when 'Suse' + context 'has override yum key' do + it { is_expected.to contain_exec('elasticsearch_suse_import_gpg').with({ + :unless => "test $(rpm -qa gpg-pubkey | grep -i '46095ACC8548582C1A2699A9D27D666CD88E42B4' | wc -l) -eq 1 ", + })} + end + end + + end + + context "Override repo source URL" do + + let :params do + default_params.merge({ + :repo_key_source => 'https://packages.elasticsearch.org/GPG-KEY-elasticsearch' + }) + end + + case facts[:osfamily] + when 'Debian' + context 'has override apt key source' do + it { is_expected.to contain_apt__source('elasticsearch').with({ + :key_source => 'https://packages.elasticsearch.org/GPG-KEY-elasticsearch', + })} + end + when 'RedHat' + context 'has override yum key source' do + it { should contain_yumrepo('elasticsearch').with(:gpgkey => 'https://packages.elasticsearch.org/GPG-KEY-elasticsearch') } + end + when 'Suse' + context 'has override yum key source' do + it { should contain_exec('elasticsearch_suse_import_gpg').with(:command => 'rpmkeys --import https://packages.elasticsearch.org/GPG-KEY-elasticsearch') } + end + end + + end + end end end diff --git a/elasticsearch/spec/defines/004_elasticsearch_plugin_spec.rb b/elasticsearch/spec/defines/004_elasticsearch_plugin_spec.rb index 337362f84..1438d5c56 100644 --- a/elasticsearch/spec/defines/004_elasticsearch_plugin_spec.rb +++ b/elasticsearch/spec/defines/004_elasticsearch_plugin_spec.rb @@ -24,9 +24,7 @@ } end it { should 
contain_elasticsearch__plugin('mobz/elasticsearch-head/1.0.0') } - it { should contain_exec('install_plugin_mobz/elasticsearch-head/1.0.0').with(:command => '/usr/share/elasticsearch/bin/plugin install mobz/elasticsearch-head/1.0.0', :creates => '/usr/share/elasticsearch/plugins/head', :notify => 'Elasticsearch::Service[es-01]') } - it { should contain_file('/usr/share/elasticsearch/plugins/head/.name').with(:content => 'mobz/elasticsearch-head/1.0.0') } - it { should contain_exec('purge_plugin_head_old').with(:onlyif => "test -e /usr/share/elasticsearch/plugins/head && test \"$(cat /usr/share/elasticsearch/plugins/head/.name)\" != 'mobz/elasticsearch-head/1.0.0'", :command => '/usr/share/elasticsearch/bin/plugin --remove head', :before => 'Exec[install_plugin_mobz/elasticsearch-head/1.0.0]') } + it { should contain_elasticsearch_plugin('mobz/elasticsearch-head/1.0.0') } end context "Remove a plugin" do @@ -38,71 +36,11 @@ } end it { should contain_elasticsearch__plugin('mobz/elasticsearch-head/1.0.0') } - it { should contain_exec('remove_plugin_mobz/elasticsearch-head/1.0.0').with(:command => '/usr/share/elasticsearch/bin/plugin --remove head', :onlyif => 'test -d /usr/share/elasticsearch/plugins/head', :notify => 'Elasticsearch::Service[es-01]') } + it { should contain_elasticsearch_plugin('mobz/elasticsearch-head/1.0.0').with(:ensure => 'absent') } end end - context 'with auto path' do - - context "Add a plugin" do - - let :params do { - :ensure => 'present', - :instances => 'es-01' - } end - - it { should contain_elasticsearch__plugin('mobz/elasticsearch-head/1.0.0') } - it { should contain_exec('install_plugin_mobz/elasticsearch-head/1.0.0').with(:command => '/usr/share/elasticsearch/bin/plugin install mobz/elasticsearch-head/1.0.0', :creates => '/usr/share/elasticsearch/plugins/head', :notify => 'Elasticsearch::Service[es-01]') } - it { should contain_file('/usr/share/elasticsearch/plugins/head/.name').with(:content => 'mobz/elasticsearch-head/1.0.0') } - it { should contain_exec('purge_plugin_head_old').with(:onlyif => "test -e /usr/share/elasticsearch/plugins/head && test \"$(cat /usr/share/elasticsearch/plugins/head/.name)\" != 'mobz/elasticsearch-head/1.0.0'", :command => '/usr/share/elasticsearch/bin/plugin --remove head', :before => 'Exec[install_plugin_mobz/elasticsearch-head/1.0.0]') } - end - - context "Remove a plugin" do - - let :params do { - :ensure => 'absent', - :instances => 'es-01' - } end - - it { should contain_elasticsearch__plugin('mobz/elasticsearch-head/1.0.0') } - it { should contain_exec('remove_plugin_mobz/elasticsearch-head/1.0.0').with(:command => '/usr/share/elasticsearch/bin/plugin --remove head', :onlyif => 'test -d /usr/share/elasticsearch/plugins/head', :notify => 'Elasticsearch::Service[es-01]') } - end - - end - - context "Use a proxy" do - - let :params do { - :ensure => 'present', - :module_dir => 'head', - :instances => 'es-01', - :proxy_host => 'my.proxy.com', - :proxy_port => 3128 - } end - - it { should contain_elasticsearch__plugin('mobz/elasticsearch-head/1.0.0') } - it { should contain_exec('install_plugin_mobz/elasticsearch-head/1.0.0').with(:command => '/usr/share/elasticsearch/bin/plugin -DproxyPort=3128 -DproxyHost=my.proxy.com install mobz/elasticsearch-head/1.0.0', :creates => '/usr/share/elasticsearch/plugins/head', :notify => 'Elasticsearch::Service[es-01]') } - it { should contain_file('/usr/share/elasticsearch/plugins/head/.name').with(:content => 'mobz/elasticsearch-head/1.0.0') } - it { should 
contain_exec('purge_plugin_head_old').with(:onlyif => "test -e /usr/share/elasticsearch/plugins/head && test \"$(cat /usr/share/elasticsearch/plugins/head/.name)\" != 'mobz/elasticsearch-head/1.0.0'", :command => '/usr/share/elasticsearch/bin/plugin --remove head', :before => 'Exec[install_plugin_mobz/elasticsearch-head/1.0.0]') } - - end - - context "Use a proxy from elasticsearch::proxy_url" do - - let(:pre_condition) { 'class {"elasticsearch": config => { "node" => {"name" => "test" }}, proxy_url => "http://localhost:8080/"}'} - - let :params do { - :ensure => 'present', - :module_dir => 'head', - :instances => 'es-01', - } end - - it { should contain_elasticsearch__plugin('mobz/elasticsearch-head/1.0.0') } - it { should contain_exec('install_plugin_mobz/elasticsearch-head/1.0.0').with(:command => '/usr/share/elasticsearch/bin/plugin -DproxyPort=8080 -DproxyHost=localhost install mobz/elasticsearch-head/1.0.0', :creates => '/usr/share/elasticsearch/plugins/head', :notify => 'Elasticsearch::Service[es-01]') } - - end - context 'with url' do context "Add a plugin with full name" do @@ -114,39 +52,7 @@ } end it { should contain_elasticsearch__plugin('mobz/elasticsearch-head/1.0.0') } - it { should contain_exec('install_plugin_mobz/elasticsearch-head/1.0.0').with(:command => '/usr/share/elasticsearch/bin/plugin install mobz/elasticsearch-head/1.0.0 --url https://github.com/mobz/elasticsearch-head/archive/master.zip', :creates => '/usr/share/elasticsearch/plugins/head', :notify => 'Elasticsearch::Service[es-01]') } - it { should contain_file('/usr/share/elasticsearch/plugins/head/.name').with(:content => 'mobz/elasticsearch-head/1.0.0') } - it { should contain_exec('purge_plugin_head_old').with(:onlyif => "test -e /usr/share/elasticsearch/plugins/head && test \"$(cat /usr/share/elasticsearch/plugins/head/.name)\" != 'mobz/elasticsearch-head/1.0.0'", :command => '/usr/share/elasticsearch/bin/plugin --remove head', :before => 'Exec[install_plugin_mobz/elasticsearch-head/1.0.0]') } - end - - context "Add a plugin with long name and module_dir" do - - let :params do { - :ensure => 'present', - :instances => 'es-01', - :url => 'https://github.com/mobz/elasticsearch-head/archive/master.zip', - :module_dir => 'head' - } end - - it { should contain_elasticsearch__plugin('mobz/elasticsearch-head/1.0.0') } - it { should contain_exec('install_plugin_mobz/elasticsearch-head/1.0.0').with(:command => '/usr/share/elasticsearch/bin/plugin install mobz/elasticsearch-head/1.0.0 --url https://github.com/mobz/elasticsearch-head/archive/master.zip', :creates => '/usr/share/elasticsearch/plugins/head', :notify => 'Elasticsearch::Service[es-01]') } - it { should contain_file('/usr/share/elasticsearch/plugins/head/.name').with(:content => 'mobz/elasticsearch-head/1.0.0') } - it { should contain_exec('purge_plugin_head_old').with(:onlyif => "test -e /usr/share/elasticsearch/plugins/head && test \"$(cat /usr/share/elasticsearch/plugins/head/.name)\" != 'mobz/elasticsearch-head/1.0.0'", :command => '/usr/share/elasticsearch/bin/plugin --remove head', :before => 'Exec[install_plugin_mobz/elasticsearch-head/1.0.0]') } - end - - context "Add a plugin with short name" do - - let(:title) { 'head' } - let :params do { - :ensure => 'present', - :instances => 'es-01', - :url => 'https://github.com/mobz/elasticsearch-head/archive/master.zip', - } end - - it { should contain_elasticsearch__plugin('head') } - it { should contain_exec('install_plugin_head').with(:command => '/usr/share/elasticsearch/bin/plugin install head --url 
https://github.com/mobz/elasticsearch-head/archive/master.zip', :creates => '/usr/share/elasticsearch/plugins/head', :notify => 'Elasticsearch::Service[es-01]') } - it { should contain_file('/usr/share/elasticsearch/plugins/head/.name').with(:content => 'head') } - it { should contain_exec('purge_plugin_head_old').with(:onlyif => "test -e /usr/share/elasticsearch/plugins/head && test \"$(cat /usr/share/elasticsearch/plugins/head/.name)\" != 'head'", :command => '/usr/share/elasticsearch/bin/plugin --remove head', :before => 'Exec[install_plugin_head]') } + it { should contain_elasticsearch_plugin('mobz/elasticsearch-head/1.0.0').with(:ensure => 'present', :url => 'https://github.com/mobz/elasticsearch-head/archive/master.zip') } end end @@ -161,11 +67,8 @@ } end it { should contain_elasticsearch__plugin('head') } - it { should contain_file('/tmp/plugin.zip').with(:source => 'puppet:///path/to/my/plugin.zip') } - it { should contain_exec('install_plugin_head').with(:command => '/usr/share/elasticsearch/bin/plugin install head --url file:///tmp/plugin.zip', :creates => '/usr/share/elasticsearch/plugins/head', :notify => 'Elasticsearch::Service[es-01]') } - it { should contain_file('/usr/share/elasticsearch/plugins/head/.name').with(:content => 'head') } - it { should contain_exec('purge_plugin_head_old').with(:onlyif => "test -e /usr/share/elasticsearch/plugins/head && test \"$(cat /usr/share/elasticsearch/plugins/head/.name)\" != 'head'", :command => '/usr/share/elasticsearch/bin/plugin --remove head', :before => 'Exec[install_plugin_head]') } - + it { should contain_file('/opt/elasticsearch/swdl/plugin.zip').with(:source => 'puppet:///path/to/my/plugin.zip') } + it { should contain_elasticsearch_plugin('head').with(:ensure => 'present', :source => '/opt/elasticsearch/swdl/plugin.zip') } end diff --git a/elasticsearch/spec/spec_acceptance_common.rb b/elasticsearch/spec/spec_acceptance_common.rb index eba5f3ac7..5e90f87f4 100644 --- a/elasticsearch/spec/spec_acceptance_common.rb +++ b/elasticsearch/spec/spec_acceptance_common.rb @@ -1,5 +1,6 @@ test_settings['cluster_name'] = SecureRandom.hex(10) + test_settings['repo_version2x'] = '2.x' case fact('osfamily') when 'RedHat' test_settings['repo_version'] = '1.3' diff --git a/elasticsearch/spec/spec_helper_acceptance.rb b/elasticsearch/spec/spec_helper_acceptance.rb index e8802f054..bc1eca8c5 100644 --- a/elasticsearch/spec/spec_helper_acceptance.rb +++ b/elasticsearch/spec/spec_helper_acceptance.rb @@ -86,6 +86,7 @@ def test_settings end scp_to(host, "#{files_dir}/elasticsearch-bigdesk.zip", "/tmp/elasticsearch-bigdesk.zip") + scp_to(host, "#{files_dir}/elasticsearch-kopf.zip", "/tmp/elasticsearch-kopf.zip") end @@ -128,7 +129,12 @@ def test_settings on host, puppet('module', 'install', 'ceritsc-yum'), { :acceptable_exit_codes => [0,1] } end - on(host, 'mkdir -p etc/puppet/modules/another/files/') + if host.is_pe? 
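+    # On Puppet Enterprise hosts ~/.ssh/environment exports a PATH that includes
+    # /opt/puppet/bin; the sed below strips that entry so commands run over SSH
+    # during the acceptance run resolve to the system binaries rather than the
+    # PE-bundled ones.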
+ on(host, 'sed -i -e "s/PATH=PATH:\/opt\/puppet\/bin:/PATH=PATH:/" ~/.ssh/environment') + end + + on(host, 'mkdir -p etc/puppet/modules/another/files/') + end end diff --git a/elasticsearch/spec/unit/provider/plugin_spec.rb b/elasticsearch/spec/unit/provider/plugin_spec.rb new file mode 100644 index 000000000..79ac15135 --- /dev/null +++ b/elasticsearch/spec/unit/provider/plugin_spec.rb @@ -0,0 +1,112 @@ +require 'spec_helper' + +provider_class = Puppet::Type.type(:elasticsearch_plugin).provider(:plugin) + +describe provider_class do + + let(:resource_name) { 'lmenezes/elasticsearch-kopf' } + let(:resource) do + Puppet::Type.type(:elasticsearch_plugin).new( + :name => resource_name, + :ensure => :present, + :provider => 'plugin' + ) + end + + let(:provider) do + provider = provider_class.new + provider.resource = resource + provider + end + + describe "ES 1.x" do + before(:each) do + provider_class.expects(:es).with('-v').returns("Version: 1.7.1, Build: b88f43f/2015-07-29T09:54:16Z, JVM: 1.7.0_79") + allow(File).to receive(:open) + provider.es_version + end + + let(:shortname) { provider.plugin_name(resource_name) } + + describe 'install' do + it 'installs plugin' do + provider.expects(:plugin).with(['install', [ resource_name] ]) + provider.create + end + + + it 'with url' do + resource[:url] = 'http://url/to/my/plugin.zip' + provider.expects(:plugin).with(['install', [ shortname, '--url', 'http://url/to/my/plugin.zip' ] ]) + provider.create + end + + it 'with local file' do + resource[:source] = '/tmp/plugin.zip' + provider.expects(:plugin).with(['install', [ shortname, '--url', 'file:///tmp/plugin.zip' ] ]) + provider.create + end + + it 'with proxy' do + resource[:proxy_args] = '-dproxyport=3128 -dproxyhost=localhost' + provider.expects(:plugin).with(['-dproxyport=3128 -dproxyhost=localhost', 'install', [resource_name] ]) + provider.create + end + + end + + describe 'removal' do + it 'destroys' do + provider.expects(:plugin).with(['remove', resource_name]) + provider.destroy + end + end + + end + + describe "ES 2.x" do + + before(:each) do + allow(provider_class).to receive(:es).with('-v').and_raise(Puppet::ExecutionFailure) + allow(provider_class).to receive(:es).with('--version').and_return("Version: 2.0.0, Build: de54438/2015-10-22T08:09:48Z, JVM: 1.8.0_66") + allow(File).to receive(:open) + provider.es_version + end + + let(:shortname) { provider.plugin_name(resource_name) } + + describe 'install' do + it 'installs plugin' do + provider.expects(:plugin).with(['install', [ resource_name] ]) + provider.create + end + + it 'with url' do + resource[:url] = 'http://url/to/my/plugin.zip' + provider.expects(:plugin).with(['install', [ 'http://url/to/my/plugin.zip' ] ]) + provider.create + end + + it 'with local file' do + resource[:source] = '/tmp/plugin.zip' + provider.expects(:plugin).with(['install', [ 'file:///tmp/plugin.zip' ] ]) + provider.create + end + + it 'with proxy' do + resource[:proxy_args] = '-dproxyport=3128 -dproxyhost=localhost' + provider.expects(:plugin).with(['-dproxyport=3128 -dproxyhost=localhost', 'install', [resource_name] ]) + provider.create + end + end + + describe 'removal' do + it 'destroys' do + provider.expects(:plugin).with(['remove', resource_name]) + provider.destroy + end + end + + end + +end diff --git a/elasticsearch/spec/unit/type/plugin_spec.rb b/elasticsearch/spec/unit/type/plugin_spec.rb new file mode 100644 index 000000000..cdc6c0856 --- /dev/null +++ b/elasticsearch/spec/unit/type/plugin_spec.rb @@ -0,0 +1,57 @@ +require 'spec_helper' + 
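+# Unit coverage for the new elasticsearch_plugin type: the default ensure value,
+# which attributes are parameters versus the ensure property, and a basic
+# install call through the plugin provider.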
+describe Puppet::Type.type(:elasticsearch_plugin).provider(:plugin) do + + let(:resource_name) { "lmenezes/elasticsearch-kopf" } + + describe "input validation" do + + let(:type) { Puppet::Type.type(:elasticsearch_plugin) } + + before do + Process.stubs(:euid).returns 0 + Puppet::Util::Storage.stubs(:store) + end + + it "should default to being installed" do + plugin = Puppet::Type.type(:elasticsearch_plugin).new(:name => resource_name ) + expect(plugin.should(:ensure)).to eq(:present) + end + + describe "when validating attributes" do + [:name, :source, :url, :proxy_args].each do |param| + it "should have a #{param} parameter" do + expect(type.attrtype(param)).to eq(:param) + end + end + + it "should have an ensure property" do + expect(type.attrtype(:ensure)).to eq(:property) + end + end + + end + +end + + describe 'other tests' do + + prov_c = Puppet::Type.type(:elasticsearch_plugin).provider(:plugin) + + describe prov_c do + + it 'should install a plugin' do + resource = Puppet::Type.type(:elasticsearch_plugin).new( + :name => "lmenezes/elasticsearch-kopf", + :ensure => :present + ) + allow(File).to receive(:open) + provider = prov_c.new(resource) + provider.expects(:es).with('-v').returns('Version: 1.7.3, Build: b88f43f/2015-07-29T09:54:16Z, JVM: 1.7.0_79') + provider.expects(:plugin).with(['install', ['lmenezes/elasticsearch-kopf']]) + provider.create + end + + end + end + diff --git a/elasticsearch/templates/etc/init.d/elasticsearch.Debian.erb b/elasticsearch/templates/etc/init.d/elasticsearch.Debian.erb index 7138802e6..cccc8dbd6 100644 --- a/elasticsearch/templates/etc/init.d/elasticsearch.Debian.erb +++ b/elasticsearch/templates/etc/init.d/elasticsearch.Debian.erb @@ -111,6 +111,7 @@ export ES_HEAP_NEWSIZE export ES_DIRECT_SIZE export ES_JAVA_OPTS export ES_CLASSPATH +export ES_INCLUDE # Check DAEMON exists test -x $DAEMON || exit 0 diff --git a/elasticsearch/templates/etc/init.d/elasticsearch.RedHat.erb b/elasticsearch/templates/etc/init.d/elasticsearch.RedHat.erb index 2e7f0cf5b..3cab1f57f 100644 --- a/elasticsearch/templates/etc/init.d/elasticsearch.RedHat.erb +++ b/elasticsearch/templates/etc/init.d/elasticsearch.RedHat.erb @@ -43,6 +43,7 @@ export ES_DIRECT_SIZE export ES_JAVA_OPTS export ES_CLASSPATH export JAVA_HOME +export ES_INCLUDE lockfile=/var/lock/subsys/$prog diff --git a/glance/CHANGELOG.md b/glance/CHANGELOG.md index c168463d4..33d78f1c5 100644 --- a/glance/CHANGELOG.md +++ b/glance/CHANGELOG.md @@ -1,3 +1,44 @@ +##2015-11-25 - 7.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Liberty. 
+ +####Backwards-incompatible changes +- remove deprecated mysql_module +- change section name for AMQP qpid parameters +- change section name for AMQP rabbit parameters + +####Features +- add support for RabbitMQ connection heartbeat +- add tag to package and service resources +- add glance::db::sync +- add an ability to manage use_stderr parameter +- reflect provider change in puppet-openstacklib +- put all the logging related parameters to the logging class +- allow customization of db sync command line +- add S3 backend configuration for glance +- add rados_connect_timeout parameter in glance config +- add ability to specify number of workers for glance-registry service +- use OpenstackClient for glance_image auth + +####Bugfixes +- rely on autorequire for config resource ordering +- make sure Facter is only executed on agent +- file backend: do not inherit from glance::api +- glance_image: hardcode os-image-api-version to 1 +- make sure Glance_image is executed after Keystone_endpoint +- solve duplicate declaration issue for python-openstackclient +- append openstacklib/lib to load path for type + +####Maintenance +- fix rspec 3.x syntax +- initial msync run for all Puppet OpenStack modules +- try to use zuul-cloner to prepare fixtures +- remove class_parameter_defaults puppet-lint check +- acceptance: use common bits from puppet-openstack-integration +- fix unit tests against Puppet 4.3.0 +- require at least 4.2.0 of stdlib + ##2015-10-10 - 6.1.0 ###Summary diff --git a/glance/README.md b/glance/README.md index 0ac189459..f6a801ff3 100644 --- a/glance/README.md +++ b/glance/README.md @@ -1,7 +1,7 @@ glance ======= -6.1.0 - 2015.1 - Kilo +7.0.0 - 2015.2 - Liberty #### Table of Contents @@ -23,7 +23,7 @@ Module Description The glance module is a thorough attempt to make Puppet capable of managing the entirety of glance. This includes manifests to provision such things as keystone endpoints, RPC configurations specific to glance, and database connections. Types are shipped as part of the glance module to assist in manipulation of configuration files. -This module is tested in combination with other modules needed to build and leverage an entire OpenStack software stack. These modules can be found, all pulled together in the [openstack module](https://github.com/stackforge/puppet-openstack). +This module is tested in combination with other modules needed to build and leverage an entire OpenStack software stack. Setup ----- @@ -38,7 +38,7 @@ Setup ### Beginning with glance -To utilize the glance module's functionality you will need to declare multiple resources. The following is a modified excerpt from the [openstack module](https://github.com/stackfoge/puppet-openstack). This is not an exhaustive list of all the components needed, we recommend you consult and understand the [openstack module](https://github.com/stackforge/puppet-openstack) and the [core openstack](http://docs.openstack.org) documentation. +To utilize the glance module's functionality you will need to declare multiple resources. This is not an exhaustive list of all the components needed, we recommend you consult and understand the [core openstack](http://docs.openstack.org) documentation. **Define a glance node** diff --git a/glance/manifests/api.pp b/glance/manifests/api.pp index b944ccfe7..6de2116ef 100644 --- a/glance/manifests/api.pp +++ b/glance/manifests/api.pp @@ -58,10 +58,24 @@ # (optional) The protocol of the Glance registry service. 
# Default: http # +# [*scrub_time*] +# (optional) The amount of time in seconds to delay before performing a delete. +# Defaults to $::os_service_default. +# +# [*delayed_delete*] +# (optional) Turn on/off delayed delete. +# Defaults to $::os_service_default. +# # [*auth_type*] # (optional) Type is authorization being used. # Defaults to 'keystone' # +# [*auth_region*] +# (optional) The region for the authentication service. +# If "use_user_token" is not in effect and using keystone auth, +# then region name can be specified. +# Defaults to 'RegionOne'. +# # [*auth_host*] # (optional) DEPRECATED Host running auth service. # Defaults to '127.0.0.1'. @@ -115,11 +129,41 @@ # # [*database_connection*] # (optional) Connection url to connect to nova database. -# Defaults to 'sqlite:///var/lib/glance/glance.sqlite' +# Defaults to undef # # [*database_idle_timeout*] # (optional) Timeout before idle db connections are reaped. -# Defaults to 3600 +# Defaults to undef +# +# [*database_max_retries*] +# (Optional) Maximum number of database connection retries during startup. +# Set to -1 to specify an infinite retry count. +# Defaults to undef. +# +# [*database_retry_interval*] +# (optional) Interval between retries of opening a database connection. +# Defaults to undef. +# +# [*database_min_pool_size*] +# (optional) Minimum number of SQL connections to keep open in a pool. +# Defaults to undef. +# +# [*database_max_pool_size*] +# (optional) Maximum number of SQL connections to keep open in a pool. +# Defaults to undef. +# +# [*database_max_overflow*] +# (optional) If set, use this value for max_overflow with sqlalchemy. +# Defaults to undef. +# +# [*image_cache_max_size*] +# (optional) The upper limit (the maximum size of accumulated cache in bytes) beyond which pruner, +# if running, starts cleaning the images cache. +# Defaults to $::os_service_default. +# +# [*image_cache_stall_time*] +# (optional) The amount of time to let an image remain in the cache without being accessed. +# Defaults to $::os_service_default. # # [*use_syslog*] # (optional) Use syslog for logging. @@ -168,6 +212,16 @@ # (optional) Sets the keystone region to use. # Defaults to 'RegionOne'. # +# [*signing_dir*] +# (optional) Directory used to cache files related to PKI tokens. +# Defaults to $::os_service_default. +# +# [*token_cache_time*] +# (optional) In order to prevent excessive effort spent validating tokens, +# the middleware caches previously-seen tokens for a configurable duration (in seconds). +# Set to -1 to disable caching completely. +# Defaults to $::os_service_default. 
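Taken together, the glance::api options documented above could be wired up roughly as follows. This is only a sketch: every value is a placeholder, and keystone_password is the class's pre-existing required credential, not something added in this change.

    class { '::glance::api':
      keystone_password      => 'a_big_secret',
      auth_region            => 'RegionOne',
      delayed_delete         => true,          # enable delayed delete ...
      scrub_time             => '43200',       # ... and scrub pending deletes after 12h
      image_cache_max_size   => '10737418240', # prune the image cache beyond 10 GiB
      image_cache_stall_time => '86400',
      token_cache_time       => '300',
      signing_dir            => '/var/cache/glance/api',
    }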
+# # [*validate*] # (optional) Whether to validate the service is working after any service refreshes # Defaults to false @@ -202,7 +256,10 @@ $registry_host = '0.0.0.0', $registry_port = '9191', $registry_client_protocol = 'http', + $scrub_time = $::os_service_default, + $delayed_delete = $::os_service_default, $auth_type = 'keystone', + $auth_region = 'RegionOne', $auth_uri = false, $identity_uri = false, $pipeline = 'keystone', @@ -219,10 +276,19 @@ $key_file = false, $ca_file = false, $known_stores = false, - $database_connection = 'sqlite:///var/lib/glance/glance.sqlite', - $database_idle_timeout = 3600, + $database_connection = undef, + $database_idle_timeout = undef, + $database_min_pool_size = undef, + $database_max_pool_size = undef, + $database_max_retries = undef, + $database_retry_interval = undef, + $database_max_overflow = undef, + $image_cache_max_size = $::os_service_default, + $image_cache_stall_time = $::os_service_default, $image_cache_dir = '/var/lib/glance/image-cache', $os_region_name = 'RegionOne', + $signing_dir = $::os_service_default, + $token_cache_time = $::os_service_default, $validate = false, $validation_options = {}, # DEPRECATED PARAMETERS @@ -234,6 +300,7 @@ ) inherits glance { include ::glance::policy + include ::glance::api::db include ::glance::api::logging require keystone::python @@ -265,22 +332,7 @@ require => Class['glance'] } - if $database_connection { - if($database_connection =~ /mysql:\/\/\S+:\S+@\S+\/\S+/) { - require 'mysql::bindings' - require 'mysql::bindings::python' - } elsif($database_connection =~ /postgresql:\/\/\S+:\S+@\S+\/\S+/) { - - } elsif($database_connection =~ /sqlite:\/\//) { - - } else { - fail("Invalid db connection ${database_connection}") - } - glance_api_config { - 'database/connection': value => $database_connection, secret => true; - 'database/idle_timeout': value => $database_idle_timeout; - } - } + warning('Default value for auth_region parameter is different from OpenStack project defaults') # basic service config glance_api_config { @@ -289,7 +341,10 @@ 'DEFAULT/backlog': value => $backlog; 'DEFAULT/workers': value => $workers; 'DEFAULT/show_image_direct_url': value => $show_image_direct_url; + 'DEFAULT/scrub_time': value => $scrub_time; + 'DEFAULT/delayed_delete': value => $delayed_delete; 'DEFAULT/image_cache_dir': value => $image_cache_dir; + 'DEFAULT/auth_region': value => $auth_region; 'glance_store/os_region_name': value => $os_region_name; } @@ -305,9 +360,11 @@ } glance_cache_config { - 'DEFAULT/verbose': value => pick($verbose, false); - 'DEFAULT/debug': value => pick($debug, false); - 'glance_store/os_region_name': value => $os_region_name; + 'DEFAULT/verbose': value => pick($verbose, false); + 'DEFAULT/debug': value => pick($debug, false); + 'DEFAULT/image_cache_stall_time': value => $image_cache_stall_time; + 'DEFAULT/image_cache_max_size': value => $image_cache_max_size; + 'glance_store/os_region_name': value => $os_region_name; } # configure api service to connect registry service @@ -393,8 +450,10 @@ if $auth_type == 'keystone' { glance_api_config { 'keystone_authtoken/admin_tenant_name': value => $keystone_tenant; - 'keystone_authtoken/admin_user' : value => $keystone_user; - 'keystone_authtoken/admin_password' : value => $keystone_password, secret => true; + 'keystone_authtoken/admin_user': value => $keystone_user; + 'keystone_authtoken/admin_password': value => $keystone_password, secret => true; + 'keystone_authtoken/token_cache_time': value => $token_cache_time; + 
'keystone_authtoken/signing_dir': value => $signing_dir; } glance_cache_config { 'DEFAULT/auth_url' : value => $auth_url; diff --git a/glance/manifests/api/db.pp b/glance/manifests/api/db.pp new file mode 100644 index 000000000..69a8ffa36 --- /dev/null +++ b/glance/manifests/api/db.pp @@ -0,0 +1,101 @@ +# == Class: glance::api::db +# +# Configure the Glance database +# +# === Parameters +# +# [*database_connection*] +# Url used to connect to database. +# (Optional) Defaults to 'sqlite:///var/lib/glance/glance.sqlite'. +# +# [*database_idle_timeout*] +# Timeout when db connections should be reaped. +# (Optional) Defaults to $::os_service_default. +# +# [*database_min_pool_size*] +# Minimum number of SQL connections to keep open in a pool. +# (Optional) Defaults to $::os_service_default. +# +# [*database_max_pool_size*] +# Maximum number of SQL connections to keep open in a pool. +# (Optional) Defaults to $::os_service_default. +# +# [*database_max_retries*] +# Maximum db connection retries during startup. +# Setting -1 implies an infinite retry count. +# (Optional) Defaults to $::os_service_default. +# +# [*database_retry_interval*] +# Interval between retries of opening a sql connection. +# (Optional) Defaults to $::os_service_default. +# +# [*database_max_overflow*] +# If set, use this value for max_overflow with sqlalchemy. +# (Optional) Defaults to $::os_service_default. +# +class glance::api::db ( + $database_connection = 'sqlite:///var/lib/glance/glance.sqlite', + $database_idle_timeout = $::os_service_default, + $database_min_pool_size = $::os_service_default, + $database_max_pool_size = $::os_service_default, + $database_max_retries = $::os_service_default, + $database_retry_interval = $::os_service_default, + $database_max_overflow = $::os_service_default, +) { + + include ::glance::params + + # NOTE(degorenko): In order to keep backward compatibility we rely on the pick function + # to use glance::api:: if glance::api::db:: isn't specified. 
+  $database_connection_real = pick($::glance::api::database_connection, $database_connection)
+  $database_idle_timeout_real = pick($::glance::api::database_idle_timeout, $database_idle_timeout)
+  $database_min_pool_size_real = pick($::glance::api::database_min_pool_size, $database_min_pool_size)
+  $database_max_pool_size_real = pick($::glance::api::database_max_pool_size, $database_max_pool_size)
+  $database_max_retries_real = pick($::glance::api::database_max_retries, $database_max_retries)
+  $database_retry_interval_real = pick($::glance::api::database_retry_interval, $database_retry_interval)
+  $database_max_overflow_real = pick($::glance::api::database_max_overflow, $database_max_overflow)
+
+  validate_re($database_connection_real,
+    '^(sqlite|mysql(\+pymysql)?|postgresql):\/\/(\S+:\S+@\S+\/\S+)?')
+
+  case $database_connection_real {
+    /^mysql(\+pymysql)?:\/\//: {
+      require 'mysql::bindings'
+      require 'mysql::bindings::python'
+      if $database_connection_real =~ /^mysql\+pymysql/ {
+        $backend_package = $::glance::params::pymysql_package_name
+      } else {
+        $backend_package = false
+      }
+    }
+    /^postgresql:\/\//: {
+      $backend_package = false
+      require 'postgresql::lib::python'
+    }
+    /^sqlite:\/\//: {
+      $backend_package = $::glance::params::sqlite_package_name
+    }
+    default: {
+      fail('Unsupported backend configured')
+    }
+  }
+
+  if $backend_package and !defined(Package[$backend_package]) {
+    package {'glance-backend-package':
+      ensure => present,
+      name   => $backend_package,
+      tag    => 'openstack',
+    }
+  }
+
+  glance_api_config {
+    'database/connection':     value => $database_connection_real, secret => true;
+    'database/idle_timeout':   value => $database_idle_timeout_real;
+    'database/min_pool_size':  value => $database_min_pool_size_real;
+    'database/max_retries':    value => $database_max_retries_real;
+    'database/retry_interval': value => $database_retry_interval_real;
+    'database/max_pool_size':  value => $database_max_pool_size_real;
+    'database/max_overflow':   value => $database_max_overflow_real;
+  }
+
+}
diff --git a/glance/manifests/backend/swift.pp b/glance/manifests/backend/swift.pp
index ea3a34b57..56f58051b 100644
--- a/glance/manifests/backend/swift.pp
+++ b/glance/manifests/backend/swift.pp
@@ -32,6 +32,12 @@ # == class: glance::backend::swift
 # [*swift_store_region*]
 #   Optional. Default: undef
 #
+# [*default_swift_reference*]
+#   Optional. The reference to the default swift
+#   account/backing store parameters to use for adding
+#   new images. String value.
+#   Defaults to 'ref1'.
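For the multi-store reference support being added to glance::backend::swift (the class declared just below), a minimal usage sketch; the credentials and auth address are placeholders:

    class { '::glance::backend::swift':
      swift_store_user         => 'services:glance',
      swift_store_key          => 'a_big_secret',
      swift_store_auth_address => 'http://127.0.0.1:5000/v2.0/',
      default_swift_reference  => 'ref1',
    }

With these values the user, key, auth_address and auth_version land in a '[ref1]' section of glance-api.conf, which glance_store/default_swift_reference then points at.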
+# class glance::backend::swift( $swift_store_user, $swift_store_key, @@ -42,35 +48,26 @@ # == class: glance::backend::swift $swift_store_create_container_on_put = false, $swift_store_endpoint_type = 'internalURL', $swift_store_region = undef, + $default_swift_reference = 'ref1', ) { glance_api_config { - 'glance_store/default_store': value => 'swift'; - 'glance_store/swift_store_user': value => $swift_store_user; - 'glance_store/swift_store_key': value => $swift_store_key; - 'glance_store/swift_store_auth_address': value => $swift_store_auth_address; - 'glance_store/swift_store_region': value => $swift_store_region; - 'glance_store/swift_store_container': value => $swift_store_container; - 'glance_store/swift_store_auth_version': value => $swift_store_auth_version; + 'glance_store/default_store': value => 'swift'; + 'glance_store/swift_store_region': value => $swift_store_region; + 'glance_store/swift_store_container': value => $swift_store_container; 'glance_store/swift_store_create_container_on_put': value => $swift_store_create_container_on_put; 'glance_store/swift_store_large_object_size': value => $swift_store_large_object_size; 'glance_store/swift_store_endpoint_type': value => $swift_store_endpoint_type; - } - glance_cache_config { - 'glance_store/swift_store_user': value => $swift_store_user; - 'glance_store/swift_store_key': value => $swift_store_key; - 'glance_store/swift_store_auth_address': value => $swift_store_auth_address; - 'glance_store/swift_store_region': value => $swift_store_region; - 'glance_store/swift_store_container': value => $swift_store_container; - 'glance_store/swift_store_auth_version': value => $swift_store_auth_version; - 'glance_store/swift_store_create_container_on_put': - value => $swift_store_create_container_on_put; - 'glance_store/swift_store_large_object_size': - value => $swift_store_large_object_size; + 'DEFAULT/swift_store_config_file': value => '/etc/glance/glance-api.conf'; + 'glance_store/default_swift_reference': value => $default_swift_reference; + "${default_swift_reference}/user": value => $swift_store_user; + "${default_swift_reference}/key": value => $swift_store_key; + "${default_swift_reference}/auth_address": value => $swift_store_auth_address; + "${default_swift_reference}/auth_version": value => $swift_store_auth_version; } } diff --git a/glance/manifests/db/mysql.pp b/glance/manifests/db/mysql.pp index 59170a4d4..e1d56ad79 100644 --- a/glance/manifests/db/mysql.pp +++ b/glance/manifests/db/mysql.pp @@ -26,10 +26,6 @@ # [*collate*] # the database collation. Optional. Defaults to 'utf8_general_ci' # -# === Deprecated parameters: -# -# [*cluster_id*] This parameter does nothing -# class glance::db::mysql( $password, $dbname = 'glance', @@ -38,7 +34,6 @@ $allowed_hosts = undef, $charset = 'utf8', $collate = 'utf8_general_ci', - $cluster_id = 'localzone', ) { validate_string($password) diff --git a/glance/manifests/db/sync.pp b/glance/manifests/db/sync.pp index 09986c0c2..6137e32b1 100644 --- a/glance/manifests/db/sync.pp +++ b/glance/manifests/db/sync.pp @@ -7,10 +7,10 @@ # (optional) String of extra command line parameters to append # to the glance-manage db sync command. These will be inserted # in the command line between 'glance-manage' and 'db sync'. 
-# Defaults to undef +# Defaults to '--config-file /etc/glance/glance-registry.conf' # class glance::db::sync( - $extra_params = undef, + $extra_params = '--config-file /etc/glance/glance-registry.conf', ) { include ::glance::params diff --git a/glance/manifests/init.pp b/glance/manifests/init.pp index 8ea4a754d..0ef95c349 100644 --- a/glance/manifests/init.pp +++ b/glance/manifests/init.pp @@ -30,5 +30,5 @@ # == class: glance } } - ensure_resource('package', 'python-openstackclient', {'ensure' => $package_ensure, tag => 'openstack'}) + include '::openstacklib::openstackclient' } diff --git a/glance/manifests/keystone/auth.pp b/glance/manifests/keystone/auth.pp index 52028c841..469956cc0 100644 --- a/glance/manifests/keystone/auth.pp +++ b/glance/manifests/keystone/auth.pp @@ -182,7 +182,7 @@ $real_service_name = pick($service_name, $auth_name) if $configure_endpoint { - Keystone_endpoint["${region}/${real_service_name}"] ~> Service <| name == 'glance-api' |> + Keystone_endpoint["${region}/${real_service_name}"] ~> Service<| title == 'glance-api' |> Keystone_endpoint["${region}/${real_service_name}"] -> Glance_image<||> } @@ -203,8 +203,8 @@ } if $configure_user_role { - Keystone_user_role["${auth_name}@${tenant}"] ~> Service <| name == 'glance-registry' |> - Keystone_user_role["${auth_name}@${tenant}"] ~> Service <| name == 'glance-api' |> + Keystone_user_role["${auth_name}@${tenant}"] ~> Service<| title == 'glance-registry' |> + Keystone_user_role["${auth_name}@${tenant}"] ~> Service<| title == 'glance-api' |> } } diff --git a/glance/manifests/notify/qpid.pp b/glance/manifests/notify/qpid.pp index 7d12e162b..7909baeee 100644 --- a/glance/manifests/notify/qpid.pp +++ b/glance/manifests/notify/qpid.pp @@ -1,6 +1,7 @@ # == Class: glance::notify::qpid # # used to configure qpid notifications for glance +# Deprecated class # # === Parameters: # @@ -24,20 +25,12 @@ # Defaults to tcp. # class glance::notify::qpid( - $qpid_password, - $qpid_username = 'guest', - $qpid_hostname = 'localhost', - $qpid_port = '5672', - $qpid_protocol = 'tcp' + $qpid_password = undef, + $qpid_username = undef, + $qpid_hostname = undef, + $qpid_port = undef, + $qpid_protocol = undef ) inherits glance::api { - glance_api_config { - 'DEFAULT/notifier_driver': value => 'qpid'; - 'oslo_messaging_qpid/qpid_hostname': value => $qpid_hostname; - 'oslo_messaging_qpid/qpid_port': value => $qpid_port; - 'oslo_messaging_qpid/qpid_protocol': value => $qpid_protocol; - 'oslo_messaging_qpid/qpid_username': value => $qpid_username; - 'oslo_messaging_qpid/qpid_password': value => $qpid_password, secret => true; - } - + warning('Qpid driver is removed from Oslo.messaging in the Mitaka release') } diff --git a/glance/manifests/notify/rabbitmq.pp b/glance/manifests/notify/rabbitmq.pp index 6a8226603..f9e2c7c1b 100644 --- a/glance/manifests/notify/rabbitmq.pp +++ b/glance/manifests/notify/rabbitmq.pp @@ -58,6 +58,11 @@ # available on some distributions. # Defaults to 'TLSv1' # +# [*kombu_reconnect_delay*] +# (optional) How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. +# Defaults to $::os_service_default. 
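A sketch of how the new kombu_reconnect_delay option might be set on glance::notify::rabbitmq; only kombu_reconnect_delay comes from this change, the credential parameter and its value are assumptions for illustration:

    class { '::glance::notify::rabbitmq':
      rabbit_password       => 'an_even_bigger_secret', # assumed required credential, placeholder value
      kombu_reconnect_delay => '5.0',                   # wait 5s before reconnecting after a consumer cancel
    }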
+# # [*rabbit_notification_exchange*] # Defaults to 'glance' # @@ -88,6 +93,7 @@ $kombu_ssl_certfile = undef, $kombu_ssl_keyfile = undef, $kombu_ssl_version = 'TLSv1', + $kombu_reconnect_delay = $::os_service_default, $rabbit_notification_exchange = 'glance', $rabbit_notification_topic = 'notifications', $rabbit_durable_queues = false, @@ -125,6 +131,7 @@ 'oslo_messaging_rabbit/rabbit_notification_topic': value => $rabbit_notification_topic; 'oslo_messaging_rabbit/heartbeat_timeout_threshold': value => $rabbit_heartbeat_timeout_threshold; 'oslo_messaging_rabbit/heartbeat_rate': value => $rabbit_heartbeat_rate; + 'oslo_messaging_rabbit/kombu_reconnect_delay': value => $kombu_reconnect_delay; 'oslo_messaging_rabbit/rabbit_use_ssl': value => $rabbit_use_ssl; 'oslo_messaging_rabbit/amqp_durable_queues': value => $amqp_durable_queues_real; } diff --git a/glance/manifests/params.pp b/glance/manifests/params.pp index 6fbed16a2..08f56329a 100644 --- a/glance/manifests/params.pp +++ b/glance/manifests/params.pp @@ -13,20 +13,22 @@ $registry_package_name = 'openstack-glance' $api_service_name = 'openstack-glance-api' $registry_service_name = 'openstack-glance-registry' - $db_sync_command = 'glance-manage --config-file=/etc/glance/glance-registry.conf db_sync' if ($::operatingsystem != 'fedora' and versioncmp($::operatingsystemrelease, '7') < 0) { $pyceph_package_name = 'python-ceph' } else { $pyceph_package_name = 'python-rbd' } + $sqlite_package_name = undef + $pymysql_package_name = undef } 'Debian': { $api_package_name = 'glance-api' $registry_package_name = 'glance-registry' $api_service_name = 'glance-api' $registry_service_name = 'glance-registry' - $db_sync_command = 'glance-manage --config-file=/etc/glance/glance-registry.conf db_sync' $pyceph_package_name = 'python-ceph' + $sqlite_package_name = 'python-pysqlite2' + $pymysql_package_name = 'python-pymysql' } default: { fail("Unsupported osfamily: ${::osfamily} operatingsystem: ${::operatingsystem}, module ${module_name} only support osfamily RedHat and Debian") diff --git a/glance/manifests/registry.pp b/glance/manifests/registry.pp index 220ee1ab6..c925b3153 100644 --- a/glance/manifests/registry.pp +++ b/glance/manifests/registry.pp @@ -39,13 +39,34 @@ # If set to boolean false, it will not log to any directory. # Defaults to undef. # -# [*database_connection*] -# (optional) Connection url to connect to nova database. -# Defaults to 'sqlite:///var/lib/glance/glance.sqlite' +# [*database_connection*] +# (optional) Connection url to connect to nova database. +# Defaults to undef # -# [*database_idle_timeout*] -# (optional) Timeout before idle db connections are reaped. -# Defaults to 3600 +# [*database_idle_timeout*] +# (optional) Timeout before idle db connections are reaped. +# Defaults to undef +# +# [*database_max_retries*] +# (Optional) Maximum number of database connection retries during startup. +# Set to -1 to specify an infinite retry count. +# Defaults to undef. +# +# [*database_retry_interval*] +# (optional) Interval between retries of opening a database connection. +# Defaults to undef. +# +# [*database_min_pool_size*] +# (optional) Minimum number of SQL connections to keep open in a pool. +# Defaults to undef. +# +# [*database_max_pool_size*] +# (optional) Maximum number of SQL connections to keep open in a pool. +# Defaults to undef. +# +# [*database_max_overflow*] +# (optional) If set, use this value for max_overflow with sqlalchemy. +# Defaults to undef. # # [*auth_type*] # (optional) Authentication type. 
Defaults to 'keystone'. @@ -128,42 +149,65 @@ # (Optional) Run db sync on the node. # Defaults to true # +# [*os_region_name*] +# (optional) Sets the keystone region to use. +# Defaults to 'RegionOne'. +# +# [*signing_dir*] +# Directory used to cache files related to PKI tokens. +# Defaults to $::os_service_default. +# +# [*token_cache_time*] +# In order to prevent excessive effort spent validating tokens, +# the middleware caches previously-seen tokens for a configurable duration (in seconds). +# Set to -1 to disable caching completely. +# Defaults to $::os_service_default. +# class glance::registry( $keystone_password, - $package_ensure = 'present', - $verbose = undef, - $debug = undef, - $bind_host = '0.0.0.0', - $bind_port = '9191', - $workers = $::processorcount, - $log_file = undef, - $log_dir = undef, - $database_connection = 'sqlite:///var/lib/glance/glance.sqlite', - $database_idle_timeout = 3600, - $auth_type = 'keystone', - $auth_uri = false, - $identity_uri = false, - $keystone_tenant = 'services', - $keystone_user = 'glance', - $pipeline = 'keystone', - $use_syslog = undef, - $use_stderr = undef, - $log_facility = undef, - $manage_service = true, - $enabled = true, - $purge_config = false, - $cert_file = false, - $key_file = false, - $ca_file = false, - $sync_db = true, + $package_ensure = 'present', + $verbose = undef, + $debug = undef, + $bind_host = '0.0.0.0', + $bind_port = '9191', + $workers = $::processorcount, + $log_file = undef, + $log_dir = undef, + $database_connection = undef, + $database_idle_timeout = undef, + $database_min_pool_size = undef, + $database_max_pool_size = undef, + $database_max_retries = undef, + $database_retry_interval = undef, + $database_max_overflow = undef, + $auth_type = 'keystone', + $auth_uri = false, + $identity_uri = false, + $keystone_tenant = 'services', + $keystone_user = 'glance', + $pipeline = 'keystone', + $use_syslog = undef, + $use_stderr = undef, + $log_facility = undef, + $manage_service = true, + $enabled = true, + $purge_config = false, + $cert_file = false, + $key_file = false, + $ca_file = false, + $sync_db = true, + $os_region_name = 'RegionOne', + $signing_dir = $::os_service_default, + $token_cache_time = $::os_service_default, # DEPRECATED PARAMETERS - $auth_host = '127.0.0.1', - $auth_port = '35357', - $auth_admin_prefix = false, - $auth_protocol = 'http', + $auth_host = '127.0.0.1', + $auth_port = '35357', + $auth_admin_prefix = false, + $auth_protocol = 'http', ) inherits glance { include ::glance::registry::logging + include ::glance::registry::db require keystone::python if ( $glance::params::api_package_name != $glance::params::registry_package_name ) { @@ -188,27 +232,13 @@ require => Class['glance'] } - if $database_connection { - if($database_connection =~ /mysql:\/\/\S+:\S+@\S+\/\S+/) { - require 'mysql::bindings' - require 'mysql::bindings::python' - } elsif($database_connection =~ /postgresql:\/\/\S+:\S+@\S+\/\S+/) { - - } elsif($database_connection =~ /sqlite:\/\//) { - - } else { - fail("Invalid db connection ${database_connection}") - } - glance_registry_config { - 'database/connection': value => $database_connection, secret => true; - 'database/idle_timeout': value => $database_idle_timeout; - } - } + warning('Default value for os_region_name parameter is different from OpenStack project defaults') glance_registry_config { - 'DEFAULT/workers': value => $workers; - 'DEFAULT/bind_host': value => $bind_host; - 'DEFAULT/bind_port': value => $bind_port; + 'DEFAULT/workers': value => $workers; + 
'DEFAULT/bind_host': value => $bind_host; + 'DEFAULT/bind_port': value => $bind_port; + 'glance_store/os_region_name': value => $os_region_name; } if $identity_uri { @@ -282,8 +312,10 @@ if $auth_type == 'keystone' { glance_registry_config { 'keystone_authtoken/admin_tenant_name': value => $keystone_tenant; - 'keystone_authtoken/admin_user' : value => $keystone_user; - 'keystone_authtoken/admin_password' : value => $keystone_password, secret => true; + 'keystone_authtoken/admin_user': value => $keystone_user; + 'keystone_authtoken/admin_password': value => $keystone_password, secret => true; + 'keystone_authtoken/token_cache_time': value => $token_cache_time; + 'keystone_authtoken/signing_dir': value => $signing_dir; } } diff --git a/glance/manifests/registry/db.pp b/glance/manifests/registry/db.pp new file mode 100644 index 000000000..b0d854020 --- /dev/null +++ b/glance/manifests/registry/db.pp @@ -0,0 +1,101 @@ +# == Class: glance::registry::db +# +# Configure the Glance database +# +# === Parameters +# +# [*database_connection*] +# Url used to connect to database. +# (Optional) Defaults to 'sqlite:///var/lib/glance/glance.sqlite'. +# +# [*database_idle_timeout*] +# Timeout when db connections should be reaped. +# (Optional) Defaults to $::os_service_default. +# +# [*database_min_pool_size*] +# Minimum number of SQL connections to keep open in a pool. +# (Optional) Defaults to $::os_service_default. +# +# [*database_max_pool_size*] +# Maximum number of SQL connections to keep open in a pool. +# (Optional) Defaults to $::os_service_default. +# +# [*database_max_retries*] +# Maximum db connection retries during startup. +# Setting -1 implies an infinite retry count. +# (Optional) Defaults to $::os_service_default. +# +# [*database_retry_interval*] +# Interval between retries of opening a sql connection. +# (Optional) Defaults to $::os_service_default. +# +# [*database_max_overflow*] +# If set, use this value for max_overflow with sqlalchemy. +# (Optional) Defaults to $::os_service_default. +# +class glance::registry::db ( + $database_connection = 'sqlite:///var/lib/glance/glance.sqlite', + $database_idle_timeout = $::os_service_default, + $database_min_pool_size = $::os_service_default, + $database_max_pool_size = $::os_service_default, + $database_max_retries = $::os_service_default, + $database_retry_interval = $::os_service_default, + $database_max_overflow = $::os_service_default, +) { + + include ::glance::params + + # NOTE(degorenko): In order to keep backward compatibility we rely on the pick function + # to use glance::registry:: if glance::registry::db:: isn't specified. 
+ $database_connection_real = pick($::glance::registry::database_connection, $database_connection) + $database_idle_timeout_real = pick($::glance::registry::database_idle_timeout, $database_idle_timeout) + $database_min_pool_size_real = pick($::glance::registry::database_min_pool_size, $database_min_pool_size) + $database_max_pool_size_real = pick($::glance::registry::database_max_pool_size, $database_max_pool_size) + $database_max_retries_real = pick($::glance::registry::database_max_retries, $database_max_retries) + $database_retry_interval_real = pick($::glance::registry::database_retry_interval, $database_retry_interval) + $database_max_overflow_real = pick($::glance::registry::database_max_overflow, $database_max_overflow) + + validate_re($database_connection_real, + '^(sqlite|mysql(\+pymysql)?|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') + + case $database_connection_real { + /^mysql(\+pymysql)?:\/\//: { + require 'mysql::bindings' + require 'mysql::bindings::python' + if $database_connection_real =~ /^mysql\+pymysql/ { + $backend_package = $::glance::params::pymysql_package_name + } else { + $backend_package = false + } + } + /^postgresql:\/\//: { + $backend_package = false + require 'postgresql::lib::python' + } + /^sqlite:\/\//: { + $backend_package = $::glance::params::sqlite_package_name + } + default: { + fail('Unsupported backend configured') + } + } + + if $backend_package and !defined(Package[$backend_package]) { + package {'glance-backend-package': + ensure => present, + name => $backend_package, + tag => 'openstack', + } + } + + glance_registry_config { + 'database/connection': value => $database_connection_real, secret => true; + 'database/idle_timeout': value => $database_idle_timeout_real; + 'database/min_pool_size': value => $database_min_pool_size_real; + 'database/max_retries': value => $database_max_retries_real; + 'database/retry_interval': value => $database_retry_interval_real; + 'database/max_pool_size': value => $database_max_pool_size_real; + 'database/max_overflow': value => $database_max_overflow_real; + } + +} diff --git a/glance/metadata.json b/glance/metadata.json index 787b828df..28e073b62 100644 --- a/glance/metadata.json +++ b/glance/metadata.json @@ -1,6 +1,6 @@ { "name": "openstack-glance", - "version": "6.1.0", + "version": "7.0.0", "author": "Puppet Labs and OpenStack Contributors", "summary": "Puppet module for OpenStack Glance", "license": "Apache-2.0", @@ -32,8 +32,8 @@ "description": "Installs and configures OpenStack Glance (Image Service).", "dependencies": [ { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=7.0.0 <8.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.2.0 <5.0.0" }, - { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=7.0.0 <8.0.0" } ] } diff --git a/glance/spec/acceptance/basic_glance_spec.rb b/glance/spec/acceptance/basic_glance_spec.rb index 4348a527c..87847edb3 100644 --- a/glance/spec/acceptance/basic_glance_spec.rb +++ b/glance/spec/acceptance/basic_glance_spec.rb @@ -3,9 +3,7 @@ describe 'glance class' do context 'default parameters' do - - it 'should work with no errors' do - pp= <<-EOS + pp= <<-EOS include ::openstack_integration include ::openstack_integration::repos include ::openstack_integration::mysql @@ -24,12 +22,12 @@ class { '::glance::keystone::auth': 
password => 'a_big_secret', } class { '::glance::api': - database_connection => 'mysql://glance:a_big_secret@127.0.0.1/glance?charset=utf8', + database_connection => 'mysql+pymysql://glance:a_big_secret@127.0.0.1/glance?charset=utf8', verbose => false, keystone_password => 'a_big_secret', } class { '::glance::registry': - database_connection => 'mysql://glance:a_big_secret@127.0.0.1/glance?charset=utf8', + database_connection => 'mysql+pymysql://glance:a_big_secret@127.0.0.1/glance?charset=utf8', verbose => false, keystone_password => 'a_big_secret', } @@ -41,8 +39,29 @@ class { '::glance::registry': is_public => 'yes', source => 'http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img', } - EOS + EOS + + it 'should configure the glance endpoint before the glance-api service uses it' do + pp2 = pp + "Service['glance-api'] -> Keystone_endpoint['RegionOne/glance']" + expect(apply_manifest(pp2, :expect_failures => true, :noop => true).stderr).to match(/Found 1 dependency cycle/i) + end + + it 'should configure the glance user before the glance-api service uses it' do + pp2 = pp + "Service['glance-api'] -> Keystone_user_role['glance@services']" + expect(apply_manifest(pp2, :expect_failures => true, :noop => true).stderr).to match(/Found 1 dependency cycle/i) + end + it 'should configure the glance user before the glance-registry service uses it' do + pp2 = pp + "Service['glance-registry'] -> Keystone_user_role['glance@services']" + expect(apply_manifest(pp2, :expect_failures => true, :noop => true).stderr).to match(/Found 1 dependency cycle/i) + end + + it 'should configure the glance-api service before using it to provision glance_images' do + pp2 = pp + "Glance_image['test_image'] -> Service['glance-api']" + expect(apply_manifest(pp2, :expect_failures => true, :noop => true).stderr).to match(/Found 1 dependency cycle/i) + end + + it 'should work with no errors' do # Run it twice and test for idempotency apply_manifest(pp, :catch_failures => true) apply_manifest(pp, :catch_changes => true) diff --git a/glance/spec/classes/glance_api_db_spec.rb b/glance/spec/classes/glance_api_db_spec.rb new file mode 100644 index 000000000..f7de6b97a --- /dev/null +++ b/glance/spec/classes/glance_api_db_spec.rb @@ -0,0 +1,94 @@ +require 'spec_helper' + +describe 'glance::api::db' do + + shared_examples 'glance::api::db' do + context 'with default parameters' do + it { is_expected.to contain_glance_api_config('database/connection').with_value('sqlite:///var/lib/glance/glance.sqlite').with_secret(true) } + it { is_expected.to contain_glance_api_config('database/idle_timeout').with_value('') } + it { is_expected.to contain_glance_api_config('database/min_pool_size').with_value('') } + it { is_expected.to contain_glance_api_config('database/max_retries').with_value('') } + it { is_expected.to contain_glance_api_config('database/retry_interval').with_value('') } + it { is_expected.to contain_glance_api_config('database/max_pool_size').with_value('') } + it { is_expected.to contain_glance_api_config('database/max_overflow').with_value('') } + end + + context 'with specific parameters' do + let :params do + { :database_connection => 'mysql+pymysql://glance_api:glance@localhost/glance', + :database_idle_timeout => '3601', + :database_min_pool_size => '2', + :database_max_retries => '11', + :database_retry_interval => '11', + :database_max_pool_size => '11', + :database_max_overflow => '21', + } + end + + it { is_expected.to 
contain_glance_api_config('database/connection').with_value('mysql+pymysql://glance_api:glance@localhost/glance').with_secret(true) } + it { is_expected.to contain_glance_api_config('database/idle_timeout').with_value('3601') } + it { is_expected.to contain_glance_api_config('database/min_pool_size').with_value('2') } + it { is_expected.to contain_glance_api_config('database/max_retries').with_value('11') } + it { is_expected.to contain_glance_api_config('database/retry_interval').with_value('11') } + it { is_expected.to contain_glance_api_config('database/max_pool_size').with_value('11') } + it { is_expected.to contain_glance_api_config('database/max_overflow').with_value('21') } + end + + context 'with MySQL-python library as backend package' do + let :params do + { :database_connection => 'mysql://glance_api:glance@localhost/glance' } + end + + it { is_expected.to contain_package('python-mysqldb').with(:ensure => 'present') } + end + + context 'with incorrect pymysql database_connection string' do + let :params do + { :database_connection => 'foo+pymysql://glance_api:glance@localhost/glance', } + end + + it_raises 'a Puppet::Error', /validate_re/ + end + + end + + context 'on Debian platforms' do + let :facts do + @default_facts.merge({ :osfamily => 'Debian' }) + end + + it_configures 'glance::api::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://glance_api:glance@localhost/glance', } + end + + it 'install the proper backend package' do + is_expected.to contain_package('glance-backend-package').with( + :ensure => 'present', + :name => 'python-pymysql', + :tag => 'openstack' + ) + end + end + end + + context 'on Redhat platforms' do + let :facts do + @default_facts.merge({ :osfamily => 'RedHat' }) + end + + it_configures 'glance::api::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://glance_api:glance@localhost/glance', } + end + + it { is_expected.not_to contain_package('glance-backend-package') } + end + end + +end + diff --git a/glance/spec/classes/glance_api_spec.rb b/glance/spec/classes/glance_api_spec.rb index fecce007b..95437dad7 100644 --- a/glance/spec/classes/glance_api_spec.rb +++ b/glance/spec/classes/glance_api_spec.rb @@ -22,6 +22,7 @@ :log_file => '/var/log/glance/api.log', :log_dir => '/var/log/glance', :auth_type => 'keystone', + :auth_region => 'RegionOne', :enabled => true, :manage_service => true, :backlog => '4096', @@ -32,13 +33,17 @@ :keystone_tenant => 'services', :keystone_user => 'glance', :keystone_password => 'ChangeMe', - :database_idle_timeout => '3600', - :database_connection => 'sqlite:///var/lib/glance/glance.sqlite', + :token_cache_time => '', :show_image_direct_url => false, :purge_config => false, :known_stores => false, + :delayed_delete => '', + :scrub_time => '', :image_cache_dir => '/var/lib/glance/image-cache', + :image_cache_stall_time => '', + :image_cache_max_size => '', :os_region_name => 'RegionOne', + :signing_dir => '', :pipeline => 'keystone', } end @@ -53,6 +58,7 @@ :registry_port => '9111', :registry_client_protocol => 'https', :auth_type => 'not_keystone', + :auth_region => 'RegionOne2', :enabled => false, :backlog => '4095', :workers => '5', @@ -62,11 +68,15 @@ :keystone_tenant => 'admin2', :keystone_user => 'admin2', :keystone_password => 'ChangeMe2', - :database_idle_timeout => '36002', - :database_connection => 'mysql:///var:lib@glance/glance', + :token_cache_time => '300', :show_image_direct_url => true, + :delayed_delete => 
'true', + :scrub_time => '10', :image_cache_dir => '/tmp/glance', + :image_cache_stall_time => '10', + :image_cache_max_size => '10737418240', :os_region_name => 'RegionOne2', + :signing_dir => '/path/to/dir', :pipeline => 'keystone2', } ].each do |param_set| @@ -84,6 +94,7 @@ it { is_expected.to contain_class 'glance' } it { is_expected.to contain_class 'glance::policy' } it { is_expected.to contain_class 'glance::api::logging' } + it { is_expected.to contain_class 'glance::api::db' } it { is_expected.to contain_service('glance-api').with( 'ensure' => (param_hash[:manage_service] && param_hash[:enabled]) ? 'running': 'stopped', @@ -105,6 +116,10 @@ 'registry_port', 'registry_client_protocol', 'show_image_direct_url', + 'delayed_delete', + 'scrub_time', + 'image_cache_dir', + 'auth_region' ].each do |config| is_expected.to contain_glance_api_config("DEFAULT/#{config}").with_value(param_hash[config.intern]) end @@ -114,6 +129,8 @@ [ 'registry_host', 'registry_port', + 'image_cache_stall_time', + 'image_cache_max_size', ].each do |config| is_expected.to contain_glance_cache_config("DEFAULT/#{config}").with_value(param_hash[config.intern]) end @@ -128,12 +145,6 @@ end end - it 'is_expected.to config db' do - is_expected.to contain_glance_api_config('database/connection').with_value(param_hash[:database_connection]) - is_expected.to contain_glance_api_config('database/connection').with_value(param_hash[:database_connection]).with_secret(true) - is_expected.to contain_glance_api_config('database/idle_timeout').with_value(param_hash[:database_idle_timeout]) - end - it 'is_expected.to have no ssl options' do is_expected.to contain_glance_api_config('DEFAULT/ca_file').with_ensure('absent') is_expected.to contain_glance_api_config('DEFAULT/cert_file').with_ensure('absent') @@ -155,7 +166,7 @@ if params[:auth_type] == 'keystone' is_expected.to contain('paste_deploy/flavor').with_value('keystone+cachemanagement') - ['admin_tenant_name', 'admin_user', 'admin_password'].each do |config| + ['admin_tenant_name', 'admin_user', 'admin_password', 'token_cache_time', 'signing_dir'].each do |config| is_expected.to contain_glance_api_config("keystone_authtoken/#{config}").with_value(param_hash[config.intern]) end is_expected.to contain_glance_api_config('keystone_authtoken/admin_password').with_value(param_hash[:keystone_password]).with_secret(true) diff --git a/glance/spec/classes/glance_backend_swift_spec.rb b/glance/spec/classes/glance_backend_swift_spec.rb index 0e561ff80..3922cfc6e 100644 --- a/glance/spec/classes/glance_backend_swift_spec.rb +++ b/glance/spec/classes/glance_backend_swift_spec.rb @@ -22,61 +22,49 @@ it 'configures glance-api.conf' do is_expected.to contain_glance_api_config('glance_store/default_store').with_value('swift') - is_expected.to contain_glance_api_config('glance_store/swift_store_key').with_value('key') - is_expected.to contain_glance_api_config('glance_store/swift_store_user').with_value('user') - is_expected.to contain_glance_api_config('glance_store/swift_store_auth_version').with_value('2') is_expected.to contain_glance_api_config('glance_store/swift_store_large_object_size').with_value('5120') - is_expected.to contain_glance_api_config('glance_store/swift_store_auth_address').with_value('127.0.0.1:5000/v2.0/') is_expected.to contain_glance_api_config('glance_store/swift_store_container').with_value('glance') is_expected.to contain_glance_api_config('glance_store/swift_store_create_container_on_put').with_value(false) is_expected.to 
contain_glance_api_config('glance_store/swift_store_endpoint_type').with_value('internalURL') is_expected.to contain_glance_api_config('glance_store/swift_store_region').with_value(nil) + is_expected.to contain_glance_api_config('DEFAULT/swift_store_config_file').with_value('/etc/glance/glance-api.conf') + is_expected.to contain_glance_api_config('glance_store/default_swift_reference').with_value('ref1') + is_expected.to contain_glance_api_config('ref1/key').with_value('key') + is_expected.to contain_glance_api_config('ref1/user').with_value('user') + is_expected.to contain_glance_api_config('ref1/auth_version').with_value('2') + is_expected.to contain_glance_api_config('ref1/auth_address').with_value('127.0.0.1:5000/v2.0/') end - it 'configures glance-cache.conf' do - is_expected.to contain_glance_cache_config('glance_store/swift_store_key').with_value('key') - is_expected.to contain_glance_cache_config('glance_store/swift_store_user').with_value('user') - is_expected.to contain_glance_cache_config('glance_store/swift_store_auth_version').with_value('2') - is_expected.to contain_glance_cache_config('glance_store/swift_store_large_object_size').with_value('5120') - is_expected.to contain_glance_cache_config('glance_store/swift_store_auth_address').with_value('127.0.0.1:5000/v2.0/') - is_expected.to contain_glance_cache_config('glance_store/swift_store_container').with_value('glance') - is_expected.to contain_glance_cache_config('glance_store/swift_store_create_container_on_put').with_value(false) - is_expected.to contain_glance_cache_config('glance_store/swift_store_region').with_value(nil) - end end describe 'when overriding parameters' do let :params do { - :swift_store_user => 'user', - :swift_store_key => 'key', + :swift_store_user => 'user2', + :swift_store_key => 'key2', :swift_store_auth_version => '1', :swift_store_large_object_size => '100', :swift_store_auth_address => '127.0.0.2:8080/v1.0/', :swift_store_container => 'swift', :swift_store_create_container_on_put => true, :swift_store_endpoint_type => 'publicURL', - :swift_store_region => 'RegionTwo' + :swift_store_region => 'RegionTwo', + :default_swift_reference => 'swift_creds', } end it 'configures glance-api.conf' do is_expected.to contain_glance_api_config('glance_store/swift_store_container').with_value('swift') is_expected.to contain_glance_api_config('glance_store/swift_store_create_container_on_put').with_value(true) - is_expected.to contain_glance_api_config('glance_store/swift_store_auth_version').with_value('1') is_expected.to contain_glance_api_config('glance_store/swift_store_large_object_size').with_value('100') - is_expected.to contain_glance_api_config('glance_store/swift_store_auth_address').with_value('127.0.0.2:8080/v1.0/') is_expected.to contain_glance_api_config('glance_store/swift_store_endpoint_type').with_value('publicURL') is_expected.to contain_glance_api_config('glance_store/swift_store_region').with_value('RegionTwo') + is_expected.to contain_glance_api_config('glance_store/default_swift_reference').with_value('swift_creds') + is_expected.to contain_glance_api_config('swift_creds/key').with_value('key2') + is_expected.to contain_glance_api_config('swift_creds/user').with_value('user2') + is_expected.to contain_glance_api_config('swift_creds/auth_version').with_value('1') + is_expected.to contain_glance_api_config('swift_creds/auth_address').with_value('127.0.0.2:8080/v1.0/') end - it 'configures glance-cache.conf' do - is_expected.to 
contain_glance_cache_config('glance_store/swift_store_container').with_value('swift') - is_expected.to contain_glance_cache_config('glance_store/swift_store_create_container_on_put').with_value(true) - is_expected.to contain_glance_cache_config('glance_store/swift_store_auth_version').with_value('1') - is_expected.to contain_glance_cache_config('glance_store/swift_store_large_object_size').with_value('100') - is_expected.to contain_glance_cache_config('glance_store/swift_store_auth_address').with_value('127.0.0.2:8080/v1.0/') - is_expected.to contain_glance_cache_config('glance_store/swift_store_region').with_value('RegionTwo') - end end end diff --git a/glance/spec/classes/glance_db_sync_spec.rb b/glance/spec/classes/glance_db_sync_spec.rb index e4b21d0e9..4b12c1fc1 100644 --- a/glance/spec/classes/glance_db_sync_spec.rb +++ b/glance/spec/classes/glance_db_sync_spec.rb @@ -6,7 +6,7 @@ it 'runs glance-manage db_sync' do is_expected.to contain_exec('glance-manage db_sync').with( - :command => 'glance-manage db_sync', + :command => 'glance-manage --config-file /etc/glance/glance-registry.conf db_sync', :path => '/usr/bin', :user => 'glance', :refreshonly => 'true', diff --git a/glance/spec/classes/glance_notify_qpid_spec.rb b/glance/spec/classes/glance_notify_qpid_spec.rb deleted file mode 100644 index 2cf8fd4c9..000000000 --- a/glance/spec/classes/glance_notify_qpid_spec.rb +++ /dev/null @@ -1,57 +0,0 @@ -require 'spec_helper' -describe 'glance::notify::qpid' do - let :facts do - @default_facts.merge({ - :osfamily => 'Debian', - }) - end - - let :pre_condition do - 'class { "glance::api": keystone_password => "pass" }' - end - - describe 'when default params and qpid_password' do - let :params do - {:qpid_password => 'pass'} - end - - it { is_expected.to contain_glance_api_config('DEFAULT/notifier_driver').with_value('qpid') } - it { is_expected.to contain_glance_api_config('oslo_messaging_qpid/qpid_username').with_value('guest') } - it { is_expected.to contain_glance_api_config('oslo_messaging_qpid/qpid_password').with_value('pass') } - it { is_expected.to contain_glance_api_config('oslo_messaging_qpid/qpid_password').with_value(params[:qpid_password]).with_secret(true) } - it { is_expected.to contain_glance_api_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost') } - it { is_expected.to contain_glance_api_config('oslo_messaging_qpid/qpid_port').with_value('5672') } - it { is_expected.to contain_glance_api_config('oslo_messaging_qpid/qpid_protocol').with_value('tcp') } - end - - describe 'when passing params' do - let :params do - { - :qpid_password => 'pass2', - :qpid_username => 'guest2', - :qpid_hostname => 'localhost2', - :qpid_port => '5673' - } - end - it { is_expected.to contain_glance_api_config('oslo_messaging_qpid/qpid_username').with_value('guest2') } - it { is_expected.to contain_glance_api_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost2') } - it { is_expected.to contain_glance_api_config('oslo_messaging_qpid/qpid_port').with_value('5673') } - it { is_expected.to contain_glance_api_config('oslo_messaging_qpid/qpid_protocol').with_value('tcp') } - end - - describe 'when configuring with ssl' do - let :params do - { - :qpid_password => 'pass3', - :qpid_username => 'guest3', - :qpid_hostname => 'localhost3', - :qpid_port => '5671', - :qpid_protocol => 'ssl' - } - end - it { is_expected.to contain_glance_api_config('oslo_messaging_qpid/qpid_username').with_value('guest3') } - it { is_expected.to 
contain_glance_api_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost3') } - it { is_expected.to contain_glance_api_config('oslo_messaging_qpid/qpid_port').with_value('5671') } - it { is_expected.to contain_glance_api_config('oslo_messaging_qpid/qpid_protocol').with_value('ssl') } - end -end diff --git a/glance/spec/classes/glance_notify_rabbitmq_spec.rb b/glance/spec/classes/glance_notify_rabbitmq_spec.rb index 45f19f897..5588dbbe1 100644 --- a/glance/spec/classes/glance_notify_rabbitmq_spec.rb +++ b/glance/spec/classes/glance_notify_rabbitmq_spec.rb @@ -28,6 +28,7 @@ it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/rabbit_notification_topic').with_value('notifications') } it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('0') } it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/heartbeat_rate').with_value('2') } + it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/kombu_reconnect_delay').with_value('') } end describe 'when passing params and use ssl' do @@ -39,6 +40,7 @@ :rabbit_port => '5673', :rabbit_use_ssl => true, :rabbit_durable_queues => true, + :kombu_reconnect_delay => '5.0' } it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/rabbit_userid').with_value('guest2') } it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/rabbit_host').with_value('localhost2') } @@ -49,6 +51,7 @@ it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') } it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('TLSv1') } it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/rabbit_durable_queues').with_value('true') } + it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/kombu_reconnect_delay').with_value('5.0') } end end diff --git a/glance/spec/classes/glance_registry_db_spec.rb b/glance/spec/classes/glance_registry_db_spec.rb new file mode 100644 index 000000000..9dc1f21b9 --- /dev/null +++ b/glance/spec/classes/glance_registry_db_spec.rb @@ -0,0 +1,94 @@ +require 'spec_helper' + +describe 'glance::registry::db' do + + shared_examples 'glance::registry::db' do + context 'with default parameters' do + it { is_expected.to contain_glance_registry_config('database/connection').with_value('sqlite:///var/lib/glance/glance.sqlite').with_secret(true) } + it { is_expected.to contain_glance_registry_config('database/idle_timeout').with_value('') } + it { is_expected.to contain_glance_registry_config('database/min_pool_size').with_value('') } + it { is_expected.to contain_glance_registry_config('database/max_retries').with_value('') } + it { is_expected.to contain_glance_registry_config('database/retry_interval').with_value('') } + it { is_expected.to contain_glance_registry_config('database/max_pool_size').with_value('') } + it { is_expected.to contain_glance_registry_config('database/max_overflow').with_value('') } + end + + context 'with specific parameters' do + let :params do + { :database_connection => 'mysql+pymysql://glance_registry:glance@localhost/glance', + :database_idle_timeout => '3601', + :database_min_pool_size => '2', + :database_max_retries => '11', + :database_retry_interval => '11', + :database_max_pool_size => '11', + :database_max_overflow => '21', + } + end + + it { is_expected.to 
contain_glance_registry_config('database/connection').with_value('mysql+pymysql://glance_registry:glance@localhost/glance').with_secret(true) } + it { is_expected.to contain_glance_registry_config('database/idle_timeout').with_value('3601') } + it { is_expected.to contain_glance_registry_config('database/min_pool_size').with_value('2') } + it { is_expected.to contain_glance_registry_config('database/max_retries').with_value('11') } + it { is_expected.to contain_glance_registry_config('database/retry_interval').with_value('11') } + it { is_expected.to contain_glance_registry_config('database/max_pool_size').with_value('11') } + it { is_expected.to contain_glance_registry_config('database/max_overflow').with_value('21') } + end + + context 'with MySQL-python library as backend package' do + let :params do + { :database_connection => 'mysql://glance_registry:glance@localhost/glance' } + end + + it { is_expected.to contain_package('python-mysqldb').with(:ensure => 'present') } + end + + context 'with incorrect pymysql database_connection string' do + let :params do + { :database_connection => 'foo+pymysql://glance_registry:glance@localhost/glance', } + end + + it_raises 'a Puppet::Error', /validate_re/ + end + + end + + context 'on Debian platforms' do + let :facts do + @default_facts.merge({ :osfamily => 'Debian' }) + end + + it_configures 'glance::registry::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://glance_registry:glance@localhost/glance', } + end + + it 'install the proper backend package' do + is_expected.to contain_package('glance-backend-package').with( + :ensure => 'present', + :name => 'python-pymysql', + :tag => 'openstack' + ) + end + end + end + + context 'on Redhat platforms' do + let :facts do + @default_facts.merge({ :osfamily => 'RedHat' }) + end + + it_configures 'glance::registry::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://glance_registry:glance@localhost/glance', } + end + + it { is_expected.not_to contain_package('glance-backend-package') } + end + end + +end + diff --git a/glance/spec/classes/glance_registry_spec.rb b/glance/spec/classes/glance_registry_spec.rb index 392e470eb..f1ad7046c 100644 --- a/glance/spec/classes/glance_registry_spec.rb +++ b/glance/spec/classes/glance_registry_spec.rb @@ -18,8 +18,6 @@ :workers => facts[:processorcount], :log_file => '/var/log/glance/registry.log', :log_dir => '/var/log/glance', - :database_connection => 'sqlite:///var/lib/glance/glance.sqlite', - :database_idle_timeout => '3600', :enabled => true, :manage_service => true, :auth_type => 'keystone', @@ -32,6 +30,9 @@ :keystone_password => 'ChangeMe', :purge_config => false, :sync_db => true, + :os_region_name => 'RegionOne', + :signing_dir => '', + :token_cache_time => '', } end @@ -41,8 +42,6 @@ :bind_host => '127.0.0.1', :bind_port => '9111', :workers => '5', - :database_connection => 'sqlite:///var/lib/glance.sqlite', - :database_idle_timeout => '360', :enabled => false, :auth_type => 'keystone', :auth_host => '127.0.0.1', @@ -53,6 +52,9 @@ :keystone_user => 'admin', :keystone_password => 'ChangeMe', :sync_db => false, + :os_region_name => 'RegionOne2', + :signing_dir => '/path/to/dir', + :token_cache_time => '300', } ].each do |param_set| @@ -66,6 +68,7 @@ end it { is_expected.to contain_class 'glance::registry' } + it { is_expected.to contain_class 'glance::registry::db' } it { is_expected.to contain_class 'glance::registry::logging' } it { is_expected.to 
contain_service('glance-registry').with( @@ -92,12 +95,6 @@ ].each do |config| is_expected.to contain_glance_registry_config("DEFAULT/#{config}").with_value(param_hash[config.intern]) end - [ - 'database_connection', - 'database_idle_timeout', - ].each do |config| - is_expected.to contain_glance_registry_config("database/#{config.gsub(/database_/,'')}").with_value(param_hash[config.intern]) - end [ 'auth_host', 'auth_port', @@ -112,6 +109,15 @@ is_expected.to contain_glance_registry_config("keystone_authtoken/admin_user").with_value(param_hash[:keystone_user]) is_expected.to contain_glance_registry_config("keystone_authtoken/admin_password").with_value(param_hash[:keystone_password]) is_expected.to contain_glance_registry_config("keystone_authtoken/admin_password").with_value(param_hash[:keystone_password]).with_secret(true) + is_expected.to contain_glance_registry_config("keystone_authtoken/token_cache_time").with_value(param_hash[:token_cache_time]) + is_expected.to contain_glance_registry_config("keystone_authtoken/signing_dir").with_value(param_hash[:signing_dir]) + end + end + it 'is_expected.to lay down default glance_store registry config' do + [ + 'os_region_name', + ].each do |config| + is_expected.to contain_glance_registry_config("glance_store/#{config}").with_value(param_hash[config.intern]) end end end diff --git a/gnocchi/CHANGELOG.md b/gnocchi/CHANGELOG.md new file mode 100644 index 000000000..9db850184 --- /dev/null +++ b/gnocchi/CHANGELOG.md @@ -0,0 +1,4 @@ +##2015-11-25 - 7.0.0 +###Summary + +- Initial release of the puppet-gnocchi module diff --git a/gnocchi/README.md b/gnocchi/README.md index 00ac0ed86..30a3a78e4 100644 --- a/gnocchi/README.md +++ b/gnocchi/README.md @@ -1,6 +1,8 @@ puppet-gnocchi ============== +7.0.0 - 2015.2.0 - Liberty + #### Table of Contents 1. [Overview - What is the gnocchi module?](#overview) diff --git a/gnocchi/examples/site.pp b/gnocchi/examples/site.pp index 0b9ded95c..d7e00f53b 100644 --- a/gnocchi/examples/site.pp +++ b/gnocchi/examples/site.pp @@ -17,3 +17,13 @@ identity_uri => 'https://identity.openstack.org:35357', keystone_password => 'verysecrete' } + +class { '::gnocchi::statsd': + resource_id => '07f26121-5777-48ba-8a0b-d70468133dd9', + user_id => 'f81e9b1f-9505-4298-bc33-43dfbd9a973b', + project_id => '203ef419-e73f-4b8a-a73f-3d599a72b18d', + archive_policy_name => 'high', + flush_delay => '100', +} + +include ::gnocchi::client diff --git a/gnocchi/manifests/api.pp b/gnocchi/manifests/api.pp index 9c6ced6dc..be019fcc4 100644 --- a/gnocchi/manifests/api.pp +++ b/gnocchi/manifests/api.pp @@ -76,7 +76,6 @@ $service_name = $::gnocchi::params::api_service_name, ) inherits gnocchi::params { - include ::gnocchi::params include ::gnocchi::policy validate_string($keystone_password) @@ -120,6 +119,7 @@ tag => ['gnocchi-service', 'gnocchi-db-sync-service'], } Class['gnocchi::db'] -> Service[$service_name] + Service <<| title == 'httpd' |>> { tag +> 'gnocchi-db-sync-service' } # we need to make sure gnocchi-api/eventlet is stopped before trying to start apache Service['gnocchi-api'] -> Service[$service_name] diff --git a/gnocchi/manifests/client.pp b/gnocchi/manifests/client.pp new file mode 100644 index 000000000..f7f209006 --- /dev/null +++ b/gnocchi/manifests/client.pp @@ -0,0 +1,21 @@ +# +# Installs the gnocchi python library. +# +# == parameters +# [*ensure*] +# ensure state for package. 
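The new gnocchi::client class (declared just below) can simply be included, as the example site.pp above does, or declared with an explicit package state; the 'latest' value here is illustrative:

    # Track the newest packaged client instead of the default 'present'
    class { '::gnocchi::client':
      ensure => 'latest',
    }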
+# +class gnocchi::client ( + $ensure = 'present' +) { + + include ::gnocchi::params + + package { 'python-gnocchiclient': + ensure => $ensure, + name => $::gnocchi::params::client_package_name, + tag => 'openstack', + } + +} + diff --git a/gnocchi/manifests/init.pp b/gnocchi/manifests/init.pp index f0c1cb00e..c64317189 100644 --- a/gnocchi/manifests/init.pp +++ b/gnocchi/manifests/init.pp @@ -10,7 +10,8 @@ # # [*log_dir*] # (optional) Directory where logs should be stored. -# If set to boolean false, it will not log to any directory. +# If set to boolean false or the $::os_service_default, it will not log to +# any directory. # Defaults to undef # # [*state_path*] @@ -52,6 +53,7 @@ $debug = undef, $use_syslog = undef, $use_stderr = undef, + $log_dir = undef, $log_facility = undef, $database_connection = undef, ) inherits gnocchi::params { diff --git a/gnocchi/manifests/logging.pp b/gnocchi/manifests/logging.pp index d9c1c0721..8835bd4c7 100644 --- a/gnocchi/manifests/logging.pp +++ b/gnocchi/manifests/logging.pp @@ -6,252 +6,143 @@ # # [*verbose*] # (Optional) Should the daemons log verbose messages -# Defaults to 'false' +# Defaults to $::os_service_default # # [*debug*] # (Optional) Should the daemons log debug messages -# Defaults to 'false' +# Defaults to $::os_service_default # # [*use_syslog*] # (Optional) Use syslog for logging. -# Defaults to 'false' +# Defaults to $::os_service_default # # [*use_stderr*] # (optional) Use stderr for logging -# Defaults to 'true' +# Defaults to $::os_service_default # # [*log_facility*] # (Optional) Syslog facility to receive log lines. -# Defaults to 'LOG_USER' +# Defaults to $::os_service_default # # [*log_dir*] # (optional) Directory where logs should be stored. -# If set to boolean false, it will not log to any directory. +# If set to boolean false or the $::os_service_default, it will not log to +# any directory. # Defaults to '/var/log/gnocchi' # # [*logging_context_format_string*] -# (optional) Format string to use for log messages with context. -# Defaults to undef. -# Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ +# (optional) format string to use for log messages with context. +# Defaults to $::os_service_default +# example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [%(request_id)s %(user_identity)s] %(instance)s%(message)s' # # [*logging_default_format_string*] -# (optional) Format string to use for log messages without context. -# Defaults to undef. -# Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ +# (optional) format string to use for log messages without context. +# Defaults to $::os_service_default +# example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [-] %(instance)s%(message)s' # # [*logging_debug_format_suffix*] -# (optional) Formatted data to append to log format when level is DEBUG. -# Defaults to undef. -# Example: '%(funcName)s %(pathname)s:%(lineno)d' +# (optional) formatted data to append to log format when level is debug. +# Defaults to $::os_service_default +# example: '%(funcname)s %(pathname)s:%(lineno)d' # # [*logging_exception_prefix*] -# (optional) Prefix each line of exception output with this format. -# Defaults to undef. -# Example: '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s' +# (optional) prefix each line of exception output with this format. 
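Usage of the gnocchi::client class defined just above is a one-liner; a minimal sketch, where 'latest' is only an illustrative value for the package ensure:

include ::gnocchi::client      # default, installs the package with ensure => 'present'

# or pin the package state explicitly:
class { '::gnocchi::client':
  ensure => 'latest',          # illustrative value; any valid package ensure works
}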
+# Defaults to $::os_service_default +# example: '%(asctime)s.%(msecs)03d %(process)d trace %(name)s %(instance)s' # # [*log_config_append*] -# The name of an additional logging configuration file. -# Defaults to undef. -# See https://docs.python.org/2/howto/logging.html +# the name of an additional logging configuration file. +# Defaults to $::os_service_default +# see https://docs.python.org/2/howto/logging.html # # [*default_log_levels*] -# (optional) Hash of logger (keys) and level (values) pairs. -# Defaults to undef. -# Example: -# { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', -# 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', -# 'iso8601' => 'WARN', -# 'requests.packages.urllib3.connectionpool' => 'WARN' } +# (optional) hash of logger (keys) and level (values) pairs. +# Defaults to $::os_service_default +# example: +# { 'amqp' => 'warn', 'amqplib' => 'warn', 'boto' => 'warn', +# 'qpid' => 'warn', 'sqlalchemy' => 'warn', 'suds' => 'info', +# 'iso8601' => 'warn', +# 'requests.packages.urllib3.connectionpool' => 'warn' } # # [*publish_errors*] -# (optional) Publish error events (boolean value). -# Defaults to undef (false if unconfigured). +# (optional) publish error events (boolean value). +# Defaults to $::os_service_default # # [*fatal_deprecations*] -# (optional) Make deprecations fatal (boolean value) -# Defaults to undef (false if unconfigured). +# (optional) make deprecations fatal (boolean value) +# Defaults to $::os_service_default # # [*instance_format*] -# (optional) If an instance is passed with the log message, format it +# (optional) if an instance is passed with the log message, format it # like this (string value). -# Defaults to undef. -# Example: '[instance: %(uuid)s] ' +# Defaults to $::os_service_default +# example: '[instance: %(uuid)s] ' # # [*instance_uuid_format*] -# (optional) If an instance UUID is passed with the log message, format +# (optional) if an instance uuid is passed with the log message, format # it like this (string value). -# Defaults to undef. -# Example: instance_uuid_format='[instance: %(uuid)s] ' +# Defaults to $::os_service_default +# example: instance_uuid_format='[instance: %(uuid)s] ' # [*log_date_format*] -# (optional) Format string for %%(asctime)s in log records. -# Defaults to undef. -# Example: 'Y-%m-%d %H:%M:%S' +# (optional) format string for %%(asctime)s in log records. 
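As a usage illustration for the default_log_levels documentation above: the hash keeps the upper-case oslo level names, and the class body further down flattens it with join_keys_to_values, sort and join before writing it to gnocchi.conf. A minimal sketch:

# Ends up in gnocchi.conf as:
#   default_log_levels = amqp=WARN,iso8601=WARN,sqlalchemy=WARN
class { '::gnocchi::logging':
  debug              => true,
  default_log_levels => {
    'amqp'       => 'WARN',
    'iso8601'    => 'WARN',
    'sqlalchemy' => 'WARN',
  },
}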
+# Defaults to $::os_service_default +# example: 'y-%m-%d %h:%m:%s' class gnocchi::logging( - $use_syslog = false, - $use_stderr = true, - $log_facility = 'LOG_USER', + $use_syslog = $::os_service_default, + $use_stderr = $::os_service_default, + $log_facility = $::os_service_default, $log_dir = '/var/log/gnocchi', - $verbose = false, - $debug = false, - $logging_context_format_string = undef, - $logging_default_format_string = undef, - $logging_debug_format_suffix = undef, - $logging_exception_prefix = undef, - $log_config_append = undef, - $default_log_levels = undef, - $publish_errors = undef, - $fatal_deprecations = undef, - $instance_format = undef, - $instance_uuid_format = undef, - $log_date_format = undef, + $verbose = $::os_service_default, + $debug = $::os_service_default, + $logging_context_format_string = $::os_service_default, + $logging_default_format_string = $::os_service_default, + $logging_debug_format_suffix = $::os_service_default, + $logging_exception_prefix = $::os_service_default, + $log_config_append = $::os_service_default, + $default_log_levels = $::os_service_default, + $publish_errors = $::os_service_default, + $fatal_deprecations = $::os_service_default, + $instance_format = $::os_service_default, + $instance_uuid_format = $::os_service_default, + $log_date_format = $::os_service_default, ) { - # NOTE(spredzy): In order to keep backward compatibility we rely on the pick function + # note(spredzy): in order to keep backward compatibility we rely on the pick function # to use gnocchi:: first then gnocchi::logging::. - $use_syslog_real = pick($::gnocchi::use_syslog,$use_syslog) - $use_stderr_real = pick($::gnocchi::use_stderr,$use_stderr) + $use_syslog_real = pick($::gnocchi::use_syslog,$use_syslog) + $use_stderr_real = pick($::gnocchi::use_stderr,$use_stderr) $log_facility_real = pick($::gnocchi::log_facility,$log_facility) - $log_dir_real = pick($::gnocchi::log_dir,$log_dir) - $verbose_real = pick($::gnocchi::verbose,$verbose) - $debug_real = pick($::gnocchi::debug,$debug) + $log_dir_real = pick($::gnocchi::log_dir,$log_dir) + $verbose_real = pick($::gnocchi::verbose,$verbose) + $debug_real = pick($::gnocchi::debug,$debug) + + if is_service_default($default_log_levels) { + $default_log_levels_real = $default_log_levels + } else { + $default_log_levels_real = join(sort(join_keys_to_values($default_log_levels, '=')), ',') + } gnocchi_config { - 'DEFAULT/debug' : value => $debug_real; - 'DEFAULT/verbose' : value => $verbose_real; - 'DEFAULT/use_stderr' : value => $use_stderr_real; - 'DEFAULT/use_syslog' : value => $use_syslog_real; - 'DEFAULT/log_dir' : value => $log_dir_real; - 'DEFAULT/syslog_log_facility': value => $log_facility_real; + 'DEFAULT/debug' : value => $debug_real; + 'DEFAULT/verbose' : value => $verbose_real; + 'DEFAULT/use_stderr' : value => $use_stderr_real; + 'DEFAULT/use_syslog' : value => $use_syslog_real; + 'DEFAULT/log_dir' : value => $log_dir_real; + 'DEFAULT/syslog_log_facility' : value => $log_facility_real; + 'DEFAULT/logging_context_format_string' : value => $logging_context_format_string; + 'DEFAULT/logging_default_format_string' : value => $logging_default_format_string; + 'DEFAULT/logging_debug_format_suffix' : value => $logging_debug_format_suffix; + 'DEFAULT/logging_exception_prefix' : value => $logging_exception_prefix; + 'DEFAULT/log_config_append' : value => $log_config_append; + 'DEFAULT/default_log_levels' : value => $default_log_levels_real; + 'DEFAULT/publish_errors' : value => $publish_errors; + 'DEFAULT/fatal_deprecations' : 
value => $fatal_deprecations; + 'DEFAULT/instance_format' : value => $instance_format; + 'DEFAULT/instance_uuid_format' : value => $instance_uuid_format; + 'DEFAULT/log_date_format' : value => $log_date_format; } - - if $logging_context_format_string { - gnocchi_config { - 'DEFAULT/logging_context_format_string' : - value => $logging_context_format_string; - } - } - else { - gnocchi_config { - 'DEFAULT/logging_context_format_string' : ensure => absent; - } - } - - if $logging_default_format_string { - gnocchi_config { - 'DEFAULT/logging_default_format_string' : - value => $logging_default_format_string; - } - } - else { - gnocchi_config { - 'DEFAULT/logging_default_format_string' : ensure => absent; - } - } - - if $logging_debug_format_suffix { - gnocchi_config { - 'DEFAULT/logging_debug_format_suffix' : - value => $logging_debug_format_suffix; - } - } - else { - gnocchi_config { - 'DEFAULT/logging_debug_format_suffix' : ensure => absent; - } - } - - if $logging_exception_prefix { - gnocchi_config { - 'DEFAULT/logging_exception_prefix' : value => $logging_exception_prefix; - } - } - else { - gnocchi_config { - 'DEFAULT/logging_exception_prefix' : ensure => absent; - } - } - - if $log_config_append { - gnocchi_config { - 'DEFAULT/log_config_append' : value => $log_config_append; - } - } - else { - gnocchi_config { - 'DEFAULT/log_config_append' : ensure => absent; - } - } - - if $default_log_levels { - gnocchi_config { - 'DEFAULT/default_log_levels' : - value => join(sort(join_keys_to_values($default_log_levels, '=')), ','); - } - } - else { - gnocchi_config { - 'DEFAULT/default_log_levels' : ensure => absent; - } - } - - if $publish_errors { - gnocchi_config { - 'DEFAULT/publish_errors' : value => $publish_errors; - } - } - else { - gnocchi_config { - 'DEFAULT/publish_errors' : ensure => absent; - } - } - - if $fatal_deprecations { - gnocchi_config { - 'DEFAULT/fatal_deprecations' : value => $fatal_deprecations; - } - } - else { - gnocchi_config { - 'DEFAULT/fatal_deprecations' : ensure => absent; - } - } - - if $instance_format { - gnocchi_config { - 'DEFAULT/instance_format' : value => $instance_format; - } - } - else { - gnocchi_config { - 'DEFAULT/instance_format' : ensure => absent; - } - } - - if $instance_uuid_format { - gnocchi_config { - 'DEFAULT/instance_uuid_format' : value => $instance_uuid_format; - } - } - else { - gnocchi_config { - 'DEFAULT/instance_uuid_format' : ensure => absent; - } - } - - if $log_date_format { - gnocchi_config { - 'DEFAULT/log_date_format' : value => $log_date_format; - } - } - else { - gnocchi_config { - 'DEFAULT/log_date_format' : ensure => absent; - } - } - - } diff --git a/gnocchi/manifests/params.pp b/gnocchi/manifests/params.pp index d97aa34aa..0308dd3e2 100644 --- a/gnocchi/manifests/params.pp +++ b/gnocchi/manifests/params.pp @@ -10,6 +10,9 @@ $api_service_name = 'openstack-gnocchi-api' $indexer_package_name = 'openstack-gnocchi-indexer-sqlalchemy' $carbonara_package_name = 'openstack-gnocchi-carbonara' + $statsd_package_name = 'openstack-gnocchi-statsd' + $statsd_service_name = 'openstack-gnocchi-statsd' + $client_package_name = 'python-gnocchiclient' $gnocchi_wsgi_script_path = '/var/www/cgi-bin/gnocchi' $gnocchi_wsgi_script_source = '/usr/lib/python2.7/site-packages/gnocchi/rest/app.wsgi' } @@ -20,6 +23,9 @@ $api_service_name = 'gnocchi-api' $indexer_package_name = 'gnocchi-indexer-sqlalchemy' $carbonara_package_name = 'gnocchi-carbonara' + $statsd_package_name = 'gnocchi-statsd' + $statsd_service_name = 'gnocchi-statsd' + 
$client_package_name = 'python-gnocchiclient' $gnocchi_wsgi_script_path = '/usr/lib/cgi-bin/gnocchi' $gnocchi_wsgi_script_source = '/usr/share/gnocchi-common/app.wsgi' } diff --git a/gnocchi/manifests/statsd.pp b/gnocchi/manifests/statsd.pp new file mode 100644 index 000000000..afbc25733 --- /dev/null +++ b/gnocchi/manifests/statsd.pp @@ -0,0 +1,78 @@ +# Installs & configure the gnocchi statsd service +# +# == Parameters +# +# [*resource_id*] +# (required) Resource UUID to use to identify statsd in Gnocchi. +# +# [*user_id*] +# (required) User UUID to use to identify statsd in Gnocchi. +# +# [*project_id*] +# (required) Project UUID to use to identify statsd in Gnocchi. +# +# [*flush_delay*] +# (required) Delay between flushes. +# +# [*enabled*] +# (optional) Should the service be enabled. +# Defaults to true +# +# [*package_ensure*] +# (optional) ensure state for package. +# Defaults to 'present' +# +# [*manage_service*] +# (optional) Whether the service should be managed by Puppet. +# Defaults to true. +# +# [*archive_policy_name*] +# (optional) Archive policy name to use when creating metrics. +# Defaults to undef. +# +class gnocchi::statsd ( + $resource_id, + $user_id, + $project_id, + $flush_delay, + $archive_policy_name = undef, + $manage_service = true, + $enabled = true, + $package_ensure = 'present', +) inherits gnocchi::params { + + Gnocchi_config<||> ~> Service['gnocchi-statsd'] + Package['gnocchi-statsd'] -> Service['gnocchi-statsd'] + + package { 'gnocchi-statsd': + ensure => $package_ensure, + name => $::gnocchi::params::statsd_package_name, + tag => ['openstack', 'gnocchi-package'], + } + + if $manage_service { + if $enabled { + $service_ensure = 'running' + } else { + $service_ensure = 'stopped' + } + } + + service { 'gnocchi-statsd': + ensure => $service_ensure, + name => $::gnocchi::params::statsd_service_name, + enable => $enabled, + hasstatus => true, + hasrestart => true, + tag => ['gnocchi-service', 'gnocchi-db-sync-service'], + } + + gnocchi_config { + 'statsd/resource_id' : value => $resource_id; + 'statsd/user_id' : value => $user_id; + 'statsd/project_id' : value => $project_id; + 'statsd/archive_policy_name' : value => $archive_policy_name; + 'statsd/flush_delay' : value => $flush_delay; + } + +} diff --git a/gnocchi/metadata.json b/gnocchi/metadata.json index 739ea7044..69bfb42b0 100644 --- a/gnocchi/metadata.json +++ b/gnocchi/metadata.json @@ -1,6 +1,6 @@ { "name": "openstack-gnocchi", - "version": "5.0.0", + "version": "7.0.0", "author": "OpenStack Contributors", "summary": "Puppet module for OpenStack Gnocchi", "license": "Apache-2.0", @@ -32,8 +32,8 @@ ], "dependencies": [ { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=7.0.0 <8.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=7.0.0 <8.0.0" } ] } diff --git a/gnocchi/spec/acceptance/basic_gnocchi_spec.rb b/gnocchi/spec/acceptance/basic_gnocchi_spec.rb index 549b38971..b93abe320 100644 --- a/gnocchi/spec/acceptance/basic_gnocchi_spec.rb +++ b/gnocchi/spec/acceptance/basic_gnocchi_spec.rb @@ -40,6 +40,14 @@ class { '::gnocchi::storage::file': } class { '::gnocchi::wsgi::apache': ssl => false, } + class { '::gnocchi::statsd': + archive_policy_name => 'high', + flush_delay => 
'100', + resource_id => '07f26121-5777-48ba-8a0b-d70468133dd9', + user_id => 'f81e9b1f-9505-4298-bc33-43dfbd9a973b', + project_id => '203ef419-e73f-4b8a-a73f-3d599a72b18d', + } + class { '::gnocchi::client': } } } EOS diff --git a/gnocchi/spec/classes/gnocchi_api_spec.rb b/gnocchi/spec/classes/gnocchi_api_spec.rb index 6436f2363..eaaee7f78 100644 --- a/gnocchi/spec/classes/gnocchi_api_spec.rb +++ b/gnocchi/spec/classes/gnocchi_api_spec.rb @@ -126,12 +126,14 @@ class { 'gnocchi': }" context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian', + @default_facts.merge({ + :osfamily => 'Debian', :operatingsystem => 'Debian', :operatingsystemrelease => '8.0', :concat_basedir => '/var/lib/puppet/concat', :fqdn => 'some.host.tld', - :processorcount => 2 } + :processorcount => 2, + }) end let :platform_params do @@ -144,12 +146,14 @@ class { 'gnocchi': }" context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat', + @default_facts.merge({ + :osfamily => 'RedHat', :operatingsystem => 'RedHat', :operatingsystemrelease => '7.1', :fqdn => 'some.host.tld', :concat_basedir => '/var/lib/puppet/concat', - :processorcount => 2 } + :processorcount => 2, + }) end let :platform_params do @@ -162,7 +166,7 @@ class { 'gnocchi': }" describe 'with custom auth_uri' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end before do params.merge!({ @@ -176,10 +180,10 @@ class { 'gnocchi': }" describe "with custom keystone identity_uri" do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end before do - params.merge!({ + params.merge!({ :keystone_identity_uri => 'https://foo.bar:1234/', }) end @@ -190,10 +194,10 @@ class { 'gnocchi': }" describe "with custom keystone identity_uri and auth_uri" do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end before do - params.merge!({ + params.merge!({ :keystone_identity_uri => 'https://foo.bar:35357/', :keystone_auth_uri => 'https://foo.bar:5000/v2.0/', }) diff --git a/gnocchi/spec/classes/gnocchi_client_spec.rb b/gnocchi/spec/classes/gnocchi_client_spec.rb new file mode 100644 index 000000000..34248061e --- /dev/null +++ b/gnocchi/spec/classes/gnocchi_client_spec.rb @@ -0,0 +1,33 @@ +require 'spec_helper' + +describe 'gnocchi::client' do + + shared_examples_for 'gnocchi client' do + + it { is_expected.to contain_class('gnocchi::params') } + + it 'installs gnocchi client package' do + is_expected.to contain_package('python-gnocchiclient').with( + :ensure => 'present', + :name => 'python-gnocchiclient', + :tag => 'openstack', + ) + end + end + + context 'on Debian platforms' do + let :facts do + @default_facts.merge({ :osfamily => 'Debian' }) + end + + it_configures 'gnocchi client' + end + + context 'on RedHat platforms' do + let :facts do + @default_facts.merge({ :osfamily => 'RedHat' }) + end + + it_configures 'gnocchi client' + end +end diff --git a/gnocchi/spec/classes/gnocchi_db_mysql_spec.rb b/gnocchi/spec/classes/gnocchi_db_mysql_spec.rb index 7cfdc10be..15ba863e8 100644 --- a/gnocchi/spec/classes/gnocchi_db_mysql_spec.rb +++ b/gnocchi/spec/classes/gnocchi_db_mysql_spec.rb @@ -10,7 +10,7 @@ end let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :params do diff --git a/gnocchi/spec/classes/gnocchi_db_postgresql_spec.rb b/gnocchi/spec/classes/gnocchi_db_postgresql_spec.rb index f80dab7da..2d44a5536 100644 --- a/gnocchi/spec/classes/gnocchi_db_postgresql_spec.rb +++ 
b/gnocchi/spec/classes/gnocchi_db_postgresql_spec.rb @@ -12,11 +12,11 @@ context 'on a RedHat osfamily' do let :facts do - { - :osfamily => 'RedHat', - :operatingsystemrelease => '7.0', - :concat_basedir => '/var/lib/puppet/concat' - } + @default_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7.0', + :concat_basedir => '/var/lib/puppet/concat', + }) end context 'with only required parameters' do @@ -34,12 +34,12 @@ context 'on a Debian osfamily' do let :facts do - { + @default_facts.merge({ :operatingsystemrelease => '7.8', :operatingsystem => 'Debian', :osfamily => 'Debian', - :concat_basedir => '/var/lib/puppet/concat' - } + :concat_basedir => '/var/lib/puppet/concat', + }) end context 'with only required parameters' do diff --git a/gnocchi/spec/classes/gnocchi_db_spec.rb b/gnocchi/spec/classes/gnocchi_db_spec.rb index 433870285..bf0fa1a87 100644 --- a/gnocchi/spec/classes/gnocchi_db_spec.rb +++ b/gnocchi/spec/classes/gnocchi_db_spec.rb @@ -48,10 +48,11 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian', - :operatingsystem => 'Debian', + @default_facts.merge({ + :osfamily => 'Debian', + :operatingsystem => 'Debian', :operatingsystemrelease => 'jessie', - } + }) end let :platform_params do @@ -63,9 +64,10 @@ context 'on Redhat platforms' do let :facts do - { :osfamily => 'RedHat', + @default_facts.merge({ + :osfamily => 'RedHat', :operatingsystemrelease => '7.1', - } + }) end let :platform_params do diff --git a/gnocchi/spec/classes/gnocchi_init_spec.rb b/gnocchi/spec/classes/gnocchi_init_spec.rb index 1f2edf9e5..8231a9a4b 100644 --- a/gnocchi/spec/classes/gnocchi_init_spec.rb +++ b/gnocchi/spec/classes/gnocchi_init_spec.rb @@ -21,8 +21,10 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian', - :operatingsystem => 'Debian' } + @default_facts.merge({ + :osfamily => 'Debian', + :operatingsystem => 'Debian', + }) end let :platform_params do @@ -34,7 +36,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/gnocchi/spec/classes/gnocchi_logging_spec.rb b/gnocchi/spec/classes/gnocchi_logging_spec.rb index b8d08b7c7..662034135 100644 --- a/gnocchi/spec/classes/gnocchi_logging_spec.rb +++ b/gnocchi/spec/classes/gnocchi_logging_spec.rb @@ -20,23 +20,23 @@ 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', 'iso8601' => 'WARN', 'requests.packages.urllib3.connectionpool' => 'WARN' }, - :fatal_deprecations => true, - :instance_format => '[instance: %(uuid)s] ', - :instance_uuid_format => '[instance: %(uuid)s] ', - :log_date_format => '%Y-%m-%d %H:%M:%S', - :use_syslog => true, - :use_stderr => false, - :log_facility => 'LOG_FOO', - :log_dir => '/var/log', - :verbose => true, - :debug => true, + :fatal_deprecations => true, + :instance_format => '[instance: %(uuid)s] ', + :instance_uuid_format => '[instance: %(uuid)s] ', + :log_date_format => '%Y-%m-%d %H:%M:%S', + :use_syslog => true, + :use_stderr => false, + :log_facility => 'LOG_FOO', + :log_dir => '/tmp/gnocchi', + :verbose => true, + :debug => true, } end shared_examples_for 'gnocchi-logging' do context 'with basic logging options and default settings' do - it_configures 'basic default logging settings' + it_configures 'basic default logging settings' end context 'with basic logging options and non-default settings' do @@ -56,12 +56,13 @@ end shared_examples 'basic default logging settings' do - it 'configures gnocchi logging settins with default values' 
do - is_expected.to contain_gnocchi_config('DEFAULT/use_syslog').with(:value => 'false') - is_expected.to contain_gnocchi_config('DEFAULT/use_stderr').with(:value => 'true') + it 'configures gnocchi logging settings with default values' do + is_expected.to contain_gnocchi_config('DEFAULT/use_syslog').with(:value => '') + is_expected.to contain_gnocchi_config('DEFAULT/use_stderr').with(:value => '') + is_expected.to contain_gnocchi_config('DEFAULT/syslog_log_facility').with(:value => '') is_expected.to contain_gnocchi_config('DEFAULT/log_dir').with(:value => '/var/log/gnocchi') - is_expected.to contain_gnocchi_config('DEFAULT/verbose').with(:value => 'false') - is_expected.to contain_gnocchi_config('DEFAULT/debug').with(:value => 'false') + is_expected.to contain_gnocchi_config('DEFAULT/verbose').with(:value => '') + is_expected.to contain_gnocchi_config('DEFAULT/debug').with(:value => '') end end @@ -70,7 +71,7 @@ is_expected.to contain_gnocchi_config('DEFAULT/use_syslog').with(:value => 'true') is_expected.to contain_gnocchi_config('DEFAULT/use_stderr').with(:value => 'false') is_expected.to contain_gnocchi_config('DEFAULT/syslog_log_facility').with(:value => 'LOG_FOO') - is_expected.to contain_gnocchi_config('DEFAULT/log_dir').with(:value => '/var/log') + is_expected.to contain_gnocchi_config('DEFAULT/log_dir').with(:value => '/tmp/gnocchi') is_expected.to contain_gnocchi_config('DEFAULT/verbose').with(:value => 'true') is_expected.to contain_gnocchi_config('DEFAULT/debug').with(:value => 'true') end @@ -112,7 +113,6 @@ end end - shared_examples_for 'logging params unset' do [ :logging_context_format_string, :logging_default_format_string, :logging_debug_format_suffix, :logging_exception_prefix, @@ -120,13 +120,13 @@ :default_log_levels, :fatal_deprecations, :instance_format, :instance_uuid_format, :log_date_format, ].each { |param| - it { is_expected.to contain_gnocchi_config("DEFAULT/#{param}").with_ensure('absent') } + it { is_expected.to contain_gnocchi_config("DEFAULT/#{param}").with(:value => '') } } end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'gnocchi-logging' @@ -134,7 +134,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'gnocchi-logging' diff --git a/gnocchi/spec/classes/gnocchi_policy_spec.rb b/gnocchi/spec/classes/gnocchi_policy_spec.rb index 0e5b2f62d..c8a1cf2ee 100644 --- a/gnocchi/spec/classes/gnocchi_policy_spec.rb +++ b/gnocchi/spec/classes/gnocchi_policy_spec.rb @@ -25,7 +25,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'gnocchi policies' @@ -33,7 +33,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'gnocchi policies' diff --git a/gnocchi/spec/classes/gnocchi_statsd_spec.rb b/gnocchi/spec/classes/gnocchi_statsd_spec.rb new file mode 100644 index 000000000..afcf19ad2 --- /dev/null +++ b/gnocchi/spec/classes/gnocchi_statsd_spec.rb @@ -0,0 +1,107 @@ +require 'spec_helper' + +describe 'gnocchi::statsd' do + + let :params do + { :enabled => true, + :manage_service => true, + :resource_id => '07f26121-5777-48ba-8a0b-d70468133dd9', + :user_id => '07f26121-5777-48ba-8a0b-d70468133dd9', + :project_id => '07f26121-5777-48ba-8a0b-d70468133dd9', + :archive_policy_name => 'high', + 
:flush_delay => '200', + } + end + + shared_examples_for 'gnocchi-statsd' do + + it { is_expected.to contain_class('gnocchi::params') } + + it 'installs gnocchi-statsd package' do + is_expected.to contain_package('gnocchi-statsd').with( + :ensure => 'present', + :name => platform_params[:statsd_package_name], + :tag => ['openstack', 'gnocchi-package'], + ) + end + + it 'configures gnocchi statsd' do + is_expected.to contain_gnocchi_config('statsd/resource_id').with_value('07f26121-5777-48ba-8a0b-d70468133dd9') + is_expected.to contain_gnocchi_config('statsd/user_id').with_value('07f26121-5777-48ba-8a0b-d70468133dd9') + is_expected.to contain_gnocchi_config('statsd/project_id').with_value('07f26121-5777-48ba-8a0b-d70468133dd9') + is_expected.to contain_gnocchi_config('statsd/archive_policy_name').with_value('high') + is_expected.to contain_gnocchi_config('statsd/flush_delay').with_value('200') + end + + [{:enabled => true}, {:enabled => false}].each do |param_hash| + context "when service should be #{param_hash[:enabled] ? 'enabled' : 'disabled'}" do + before do + params.merge!(param_hash) + end + + it 'configures gnocchi-statsd service' do + is_expected.to contain_service('gnocchi-statsd').with( + :ensure => (params[:manage_service] && params[:enabled]) ? 'running' : 'stopped', + :name => platform_params[:statsd_service_name], + :enable => params[:enabled], + :hasstatus => true, + :hasrestart => true, + :tag => ['gnocchi-service', 'gnocchi-db-sync-service'], + ) + end + end + end + + context 'with disabled service managing' do + before do + params.merge!({ + :manage_service => false, + :enabled => false }) + end + + it 'configures gnocchi-statsd service' do + is_expected.to contain_service('gnocchi-statsd').with( + :ensure => nil, + :name => platform_params[:statsd_service_name], + :enable => false, + :hasstatus => true, + :hasrestart => true, + :tag => ['gnocchi-service', 'gnocchi-db-sync-service'], + ) + end + end + end + + context 'on Debian platforms' do + let :facts do + @default_facts.merge({ + :osfamily => 'Debian', + :operatingsystem => 'Debian', + }) + end + + let :platform_params do + { :statsd_package_name => 'gnocchi-statsd', + :statsd_service_name => 'gnocchi-statsd' } + end + + it_configures 'gnocchi-statsd' + end + + context 'on RedHat platforms' do + let :facts do + @default_facts.merge({ + :osfamily => 'RedHat', + :operatingsystem => 'RedHat', + }) + end + + let :platform_params do + { :statsd_package_name => 'openstack-gnocchi-statsd', + :statsd_service_name => 'openstack-gnocchi-statsd' } + end + + it_configures 'gnocchi-statsd' + end + +end diff --git a/gnocchi/spec/classes/gnocchi_storage_ceph_spec.rb b/gnocchi/spec/classes/gnocchi_storage_ceph_spec.rb index 0148f1b13..7bd3d7a81 100644 --- a/gnocchi/spec/classes/gnocchi_storage_ceph_spec.rb +++ b/gnocchi/spec/classes/gnocchi_storage_ceph_spec.rb @@ -27,9 +27,7 @@ context 'on Debian platforms' do let :facts do - { - :osfamily => 'Debian' - } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'gnocchi storage ceph' @@ -37,9 +35,7 @@ context 'on RedHat platforms' do let :facts do - { - :osfamily => 'RedHat' - } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'gnocchi storage ceph' diff --git a/gnocchi/spec/classes/gnocchi_storage_file_spec.rb b/gnocchi/spec/classes/gnocchi_storage_file_spec.rb index ee24ea9e1..8565d66f2 100644 --- a/gnocchi/spec/classes/gnocchi_storage_file_spec.rb +++ b/gnocchi/spec/classes/gnocchi_storage_file_spec.rb @@ -21,9 +21,7 @@ context 'on Debian platforms' 
do let :facts do - { - :osfamily => 'Debian' - } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'gnocchi storage file' @@ -31,9 +29,7 @@ context 'on RedHat platforms' do let :facts do - { - :osfamily => 'RedHat' - } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'gnocchi storage file' diff --git a/gnocchi/spec/classes/gnocchi_storage_spec.rb b/gnocchi/spec/classes/gnocchi_storage_spec.rb index 8fb6a266a..95d533aab 100644 --- a/gnocchi/spec/classes/gnocchi_storage_spec.rb +++ b/gnocchi/spec/classes/gnocchi_storage_spec.rb @@ -21,8 +21,10 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian', - :operatingsystem => 'Debian' } + @default_facts.merge({ + :osfamily => 'Debian', + :operatingsystem => 'Debian', + }) end let :platform_params do @@ -34,8 +36,10 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat', - :operatingsystem => 'RedHat' } + @default_facts.merge({ + :osfamily => 'RedHat', + :operatingsystem => 'RedHat', + }) end let :platform_params do diff --git a/gnocchi/spec/classes/gnocchi_storage_swift_spec.rb b/gnocchi/spec/classes/gnocchi_storage_swift_spec.rb index bd03fa0bf..92c9ec860 100644 --- a/gnocchi/spec/classes/gnocchi_storage_swift_spec.rb +++ b/gnocchi/spec/classes/gnocchi_storage_swift_spec.rb @@ -24,9 +24,7 @@ context 'on Debian platforms' do let :facts do - { - :osfamily => 'Debian' - } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'gnocchi storage swift' @@ -34,9 +32,7 @@ context 'on RedHat platforms' do let :facts do - { - :osfamily => 'RedHat' - } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'gnocchi storage swift' diff --git a/gnocchi/spec/classes/gnocchi_wsgi_apache_spec.rb b/gnocchi/spec/classes/gnocchi_wsgi_apache_spec.rb index a96d7cf0f..187e2cb47 100644 --- a/gnocchi/spec/classes/gnocchi_wsgi_apache_spec.rb +++ b/gnocchi/spec/classes/gnocchi_wsgi_apache_spec.rb @@ -3,11 +3,11 @@ describe 'gnocchi::wsgi::apache' do let :global_facts do - { + @default_facts.merge({ :processorcount => 42, :concat_basedir => '/var/lib/puppet/concat', - :fqdn => 'some.host.tld' - } + :fqdn => 'some.host.tld', + }) end shared_examples_for 'apache serving gnocchi with mod_wsgi' do diff --git a/gnocchi/spec/spec_helper.rb b/gnocchi/spec/spec_helper.rb index 3df4cede1..9bc7bcf96 100644 --- a/gnocchi/spec/spec_helper.rb +++ b/gnocchi/spec/spec_helper.rb @@ -5,6 +5,9 @@ RSpec.configure do |c| c.alias_it_should_behave_like_to :it_configures, 'configures' c.alias_it_should_behave_like_to :it_raises, 'raises' + c.before :each do + @default_facts = { :os_service_default => '' } + end end at_exit { RSpec::Puppet::Coverage.report! } diff --git a/heat/CHANGELOG.md b/heat/CHANGELOG.md index 9b9a1b34f..5fc6ac95a 100644 --- a/heat/CHANGELOG.md +++ b/heat/CHANGELOG.md @@ -1,3 +1,46 @@ +##2015-11-25 - 7.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Liberty. 
+ +####Backwards-incompatible changes +- change section name for AMQP qpid parameters +- change section name for AMQP rabbit parameters +- update rpc_backend default parameter +- cleanup configure_delegated_roles deprecated parameter + +####Features +- add support for RabbitMQ connection heartbeat +- keystone/auth: make service description configurable +- add tag to package and service resources +- add heat::config class +- expose RPC response timeout as a puppet parameter +- support setting instance_user to an empty string +- add heat::db::sync +- add an ability to manage use_stderr parameter +- reflect provider change in puppet-openstacklib +- put all the logging related parameters to the logging class +- add rabbit_ha_queues option +- improve heat::keystone::domain +- remove POSIX users, groups, and file modes +- use postgresql lib class for psycopg package +- move deps & external hooks into a standalone class +- introduce heat::db class +- make the role for heat_stack_user configurable +- allow to not manage Keystone domain +- add hooks for external install & svc management + +####Bugfixes +- rely on autorequire for config resource ordering +- fix up doc string for workers variable + +####Maintenance +- acceptance: enable debug & verbosity for OpenStack logs +- initial msync run for all Puppet OpenStack modules +- try to use zuul-cloner to prepare fixtures +- remove class_parameter_defaults puppet-lint check +- acceptance: use common bits from puppet-openstack-integration + ##2015-10-14 - 6.1.0 ###Summary diff --git a/heat/README.md b/heat/README.md index 7c0486994..aecc697bb 100644 --- a/heat/README.md +++ b/heat/README.md @@ -1,7 +1,7 @@ puppet-heat ============= -6.1.0 - 2015.1 - Kilo +7.0.0 - 2015.2 - Liberty #### Table of Contents diff --git a/heat/examples/site.pp b/heat/examples/site.pp index 0e2b79c4c..2a0f902cd 100644 --- a/heat/examples/site.pp +++ b/heat/examples/site.pp @@ -15,7 +15,7 @@ class { '::heat': # The keystone_password parameter is mandatory keystone_password => 'password', - sql_connection => 'mysql://heat:heat@localhost/heat' + sql_connection => 'mysql+pymysql://heat:heat@localhost/heat' } # Install heat-engine diff --git a/heat/manifests/db.pp b/heat/manifests/db.pp index 957aa4b11..2a4ea7a4f 100644 --- a/heat/manifests/db.pp +++ b/heat/manifests/db.pp @@ -10,28 +10,28 @@ # # [*database_idle_timeout*] # Timeout when db connections should be reaped. -# (Optional) Defaults to 3600. +# (Optional) Defaults to $::os_service_default. # # [*database_min_pool_size*] # Minimum number of SQL connections to keep open in a pool. -# (Optional) Defaults to 1. +# (Optional) Defaults to $::os_service_default. # # [*database_max_pool_size*] # Maximum number of SQL connections to keep open in a pool. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default. # # [*database_max_retries*] # Maximum db connection retries during startup. # Setting -1 implies an infinite retry count. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default. # # [*database_retry_interval*] # Interval between retries of opening a sql connection. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default. # # [*database_max_overflow*] # If set, use this value for max_overflow with sqlalchemy. -# (Optional) Defaults to 20. +# (Optional) Defaults to $::os_service_default. # # [*sync_db*] # (Optional) Run db sync on nodes after connection setting has been set. 
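A usage sketch for heat::db as reworked in the hunks below; the host, password and pool size are placeholders. A mysql+pymysql:// URL now passes the relaxed validate_re and, mirroring the glance change earlier in this patch, is expected to pull in a PyMySQL backend package where heat::params defines one (an assumption about heat::params, not shown in this excerpt).

class { '::heat::db':
  database_connection    => 'mysql+pymysql://heat:CHANGEME@10.0.0.10/heat?charset=utf8',  # placeholder DSN
  database_idle_timeout  => 3600,
  database_max_pool_size => 20,
}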
@@ -39,16 +39,17 @@ # class heat::db ( $database_connection = 'sqlite:////var/lib/heat/heat.sqlite', - $database_idle_timeout = 3600, - $database_min_pool_size = 1, - $database_max_pool_size = 10, - $database_max_retries = 10, - $database_retry_interval = 10, - $database_max_overflow = 20, + $database_idle_timeout = $::os_service_default, + $database_min_pool_size = $::os_service_default, + $database_max_pool_size = $::os_service_default, + $database_max_retries = $::os_service_default, + $database_retry_interval = $::os_service_default, + $database_max_overflow = $::os_service_default, $sync_db = true, ) { include ::heat::deps + include ::heat::params # NOTE(spredzy): In order to keep backward compatibility we rely on the pick function # to use heat:: if heat::db:: isn't specified. @@ -62,49 +63,50 @@ $sync_db_real = pick($::heat::sync_db, $sync_db) validate_re($database_connection_real, - '(sqlite|mysql|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') + '^(sqlite|mysql(\+pymysql)?|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') - if $database_connection_real { - case $database_connection_real { - /^mysql:\/\//: { + case $database_connection_real { + /^mysql(\+pymysql)?:\/\//: { + require 'mysql::bindings' + require 'mysql::bindings::python' + if $database_connection_real =~ /^mysql\+pymysql/ { + $backend_package = $::heat::params::pymysql_package_name + } else { $backend_package = false - require 'mysql::bindings' - require 'mysql::bindings::python' - } - /^postgresql:\/\//: { - $backend_package = false - require 'postgresql::lib::python' - } - /^sqlite:\/\//: { - $backend_package = $::heat::params::sqlite_package_name - } - default: { - fail('Unsupported backend configured') } } - - if $backend_package and !defined(Package[$backend_package]) { - package {'heat-backend-package': - ensure => present, - name => $backend_package, - tag => 'openstack', - } + /^postgresql:\/\//: { + $backend_package = false + require 'postgresql::lib::python' } - - heat_config { - 'database/connection': value => $database_connection_real, secret => true; - 'database/idle_timeout': value => $database_idle_timeout_real; - 'database/min_pool_size': value => $database_min_pool_size_real; - 'database/max_retries': value => $database_max_retries_real; - 'database/retry_interval': value => $database_retry_interval_real; - 'database/max_pool_size': value => $database_max_pool_size_real; - 'database/max_overflow': value => $database_max_overflow_real; + /^sqlite:\/\//: { + $backend_package = $::heat::params::sqlite_package_name + } + default: { + fail('Unsupported backend configured') } + } - if $sync_db_real { - include ::heat::db::sync + if $backend_package and !defined(Package[$backend_package]) { + package {'heat-backend-package': + ensure => present, + name => $backend_package, + tag => 'openstack', } + } + + heat_config { + 'database/connection': value => $database_connection_real, secret => true; + 'database/idle_timeout': value => $database_idle_timeout_real; + 'database/min_pool_size': value => $database_min_pool_size_real; + 'database/max_retries': value => $database_max_retries_real; + 'database/retry_interval': value => $database_retry_interval_real; + 'database/max_pool_size': value => $database_max_pool_size_real; + 'database/max_overflow': value => $database_max_overflow_real; + } + if $sync_db_real { + include ::heat::db::sync } } diff --git a/heat/manifests/db/sync.pp b/heat/manifests/db/sync.pp index 4703cf90b..931339a4d 100644 --- a/heat/manifests/db/sync.pp +++ b/heat/manifests/db/sync.pp @@ -1,13 +1,23 @@ # # Class to 
execute heat dbsync # -class heat::db::sync { +# ==Parameters +# +# [*extra_params*] +# (optional) String of extra command line parameters to append +# to the heat-manage db_sync command. These will be inserted +# in the command line between 'heat-manage' and 'db_sync'. +# Defaults to '--config-file /etc/heat/heat.conf' +# +class heat::db::sync( + $extra_params = '--config-file /etc/heat/heat.conf', +) { include ::heat::deps include ::heat::params exec { 'heat-dbsync': - command => $::heat::params::dbsync_command, + command => "heat-manage ${extra_params} db_sync", path => '/usr/bin', user => 'heat', refreshonly => true, diff --git a/heat/manifests/engine.pp b/heat/manifests/engine.pp index e29483d76..58a5f7820 100644 --- a/heat/manifests/engine.pp +++ b/heat/manifests/engine.pp @@ -77,20 +77,35 @@ # configure the keystone roles. # Defaults to ['heat_stack_owner'] # +# [*instance_connection_is_secure*] +# (Optional) Instance connection to CFN/CW API via https. +# Defaults to $::os_service_default +# +# [*instance_connection_https_validate_certificates*] +# (Optional) Instance connection to CFN/CW API validate certs if SSL is used. +# Defaults to $::os_service_default +# +# [*max_resources_per_stack*] +# (Optional) Maximum resources allowed per top-level stack. +# Defaults to $::os_service_default +# class heat::engine ( $auth_encryption_key, - $package_ensure = 'present', - $manage_service = true, - $enabled = true, - $heat_stack_user_role = 'heat_stack_user', - $heat_metadata_server_url = 'http://127.0.0.1:8000', - $heat_waitcondition_server_url = 'http://127.0.0.1:8000/v1/waitcondition', - $heat_watch_server_url = 'http://127.0.0.1:8003', - $engine_life_check_timeout = '2', - $deferred_auth_method = 'trusts', - $default_software_config_transport = 'POLL_SERVER_CFN', - $default_deployment_signal_transport = 'CFN_SIGNAL', - $trusts_delegated_roles = ['heat_stack_owner'], + $package_ensure = 'present', + $manage_service = true, + $enabled = true, + $heat_stack_user_role = 'heat_stack_user', + $heat_metadata_server_url = 'http://127.0.0.1:8000', + $heat_waitcondition_server_url = 'http://127.0.0.1:8000/v1/waitcondition', + $heat_watch_server_url = 'http://127.0.0.1:8003', + $engine_life_check_timeout = '2', + $deferred_auth_method = 'trusts', + $default_software_config_transport = 'POLL_SERVER_CFN', + $default_deployment_signal_transport = 'CFN_SIGNAL', + $trusts_delegated_roles = ['heat_stack_owner'], + $instance_connection_is_secure = $::os_service_default, + $instance_connection_https_validate_certificates = $::os_service_default, + $max_resources_per_stack = $::os_service_default, ) { include ::heat::deps @@ -131,15 +146,18 @@ } heat_config { - 'DEFAULT/auth_encryption_key' : value => $auth_encryption_key; - 'DEFAULT/heat_stack_user_role' : value => $heat_stack_user_role; - 'DEFAULT/heat_metadata_server_url' : value => $heat_metadata_server_url; - 'DEFAULT/heat_waitcondition_server_url' : value => $heat_waitcondition_server_url; - 'DEFAULT/heat_watch_server_url' : value => $heat_watch_server_url; - 'DEFAULT/engine_life_check_timeout' : value => $engine_life_check_timeout; - 'DEFAULT/default_software_config_transport' : value => $default_software_config_transport; - 'DEFAULT/default_deployment_signal_transport' : value => $default_deployment_signal_transport; - 'DEFAULT/trusts_delegated_roles' : value => $trusts_delegated_roles; - 'DEFAULT/deferred_auth_method' : value => $deferred_auth_method; + 'DEFAULT/auth_encryption_key': value => $auth_encryption_key; + 
'DEFAULT/heat_stack_user_role': value => $heat_stack_user_role; + 'DEFAULT/heat_metadata_server_url': value => $heat_metadata_server_url; + 'DEFAULT/heat_waitcondition_server_url': value => $heat_waitcondition_server_url; + 'DEFAULT/heat_watch_server_url': value => $heat_watch_server_url; + 'DEFAULT/engine_life_check_timeout': value => $engine_life_check_timeout; + 'DEFAULT/default_software_config_transport': value => $default_software_config_transport; + 'DEFAULT/default_deployment_signal_transport': value => $default_deployment_signal_transport; + 'DEFAULT/trusts_delegated_roles': value => $trusts_delegated_roles; + 'DEFAULT/deferred_auth_method': value => $deferred_auth_method; + 'DEFAULT/max_resources_per_stack': value => $max_resources_per_stack; + 'DEFAULT/instance_connection_https_validate_certificates': value => $instance_connection_https_validate_certificates; + 'DEFAULT/instance_connection_is_secure': value => $instance_connection_is_secure; } } diff --git a/heat/manifests/init.pp b/heat/manifests/init.pp index d119f0f9a..869678373 100644 --- a/heat/manifests/init.pp +++ b/heat/manifests/init.pp @@ -27,7 +27,7 @@ # # [*rpc_response_timeout*] # (Optional) Configure the timeout (in seconds) for rpc responses -# Defaults to 60 seconds +# Defaults to $::os_service_default. # # [*rabbit_host*] # (Optional) IP or hostname of the rabbit server. @@ -44,7 +44,7 @@ # # [*rabbit_userid*] # (Optional) User to connect to the rabbit server. -# Defaults to 'guest' +# Defaults to $::os_service_default. # # [*rabbit_password*] # (Optional) Password to connect to the rabbit_server. @@ -52,7 +52,7 @@ # # [*rabbit_virtual_host*] # (Optional) Virtual_host to use. -# Defaults to '/' +# Defaults to $::os_service_default. # # [*rabbit_ha_queues*] # (optional) Use HA queues in RabbitMQ (x-ha-policy: all). @@ -71,7 +71,7 @@ # check the heartbeat on RabbitMQ connection. (i.e. rabbit_heartbeat_rate=2 # when rabbit_heartbeat_timeout_threshold=60, the heartbeat will be checked # every 30 seconds. -# Defaults to 2 +# Defaults to $::os_service_default. # # [*rabbit_use_ssl*] # (Optional) Connect over SSL for RabbitMQ. @@ -79,26 +79,40 @@ # # [*kombu_ssl_ca_certs*] # (Optional) SSL certification authority file (valid only if SSL enabled). -# Defaults to undef +# Defaults to $::os_service_default. # # [*kombu_ssl_certfile*] # (Optional) SSL cert file (valid only if SSL enabled). -# Defaults to undef +# Defaults to $::os_service_default. # # [*kombu_ssl_keyfile*] # (Optional) SSL key file (valid only if SSL enabled). -# Defaults to undef +# Defaults to $::os_service_default. # # [*kombu_ssl_version*] # (Optional) SSL version to use (valid only if SSL enabled). # Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may be # available on some distributions. -# Defaults to 'TLSv1' +# Defaults to $::os_service_default. # # [*amqp_durable_queues*] # (Optional) Use durable queues in amqp. # Defaults to false # +# [*max_template_size*] +# (Optional) Maximum raw byte size of any template. +# Defaults to $::os_service_default +# +# [*max_json_body_size*] +# (Optional) Maximum raw byte size of JSON request body. +# Should be larger than max_template_size. +# Defaults to $::os_service_default +# +# [*notification_driver*] +# (Optional) Driver or drivers to handle sending notifications. +# Value can be a string or a list. 
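Returning to the heat::db::sync class introduced earlier in this patch: extra_params is spliced verbatim between 'heat-manage' and 'db_sync'. An illustrative override (--config-dir is a standard oslo.config flag, used here only as an example):

class { '::heat::db::sync':
  extra_params => '--config-file /etc/heat/heat.conf --config-dir /etc/heat/heat.conf.d',
}
# Resulting exec command:
#   heat-manage --config-file /etc/heat/heat.conf --config-dir /etc/heat/heat.conf.d db_sync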
+# Defaults to $::os_service_default +# # == keystone authentication options # # [*auth_uri*] @@ -119,34 +133,6 @@ # # [*keystone_ec2_uri*] # -# ==== Various QPID options (Optional) -# -# [*qpid_hostname*] -# -# [*qpid_port*] -# -# [*qpid_username*] -# -# [*qpid_password*] -# -# [*qpid_heartbeat*] -# -# [*qpid_protocol*] -# -# [*qpid_tcp_nodelay*] -# -# [*qpid_reconnect*] -# -# [*qpid_reconnect_timeout*] -# -# [*qpid_reconnect_limit*] -# -# [*qpid_reconnect_interval*] -# -# [*qpid_reconnect_interval_min*] -# -# [*qpid_reconnect_interval_max*] -# # [*database_connection*] # (optional) Connection url for the heat database. # Defaults to undef. @@ -191,12 +177,12 @@ # (optional) Specifies the Authentication method. # Set to 'standalone' to get Heat to work with a remote OpenStack # Tested versions include 0.9 and 2.2 -# Defaults to undef +# Defaults to $::os_service_default. # # [*region_name*] # (Optional) Region name for services. This is the # default region name that heat talks to service endpoints on. -# Defaults to undef +# Defaults to $::os_service_default. # # [*instance_user*] # (Optional) The default user for new instances. Although heat claims that @@ -207,11 +193,11 @@ # # [*enable_stack_adopt*] # (Optional) Enable the stack-adopt feature. -# Defaults to undef +# Defaults to $::os_service_default. # # [*enable_stack_abandon*] # (Optional) Enable the stack-abandon feature. -# Defaults to undef +# Defaults to $::os_service_default. # # [*sync_db*] # (Optional) Run db sync on nodes after connection setting has been set. @@ -237,6 +223,32 @@ # (Optional) DEPRECATED. The protocol used to access the keystone host # Defaults to http. # +# [*qpid_hostname*] +# +# [*qpid_port*] +# +# [*qpid_username*] +# +# [*qpid_password*] +# +# [*qpid_heartbeat*] +# +# [*qpid_protocol*] +# +# [*qpid_tcp_nodelay*] +# +# [*qpid_reconnect*] +# +# [*qpid_reconnect_timeout*] +# +# [*qpid_reconnect_limit*] +# +# [*qpid_reconnect_interval*] +# +# [*qpid_reconnect_interval_min*] +# +# [*qpid_reconnect_interval_max*] +# class heat( $auth_uri = false, $identity_uri = false, @@ -249,35 +261,22 @@ $keystone_password = false, $keystone_ec2_uri = 'http://127.0.0.1:5000/v2.0/ec2tokens', $rpc_backend = 'rabbit', - $rpc_response_timeout = 60, + $rpc_response_timeout = $::os_service_default, $rabbit_host = '127.0.0.1', $rabbit_port = 5672, $rabbit_hosts = undef, - $rabbit_userid = 'guest', + $rabbit_userid = $::os_service_default, $rabbit_password = '', - $rabbit_virtual_host = '/', + $rabbit_virtual_host = $::os_service_default, $rabbit_ha_queues = undef, $rabbit_heartbeat_timeout_threshold = 0, - $rabbit_heartbeat_rate = 2, + $rabbit_heartbeat_rate = $::os_service_default, $rabbit_use_ssl = false, - $kombu_ssl_ca_certs = undef, - $kombu_ssl_certfile = undef, - $kombu_ssl_keyfile = undef, - $kombu_ssl_version = 'TLSv1', + $kombu_ssl_ca_certs = $::os_service_default, + $kombu_ssl_certfile = $::os_service_default, + $kombu_ssl_keyfile = $::os_service_default, + $kombu_ssl_version = $::os_service_default, $amqp_durable_queues = false, - $qpid_hostname = 'localhost', - $qpid_port = 5672, - $qpid_username = 'guest', - $qpid_password = 'guest', - $qpid_heartbeat = 60, - $qpid_protocol = 'tcp', - $qpid_tcp_nodelay = true, - $qpid_reconnect = true, - $qpid_reconnect_timeout = 0, - $qpid_reconnect_limit = 0, - $qpid_reconnect_interval_min = 0, - $qpid_reconnect_interval_max = 0, - $qpid_reconnect_interval = 0, $use_syslog = undef, $use_stderr = undef, $log_facility = undef, @@ -288,11 +287,14 @@ $database_min_pool_size = undef, 
$database_max_pool_size = undef, $database_max_overflow = undef, - $flavor = undef, - $region_name = undef, - $enable_stack_adopt = undef, - $enable_stack_abandon = undef, + $flavor = $::os_service_default, + $region_name = $::os_service_default, + $enable_stack_adopt = $::os_service_default, + $enable_stack_abandon = $::os_service_default, $sync_db = undef, + $max_template_size = $::os_service_default, + $max_json_body_size = $::os_service_default, + $notification_driver = $::os_service_default, # Deprecated parameters $mysql_module = undef, $sql_connection = undef, @@ -300,6 +302,19 @@ $keystone_port = '35357', $keystone_protocol = 'http', $instance_user = undef, + $qpid_hostname = undef, + $qpid_port = undef, + $qpid_username = undef, + $qpid_password = undef, + $qpid_heartbeat = undef, + $qpid_protocol = undef, + $qpid_tcp_nodelay = undef, + $qpid_reconnect = undef, + $qpid_reconnect_timeout = undef, + $qpid_reconnect_limit = undef, + $qpid_reconnect_interval_min = undef, + $qpid_reconnect_interval_max = undef, + $qpid_reconnect_interval = undef, ) { include ::heat::logging @@ -307,16 +322,17 @@ include ::heat::deps include ::heat::params - if $kombu_ssl_ca_certs and !$rabbit_use_ssl { + if (!is_service_default($kombu_ssl_ca_certs)) and !$rabbit_use_ssl { fail('The kombu_ssl_ca_certs parameter requires rabbit_use_ssl to be set to true') } - if $kombu_ssl_certfile and !$rabbit_use_ssl { + if (!is_service_default($kombu_ssl_certfile)) and !$rabbit_use_ssl { fail('The kombu_ssl_certfile parameter requires rabbit_use_ssl to be set to true') } - if $kombu_ssl_keyfile and !$rabbit_use_ssl { + if (!is_service_default($kombu_ssl_keyfile)) and !$rabbit_use_ssl { fail('The kombu_ssl_keyfile parameter requires rabbit_use_ssl to be set to true') } - if ($kombu_ssl_certfile and !$kombu_ssl_keyfile) or ($kombu_ssl_keyfile and !$kombu_ssl_certfile) { + if ((!is_service_default($kombu_ssl_certfile)) and is_service_default($kombu_ssl_keyfile)) + or ((!is_service_default($kombu_ssl_keyfile)) and is_service_default($kombu_ssl_certfile)) { fail('The kombu_ssl_certfile and kombu_ssl_keyfile parameters must be used together') } if $mysql_module { @@ -363,64 +379,16 @@ 'oslo_messaging_rabbit/heartbeat_rate': value => $rabbit_heartbeat_rate; 'oslo_messaging_rabbit/rabbit_use_ssl': value => $rabbit_use_ssl; 'oslo_messaging_rabbit/amqp_durable_queues': value => $amqp_durable_queues; - } - - if $rabbit_use_ssl { - - if $kombu_ssl_ca_certs { - heat_config { 'oslo_messaging_rabbit/kombu_ssl_ca_certs': value => $kombu_ssl_ca_certs; } - } else { - heat_config { 'oslo_messaging_rabbit/kombu_ssl_ca_certs': ensure => absent; } - } - - if $kombu_ssl_certfile or $kombu_ssl_keyfile { - heat_config { - 'oslo_messaging_rabbit/kombu_ssl_certfile': value => $kombu_ssl_certfile; - 'oslo_messaging_rabbit/kombu_ssl_keyfile': value => $kombu_ssl_keyfile; - } - } else { - heat_config { - 'oslo_messaging_rabbit/kombu_ssl_certfile': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_keyfile': ensure => absent; - } - } - - if $kombu_ssl_version { - heat_config { 'oslo_messaging_rabbit/kombu_ssl_version': value => $kombu_ssl_version; } - } else { - heat_config { 'oslo_messaging_rabbit/kombu_ssl_version': ensure => absent; } - } - - } else { - heat_config { - 'oslo_messaging_rabbit/kombu_ssl_version': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_ca_certs': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_certfile': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_keyfile': ensure => absent; - } + 
'oslo_messaging_rabbit/kombu_ssl_ca_certs': value => $kombu_ssl_ca_certs; + 'oslo_messaging_rabbit/kombu_ssl_certfile': value => $kombu_ssl_certfile; + 'oslo_messaging_rabbit/kombu_ssl_keyfile': value => $kombu_ssl_keyfile; + 'oslo_messaging_rabbit/kombu_ssl_version': value => $kombu_ssl_version; } } if $rpc_backend == 'qpid' { - - heat_config { - 'oslo_messaging_qpid/qpid_hostname' : value => $qpid_hostname; - 'oslo_messaging_qpid/qpid_port' : value => $qpid_port; - 'oslo_messaging_qpid/qpid_username' : value => $qpid_username; - 'oslo_messaging_qpid/qpid_password' : value => $qpid_password, secret => true; - 'oslo_messaging_qpid/qpid_heartbeat' : value => $qpid_heartbeat; - 'oslo_messaging_qpid/qpid_protocol' : value => $qpid_protocol; - 'oslo_messaging_qpid/qpid_tcp_nodelay' : value => $qpid_tcp_nodelay; - 'oslo_messaging_qpid/qpid_reconnect' : value => $qpid_reconnect; - 'oslo_messaging_qpid/qpid_reconnect_timeout' : value => $qpid_reconnect_timeout; - 'oslo_messaging_qpid/qpid_reconnect_limit' : value => $qpid_reconnect_limit; - 'oslo_messaging_qpid/qpid_reconnect_interval_min' : value => $qpid_reconnect_interval_min; - 'oslo_messaging_qpid/qpid_reconnect_interval_max' : value => $qpid_reconnect_interval_max; - 'oslo_messaging_qpid/qpid_reconnect_interval' : value => $qpid_reconnect_interval; - 'oslo_messaging_qpid/amqp_durable_queues' : value => $amqp_durable_queues; - } - + warning('Qpid driver is removed from Oslo.messaging in the Mitaka release') } # if both auth_uri and identity_uri are set we skip these deprecated settings entirely @@ -481,26 +449,28 @@ } } - heat_config { - 'DEFAULT/rpc_backend' : value => $rpc_backend; - 'DEFAULT/rpc_response_timeout' : value => $rpc_response_timeout; - 'ec2authtoken/auth_uri' : value => $keystone_ec2_uri; - 'keystone_authtoken/admin_tenant_name' : value => $keystone_tenant; - 'keystone_authtoken/admin_user' : value => $keystone_user; - 'keystone_authtoken/admin_password' : value => $keystone_password, secret => true; + if (!is_service_default($enable_stack_adopt)) { + validate_bool($enable_stack_adopt) } - if $flavor { - heat_config { 'paste_deploy/flavor': value => $flavor; } - } else { - heat_config { 'paste_deploy/flavor': ensure => absent; } + if (!is_service_default($enable_stack_abandon)) { + validate_bool($enable_stack_abandon) } - # region name - if $region_name { - heat_config { 'DEFAULT/region_name_for_services': value => $region_name; } - } else { - heat_config { 'DEFAULT/region_name_for_services': ensure => absent; } + heat_config { + 'DEFAULT/rpc_backend': value => $rpc_backend; + 'DEFAULT/rpc_response_timeout': value => $rpc_response_timeout; + 'DEFAULT/max_template_size': value => $max_template_size; + 'DEFAULT/max_json_body_size': value => $max_json_body_size; + 'DEFAULT/notification_driver': value => $notification_driver; + 'DEFAULT/region_name_for_services': value => $region_name; + 'DEFAULT/enable_stack_abandon': value => $enable_stack_abandon; + 'DEFAULT/enable_stack_adopt': value => $enable_stack_adopt; + 'ec2authtoken/auth_uri': value => $keystone_ec2_uri; + 'keystone_authtoken/admin_tenant_name': value => $keystone_tenant; + 'keystone_authtoken/admin_user': value => $keystone_user; + 'keystone_authtoken/admin_password': value => $keystone_password, secret => true; + 'paste_deploy/flavor': value => $flavor; } # instance_user @@ -513,17 +483,4 @@ heat_config { 'DEFAULT/instance_user': ensure => absent; } } - if $enable_stack_adopt != undef { - validate_bool($enable_stack_adopt) - heat_config { 
'DEFAULT/enable_stack_adopt': value => $enable_stack_adopt; } - } else { - heat_config { 'DEFAULT/enable_stack_adopt': ensure => absent; } - } - - if $enable_stack_abandon != undef { - validate_bool($enable_stack_abandon) - heat_config { 'DEFAULT/enable_stack_abandon': value => $enable_stack_abandon; } - } else { - heat_config { 'DEFAULT/enable_stack_abandon': ensure => absent; } - } } diff --git a/heat/manifests/logging.pp b/heat/manifests/logging.pp index 426c14a68..8efa890b1 100644 --- a/heat/manifests/logging.pp +++ b/heat/manifests/logging.pp @@ -5,24 +5,24 @@ # === Parameters # # [*verbose*] -# (Optional) Should the daemons log verbose messages -# Defaults to 'false' +# (Optional) Should the daemons log verbose messages. +# Defaults to $::os_service_default. # # [*debug*] -# (Optional) Should the daemons log debug messages -# Defaults to 'false' +# (Optional) Should the daemons log debug messages. +# Defaults to $::os_service_default. # # [*use_syslog*] # (Optional) Use syslog for logging. -# Defaults to 'false' +# Defaults to $::os_service_default. # # [*use_stderr*] -# (optional) Use stderr for logging -# Defaults to 'true' +# (optional) Use stderr for logging. +# Defaults to $::os_service_default. # # [*log_facility*] # (Optional) Syslog facility to receive log lines. -# Defaults to 'LOG_USER' +# Defaults to $::os_service_default. # # [*log_dir*] # (optional) Directory where logs should be stored. @@ -31,34 +31,34 @@ # # [*logging_context_format_string*] # (optional) Format string to use for log messages with context. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [%(request_id)s %(user_identity)s] %(instance)s%(message)s' # # [*logging_default_format_string*] # (optional) Format string to use for log messages without context. -# Defaults to undef. +# Defaults to $::os_service_default. # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [-] %(instance)s%(message)s' # # [*logging_debug_format_suffix*] # (optional) Formatted data to append to log format when level is DEBUG. -# Defaults to undef. +# Defaults to $::os_service_default. # Example: '%(funcName)s %(pathname)s:%(lineno)d' # # [*logging_exception_prefix*] # (optional) Prefix each line of exception output with this format. -# Defaults to undef. +# Defaults to $::os_service_default. # Example: '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s' # # [*log_config_append*] # The name of an additional logging configuration file. -# Defaults to undef. +# Defaults to $::os_service_default. # See https://docs.python.org/2/howto/logging.html # # [*default_log_levels*] # (optional) Hash of logger (keys) and level (values) pairs. -# Defaults to undef. +# Defaults to $::os_service_default. # Example: # {'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', # 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', @@ -67,47 +67,47 @@ # # [*publish_errors*] # (optional) Publish error events (boolean value). -# Defaults to undef (false if unconfigured). +# Defaults to $::os_service_default. # # [*fatal_deprecations*] -# (optional) Make deprecations fatal (boolean value) -# Defaults to undef (false if unconfigured). +# (optional) Make deprecations fatal (boolean value). +# Defaults to $::os_service_default. # # [*instance_format*] # (optional) If an instance is passed with the log message, format it # like this (string value). -# Defaults to undef. +# Defaults to $::os_service_default. 
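As a rough usage sketch of the reworked heat class (a hypothetical node manifest; passwords and certificate paths are placeholders), anything not passed explicitly now stays at $::os_service_default and is left to Heat's own defaults, while the kombu certfile/keyfile pair is still validated together when rabbit_use_ssl is true:

```puppet
# Hypothetical declaration; secrets and paths are placeholders.
class { '::heat':
  keystone_password  => 'a_big_secret',
  rabbit_password    => 'an_even_bigger_secret',
  rabbit_use_ssl     => true,
  # Must be supplied as a pair, otherwise the is_service_default()
  # checks fail with "must be used together".
  kombu_ssl_certfile => '/etc/pki/tls/certs/heat-client.crt',
  kombu_ssl_keyfile  => '/etc/pki/tls/private/heat-client.key',
  # flavor, region_name, enable_stack_adopt, notification_driver, etc.
  # are not set here, so they remain at $::os_service_default.
}
```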
# Example: '[instance: %(uuid)s] ' # # [*instance_uuid_format*] # (optional) If an instance UUID is passed with the log message, format # It like this (string value). -# Defaults to undef. +# Defaults to $::os_service_default. # Example: instance_uuid_format='[instance: %(uuid)s] ' # [*log_date_format*] # (optional) Format string for %%(asctime)s in log records. -# Defaults to undef. +# Defaults to $::os_service_default. # Example: 'Y-%m-%d %H:%M:%S' # class heat::logging( - $use_syslog = false, - $use_stderr = true, - $log_facility = 'LOG_USER', + $use_syslog = $::os_service_default, + $use_stderr = $::os_service_default, + $log_facility = $::os_service_default, $log_dir = '/var/log/heat', - $verbose = false, - $debug = false, - $logging_context_format_string = undef, - $logging_default_format_string = undef, - $logging_debug_format_suffix = undef, - $logging_exception_prefix = undef, - $log_config_append = undef, - $default_log_levels = undef, - $publish_errors = undef, - $fatal_deprecations = undef, - $instance_format = undef, - $instance_uuid_format = undef, - $log_date_format = undef, + $verbose = $::os_service_default, + $debug = $::os_service_default, + $logging_context_format_string = $::os_service_default, + $logging_default_format_string = $::os_service_default, + $logging_debug_format_suffix = $::os_service_default, + $logging_exception_prefix = $::os_service_default, + $log_config_append = $::os_service_default, + $default_log_levels = $::os_service_default, + $publish_errors = $::os_service_default, + $fatal_deprecations = $::os_service_default, + $instance_format = $::os_service_default, + $instance_uuid_format = $::os_service_default, + $log_date_format = $::os_service_default, ) { include ::heat::deps @@ -121,139 +121,30 @@ $verbose_real = pick($::heat::verbose,$verbose) $debug_real = pick($::heat::debug,$debug) - heat_config { - 'DEFAULT/debug' : value => $debug_real; - 'DEFAULT/verbose' : value => $verbose_real; - 'DEFAULT/use_stderr' : value => $use_stderr_real; - 'DEFAULT/use_syslog' : value => $use_syslog_real; - 'DEFAULT/log_dir' : value => $log_dir_real; - 'DEFAULT/syslog_log_facility': value => $log_facility_real; + if is_service_default($default_log_levels) { + $default_log_levels_real = $default_log_levels + } else { + $default_log_levels_real = join(sort(join_keys_to_values($default_log_levels, '=')), ',') } - if $logging_context_format_string { - heat_config { - 'DEFAULT/logging_context_format_string' : - value => $logging_context_format_string; - } - } - else { - heat_config { - 'DEFAULT/logging_context_format_string' : ensure => absent; - } - } - - if $logging_default_format_string { - heat_config { - 'DEFAULT/logging_default_format_string' : - value => $logging_default_format_string; - } - } - else { - heat_config { - 'DEFAULT/logging_default_format_string' : ensure => absent; - } - } - - if $logging_debug_format_suffix { - heat_config { - 'DEFAULT/logging_debug_format_suffix' : - value => $logging_debug_format_suffix; - } - } - else { - heat_config { - 'DEFAULT/logging_debug_format_suffix' : ensure => absent; - } - } - - if $logging_exception_prefix { - heat_config { - 'DEFAULT/logging_exception_prefix' : value => $logging_exception_prefix; - } - } - else { - heat_config { - 'DEFAULT/logging_exception_prefix' : ensure => absent; - } - } - - if $log_config_append { - heat_config { - 'DEFAULT/log_config_append' : value => $log_config_append; - } - } - else { - heat_config { - 'DEFAULT/log_config_append' : ensure => absent; - } - } - - if $default_log_levels { 
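To make the new default_log_levels handling concrete, here is a small sketch of the stdlib conversion used above (the hash contents are only examples, taken from the levels mentioned in the docs):

```puppet
# Illustrative only: mirrors the conversion performed in heat::logging.
$default_log_levels = {
  'amqp'       => 'WARN',
  'sqlalchemy' => 'WARN',
  'iso8601'    => 'WARN',
}

# join_keys_to_values() yields ['amqp=WARN', 'sqlalchemy=WARN', 'iso8601=WARN'],
# sort() orders the entries, and join() builds the single string written to
# DEFAULT/default_log_levels.
$default_log_levels_real = join(sort(join_keys_to_values($default_log_levels, '=')), ',')
# => 'amqp=WARN,iso8601=WARN,sqlalchemy=WARN'
```

When the parameter is left at $::os_service_default, is_service_default() short-circuits this conversion and the option is left to the service default.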
- heat_config { - 'DEFAULT/default_log_levels' : - value => join(sort(join_keys_to_values($default_log_levels, '=')), ','); - } - } - else { - heat_config { - 'DEFAULT/default_log_levels' : ensure => absent; - } - } - - if $publish_errors { - heat_config { - 'DEFAULT/publish_errors' : value => $publish_errors; - } - } - else { - heat_config { - 'DEFAULT/publish_errors' : ensure => absent; - } - } - - if $fatal_deprecations { - heat_config { - 'DEFAULT/fatal_deprecations' : value => $fatal_deprecations; - } - } - else { - heat_config { - 'DEFAULT/fatal_deprecations' : ensure => absent; - } - } - - if $instance_format { - heat_config { - 'DEFAULT/instance_format' : value => $instance_format; - } - } - else { - heat_config { - 'DEFAULT/instance_format' : ensure => absent; - } - } - - if $instance_uuid_format { - heat_config { - 'DEFAULT/instance_uuid_format' : value => $instance_uuid_format; - } - } - else { - heat_config { - 'DEFAULT/instance_uuid_format' : ensure => absent; - } - } - - if $log_date_format { - heat_config { - 'DEFAULT/log_date_format' : value => $log_date_format; - } - } - else { - heat_config { - 'DEFAULT/log_date_format' : ensure => absent; - } - } - + heat_config { + 'DEFAULT/debug': value => $debug_real; + 'DEFAULT/verbose': value => $verbose_real; + 'DEFAULT/use_stderr': value => $use_stderr_real; + 'DEFAULT/use_syslog': value => $use_syslog_real; + 'DEFAULT/log_dir': value => $log_dir_real; + 'DEFAULT/syslog_log_facility': value => $log_facility_real; + 'DEFAULT/default_log_levels': value => $default_log_levels_real; + 'DEFAULT/logging_context_format_string': value => $logging_context_format_string; + 'DEFAULT/logging_default_format_string': value => $logging_default_format_string; + 'DEFAULT/logging_debug_format_suffix': value => $logging_debug_format_suffix; + 'DEFAULT/logging_exception_prefix': value => $logging_exception_prefix; + 'DEFAULT/log_config_append': value => $log_config_append; + 'DEFAULT/publish_errors': value => $publish_errors; + 'DEFAULT/fatal_deprecations': value => $fatal_deprecations; + 'DEFAULT/instance_format': value => $instance_format; + 'DEFAULT/instance_uuid_format': value => $instance_uuid_format; + 'DEFAULT/log_date_format': value => $log_date_format; + } } diff --git a/heat/manifests/params.pp b/heat/manifests/params.pp index d8b4830e5..6e5c026b0 100644 --- a/heat/manifests/params.pp +++ b/heat/manifests/params.pp @@ -4,9 +4,6 @@ # class heat::params { - $dbsync_command = - 'heat-manage --config-file /etc/heat/heat.conf db_sync' - case $::osfamily { 'RedHat': { # package names @@ -17,6 +14,7 @@ $client_package_name = 'python-heatclient' $common_package_name = 'openstack-heat-common' $sqlite_package_name = undef + $pymysql_package_name = undef # service names $api_service_name = 'openstack-heat-api' $api_cloudwatch_service_name = 'openstack-heat-api-cloudwatch' @@ -32,6 +30,7 @@ $client_package_name = 'python-heatclient' $common_package_name = 'heat-common' $sqlite_package_name = 'python-pysqlite2' + $pymysql_package_name = 'python-pymysql' # service names $api_service_name = 'heat-api' $api_cloudwatch_service_name = 'heat-api-cloudwatch' diff --git a/heat/metadata.json b/heat/metadata.json index 5798063a1..0fba24bdc 100644 --- a/heat/metadata.json +++ b/heat/metadata.json @@ -1,6 +1,6 @@ { "name": "openstack-heat", - "version": "6.1.0", + "version": "7.0.0", "author": "eNovance and OpenStack Contributors", "summary": "Puppet module for OpenStack Heat", "license": "Apache-2.0", @@ -32,8 +32,8 @@ "description": "Installs and configures 
OpenStack Heat (Orchestration).", "dependencies": [ { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=7.0.0 <8.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=7.0.0 <8.0.0" } ] } diff --git a/heat/spec/acceptance/basic_heat_spec.rb b/heat/spec/acceptance/basic_heat_spec.rb index 81ca93dde..9021f2943 100644 --- a/heat/spec/acceptance/basic_heat_spec.rb +++ b/heat/spec/acceptance/basic_heat_spec.rb @@ -32,7 +32,7 @@ class { '::heat': rabbit_userid => 'heat', rabbit_password => 'an_even_bigger_secret', rabbit_host => '127.0.0.1', - database_connection => 'mysql://heat:a_big_secret@127.0.0.1/heat?charset=utf8', + database_connection => 'mysql+pymysql://heat:a_big_secret@127.0.0.1/heat?charset=utf8', identity_uri => 'http://127.0.0.1:35357/', keystone_password => 'a_big_secret', debug => true, diff --git a/heat/spec/classes/heat_db_postgresql_spec.rb b/heat/spec/classes/heat_db_postgresql_spec.rb index 6767d01f7..fe2cee00a 100644 --- a/heat/spec/classes/heat_db_postgresql_spec.rb +++ b/heat/spec/classes/heat_db_postgresql_spec.rb @@ -14,11 +14,11 @@ context 'on a RedHat osfamily' do let :facts do - { + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.0', :concat_basedir => '/var/lib/puppet/concat' - } + }) end context 'with only required parameters' do diff --git a/heat/spec/classes/heat_db_spec.rb b/heat/spec/classes/heat_db_spec.rb index a138827a9..20d9e1da0 100644 --- a/heat/spec/classes/heat_db_spec.rb +++ b/heat/spec/classes/heat_db_spec.rb @@ -8,32 +8,43 @@ it { is_expected.to contain_class('heat::db::sync') } it { is_expected.to contain_heat_config('database/connection').with_value('sqlite:////var/lib/heat/heat.sqlite').with_secret(true) } - it { is_expected.to contain_heat_config('database/idle_timeout').with_value('3600') } - it { is_expected.to contain_heat_config('database/min_pool_size').with_value('1') } - it { is_expected.to contain_heat_config('database/max_retries').with_value('10') } - it { is_expected.to contain_heat_config('database/retry_interval').with_value('10') } + it { is_expected.to contain_heat_config('database/idle_timeout').with_value('') } + it { is_expected.to contain_heat_config('database/min_pool_size').with_value('') } + it { is_expected.to contain_heat_config('database/max_pool_size').with_value('') } + it { is_expected.to contain_heat_config('database/max_retries').with_value('') } + it { is_expected.to contain_heat_config('database/retry_interval').with_value('') } end context 'with specific parameters' do let :params do - { :database_connection => 'mysql://heat:heat@localhost/heat', + { :database_connection => 'mysql+pymysql://heat:heat@localhost/heat', :database_idle_timeout => '3601', :database_min_pool_size => '2', + :database_max_pool_size => '12', :database_max_retries => '11', :database_retry_interval => '11', :sync_db => false } end it { is_expected.not_to contain_class('heat::db::sync') } - it { is_expected.to contain_heat_config('database/connection').with_value('mysql://heat:heat@localhost/heat').with_secret(true) } + it { is_expected.to contain_heat_config('database/connection').with_value('mysql+pymysql://heat:heat@localhost/heat').with_secret(true) } it { is_expected.to 
contain_heat_config('database/idle_timeout').with_value('3601') } it { is_expected.to contain_heat_config('database/min_pool_size').with_value('2') } + it { is_expected.to contain_heat_config('database/max_pool_size').with_value('12') } it { is_expected.to contain_heat_config('database/max_retries').with_value('11') } it { is_expected.to contain_heat_config('database/retry_interval').with_value('11') } end + context 'with MySQL-python library as backend package' do + let :params do + { :database_connection => 'mysql://heat:heat@localhost/heat' } + end + + it { is_expected.to contain_heat_config('database/connection').with_value('mysql://heat:heat@localhost/heat').with_secret(true) } + end + context 'with postgresql backend' do let :params do { :database_connection => 'postgresql://heat:heat@localhost/heat', } @@ -53,6 +64,14 @@ it_raises 'a Puppet::Error', /validate_re/ end + context 'with incorrect database_connection string' do + let :params do + { :database_connection => 'foo+pymysql://heat:heat@localhost/heat', } + end + + it_raises 'a Puppet::Error', /validate_re/ + end + end context 'on Debian platforms' do @@ -65,6 +84,14 @@ end it_configures 'heat::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://heat:heat@localhost/heat' } + end + + it { is_expected.to contain_package('heat-backend-package').with({ :ensure => 'present', :name => 'python-pymysql' }) } + end end context 'on Redhat platforms' do @@ -76,6 +103,14 @@ end it_configures 'heat::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://heat:heat@localhost/heat' } + end + + it { is_expected.not_to contain_package('heat-backend-package') } + end end end diff --git a/heat/spec/classes/heat_db_sync_spec.rb b/heat/spec/classes/heat_db_sync_spec.rb index 9af865845..595a88b8d 100644 --- a/heat/spec/classes/heat_db_sync_spec.rb +++ b/heat/spec/classes/heat_db_sync_spec.rb @@ -14,8 +14,27 @@ ) end + describe 'overriding extra_params' do + let :params do + { + :extra_params => '--config-file /etc/heat/heat01.conf', + } + end + + it { + is_expected.to contain_exec('heat-dbsync').with( + :command => 'heat-manage --config-file /etc/heat/heat01.conf db_sync', + :path => '/usr/bin', + :user => 'heat', + :refreshonly => 'true', + :logoutput => 'on_failure' + ) + } + end + end + context 'on a RedHat osfamily' do let :facts do @default_facts.merge({ diff --git a/heat/spec/classes/heat_engine_spec.rb b/heat/spec/classes/heat_engine_spec.rb index 2a9782b96..d06cccc04 100644 --- a/heat/spec/classes/heat_engine_spec.rb +++ b/heat/spec/classes/heat_engine_spec.rb @@ -69,6 +69,9 @@ it { is_expected.to contain_heat_config('DEFAULT/deferred_auth_method').with_value( expected_params[:deferred_auth_method] ) } it { is_expected.to contain_heat_config('DEFAULT/default_software_config_transport').with_value( expected_params[:default_software_config_transport] ) } it { is_expected.to contain_heat_config('DEFAULT/default_deployment_signal_transport').with_value( expected_params[:default_deployment_signal_transport] ) } + it { is_expected.to contain_heat_config('DEFAULT/instance_connection_is_secure').with_value('') } + it { is_expected.to contain_heat_config('DEFAULT/instance_connection_https_validate_certificates').with_value('') } + it { is_expected.to contain_heat_config('DEFAULT/max_resources_per_stack').with_value('') } end context 'with disabled service managing' do diff --git a/heat/spec/classes/heat_init_spec.rb 
b/heat/spec/classes/heat_init_spec.rb index 74bc6a90a..fdb58dbb8 100644 --- a/heat/spec/classes/heat_init_spec.rb +++ b/heat/spec/classes/heat_init_spec.rb @@ -11,10 +11,10 @@ :log_dir => '/var/log/heat', :rabbit_host => '127.0.0.1', :rabbit_port => 5672, - :rabbit_userid => 'guest', + :rabbit_userid => '', :rabbit_password => '', - :rabbit_virtual_host => '/', - :database_connection => 'mysql://user@host/database', + :rabbit_virtual_host => '', + :database_connection => 'mysql+pymysql://user@host/database', :database_idle_timeout => 3600, :auth_uri => 'http://127.0.0.1:5000/v2.0', :keystone_ec2_uri => 'http://127.0.0.1:5000/v2.0/ec2tokens', @@ -23,16 +23,6 @@ } end - let :qpid_params do - { - :rpc_backend => "qpid", - :qpid_hostname => 'localhost', - :qpid_port => 5672, - :qpid_username => 'guest', - :qpid_password => 'guest', - } - end - shared_examples_for 'heat' do context 'with rabbit_host parameter' do @@ -64,13 +54,6 @@ it_configures 'rabbit with heartbeat configured' end - context 'with qpid instance' do - before {params.merge!(qpid_params) } - - it_configures 'a heat base installation' - it_configures 'qpid as rpc backend' - end - it_configures 'with SSL enabled with kombu' it_configures 'with SSL enabled without kombu' it_configures 'with SSL disabled' @@ -78,6 +61,7 @@ it_configures "with custom keystone identity_uri" it_configures "with custom keystone identity_uri and auth_uri" it_configures 'with enable_stack_adopt and enable_stack_abandon set' + it_configures 'with notification_driver set to a string' end shared_examples_for 'a heat base installation' do @@ -97,6 +81,14 @@ is_expected.to contain_class('heat::db::sync') end + it 'configures max_template_size' do + is_expected.to contain_heat_config('DEFAULT/max_template_size').with_value('') + end + + it 'configures max_json_body_size' do + is_expected.to contain_heat_config('DEFAULT/max_json_body_size').with_value('') + end + it 'configures auth_uri' do is_expected.to contain_heat_config('keystone_authtoken/auth_uri').with_value( params[:auth_uri] ) end @@ -111,6 +103,9 @@ is_expected.to contain_heat_config('keystone_authtoken/admin_password').with_secret(true) end + it 'configures notification_driver' do + is_expected.to contain_heat_config('DEFAULT/notification_driver').with_value('') + end end @@ -121,18 +116,18 @@ is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_password').with_secret( true ) is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_virtual_host').with_value( params[:rabbit_virtual_host] ) is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value(false) - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_ensure('absent') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('') is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('0') - is_expected.to 
contain_heat_config('oslo_messaging_rabbit/heartbeat_rate').with_value('2') + is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_rate').with_value('') end it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_host').with_value( params[:rabbit_host] ) } it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_port').with_value( params[:rabbit_port] ) } it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_hosts').with_value( "#{params[:rabbit_host]}:#{params[:rabbit_port]}" ) } it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_ha_queues').with_value('false') } - it { is_expected.to contain_heat_config('DEFAULT/rpc_response_timeout').with_value('60') } + it { is_expected.to contain_heat_config('DEFAULT/rpc_response_timeout').with_value('') } it { is_expected.to contain_heat_config('oslo_messaging_rabbit/amqp_durable_queues').with_value(false) } end @@ -143,12 +138,12 @@ is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_password').with_value( params[:rabbit_password] ) is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_virtual_host').with_value( params[:rabbit_virtual_host] ) is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value(false) - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_ensure('absent') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('') is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('0') - is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_rate').with_value('2') + is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_rate').with_value('') end it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_host').with_ensure('absent') } it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_port').with_ensure('absent') } @@ -164,12 +159,12 @@ is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_password').with_secret( true ) is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_virtual_host').with_value( params[:rabbit_virtual_host] ) is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value(false) - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_ensure('absent') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_value('') + is_expected.to 
contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('') is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('0') - is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_rate').with_value('2') + is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_rate').with_value('') end it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_host').with_ensure('absent') } it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_port').with_ensure('absent') } @@ -195,45 +190,15 @@ is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_password').with_secret( true ) is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_virtual_host').with_value( params[:rabbit_virtual_host] ) is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value(false) - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_ensure('absent') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('') end it { is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('60') } it { is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_rate').with_value('10') } end - shared_examples_for 'qpid as rpc backend' do - context("with default parameters") do - it { is_expected.to contain_heat_config('oslo_messaging_qpid/qpid_reconnect').with_value(true) } - it { is_expected.to contain_heat_config('oslo_messaging_qpid/qpid_reconnect_timeout').with_value('0') } - it { is_expected.to contain_heat_config('oslo_messaging_qpid/qpid_reconnect_limit').with_value('0') } - it { is_expected.to contain_heat_config('oslo_messaging_qpid/qpid_reconnect_interval_min').with_value('0') } - it { is_expected.to contain_heat_config('oslo_messaging_qpid/qpid_reconnect_interval_max').with_value('0') } - it { is_expected.to contain_heat_config('oslo_messaging_qpid/qpid_reconnect_interval').with_value('0') } - it { is_expected.to contain_heat_config('oslo_messaging_qpid/qpid_heartbeat').with_value('60') } - it { is_expected.to contain_heat_config('oslo_messaging_qpid/qpid_protocol').with_value('tcp') } - it { is_expected.to contain_heat_config('oslo_messaging_qpid/qpid_tcp_nodelay').with_value(true) } - it { is_expected.to contain_heat_config('DEFAULT/rpc_response_timeout').with_value('60') } - it { is_expected.to contain_heat_config('oslo_messaging_qpid/amqp_durable_queues').with_value(false) } - end - - context("with mandatory parameters set") do - it { is_expected.to contain_heat_config('DEFAULT/rpc_backend').with_value('qpid') } - it { is_expected.to contain_heat_config('oslo_messaging_qpid/qpid_hostname').with_value( params[:qpid_hostname] ) } - it { is_expected.to 
contain_heat_config('oslo_messaging_qpid/qpid_port').with_value( params[:qpid_port] ) } - it { is_expected.to contain_heat_config('oslo_messaging_qpid/qpid_username').with_value( params[:qpid_username]) } - it { is_expected.to contain_heat_config('oslo_messaging_qpid/qpid_password').with_value(params[:qpid_password]) } - it { is_expected.to contain_heat_config('oslo_messaging_qpid/qpid_password').with_secret( true ) } - end - - context("failing if the rpc_backend is not present") do - before { params.delete( :rpc_backend) } - it { expect { is_expected.to raise_error(Puppet::Error) } } - end - end - shared_examples_for 'with SSL enabled with kombu' do before do params.merge!( @@ -263,10 +228,10 @@ it do is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value('true') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('TLSv1') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('') end end @@ -274,16 +239,15 @@ before do params.merge!( :rabbit_use_ssl => false, - :kombu_ssl_version => 'TLSv1' ) end it do is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value('false') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') - is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_ensure('absent') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_value('') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('') end end @@ -296,20 +260,34 @@ context 'without required parameters' do - context 'with rabbit_use_ssl => false and kombu_ssl_ca_certs parameter' do - before { params.merge!(:kombu_ssl_ca_certs => '/path/to/ssl/ca/certs') } + context 'with rabbit_use_ssl => false and kombu_ssl_ca_certs parameter' do + before { params.merge!(:kombu_ssl_ca_certs => '/path/to/ssl/ca/certs')} it_raises 'a Puppet::Error', /The kombu_ssl_ca_certs parameter requires rabbit_use_ssl to be set to true/ end context 'with rabbit_use_ssl => false and kombu_ssl_certfile parameter' do - before { params.merge!(:kombu_ssl_certfile => '/path/to/ssl/cert/file') } + before { params.merge!(:kombu_ssl_certfile => '/path/to/ssl/cert/file')} it_raises 'a Puppet::Error', /The kombu_ssl_certfile parameter requires rabbit_use_ssl to be set to true/ end context 'with rabbit_use_ssl => false and kombu_ssl_keyfile parameter' do - before { params.merge!(:kombu_ssl_keyfile => 
'/path/to/ssl/keyfile') } + before { params.merge!(:kombu_ssl_keyfile => '/path/to/ssl/keyfile')} it_raises 'a Puppet::Error', /The kombu_ssl_keyfile parameter requires rabbit_use_ssl to be set to true/ end + context 'with kombu_ssl_certfile set to default and custom kombu_ssl_keyfile parameter' do + before { params.merge!( + :rabbit_use_ssl => true, + :kombu_ssl_keyfile => '/path/to/ssl/keyfile', + )} + it_raises 'a Puppet::Error', /The kombu_ssl_certfile and kombu_ssl_keyfile parameters must be used together/ + end + context 'with kombu_ssl_keyfile set to default and custom kombu_ssl_certfile parameter' do + before { params.merge!( + :rabbit_use_ssl => true, + :kombu_ssl_certfile => '/path/to/ssl/cert/file', + )} + it_raises 'a Puppet::Error', /The kombu_ssl_certfile and kombu_ssl_keyfile parameters must be used together/ + end end end @@ -352,7 +330,7 @@ shared_examples_for 'without region_name set' do it 'doesnt have region_name set by default' do - is_expected.to contain_heat_config('DEFAULT/region_name_for_services').with_enure('absent') + is_expected.to contain_heat_config('DEFAULT/region_name_for_services').with_value('') end end @@ -426,6 +404,18 @@ end end + shared_examples_for 'with notification_driver set to a string' do + before do + params.merge!( + :notification_driver => 'bar.foo.rpc_notifier', + ) + end + + it 'has notification_driver set when specified' do + is_expected.to contain_heat_config('DEFAULT/notification_driver').with_value('bar.foo.rpc_notifier') + end + end + context 'on Debian platforms' do let :facts do @default_facts.merge({ diff --git a/heat/spec/classes/heat_logging_spec.rb b/heat/spec/classes/heat_logging_spec.rb index 7dae3754b..b448c4285 100644 --- a/heat/spec/classes/heat_logging_spec.rb +++ b/heat/spec/classes/heat_logging_spec.rb @@ -15,7 +15,7 @@ :logging_exception_prefix => '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s', :log_config_append => '/etc/heat/logging.conf', :publish_errors => true, - :default_log_levels => { + :default_log_levels => { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', 'iso8601' => 'WARN', @@ -57,11 +57,11 @@ shared_examples 'basic default logging settings' do it 'configures heat logging settins with default values' do - is_expected.to contain_heat_config('DEFAULT/use_syslog').with(:value => 'false') - is_expected.to contain_heat_config('DEFAULT/use_stderr').with(:value => 'true') + is_expected.to contain_heat_config('DEFAULT/use_syslog').with(:value => '') + is_expected.to contain_heat_config('DEFAULT/use_stderr').with(:value => '') is_expected.to contain_heat_config('DEFAULT/log_dir').with(:value => '/var/log/heat') - is_expected.to contain_heat_config('DEFAULT/verbose').with(:value => 'false') - is_expected.to contain_heat_config('DEFAULT/debug').with(:value => 'false') + is_expected.to contain_heat_config('DEFAULT/verbose').with(:value => '') + is_expected.to contain_heat_config('DEFAULT/debug').with(:value => '') end end @@ -120,7 +120,7 @@ :default_log_levels, :fatal_deprecations, :instance_format, :instance_uuid_format, :log_date_format, ].each { |param| - it { is_expected.to contain_heat_config("DEFAULT/#{param}").with_ensure('absent') } + it { is_expected.to contain_heat_config("DEFAULT/#{param}").with_value('') } } end diff --git a/horizon/CHANGELOG.md b/horizon/CHANGELOG.md index e7f8ab0dc..8966ea145 100644 --- a/horizon/CHANGELOG.md +++ b/horizon/CHANGELOG.md @@ -1,3 +1,34 @@ +##2015-11-25 - 7.0.0 +###Summary + +This is a 
backwards-incompatible major release for OpenStack Liberty. + +####Backwards-incompatible changes + +####Features +- allow Orchestration of Image Backend +- add new parameters for multidomain support +- add api_versions parameter +- remove hardcoded 'neutron_options' list +- remove hardcoded 'cinder_options' list +- remove hardcoded 'hypervisor_options' list +- add ssl_no_verify parameter +- make redirect type configurable +- add CUSTOM_THEME_PATH variable +- add tag to package resource +- add cache_backend and cache_options params + +####Bugfixes +- do not run collectstatic or compress in Debian +- remove duplicate key for ':servername' + +####Maintenance +- acceptance: enable debug & verbosity for OpenStack logs +- initial msync run for all Puppet OpenStack modules +- try to use zuul-cloner to prepare fixtures +- remove class_parameter_defaults puppet-lint check +- fix rspec 3.x syntax + ##2015-10-15 - 6.1.0 ###Summary diff --git a/horizon/README.md b/horizon/README.md index 686b896be..e358aed32 100644 --- a/horizon/README.md +++ b/horizon/README.md @@ -1,7 +1,7 @@ horizon ======= -6.1.0 - 2015.1 - Kilo +7.0.0 - 2015.2 - Liberty #### Table of Contents diff --git a/horizon/manifests/init.pp b/horizon/manifests/init.pp index 5f3a5c946..89f316f7a 100644 --- a/horizon/manifests/init.pp +++ b/horizon/manifests/init.pp @@ -195,7 +195,7 @@ # # [*django_session_engine*] # (optional) Selects the session engine for Django to use. -# Defaults to undefined - will not add entry to local settings. +# Defaults to undef - will not add entry to local settings. # # [*tuskar_ui*] # (optional) Boolean to enable Tuskar-UI related configuration (http://tuskar-ui.readthedocs.org/) @@ -208,7 +208,7 @@ # [*tuskar_ui_undercloud_admin_password*] # (optional) Tuskar-UI - Undercloud admin password used to authenticate admin user in Tuskar-UI. # It is required by Heat to perform certain actions. -# Defaults to undefined +# Defaults to undef # # [*tuskar_ui_deployment_mode*] # (optional) Tuskar-UI - Deployment mode ('poc' or 'scale') @@ -216,7 +216,7 @@ # # [*custom_theme_path*] # (optional) The directory location for the theme (e.g., "static/themes/blue") -# Default to undefined +# Default to undef # # [*redirect_type*] # (optional) What type of redirect to use when redirecting an http request @@ -241,7 +241,7 @@ # [*keystone_default_domain*] # (optional) Overrides the default domain used when running on single-domain model with Keystone V3. # All entities will be created in the default domain. -# Default to undefined +# Default to undef # # [*image_backend*] # (optional) Overrides the default image backend settings. This allows the list of supported @@ -249,6 +249,13 @@ # Example: image_backend => { 'image_formats' => { '' => 'Select type', 'qcow2' => 'QCOW2' } } # Default to empty hash # +# [*overview_days_range*] +# (optional) The default date range in the Overview panel meters - either minus N +# days (if the value is integer N), or from the beginning of the current month +# until today (if it's undefined). This setting should be used to limit the amount +# of data fetched by default when rendering the Overview panel. +# Defaults to undef. 
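A hedged example of the parameter documented above (the secret_key value is a placeholder): passing an integer limits the Overview panel to the last N days, while leaving it at undef keeps OVERVIEW_DAYS_RANGE commented out in local_settings.py, as the template change later in this patch shows:

```puppet
# Hypothetical profile; overview_days_range is the point of the example.
class { '::horizon':
  secret_key          => 'some_secret_key',
  overview_days_range => 7,  # renders "OVERVIEW_DAYS_RANGE = 7" in local_settings.py
}
```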
+# # === Examples # # class { 'horizon': @@ -308,6 +315,7 @@ $keystone_multidomain_support = false, $keystone_default_domain = undef, $image_backend = {}, + $overview_days_range = undef, # DEPRECATED PARAMETERS $can_set_mount_point = undef, $vhost_extra_params = undef, @@ -376,20 +384,16 @@ order => '50', } - package { 'python-lesscpy': - ensure => $package_ensure, - } - # debian/ubuntu do not use collect static as the packaging already handles # this as part of the packages. This was put in as a work around for Debian # who has since fixed their packaging. # See I813b5f6067bb6ecce279cab7278d9227c4d31d28 for the original history # behind this section. - if $::os_package_type == 'redhat' { + if $::os_package_type == 'rpm' { exec { 'refresh_horizon_django_cache': command => "${::horizon::params::manage_py} collectstatic --noinput --clear && ${::horizon::params::manage_py} compress --force", refreshonly => true, - require => [Package['python-lesscpy'], Package['horizon']], + require => Package['horizon'], } if $compress_offline { diff --git a/horizon/metadata.json b/horizon/metadata.json index 9e5bab6ea..8db0311d9 100644 --- a/horizon/metadata.json +++ b/horizon/metadata.json @@ -1,6 +1,6 @@ { "name": "openstack-horizon", - "version": "6.1.0", + "version": "7.0.0", "author": "Puppet Labs and OpenStack Contributors", "summary": "Puppet module for OpenStack Horizon", "license": "Apache-2.0", diff --git a/horizon/spec/classes/horizon_init_spec.rb b/horizon/spec/classes/horizon_init_spec.rb index 75ea55c32..b40f2b8f9 100644 --- a/horizon/spec/classes/horizon_init_spec.rb +++ b/horizon/spec/classes/horizon_init_spec.rb @@ -25,14 +25,13 @@ context 'with default parameters' do it { - is_expected.to contain_package('python-lesscpy').with_ensure('present') is_expected.to contain_package('horizon').with( :ensure => 'present', :tag => ['openstack', 'horizon-package'], ) } it { - if facts[:os_package_type] == 'redhat' + if facts[:os_package_type] == 'rpm' is_expected.to contain_exec('refresh_horizon_django_cache').with({ :command => '/usr/share/openstack-dashboard/manage.py collectstatic --noinput --clear && /usr/share/openstack-dashboard/manage.py compress --force', :refreshonly => true, @@ -42,7 +41,7 @@ end } it { - if facts[:os_package_type] == 'redhat' + if facts[:os_package_type] == 'rpm' is_expected.to contain_concat(platforms_params[:config_file]).that_notifies('Exec[refresh_horizon_django_cache]') else is_expected.to_not contain_concat(platforms_params[:config_file]).that_notifies('Exec[refresh_horizon_django_cache]') @@ -118,7 +117,8 @@ :custom_theme_path => 'static/themes/green', :api_versions => {'identity' => 3}, :keystone_multidomain_support => true, - :keystone_default_domain => 'domain.tld' + :keystone_default_domain => 'domain.tld', + :overview_days_range => 1 }) end @@ -158,6 +158,7 @@ " 'handlers': ['syslog'],", 'COMPRESS_OFFLINE = False', "FILE_UPLOAD_TEMP_DIR = '/var/spool/horizon'", + "OVERVIEW_DAYS_RANGE = 1" ]) end @@ -180,7 +181,7 @@ end it { - if facts[:os_package_type] == 'redhat' + if facts[:os_package_type] == 'rpm' is_expected.to contain_exec('refresh_horizon_django_cache') else is_expected.to_not contain_exec('refresh_horizon_django_cache') diff --git a/horizon/templates/local_settings.py.erb b/horizon/templates/local_settings.py.erb index a3a94286d..4cd1f929a 100644 --- a/horizon/templates/local_settings.py.erb +++ b/horizon/templates/local_settings.py.erb @@ -632,3 +632,12 @@ IRONIC_DISCOVERD_URL = "<%= @tuskar_ui_ironic_discoverd_url %>" UNDERCLOUD_ADMIN_PASSWORD = "<%= 
@tuskar_ui_undercloud_admin_password %>" DEPLOYMENT_MODE = "<%= @tuskar_ui_deployment_mode %>" <% end %> + +# The default date range in the Overview panel meters - either minus N +# days (if the value is integer N), or from the beginning of the current month +# until today (if set to None). This setting should be used to limit the amount +# of data fetched by default when rendering the Overview panel. +# OVERVIEW_DAYS_RANGE = None +<% if @overview_days_range %> +OVERVIEW_DAYS_RANGE = <%= @overview_days_range %> +<% end %> diff --git a/ironic/CHANGELOG.md b/ironic/CHANGELOG.md index 3a27b6822..b7a8df480 100644 --- a/ironic/CHANGELOG.md +++ b/ironic/CHANGELOG.md @@ -1,3 +1,53 @@ +##2015-11-25 - 7.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Liberty. + +####Backwards-incompatible changes +- change section name for AMQP qpid parameters +- change section name for AMQP rabbit parameters + +####Features +- add ironic::db::sync +- add bifrost manifest +- reflect provider change in puppet-openstacklib +- put all the logging related parameters to the logging class +- add ironic-inspector support +- simplify rpc_backend parameter +- introduce ironic::db class +- db: Use postgresql lib class for psycopg package +- allow customization of db sync command line +- allow customization of force_power_state_during_sync +- add ironic::config class +- add tag to package and service resources +- add support for identity_uri + +####Bugfixes +- rely on autorequire for config resource ordering +- fixed the comment for port in ironic api.pp +- add BOOTIF=${mac} to the inspector iPXE template + +####Maintenance +- acceptance: enable debug & verbosity for OpenStack logs +- initial msync run for all Puppet OpenStack modules +- fix rspec 3.x syntax +- acceptance: install openstack-selinux on redhat plateforms +- try to use zuul-cloner to prepare fixtures +- remove class_parameter_defaults puppet-lint check +- acceptance: use common bits from puppet-openstack-integration + +##2015-10-14 - 6.1.0 +###Summary + +This is a feature and maintenance release in the Kilo series. + +####Features +- Create Heat Domain with Keystone_domain resource + +####Maintenance +- Remove deprecated parameter stack_user_domain +- acceptance: checkout stable/kilo puppet modules + ##2015-10-15 - 6.1.0 ###Summary diff --git a/ironic/README.md b/ironic/README.md index 89afab980..67d2f7452 100644 --- a/ironic/README.md +++ b/ironic/README.md @@ -1,7 +1,7 @@ puppet-ironic ============= -6.1.0 - 2015.1 - Kilo +7.0.0 - 2015.2 - Liberty #### Table of Contents @@ -34,11 +34,7 @@ Setup ### Beginning with ironic -To utilize the ironic module's functionality you will need to declare multiple resources. -The following is a modified excerpt from the [openstack module](httpd://github.com/stackforge/puppet-openstack). -This is not an exhaustive list of all the components needed. We recommend that you consult and understand the -[openstack module](https://github.com/stackforge/puppet-openstack) and the [core openstack](http://docs.openstack.org) -documentation to assist you in understanding the available deployment options. +To utilize the ironic module's functionality you will need to declare multiple resources. This is not an exhaustive list of all the components needed. We recommend that you consult and understand the [core openstack](http://docs.openstack.org) documentation to assist you in understanding the available deployment options. 
```puppet # enable Ironic resources diff --git a/ironic/manifests/db.pp b/ironic/manifests/db.pp index 92c805f2e..43a266f62 100644 --- a/ironic/manifests/db.pp +++ b/ironic/manifests/db.pp @@ -10,39 +10,41 @@ # # [*database_idle_timeout*] # Timeout when db connections should be reaped. -# (Optional) Defaults to 3600. +# (Optional) Defaults to $::os_service_default # # [*database_max_retries*] # Maximum db connection retries during startup. # Setting -1 implies an infinite retry count. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default # # [*database_retry_interval*] # Interval between retries of opening a sql connection. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default # # [*database_min_pool_size*] # Minimum number of SQL connections to keep open in a pool. -# (Optional) Defaults to 1. +# (Optional) Defaults to $::os_service_default # # [*database_max_pool_size*] # Maximum number of SQL connections to keep open in a pool. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default # # [*database_max_overflow*] # If set, use this value for max_overflow with sqlalchemy. -# (Optional) Defaults to 20. +# (Optional) Defaults to $::os_service_default # class ironic::db ( $database_connection = 'sqlite:////var/lib/ironic/ovs.sqlite', - $database_idle_timeout = 3600, - $database_max_retries = 10, - $database_retry_interval = 10, - $database_min_pool_size = 1, - $database_max_pool_size = 10, - $database_max_overflow = 20, + $database_idle_timeout = $::os_service_default, + $database_max_retries = $::os_service_default, + $database_retry_interval = $::os_service_default, + $database_min_pool_size = $::os_service_default, + $database_max_pool_size = $::os_service_default, + $database_max_overflow = $::os_service_default, ) { + include ::ironic::params + # NOTE(spredzy): In order to keep backward compatibility we rely on the pick function # to use ironic:: if ironic::db:: isn't specified. 
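As a minimal, hypothetical declaration of the reworked ironic::db class (host and credentials are placeholders), every tuning parameter that is not passed stays at $::os_service_default, and a mysql+pymysql:// connection additionally pulls in the PyMySQL bindings package on Debian-family platforms, as the backend selection further down this hunk and the params.pp change show:

```puppet
# Sketch only; connection details are placeholders.
class { '::ironic::db':
  database_connection => 'mysql+pymysql://ironic:a_big_secret@127.0.0.1/ironic?charset=utf8',
  # idle timeout, pool sizes, retries and max_overflow are left at
  # $::os_service_default unless explicitly overridden.
}
```

On Debian the backend package resolves to python-pymysql via ironic::params, while on RedHat no extra package is installed for the pymysql driver.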
$database_connection_real = pick($::ironic::database_connection, $database_connection) @@ -54,44 +56,46 @@ $database_max_overflow_real = pick($::ironic::database_max_overflow, $database_max_overflow) validate_re($database_connection_real, - '(sqlite|mysql|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') + '^(sqlite|mysql(\+pymysql)?|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') - if $database_connection_real { - case $database_connection_real { - /^mysql:\/\//: { - $backend_package = false - require 'mysql::bindings' - require 'mysql::bindings::python' - } - /^postgresql:\/\//: { + case $database_connection_real { + /^mysql(\+pymysql)?:\/\//: { + require 'mysql::bindings' + require 'mysql::bindings::python' + if $database_connection_real =~ /^mysql\+pymysql/ { + $backend_package = $::ironic::params::pymysql_package_name + } else { $backend_package = false - require 'postgresql::lib::python' - } - /^sqlite:\/\//: { - $backend_package = $::ironic::params::sqlite_package_name - } - default: { - fail('Unsupported backend configured') } } - - if $backend_package and !defined(Package[$backend_package]) { - package {'ironic-backend-package': - ensure => present, - name => $backend_package, - tag => 'openstack', - } + /^postgresql:\/\//: { + $backend_package = false + require 'postgresql::lib::python' + } + /^sqlite:\/\//: { + $backend_package = $::ironic::params::sqlite_package_name + } + default: { + fail('Unsupported backend configured') } + } - ironic_config { - 'database/connection': value => $database_connection_real, secret => true; - 'database/idle_timeout': value => $database_idle_timeout_real; - 'database/max_retries': value => $database_max_retries_real; - 'database/retry_interval': value => $database_retry_interval_real; - 'database/min_pool_size': value => $database_min_pool_size_real; - 'database/max_pool_size': value => $database_max_pool_size_real; - 'database/max_overflow': value => $database_max_overflow_real; + if $backend_package and !defined(Package[$backend_package]) { + package {'ironic-backend-package': + ensure => present, + name => $backend_package, + tag => 'openstack', } } + ironic_config { + 'database/connection': value => $database_connection_real, secret => true; + 'database/idle_timeout': value => $database_idle_timeout_real; + 'database/max_retries': value => $database_max_retries_real; + 'database/retry_interval': value => $database_retry_interval_real; + 'database/min_pool_size': value => $database_min_pool_size_real; + 'database/max_pool_size': value => $database_max_pool_size_real; + 'database/max_overflow': value => $database_max_overflow_real; + } + } diff --git a/ironic/manifests/db/mysql.pp b/ironic/manifests/db/mysql.pp index e9ffa7b30..e9b685b4a 100644 --- a/ironic/manifests/db/mysql.pp +++ b/ironic/manifests/db/mysql.pp @@ -44,9 +44,6 @@ # (optional) Additional hosts that are allowed to access this DB # Defaults to undef # -# [*cluster_id*] -# (optional) Deprecated. Does nothing - class ironic::db::mysql ( $password, $dbname = 'ironic', @@ -55,13 +52,8 @@ $allowed_hosts = undef, $charset = 'utf8', $collate = 'utf8_general_ci', - $cluster_id = undef, ) { - if $cluster_id { - warning('The cluster_id parameter is deprecated and has no effect.') - } - ::openstacklib::db::mysql { 'ironic': user => $user, password_hash => mysql_password($password), diff --git a/ironic/manifests/init.pp b/ironic/manifests/init.pp index 41a2d457f..c48b1703c 100644 --- a/ironic/manifests/init.pp +++ b/ironic/manifests/init.pp @@ -119,21 +119,6 @@ # multiple RabbitMQ Brokers. 
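For reference, a hedged reading of the tightened validate_re above (now anchored with ^ and allowing an optional +pymysql driver suffix); the values below are illustrative and mirror the cases the updated heat specs exercise for the same pattern:

```puppet
# Passes the new check (the driver suffix is optional):
validate_re('mysql+pymysql://ironic:secret@127.0.0.1/ironic',
  '^(sqlite|mysql(\+pymysql)?|postgresql):\/\/(\S+:\S+@\S+\/\S+)?')

# 'sqlite:////var/lib/ironic/ovs.sqlite', 'mysql://...' and 'postgresql://...'
# connections match as well, while an unrecognised scheme such as
# 'foo+pymysql://ironic:secret@127.0.0.1/ironic' fails validate_re and
# aborts catalog compilation.
```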
# Defaults to false # -# [*qpid_hostname*] -# [*qpid_port*] -# [*qpid_username*] -# [*qpid_password*] -# [*qpid_heartbeat*] -# [*qpid_protocol*] -# [*qpid_tcp_nodelay*] -# [*qpid_reconnect*] -# [*qpid_reconnect_timeout*] -# [*qpid_reconnect_limit*] -# [*qpid_reconnect_interval*] -# [*qpid_reconnect_interval_min*] -# [*qpid_reconnect_interval_max*] -# (optional) various QPID options -# # [*use_syslog*] # (optional) Use syslog for logging # Defaults to undef. @@ -200,6 +185,22 @@ # Enable dbsync # Defaults to true # +# DEPRECATED PARAMETERS +# +# [*qpid_hostname*] +# [*qpid_port*] +# [*qpid_username*] +# [*qpid_password*] +# [*qpid_heartbeat*] +# [*qpid_protocol*] +# [*qpid_tcp_nodelay*] +# [*qpid_reconnect*] +# [*qpid_reconnect_timeout*] +# [*qpid_reconnect_limit*] +# [*qpid_reconnect_interval*] +# [*qpid_reconnect_interval_min*] +# [*qpid_reconnect_interval_max*] +# class ironic ( $enabled = true, $package_ensure = 'present', @@ -227,19 +228,6 @@ $kombu_ssl_keyfile = undef, $kombu_ssl_version = 'TLSv1', $amqp_durable_queues = false, - $qpid_hostname = 'localhost', - $qpid_port = '5672', - $qpid_username = 'guest', - $qpid_password = 'guest', - $qpid_heartbeat = 60, - $qpid_protocol = 'tcp', - $qpid_tcp_nodelay = true, - $qpid_reconnect = true, - $qpid_reconnect_timeout = 0, - $qpid_reconnect_limit = 0, - $qpid_reconnect_interval_min = 0, - $qpid_reconnect_interval_max = 0, - $qpid_reconnect_interval = 0, $use_syslog = false, $log_facility = 'LOG_USER', $database_connection = undef, @@ -256,6 +244,19 @@ $sync_db = true, # DEPRECATED PARAMETERS $rabbit_user = undef, + $qpid_hostname = undef, + $qpid_port = undef, + $qpid_username = undef, + $qpid_password = undef, + $qpid_heartbeat = undef, + $qpid_protocol = undef, + $qpid_tcp_nodelay = undef, + $qpid_reconnect = undef, + $qpid_reconnect_timeout = undef, + $qpid_reconnect_limit = undef, + $qpid_reconnect_interval_min = undef, + $qpid_reconnect_interval_max = undef, + $qpid_reconnect_interval = undef, ) { include ::ironic::logging @@ -369,21 +370,7 @@ } if $rpc_backend == 'ironic.openstack.common.rpc.impl_qpid' or $rpc_backend == 'qpid' { - ironic_config { - 'oslo_messaging_qpid/qpid_hostname': value => $qpid_hostname; - 'oslo_messaging_qpid/qpid_port': value => $qpid_port; - 'oslo_messaging_qpid/qpid_username': value => $qpid_username; - 'oslo_messaging_qpid/qpid_password': value => $qpid_password, secret => true; - 'oslo_messaging_qpid/qpid_heartbeat': value => $qpid_heartbeat; - 'oslo_messaging_qpid/qpid_protocol': value => $qpid_protocol; - 'oslo_messaging_qpid/qpid_tcp_nodelay': value => $qpid_tcp_nodelay; - 'oslo_messaging_qpid/qpid_reconnect': value => $qpid_reconnect; - 'oslo_messaging_qpid/qpid_reconnect_timeout': value => $qpid_reconnect_timeout; - 'oslo_messaging_qpid/qpid_reconnect_limit': value => $qpid_reconnect_limit; - 'oslo_messaging_qpid/qpid_reconnect_interval_min': value => $qpid_reconnect_interval_min; - 'oslo_messaging_qpid/qpid_reconnect_interval_max': value => $qpid_reconnect_interval_max; - 'oslo_messaging_qpid/qpid_reconnect_interval': value => $qpid_reconnect_interval; - } + warning('Qpid driver is removed from Oslo.messaging in the Mitaka release') } } diff --git a/ironic/manifests/inspector.pp b/ironic/manifests/inspector.pp index 773a79d6a..98b625be9 100644 --- a/ironic/manifests/inspector.pp +++ b/ironic/manifests/inspector.pp @@ -169,6 +169,10 @@ ensure => 'present', require => Package['ironic-inspector'], } + file { '/tftpboot': + ensure => 'directory', + seltype => 'tftpdir_t', + } if $pxe_transfer_protocol 
== 'tftp' { file { '/etc/ironic-inspector/dnsmasq.conf': @@ -176,9 +180,6 @@ content => template('ironic/inspector_dnsmasq_tftp.erb'), require => Package['ironic-inspector'], } - file { '/tftpboot': - ensure => 'directory', - } file { '/tftpboot/pxelinux.cfg': ensure => 'directory', } diff --git a/ironic/manifests/logging.pp b/ironic/manifests/logging.pp index f2c0eb58c..281e1dd45 100644 --- a/ironic/manifests/logging.pp +++ b/ironic/manifests/logging.pp @@ -6,23 +6,23 @@ # # [*verbose*] # (Optional) Should the daemons log verbose messages -# Defaults to 'false' +# Defaults to $::os_service_default # # [*debug*] # (Optional) Should the daemons log debug messages -# Defaults to 'false' +# Defaults to $::os_service_default # # [*use_syslog*] # (Optional) Use syslog for logging. -# Defaults to 'false' +# Defaults to $::os_service_default # # [*use_stderr*] # (optional) Use stderr for logging -# Defaults to 'true' +# Defaults to $::os_service_default # # [*log_facility*] # (Optional) Syslog facility to receive log lines. -# Defaults to 'LOG_USER' +# Defaults to $::os_service_default # # [*log_dir*] # (optional) Directory where logs should be stored. @@ -31,34 +31,34 @@ # # [*logging_context_format_string*] # (optional) Format string to use for log messages with context. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [%(request_id)s %(user_identity)s] %(instance)s%(message)s' # # [*logging_default_format_string*] # (optional) Format string to use for log messages without context. -# Defaults to undef. +# Defaults to$::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [-] %(instance)s%(message)s' # # [*logging_debug_format_suffix*] # (optional) Formatted data to append to log format when level is DEBUG. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(funcName)s %(pathname)s:%(lineno)d' # # [*logging_exception_prefix*] # (optional) Prefix each line of exception output with this format. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s' # # [*log_config_append*] # The name of an additional logging configuration file. -# Defaults to undef. +# Defaults to $::os_service_default # See https://docs.python.org/2/howto/logging.html # # [*default_log_levels*] # (optional) Hash of logger (keys) and level (values) pairs. -# Defaults to undef. +# Defaults to $::os_service_default # Example: # { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', # 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', @@ -67,47 +67,47 @@ # # [*publish_errors*] # (optional) Publish error events (boolean value). -# Defaults to undef (false if unconfigured). +# Defaults to $::os_service_default # # [*fatal_deprecations*] # (optional) Make deprecations fatal (boolean value) -# Defaults to undef (false if unconfigured). +# Defaults to $::os_service_default # # [*instance_format*] # (optional) If an instance is passed with the log message, format it # like this (string value). -# Defaults to undef. +# Defaults to $::os_service_default # Example: '[instance: %(uuid)s] ' # # [*instance_uuid_format*] # (optional) If an instance UUID is passed with the log message, format # it like this (string value). -# Defaults to undef. 
+# Defaults to $::os_service_default # Example: instance_uuid_format='[instance: %(uuid)s] ' - +# # [*log_date_format*] # (optional) Format string for %%(asctime)s in log records. -# Defaults to undef. +# Defaults to $::os_service_default # Example: 'Y-%m-%d %H:%M:%S' class ironic::logging( - $use_syslog = false, - $use_stderr = true, - $log_facility = 'LOG_USER', + $use_syslog = $::os_service_default, + $use_stderr = $::os_service_default, + $log_facility = $::os_service_default, $log_dir = '/var/log/ironic', - $verbose = false, - $debug = false, - $logging_context_format_string = undef, - $logging_default_format_string = undef, - $logging_debug_format_suffix = undef, - $logging_exception_prefix = undef, - $log_config_append = undef, - $default_log_levels = undef, - $publish_errors = undef, - $fatal_deprecations = undef, - $instance_format = undef, - $instance_uuid_format = undef, - $log_date_format = undef, + $verbose = $::os_service_default, + $debug = $::os_service_default, + $logging_context_format_string = $::os_service_default, + $logging_default_format_string = $::os_service_default, + $logging_debug_format_suffix = $::os_service_default, + $logging_exception_prefix = $::os_service_default, + $log_config_append = $::os_service_default, + $default_log_levels = $::os_service_default, + $publish_errors = $::os_service_default, + $fatal_deprecations = $::os_service_default, + $instance_format = $::os_service_default, + $instance_uuid_format = $::os_service_default, + $log_date_format = $::os_service_default, ) { # NOTE(spredzy): In order to keep backward compatibility we rely on the pick function @@ -119,139 +119,31 @@ $verbose_real = pick($::ironic::verbose,$verbose) $debug_real = pick($::ironic::debug,$debug) - ironic_config { - 'DEFAULT/debug' : value => $debug_real; - 'DEFAULT/verbose' : value => $verbose_real; - 'DEFAULT/use_stderr' : value => $use_stderr_real; - 'DEFAULT/use_syslog' : value => $use_syslog_real; - 'DEFAULT/log_dir' : value => $log_dir_real; - 'DEFAULT/syslog_log_facility': value => $log_facility_real; + if is_service_default($default_log_levels) { + $default_log_levels_real = $default_log_levels + } else { + $default_log_levels_real = join(sort(join_keys_to_values($default_log_levels, '=')), ',') } - if $logging_context_format_string { - ironic_config { - 'DEFAULT/logging_context_format_string' : - value => $logging_context_format_string; - } - } - else { - ironic_config { - 'DEFAULT/logging_context_format_string' : ensure => absent; - } - } - - if $logging_default_format_string { - ironic_config { - 'DEFAULT/logging_default_format_string' : - value => $logging_default_format_string; - } - } - else { - ironic_config { - 'DEFAULT/logging_default_format_string' : ensure => absent; - } - } - - if $logging_debug_format_suffix { - ironic_config { - 'DEFAULT/logging_debug_format_suffix' : - value => $logging_debug_format_suffix; - } - } - else { - ironic_config { - 'DEFAULT/logging_debug_format_suffix' : ensure => absent; - } - } - - if $logging_exception_prefix { - ironic_config { - 'DEFAULT/logging_exception_prefix' : value => $logging_exception_prefix; - } - } - else { - ironic_config { - 'DEFAULT/logging_exception_prefix' : ensure => absent; - } - } - - if $log_config_append { - ironic_config { - 'DEFAULT/log_config_append' : value => $log_config_append; - } - } - else { - ironic_config { - 'DEFAULT/log_config_append' : ensure => absent; - } - } - - if $default_log_levels { - ironic_config { - 'DEFAULT/default_log_levels' : - value => 
join(sort(join_keys_to_values($default_log_levels, '=')), ','); - } - } - else { - ironic_config { - 'DEFAULT/default_log_levels' : ensure => absent; - } - } - - if $publish_errors { - ironic_config { - 'DEFAULT/publish_errors' : value => $publish_errors; - } - } - else { - ironic_config { - 'DEFAULT/publish_errors' : ensure => absent; - } - } - - if $fatal_deprecations { - ironic_config { - 'DEFAULT/fatal_deprecations' : value => $fatal_deprecations; - } - } - else { - ironic_config { - 'DEFAULT/fatal_deprecations' : ensure => absent; - } - } - - if $instance_format { - ironic_config { - 'DEFAULT/instance_format' : value => $instance_format; - } - } - else { - ironic_config { - 'DEFAULT/instance_format' : ensure => absent; - } - } - - if $instance_uuid_format { - ironic_config { - 'DEFAULT/instance_uuid_format' : value => $instance_uuid_format; - } - } - else { - ironic_config { - 'DEFAULT/instance_uuid_format' : ensure => absent; - } - } - - if $log_date_format { - ironic_config { - 'DEFAULT/log_date_format' : value => $log_date_format; - } - } - else { - ironic_config { - 'DEFAULT/log_date_format' : ensure => absent; - } - } + ironic_config { + 'DEFAULT/debug': value => $debug_real; + 'DEFAULT/verbose': value => $verbose_real; + 'DEFAULT/use_stderr': value => $use_stderr_real; + 'DEFAULT/use_syslog': value => $use_syslog_real; + 'DEFAULT/log_dir': value => $log_dir_real; + 'DEFAULT/syslog_log_facility': value => $log_facility_real; + 'DEFAULT/logging_context_format_string': value => $logging_context_format_string; + 'DEFAULT/logging_default_format_string': value => $logging_default_format_string; + 'DEFAULT/logging_debug_format_suffix': value => $logging_debug_format_suffix; + 'DEFAULT/logging_exception_prefix': value => $logging_exception_prefix; + 'DEFAULT/log_config_append': value => $log_config_append; + 'DEFAULT/default_log_levels': value => $default_log_levels_real; + 'DEFAULT/publish_errors': value => $publish_errors; + 'DEFAULT/fatal_deprecations': value => $fatal_deprecations; + 'DEFAULT/instance_format': value => $instance_format; + 'DEFAULT/instance_uuid_format': value => $instance_uuid_format; + 'DEFAULT/log_date_format': value => $log_date_format; + } } diff --git a/ironic/manifests/params.pp b/ironic/manifests/params.pp index 700ac9e11..dc9f3c396 100644 --- a/ironic/manifests/params.pp +++ b/ironic/manifests/params.pp @@ -38,6 +38,7 @@ $inspector_service = 'openstack-ironic-inspector' $inspector_dnsmasq_service = 'openstack-ironic-inspector-dnsmasq' $sqlite_package_name = undef + $pymysql_package_name = undef } 'Debian': { $common_package_name = 'ironic-common' @@ -53,6 +54,7 @@ # this should be changed to whatever debian will use for dnsmasq $inspector_dnsmasq_service = 'ironic-inspector-dnsmasq' $sqlite_package_name = 'python-pysqlite2' + $pymysql_package_name = 'python-pymysql' } default: { fail("Unsupported osfamily ${::osfamily}") diff --git a/ironic/metadata.json b/ironic/metadata.json index e28b32457..b3fc4f31e 100644 --- a/ironic/metadata.json +++ b/ironic/metadata.json @@ -1,6 +1,6 @@ { "name": "openstack-ironic", - "version": "6.1.0", + "version": "7.0.0", "author": "eNovance and OpenStack Contributors", "summary": "Puppet module for OpenStack Ironic", "license": "Apache-2.0", @@ -32,9 +32,9 @@ "description": "Installs and configures OpenStack Ironic (Bare metal).", "dependencies": [ { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, + { "name": 
"openstack/keystone", "version_requirement": ">=7.0.0 <8.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" }, + { "name": "openstack/openstacklib", "version_requirement": ">=7.0.0 <8.0.0" }, { "name": "puppetlabs/vcsrepo", "version_requirement": ">=1.3.0 <2.0.0"} ] } diff --git a/ironic/spec/acceptance/basic_ironic_spec.rb b/ironic/spec/acceptance/basic_ironic_spec.rb index dd8ed08c1..26c8ed487 100644 --- a/ironic/spec/acceptance/basic_ironic_spec.rb +++ b/ironic/spec/acceptance/basic_ironic_spec.rb @@ -32,7 +32,7 @@ class { '::ironic': rabbit_userid => 'ironic', rabbit_password => 'an_even_bigger_secret', rabbit_host => '127.0.0.1', - database_connection => 'mysql://ironic:a_big_secret@127.0.0.1/ironic?charset=utf8', + database_connection => 'mysql+pymysql://ironic:a_big_secret@127.0.0.1/ironic?charset=utf8', debug => true, verbose => true, enabled_drivers => ['pxe_ssh'], diff --git a/ironic/spec/classes/inspector_db_sync_spec.rb b/ironic/spec/classes/inspector_db_sync_spec.rb index cf71ccab6..429ef3ec9 100644 --- a/ironic/spec/classes/inspector_db_sync_spec.rb +++ b/ironic/spec/classes/inspector_db_sync_spec.rb @@ -18,11 +18,11 @@ context 'on a RedHat osfamily' do let :facts do - { + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.0', :concat_basedir => '/var/lib/puppet/concat' - } + }) end it_configures 'inspector-dbsync' @@ -30,12 +30,12 @@ context 'on a Debian osfamily' do let :facts do - { + @default_facts.merge({ :operatingsystemrelease => '7.8', :operatingsystem => 'Debian', :osfamily => 'Debian', :concat_basedir => '/var/lib/puppet/concat' - } + }) end it_configures 'inspector-dbsync' diff --git a/ironic/spec/classes/ironic_api_spec.rb b/ironic/spec/classes/ironic_api_spec.rb index 726531ee9..f7d56d383 100644 --- a/ironic/spec/classes/ironic_api_spec.rb +++ b/ironic/spec/classes/ironic_api_spec.rb @@ -127,7 +127,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -140,7 +140,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/ironic/spec/classes/ironic_client_spec.rb b/ironic/spec/classes/ironic_client_spec.rb index b4b4eb49c..90ad76778 100644 --- a/ironic/spec/classes/ironic_client_spec.rb +++ b/ironic/spec/classes/ironic_client_spec.rb @@ -24,7 +24,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it { is_expected.to contain_class('ironic::client') } @@ -32,7 +32,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it { is_expected.to contain_class('ironic::client') } diff --git a/ironic/spec/classes/ironic_conductor_spec.rb b/ironic/spec/classes/ironic_conductor_spec.rb index f93bf4878..86a20bebd 100644 --- a/ironic/spec/classes/ironic_conductor_spec.rb +++ b/ironic/spec/classes/ironic_conductor_spec.rb @@ -80,7 +80,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -93,7 +93,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git 
a/ironic/spec/classes/ironic_db_mysql_spec.rb b/ironic/spec/classes/ironic_db_mysql_spec.rb index 803ce0344..ad4237cd7 100644 --- a/ironic/spec/classes/ironic_db_mysql_spec.rb +++ b/ironic/spec/classes/ironic_db_mysql_spec.rb @@ -31,12 +31,12 @@ end let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it { is_expected.to contain_openstacklib__db__mysql('ironic').with( @@ -49,7 +49,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it { is_expected.to contain_openstacklib__db__mysql('ironic').with( diff --git a/ironic/spec/classes/ironic_db_spec.rb b/ironic/spec/classes/ironic_db_spec.rb index e152a7a26..e2667a628 100644 --- a/ironic/spec/classes/ironic_db_spec.rb +++ b/ironic/spec/classes/ironic_db_spec.rb @@ -7,18 +7,18 @@ context 'with default parameters' do it { is_expected.to contain_ironic_config('database/connection').with_value('sqlite:////var/lib/ironic/ovs.sqlite').with_secret(true) } - it { is_expected.to contain_ironic_config('database/idle_timeout').with_value('3600') } - it { is_expected.to contain_ironic_config('database/min_pool_size').with_value('1') } - it { is_expected.to contain_ironic_config('database/max_pool_size').with_value('10') } - it { is_expected.to contain_ironic_config('database/max_overflow').with_value('20') } - it { is_expected.to contain_ironic_config('database/max_retries').with_value('10') } - it { is_expected.to contain_ironic_config('database/retry_interval').with_value('10') } + it { is_expected.to contain_ironic_config('database/idle_timeout').with_value('') } + it { is_expected.to contain_ironic_config('database/min_pool_size').with_value('') } + it { is_expected.to contain_ironic_config('database/max_pool_size').with_value('') } + it { is_expected.to contain_ironic_config('database/max_overflow').with_value('') } + it { is_expected.to contain_ironic_config('database/max_retries').with_value('') } + it { is_expected.to contain_ironic_config('database/retry_interval').with_value('') } end context 'with specific parameters' do let :params do - { :database_connection => 'mysql://ironic:ironic@localhost/ironic', + { :database_connection => 'mysql+pymysql://ironic:ironic@localhost/ironic', :database_idle_timeout => '3601', :database_min_pool_size => '2', :database_max_pool_size => '21', @@ -27,7 +27,7 @@ :database_retry_interval => '11', } end - it { is_expected.to contain_ironic_config('database/connection').with_value('mysql://ironic:ironic@localhost/ironic').with_secret(true) } + it { is_expected.to contain_ironic_config('database/connection').with_value('mysql+pymysql://ironic:ironic@localhost/ironic').with_secret(true) } it { is_expected.to contain_ironic_config('database/idle_timeout').with_value('3601') } it { is_expected.to contain_ironic_config('database/min_pool_size').with_value('2') } it { is_expected.to contain_ironic_config('database/max_retries').with_value('11') } @@ -37,6 +37,14 @@ end + context 'with MySQL-python library as backend package' do + let :params do + { :database_connection => 'mysql://ironic:ironic@localhost/ironic' } + end + + it { is_expected.to contain_ironic_config('database/connection').with_value('mysql://ironic:ironic@localhost/ironic').with_secret(true) } + end + context 'with postgresql backend' do let :params do { :database_connection => 
'postgresql://ironic:ironic@localhost/ironic', } @@ -56,27 +64,51 @@ it_raises 'a Puppet::Error', /validate_re/ end + context 'with incorrect database_connection string' do + let :params do + { :database_connection => 'foo+pymysql://ironic:ironic@localhost/ironic', } + end + + it_raises 'a Puppet::Error', /validate_re/ + end + end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian', + @default_facts.merge({ :osfamily => 'Debian', :operatingsystem => 'Debian', :operatingsystemrelease => 'jessie', - } + }) end it_configures 'ironic::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://ironic:ironic@localhost/ironic' } + end + + it { is_expected.to contain_package('ironic-backend-package').with({ :ensure => 'present', :name => 'python-pymysql' }) } + end end context 'on Redhat platforms' do let :facts do - { :osfamily => 'RedHat', + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.1', - } + }) end it_configures 'ironic::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://ironic:ironic@localhost/ironic' } + end + + it { is_expected.not_to contain_package('ironic-backend-package') } + end end end diff --git a/ironic/spec/classes/ironic_drivers_ipmi_spec.rb b/ironic/spec/classes/ironic_drivers_ipmi_spec.rb index 315f9d514..345f76886 100644 --- a/ironic/spec/classes/ironic_drivers_ipmi_spec.rb +++ b/ironic/spec/classes/ironic_drivers_ipmi_spec.rb @@ -52,7 +52,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'ironic ipmi driver' @@ -60,7 +60,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'ironic ipmi driver' diff --git a/ironic/spec/classes/ironic_drivers_pxe_spec.rb b/ironic/spec/classes/ironic_drivers_pxe_spec.rb index 26c13a2c9..f75d56db0 100644 --- a/ironic/spec/classes/ironic_drivers_pxe_spec.rb +++ b/ironic/spec/classes/ironic_drivers_pxe_spec.rb @@ -87,7 +87,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'ironic pxe driver' @@ -95,7 +95,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'ironic pxe driver' diff --git a/ironic/spec/classes/ironic_init_spec.rb b/ironic/spec/classes/ironic_init_spec.rb index b3de4a5f9..07e0805f0 100644 --- a/ironic/spec/classes/ironic_init_spec.rb +++ b/ironic/spec/classes/ironic_init_spec.rb @@ -237,58 +237,9 @@ end end - shared_examples_for 'with qpid rpc backend' do - before do - params.merge!({ :rpc_backend => 'qpid' }) - end - - it { is_expected.to contain_neutron_config('DEFAULT/rpc_backend').with_value('qpid') } - - context 'when default params' do - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_username').with_value('guest') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_password').with_value('guest').with_secret(true) } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_port').with_value('5672') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_protocol').with_value('tcp') } - it { is_expected.to 
contain_neutron_config('oslo_messaging_qpid/qpid_heartbeat').with_value('60') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_tcp_nodelay').with_value('true') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_reconnect').with_value('true') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_reconnect_timeout').with_value('0') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_reconnect_limit').with_value('0') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_reconnect_interval_min').with_value('0') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_reconnect_interval_max').with_value('0') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_reconnect_interval').with_value('0') } - end - - context 'when passing params' do - before do - params.merge!({ - :qpid_password => 'pass', - :qpid_username => 'guest2', - :qpid_hostname => 'localhost2', - :qpid_port => '5673', - :qpid_protocol => 'udp', - :qpid_heartbeat => '89', - :qpid_tcp_nodelay => 'false', - :qpid_reconnect => 'false', - }) - end - - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_username').with_value('guest2') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_password').with_value('pass').with_secret(true) } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost2') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_port').with_value('5673') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_protocol').with_value('udp') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_heartbeat').with_value('89') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_tcp_nodelay').with_value('false') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_reconnect').with_value('false') } - end - - end - context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -300,7 +251,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/ironic/spec/classes/ironic_inspector_spec.rb b/ironic/spec/classes/ironic_inspector_spec.rb index ea8cb4cb5..dccd155a4 100644 --- a/ironic/spec/classes/ironic_inspector_spec.rb +++ b/ironic/spec/classes/ironic_inspector_spec.rb @@ -119,6 +119,12 @@ 'content' => /default/, ) end + it 'should contain directory /tftpboot with selinux type tftpdir_t' do + is_expected.to contain_file('/tftpboot').with( + 'ensure' => 'directory', + 'seltype' => 'tftpdir_t' + ) + end context 'when overriding parameters' do before :each do @@ -164,7 +170,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -177,7 +183,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/ironic/spec/classes/ironic_keystone_auth_spec.rb b/ironic/spec/classes/ironic_keystone_auth_spec.rb index 83e62207f..45e2f9a1b 100644 --- a/ironic/spec/classes/ironic_keystone_auth_spec.rb +++ b/ironic/spec/classes/ironic_keystone_auth_spec.rb @@ -23,7 +23,7 @@ describe 'ironic::keystone::auth' do let 
:facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end describe 'with default class parameters' do diff --git a/ironic/spec/classes/ironic_logging_spec.rb b/ironic/spec/classes/ironic_logging_spec.rb index 02c975381..9de79db84 100644 --- a/ironic/spec/classes/ironic_logging_spec.rb +++ b/ironic/spec/classes/ironic_logging_spec.rb @@ -15,7 +15,7 @@ :logging_exception_prefix => '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s', :log_config_append => '/etc/ironic/logging.conf', :publish_errors => true, - :default_log_levels => { + :default_log_levels => { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', 'iso8601' => 'WARN', @@ -57,11 +57,11 @@ shared_examples 'basic default logging settings' do it 'configures ironic logging settins with default values' do - is_expected.to contain_ironic_config('DEFAULT/use_syslog').with(:value => 'false') - is_expected.to contain_ironic_config('DEFAULT/use_stderr').with(:value => 'true') + is_expected.to contain_ironic_config('DEFAULT/use_syslog').with(:value => '') + is_expected.to contain_ironic_config('DEFAULT/use_stderr').with(:value => '') is_expected.to contain_ironic_config('DEFAULT/log_dir').with(:value => '/var/log/ironic') - is_expected.to contain_ironic_config('DEFAULT/verbose').with(:value => 'false') - is_expected.to contain_ironic_config('DEFAULT/debug').with(:value => 'false') + is_expected.to contain_ironic_config('DEFAULT/verbose').with(:value => '') + is_expected.to contain_ironic_config('DEFAULT/debug').with(:value => '') end end @@ -120,13 +120,13 @@ :default_log_levels, :fatal_deprecations, :instance_format, :instance_uuid_format, :log_date_format, ].each { |param| - it { is_expected.to contain_ironic_config("DEFAULT/#{param}").with_ensure('absent') } + it { is_expected.to contain_ironic_config("DEFAULT/#{param}").with_value('') } } end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'ironic-logging' @@ -134,7 +134,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'ironic-logging' diff --git a/ironic/spec/classes/ironic_policy_spec.rb b/ironic/spec/classes/ironic_policy_spec.rb index 8e029ff2b..7d5cba9da 100644 --- a/ironic/spec/classes/ironic_policy_spec.rb +++ b/ironic/spec/classes/ironic_policy_spec.rb @@ -25,7 +25,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'ironic policies' @@ -33,7 +33,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'ironic policies' diff --git a/ironic/spec/spec_helper.rb b/ironic/spec/spec_helper.rb index 3df4cede1..9bc7bcf96 100644 --- a/ironic/spec/spec_helper.rb +++ b/ironic/spec/spec_helper.rb @@ -5,6 +5,9 @@ RSpec.configure do |c| c.alias_it_should_behave_like_to :it_configures, 'configures' c.alias_it_should_behave_like_to :it_raises, 'raises' + c.before :each do + @default_facts = { :os_service_default => '' } + end end at_exit { RSpec::Puppet::Coverage.report! 
} diff --git a/ironic/templates/inspector_ipxe.erb b/ironic/templates/inspector_ipxe.erb index 7362ca0bf..d5129f30d 100644 --- a/ironic/templates/inspector_ipxe.erb +++ b/ironic/templates/inspector_ipxe.erb @@ -2,6 +2,6 @@ dhcp -kernel http://<%= @dnsmasq_local_ip %>:8088/agent.kernel ipa-inspection-callback-url=http://<%= @dnsmasq_local_ip %>:5050/v1/continue systemd.journald.forward_to_console=yes +kernel http://<%= @dnsmasq_local_ip %>:8088/agent.kernel ipa-inspection-callback-url=http://<%= @dnsmasq_local_ip %>:5050/v1/continue systemd.journald.forward_to_console=yes BOOTIF=${mac} initrd http://<%= @dnsmasq_local_ip %>:8088/agent.ramdisk boot diff --git a/keystone/README.md b/keystone/README.md index 53151af22..a9d41e210 100644 --- a/keystone/README.md +++ b/keystone/README.md @@ -23,22 +23,22 @@ Module Description The keystone module is a thorough attempt to make Puppet capable of managing the entirety of keystone. This includes manifests to provision region specific endpoint and database connections. Types are shipped as part of the keystone module to assist in manipulation of configuration files. -This module is tested in combination with other modules needed to build and leverage an entire Openstack software stack. These modules can be found, all pulled together in the [openstack module](https://github.com/stackforge/puppet-openstack). +This module is tested in combination with other modules needed to build and leverage an entire OpenStack software stack. Setup ----- **What the keystone module affects** -* keystone, the identify service for Openstack. +* [Keystone](http://docs.openstack.org/developer/keystone/), the identity service for OpenStack. ### Installing keystone - example% puppet module install openstack/keystone + puppet module install openstack/keystone ### Beginning with keystone -To utilize the keystone module's functionality you will need to declare multiple resources. The following is a modified excerpt from the [openstack module](https://github.com/stackforge/puppet-openstack). This is not an exhaustive list of all the components needed, we recommend you consult and understand the [openstack module](https://github.com/stackforge/puppet-openstack) and the [core openstack](http://docs.openstack.org) documentation. +To utilize the keystone module's functionality you will need to declare multiple resources. This is not an exhaustive list of all the components needed; we recommend you consult and understand the [core openstack](http://docs.openstack.org) documentation. 
**Define a keystone node** diff --git a/keystone/Rakefile b/keystone/Rakefile index 9692ffdd2..ed79bead4 100644 --- a/keystone/Rakefile +++ b/keystone/Rakefile @@ -58,7 +58,7 @@ task :spec_prep do zuul_clone_cmd += ['git://git.openstack.org', "#{repo}"] sh(*zuul_clone_cmd) else - sh("git clone https://git.openstack.org/#{repo} -b stable/liberty #{repo}") + sh("git clone https://git.openstack.org/#{repo} #{repo}") end script = ['env'] script += ["PUPPETFILE_DIR=#{Dir.pwd}/spec/fixtures/modules"] diff --git a/keystone/lib/puppet/provider/keystone_user/openstack.rb b/keystone/lib/puppet/provider/keystone_user/openstack.rb index d5d38ee48..7220ab7aa 100644 --- a/keystone/lib/puppet/provider/keystone_user/openstack.rb +++ b/keystone/lib/puppet/provider/keystone_user/openstack.rb @@ -109,6 +109,9 @@ def password # last chance - try a domain scoped token credentials.domain_name = domain end + + credentials.identity_api_version = '2' if credentials.auth_url =~ /v2\.0\/?$/ + begin token = Puppet::Provider::Openstack.request('token', 'issue', ['--format', 'value'], credentials) rescue Puppet::Error::OpenstackUnauthorizedError diff --git a/keystone/manifests/client.pp b/keystone/manifests/client.pp index ddf3d6ae6..cf668bfec 100644 --- a/keystone/manifests/client.pp +++ b/keystone/manifests/client.pp @@ -17,11 +17,5 @@ tag => 'openstack', } - if $ensure == 'present' { - include '::openstacklib::openstackclient' - } else { - class { '::openstacklib::openstackclient': - package_ensure => $ensure, - } - } + include '::openstacklib::openstackclient' } diff --git a/keystone/manifests/db.pp b/keystone/manifests/db.pp index a07b36ff9..9305c58dc 100644 --- a/keystone/manifests/db.pp +++ b/keystone/manifests/db.pp @@ -1,4 +1,4 @@ -# == Class: keystone::db +# class: keystone::db # # Configure the Keystone database # @@ -10,37 +10,37 @@ # # [*database_idle_timeout*] # Timeout when db connections should be reaped. -# (Optional) Defaults to 3600. +# (Optional) Defaults to $::os_service_default # # [*database_max_retries*] # Maximum number of database connection retries during startup. # Setting -1 implies an infinite retry count. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default # # [*database_retry_interval*] # Interval between retries of opening a database connection. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default # # [*database_min_pool_size*] # Minimum number of SQL connections to keep open in a pool. -# (Optional) Defaults to 1. +# (Optional) Defaults to $::os_service_default # # [*database_max_pool_size*] # Maximum number of SQL connections to keep open in a pool. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default # # [*database_max_overflow*] # If set, use this value for max_overflow with sqlalchemy. -# (Optional) Defaults to 20. 
+# (Optional) Defaults to $::os_service_default # class keystone::db ( $database_connection = 'sqlite:////var/lib/keystone/keystone.sqlite', - $database_idle_timeout = 3600, - $database_min_pool_size = 1, - $database_max_pool_size = 10, - $database_max_retries = 10, - $database_retry_interval = 10, - $database_max_overflow = 20, + $database_idle_timeout = $::os_service_default, + $database_min_pool_size = $::os_service_default, + $database_max_pool_size = $::os_service_default, + $database_max_retries = $::os_service_default, + $database_retry_interval = $::os_service_default, + $database_max_overflow = $::os_service_default, ) { include ::keystone::params @@ -58,46 +58,44 @@ validate_re($database_connection_real, '^(sqlite|mysql(\+pymysql)?|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') - if $database_connection_real { - case $database_connection_real { - /^mysql(\+pymysql)?:\/\//: { - require 'mysql::bindings' - require 'mysql::bindings::python' - if $database_connection_real =~ /^mysql\+pymysql/ { - $backend_package = $::keystone::params::pymysql_package_name - } else { - $backend_package = false - } - } - /^postgresql:\/\//: { + case $database_connection_real { + /^mysql(\+pymysql)?:\/\//: { + require 'mysql::bindings' + require 'mysql::bindings::python' + if $database_connection_real =~ /^mysql\+pymysql/ { + $backend_package = $::keystone::params::pymysql_package_name + } else { $backend_package = false - require 'postgresql::lib::python' - } - /^sqlite:\/\//: { - $backend_package = $::keystone::params::sqlite_package_name - } - default: { - fail('Unsupported backend configured') } } - - if $backend_package and !defined(Package[$backend_package]) { - package {'keystone-backend-package': - ensure => present, - name => $backend_package, - tag => 'openstack', - } + /^postgresql:\/\//: { + $backend_package = false + require 'postgresql::lib::python' } + /^sqlite:\/\//: { + $backend_package = $::keystone::params::sqlite_package_name + } + default: { + fail('Unsupported backend configured') + } + } - keystone_config { - 'database/connection': value => $database_connection_real, secret => true; - 'database/idle_timeout': value => $database_idle_timeout_real; - 'database/min_pool_size': value => $database_min_pool_size_real; - 'database/max_retries': value => $database_max_retries_real; - 'database/retry_interval': value => $database_retry_interval_real; - 'database/max_pool_size': value => $database_max_pool_size_real; - 'database/max_overflow': value => $database_max_overflow_real; + if $backend_package and !defined(Package[$backend_package]) { + package {'keystone-backend-package': + ensure => present, + name => $backend_package, + tag => 'openstack', } } + keystone_config { + 'database/connection': value => $database_connection_real, secret => true; + 'database/idle_timeout': value => $database_idle_timeout_real; + 'database/min_pool_size': value => $database_min_pool_size_real; + 'database/max_retries': value => $database_max_retries_real; + 'database/retry_interval': value => $database_retry_interval_real; + 'database/max_pool_size': value => $database_max_pool_size_real; + 'database/max_overflow': value => $database_max_overflow_real; + } + } diff --git a/keystone/manifests/dev/install.pp b/keystone/manifests/dev/install.pp deleted file mode 100644 index f52800f23..000000000 --- a/keystone/manifests/dev/install.pp +++ /dev/null @@ -1,70 +0,0 @@ -# -# Installs keystone from source. 
This is not yet fully implemented -# -# == Parameters -# -# [*source_dir*] -# (optional) The source dire for dev installation -# Defaults to '/usr/local/keystone' -# -# == Dependencies -# == Examples -# == Authors -# -# Dan Bode dan@puppetlabs.com -# -# == Copyright -# -# Copyright 2012 Puppetlabs Inc, unless otherwise noted. -# -class keystone::dev::install( - $source_dir = '/usr/local/keystone' -) { - # make sure that I have python 2.7 installed - - Class['openstack::dev'] -> Class['keystone::dev::install'] - - # there are likely conficts with other packages - # introduced by these resources - package { [ - 'python-dev', - 'libxml2-dev', - 'libxslt1-dev', - 'libsasl2-dev', - 'libsqlite3-dev', - 'libssl-dev', - 'libldap2-dev', - 'sqlite3' - ]: - ensure => latest, - } - - vcsrepo { $source_dir: - ensure => present, - provider => git, - source => 'git://github.com/openstack/keystone.git', - } - - Exec { - cwd => $source_dir, - path => '/usr/bin', - refreshonly => true, - subscribe => Vcsrepo[$source_dir], - logoutput => true, - # I have disabled timeout since this seems to take forever - # this may be a bad idea :) - timeout => 0, - } - - # TODO - really, I need a way to take this file and - # convert it into package resources - exec { 'install_dev_deps': - command => 'pip install -r tools/pip-requires', - } - - exec { 'install_keystone_source': - command => 'python setup.py develop', - require => Exec['install_dev_deps'], - } - -} diff --git a/keystone/manifests/endpoint.pp b/keystone/manifests/endpoint.pp index c9e7ef5ea..a0891633f 100644 --- a/keystone/manifests/endpoint.pp +++ b/keystone/manifests/endpoint.pp @@ -36,12 +36,10 @@ # If keystone_project_domain is not specified, use $keystone_default_domain # Defaults to undef # -# === DEPRECATED -# # [*version*] # (optional) API version for endpoint. -# Defaults to 'v2.0' -# If the version is assigned to null value (forced to undef), then it won't be +# Defaults to 'v2.0'. Valid values are 'v2.0', 'v3', or the empty string ''. +# If the version is set to the empty string (''), then it won't be # used. This is the expected behaviour since Keystone V3 handles API versions # from the context. # @@ -61,10 +59,16 @@ $user_domain = undef, $project_domain = undef, $default_domain = undef, - $version = 'v2.0', # DEPRECATED + $version = 'unset', # defaults to 'v2.0' if unset by user ) { - if empty($version) { + if $version == 'unset' { + warning('In Mitaka, the default value of $keystone::endpoint::version will change to \'\'. To avoid this warning, please set the version parameter.') + $_version = 'v2.0' + } else { + $_version = $version + } + if empty($_version) { $admin_url_real = $admin_url $public_url_real = $public_url @@ -76,16 +80,14 @@ } } else { - warning('The version parameter is deprecated in Liberty.') - - $public_url_real = "${public_url}/${version}" - $admin_url_real = "${admin_url}/${version}" + $public_url_real = "${public_url}/${_version}" + $admin_url_real = "${admin_url}/${_version}" if $internal_url { - $internal_url_real = "${internal_url}/${version}" + $internal_url_real = "${internal_url}/${_version}" } else { - $internal_url_real = "${public_url}/${version}" + $internal_url_real = "${public_url}/${_version}" } } diff --git a/keystone/manifests/init.pp b/keystone/manifests/init.pp index 59b1851dc..899651f6d 100644 --- a/keystone/manifests/init.pp +++ b/keystone/manifests/init.pp @@ -73,7 +73,7 @@ # # [*revoke_driver*] # (optional) Driver for token revocation. 
-# Defaults to 'keystone.contrib.revoke.backends.sql.Revoke' +# Defaults to $::os_service_default # # [*revoke_by_id*] # (optional) Revoke token by token identifier. @@ -96,22 +96,22 @@ # (optional) Dogpile.cache backend module. It is recommended that Memcache with pooling # (keystone.cache.memcache_pool) or Redis (dogpile.cache.redis) be used in production. # This has no effects unless 'memcache_servers' is set. -# Defaults to 'keystone.common.cache.noop' +# Defaults to $::os_service_default # # [*cache_backend_argument*] # (optional) List of arguments in format of argname:value supplied to the backend module. # Specify this option once per argument to be passed to the dogpile.cache backend. # This has no effects unless 'memcache_servers' is set. -# Default to undef. +# Default to $::os_service_default # # [*debug_cache_backend*] # (optional) Extra debugging from the cache backend (cache keys, get/set/delete calls). # This has no effects unless 'memcache_servers' is set. -# Default to false. +# Default to $::os_service_default # # [*token_caching*] # (optional) Toggle for token system caching. This has no effects unless 'memcache_servers' is set. -# Default to true. +# Default to $::os_service_default # # [*manage_service*] # (Optional) If Puppet should manage service startup / shutdown. @@ -189,27 +189,27 @@ # # [*rabbit_host*] # (optional) Location of rabbitmq installation. -# Defaults to localhost. +# Defaults to $::os_service_default # # [*rabbit_port*] # (optional) Port for rabbitmq instance. -# Defaults to 5672. +# Defaults to $::os_service_default # # [*rabbit_hosts*] # (optional) Location of rabbitmq installation. -# Defaults to undef. +# Defaults to $::os_service_default # # [*rabbit_password*] # (optional) Password used to connect to rabbitmq. -# Defaults to guest. +# Defaults to $::os_service_default # # [*rabbit_userid*] # (optional) User used to connect to rabbitmq. -# Defaults to guest. +# Defaults to $::os_service_default # # [*rabbit_virtual_host*] # (optional) The RabbitMQ virtual host. -# Defaults to /. +# Defaults to $::os_service_default # # [*rabbit_heartbeat_timeout_threshold*] # (optional) Number of seconds after which the RabbitMQ broker is considered @@ -217,43 +217,44 @@ # Heartbeating helps to ensure the TCP connection to RabbitMQ isn't silently # closed, resulting in missed or lost messages from the queue. # (Requires kombu >= 3.0.7 and amqp >= 1.4.0) -# Defaults to 0 +# Defaults to $::os_service_default # # [*rabbit_heartbeat_rate*] # (optional) How often during the rabbit_heartbeat_timeout_threshold period to # check the heartbeat on RabbitMQ connection. (i.e. rabbit_heartbeat_rate=2 # when rabbit_heartbeat_timeout_threshold=60, the heartbeat will be checked # every 30 seconds. -# Defaults to 2 +# Defaults to $::os_service_default # # [*rabbit_use_ssl*] # (optional) Connect over SSL for RabbitMQ -# Defaults to false +# Defaults to $::os_service_default # # [*kombu_ssl_ca_certs*] # (optional) SSL certification authority file (valid only if SSL enabled). -# Defaults to undef +# Defaults to $::os_service_default # # [*kombu_ssl_certfile*] # (optional) SSL cert file (valid only if SSL enabled). -# Defaults to undef +# Defaults to $::os_service_default # # [*kombu_ssl_keyfile*] # (optional) SSL key file (valid only if SSL enabled). -# Defaults to undef +# Defaults to $::os_service_default # # [*kombu_ssl_version*] # (optional) SSL version to use (valid only if SSL enabled). # Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may be # available on some distributions. 
-# Defaults to 'TLSv1' +# Defaults to $::os_service_default # # [*notification_driver*] # RPC driver. Not enabled by default +# Defaults to $::os_service_default # # [*notification_topics*] # (optional) AMQP topics to publish to when using the RPC notification driver. -# Default to false. +# Default to $::os_service_default # # [*notification_format*] # Format for the notifications. Valid values are 'basic' and 'cadf'. @@ -261,7 +262,7 @@ # # [*control_exchange*] # (optional) AMQP exchange to connect to if using RabbitMQ or Qpid -# Default to false. +# Default to $::os_service_default # # [*public_bind_host*] # (optional) The IP address of the public network interface to listen on @@ -286,7 +287,7 @@ # keystone listens for connections) (string value) # If set to false, no public_endpoint will be defined in keystone.conf. # Sample value: 'http://localhost:5000/' -# Defaults to false +# Defaults to $::os_service_default # # [*admin_endpoint*] # (optional) The base admin endpoint URL for keystone that are @@ -294,7 +295,7 @@ # for connections) (string value) # If set to false, no admin_endpoint will be defined in keystone.conf. # Sample value: 'http://localhost:35357/' -# Defaults to false +# Defaults to $::os_service_default # # [*enable_ssl*] # (optional) Toggle for SSL support on the keystone eventlet servers. @@ -374,7 +375,7 @@ # # [*max_token_size*] # (optional) maximum allowable Keystone token size -# Defaults to undef +# Defaults to $::os_service_default # # [*admin_workers*] # (optional) The number of worker processes to serve the admin eventlet application. @@ -406,7 +407,7 @@ # # [*fernet_max_active_keys*] # (Optional) Number of maximum active Fernet keys. Integer > 0. -# Defaults to undef +# Defaults to $::os_service_default # # [*default_domain*] # (optional) When Keystone v3 support is enabled, v2 clients will need @@ -422,20 +423,24 @@ # (optional) Number of seconds memcached server is considered dead before it # is tried again. This is used for the cache memcache_dead_retry and the # memcache dead_retry values. -# Defaults to undef +# Defaults to $::os_service_default # # [*memcache_socket_timeout*] # (optional) Timeout in seconds for every call to a server. -# Defaults to undef +# Defaults to $::os_service_default # # [*memcache_pool_maxsize*] # (optional) Max total number of open connections to every memcached server. -# Defaults to undef +# Defaults to $::os_service_default # # [*memcache_pool_unused_timeout*] # (optional) Number of seconds a connection to memcached is held unused in # the pool before it is closed. -# Defaults to undef. +# Defaults to $::os_service_default +# +# [*policy_driver*] +# Policy backend driver. (string value) +# Defaults to $::os_service_default. 
# # == Dependencies # None @@ -487,10 +492,10 @@ $token_provider = 'keystone.token.providers.uuid.Provider', $token_driver = 'keystone.token.persistence.backends.sql.Token', $token_expiration = 3600, - $revoke_driver = 'keystone.contrib.revoke.backends.sql.Revoke', + $revoke_driver = $::os_service_default, $revoke_by_id = true, - $public_endpoint = false, - $admin_endpoint = false, + $public_endpoint = $::os_service_default, + $admin_endpoint = $::os_service_default, $enable_ssl = false, $ssl_certfile = '/etc/keystone/ssl/certs/keystone.pem', $ssl_keyfile = '/etc/keystone/ssl/private/keystonekey.pem', @@ -498,12 +503,12 @@ $ssl_ca_key = '/etc/keystone/ssl/private/cakey.pem', $ssl_cert_subject = '/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost', $cache_dir = '/var/cache/keystone', - $memcache_servers = false, + $memcache_servers = $::os_service_default, $manage_service = true, - $cache_backend = 'keystone.common.cache.noop', - $cache_backend_argument = undef, - $debug_cache_backend = false, - $token_caching = true, + $cache_backend = $::os_service_default, + $cache_backend_argument = $::os_service_default, + $debug_cache_backend = $::os_service_default, + $token_caching = $::os_service_default, $enabled = true, $database_connection = undef, $database_idle_timeout = undef, @@ -519,23 +524,23 @@ $signing_ca_key = '/etc/keystone/ssl/private/cakey.pem', $signing_cert_subject = '/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com', $signing_key_size = 2048, - $rabbit_host = 'localhost', - $rabbit_hosts = false, - $rabbit_password = 'guest', - $rabbit_port = '5672', - $rabbit_userid = 'guest', - $rabbit_virtual_host = '/', - $rabbit_heartbeat_timeout_threshold = 0, - $rabbit_heartbeat_rate = 2, - $rabbit_use_ssl = false, - $kombu_ssl_ca_certs = undef, - $kombu_ssl_certfile = undef, - $kombu_ssl_keyfile = undef, - $kombu_ssl_version = 'TLSv1', - $notification_driver = false, - $notification_topics = false, - $notification_format = undef, - $control_exchange = false, + $rabbit_host = $::os_service_default, + $rabbit_hosts = $::os_service_default, + $rabbit_password = $::os_service_default, + $rabbit_port = $::os_service_default, + $rabbit_userid = $::os_service_default, + $rabbit_virtual_host = $::os_service_default, + $rabbit_heartbeat_timeout_threshold = $::os_service_default, + $rabbit_heartbeat_rate = $::os_service_default, + $rabbit_use_ssl = $::os_service_default, + $kombu_ssl_ca_certs = $::os_service_default, + $kombu_ssl_certfile = $::os_service_default, + $kombu_ssl_keyfile = $::os_service_default, + $kombu_ssl_version = $::os_service_default, + $notification_driver = $::os_service_default, + $notification_topics = $::os_service_default, + $notification_format = $::os_service_default, + $control_exchange = $::os_service_default, $validate_service = false, $validate_insecure = false, $validate_auth_url = false, @@ -543,16 +548,17 @@ $paste_config = $::keystone::params::paste_config, $service_provider = $::keystone::params::service_provider, $service_name = $::keystone::params::service_name, - $max_token_size = undef, + $max_token_size = $::os_service_default, $sync_db = true, $enable_fernet_setup = false, $fernet_key_repository = '/etc/keystone/fernet-keys', - $fernet_max_active_keys = undef, + $fernet_max_active_keys = $::os_service_default, $default_domain = undef, - $memcache_dead_retry = undef, - $memcache_socket_timeout = undef, - $memcache_pool_maxsize = undef, - $memcache_pool_unused_timeout = undef, + $memcache_dead_retry = $::os_service_default, + $memcache_socket_timeout = 
$::os_service_default, + $memcache_pool_maxsize = $::os_service_default, + $memcache_pool_unused_timeout = $::os_service_default, + $policy_driver = $::os_service_default, # DEPRECATED PARAMETERS $admin_workers = max($::processorcount, 2), $public_workers = max($::processorcount, 2), @@ -572,14 +578,14 @@ warning('Version string /v2.0/ should not be included in keystone::public_endpoint') } - if $rabbit_use_ssl { - if !$kombu_ssl_ca_certs { + if ! is_service_default($rabbit_use_ssl) and $rabbit_use_ssl { + if is_service_default($kombu_ssl_ca_certs) { fail('The kombu_ssl_ca_certs parameter is required when rabbit_use_ssl is set to true') } - if !$kombu_ssl_certfile { + if is_service_default($kombu_ssl_certfile) { fail('The kombu_ssl_certfile parameter is required when rabbit_use_ssl is set to true') } - if !$kombu_ssl_keyfile { + if is_service_default($kombu_ssl_keyfile) { fail('The kombu_ssl_keyfile parameter is required when rabbit_use_ssl is set to true') } } @@ -645,23 +651,9 @@ } # Endpoint configuration - if $public_endpoint { - keystone_config { - 'DEFAULT/public_endpoint': value => $public_endpoint; - } - } else { - keystone_config { - 'DEFAULT/public_endpoint': ensure => absent; - } - } - if $admin_endpoint { - keystone_config { - 'DEFAULT/admin_endpoint': value => $admin_endpoint; - } - } else { - keystone_config { - 'DEFAULT/admin_endpoint': ensure => absent; - } + keystone_config { + 'DEFAULT/public_endpoint': value => $public_endpoint; + 'DEFAULT/admin_endpoint': value => $admin_endpoint; } # requirements for memcache token driver if ($token_driver =~ /memcache/ ) { @@ -671,20 +663,21 @@ } } - # token driver config keystone_config { 'token/driver': value => $token_driver; 'token/expiration': value => $token_expiration; } - if $revoke_driver { - keystone_config { - 'revoke/driver': value => $revoke_driver; - } - } else { - keystone_config { - 'revoke/driver': ensure => absent; - } + keystone_config { + 'revoke/driver': value => $revoke_driver; + } + + if ($policy_driver =~ /^keystone\.policy\.backends\..*Policy$/) { + warning('policy driver form \'keystone.policy.backends.*Policy\' is deprecated') + } + + keystone_config { + 'policy/driver': value => $policy_driver; } # ssl config @@ -704,54 +697,45 @@ } # memcache connection config - if $memcache_servers { + if ! is_service_default($memcache_servers) and $memcache_servers { validate_array($memcache_servers) Service<| title == 'memcached' |> -> Service['keystone'] keystone_config { 'cache/enabled': value => true; - 'cache/backend': value => $cache_backend; - 'cache/debug_cache_backend': value => $debug_cache_backend; - 'token/caching': value => $token_caching; 'memcache/servers': value => join($memcache_servers, ','); - 'memcache/dead_retry': value => $memcache_dead_retry; - 'memcache/socket_timeout': value => $memcache_socket_timeout; - 'memcache/pool_maxsize': value => $memcache_pool_maxsize; - 'memcache/pool_unused_timeout': value => $memcache_pool_unused_timeout; - 'cache/memcache_dead_retry': value => $memcache_dead_retry; - 'cache/memcache_socket_timeout': value => $memcache_socket_timeout; - 'cache/memcache_pool_maxsize': value => $memcache_pool_maxsize; - 'cache/memcache_pool_unused_timeout': value => $memcache_pool_unused_timeout; } - if $cache_backend_argument { + if ! 
is_service_default($cache_backend_argument) { validate_array($cache_backend_argument) keystone_config { - 'cache/backend_argument': value => join($cache_backend_argument, ','); + 'cache/backend_argument': value => join($cache_backend_argument, ','); } } else { keystone_config { - 'cache/backend_argument': ensure => absent; + 'cache/backend_argument': ensure => absent; } } } else { keystone_config { - 'cache/enabled': ensure => absent; - 'cache/backend': ensure => absent; - 'cache/backend_argument': ensure => absent; - 'cache/debug_cache_backend': ensure => absent; - 'token/caching': ensure => absent; - 'memcache/servers': ensure => absent; - 'memcache/dead_retry': ensure => absent; - 'memcache/socket_timeout': ensure => absent; - 'memcache/pool_maxsize': ensure => absent; - 'memcache/pool_unused_timeout': ensure => absent; - 'cache/memcache_dead_retry': ensure => absent; - 'cache/memcache_socket_timeout': ensure => absent; - 'cache/memcache_pool_maxsize': ensure => absent; - 'cache/memcache_pool_unused_timeout': ensure => absent; - + 'cache/enabled': ensure => absent; + 'cache/backend_argument': ensure => absent; + 'memcache/servers': ensure => absent; } } + keystone_config { + 'memcache/dead_retry': value => $memcache_dead_retry; + 'memcache/socket_timeout': value => $memcache_socket_timeout; + 'memcache/pool_maxsize': value => $memcache_pool_maxsize; + 'memcache/pool_unused_timeout': value => $memcache_pool_unused_timeout; + 'cache/memcache_dead_retry': value => $memcache_dead_retry; + 'cache/memcache_socket_timeout': value => $memcache_socket_timeout; + 'cache/memcache_pool_maxsize': value => $memcache_pool_maxsize; + 'cache/memcache_pool_unused_timeout': value => $memcache_pool_unused_timeout; + 'cache/backend': value => $cache_backend; + 'cache/debug_cache_backend': value => $debug_cache_backend; + 'token/caching': value => $token_caching; + } + # configure based on the catalog backend if $catalog_driver { $catalog_driver_real = $catalog_driver @@ -798,68 +782,40 @@ } } - keystone_config { 'token/provider': value => $token_provider } - - if $max_token_size { - keystone_config { 'DEFAULT/max_token_size': value => $max_token_size } - } else { - keystone_config { 'DEFAULT/max_token_size': ensure => absent } + keystone_config { + 'token/provider': value => $token_provider; + 'DEFAULT/max_token_size': value => $max_token_size; + 'DEFAULT/notification_driver': value => $notification_driver; + 'DEFAULT/notification_topics': value => $notification_topics; + 'DEFAULT/notification_format': value => $notification_format; + 'DEFAULT/control_exchange': value => $control_exchange; } - if $notification_driver { - keystone_config { 'DEFAULT/notification_driver': value => $notification_driver } - } else { - keystone_config { 'DEFAULT/notification_driver': ensure => absent } - } - if $notification_topics { - keystone_config { 'DEFAULT/notification_topics': value => $notification_topics } - } else { - keystone_config { 'DEFAULT/notification_topics': ensure => absent } - } - if $notification_format { - keystone_config { 'DEFAULT/notification_format': value => $notification_format } - } else { - keystone_config { 'DEFAULT/notification_format': ensure => absent } - } - if $control_exchange { - keystone_config { 'DEFAULT/control_exchange': value => $control_exchange } + if ! 
is_service_default($rabbit_hosts) and $rabbit_hosts { + keystone_config { + 'oslo_messaging_rabbit/rabbit_hosts': value => join($rabbit_hosts, ','); + 'oslo_messaging_rabbit/rabbit_ha_queues': value => true; + } } else { - keystone_config { 'DEFAULT/control_exchange': ensure => absent } + keystone_config { + 'oslo_messaging_rabbit/rabbit_host': value => $rabbit_host; + 'oslo_messaging_rabbit/rabbit_port': value => $rabbit_port; + 'oslo_messaging_rabbit/rabbit_ha_queues': value => false; + 'oslo_messaging_rabbit/rabbit_hosts': ensure => absent; + } } keystone_config { + 'oslo_messaging_rabbit/rabbit_use_ssl': value => $rabbit_use_ssl; 'oslo_messaging_rabbit/rabbit_password': value => $rabbit_password, secret => true; 'oslo_messaging_rabbit/rabbit_userid': value => $rabbit_userid; 'oslo_messaging_rabbit/rabbit_virtual_host': value => $rabbit_virtual_host; 'oslo_messaging_rabbit/heartbeat_timeout_threshold': value => $rabbit_heartbeat_timeout_threshold; 'oslo_messaging_rabbit/heartbeat_rate': value => $rabbit_heartbeat_rate; - } - - if $rabbit_hosts { - keystone_config { 'oslo_messaging_rabbit/rabbit_hosts': value => join($rabbit_hosts, ',') } - keystone_config { 'oslo_messaging_rabbit/rabbit_ha_queues': value => true } - } else { - keystone_config { 'oslo_messaging_rabbit/rabbit_host': value => $rabbit_host } - keystone_config { 'oslo_messaging_rabbit/rabbit_port': value => $rabbit_port } - keystone_config { 'oslo_messaging_rabbit/rabbit_hosts': value => "${rabbit_host}:${rabbit_port}" } - keystone_config { 'oslo_messaging_rabbit/rabbit_ha_queues': value => false } - } - - keystone_config { 'oslo_messaging_rabbit/rabbit_use_ssl': value => $rabbit_use_ssl } - if $rabbit_use_ssl { - keystone_config { - 'oslo_messaging_rabbit/kombu_ssl_ca_certs': value => $kombu_ssl_ca_certs; - 'oslo_messaging_rabbit/kombu_ssl_certfile': value => $kombu_ssl_certfile; - 'oslo_messaging_rabbit/kombu_ssl_keyfile': value => $kombu_ssl_keyfile; - 'oslo_messaging_rabbit/kombu_ssl_version': value => $kombu_ssl_version; - } - } else { - keystone_config { - 'oslo_messaging_rabbit/kombu_ssl_ca_certs': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_certfile': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_keyfile': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_version': ensure => absent; - } + 'oslo_messaging_rabbit/kombu_ssl_ca_certs': value => $kombu_ssl_ca_certs; + 'oslo_messaging_rabbit/kombu_ssl_certfile': value => $kombu_ssl_certfile; + 'oslo_messaging_rabbit/kombu_ssl_keyfile': value => $kombu_ssl_keyfile; + 'oslo_messaging_rabbit/kombu_ssl_version': value => $kombu_ssl_version; } keystone_config { @@ -944,7 +900,6 @@ # Fernet tokens support if $enable_fernet_setup { validate_string($fernet_key_repository) - exec { 'keystone-manage fernet_setup': path => '/usr/bin', user => 'keystone', @@ -955,26 +910,19 @@ } } - keystone_config {'token/revoke_by_id': value => $revoke_by_id} - if $fernet_key_repository { keystone_config { - 'fernet_tokens/key_repository': value => $fernet_key_repository; + 'fernet_tokens/key_repository': value => $fernet_key_repository; } } else { keystone_config { - 'fernet_tokens/key_repository': ensure => absent; + 'fernet_tokens/key_repository': ensure => absent; } } - if $fernet_max_active_keys { - keystone_config { - 'fernet_tokens/max_active_keys': value => $fernet_max_active_keys; - } - } else { - keystone_config { - 'fernet_tokens/max_active_keys': ensure => absent; - } + keystone_config { + 'token/revoke_by_id': value => $revoke_by_id; + 'fernet_tokens/max_active_keys': 
value => $fernet_max_active_keys; } if $default_domain { diff --git a/keystone/manifests/logging.pp b/keystone/manifests/logging.pp index 3a61d8cb2..1ad08a920 100644 --- a/keystone/manifests/logging.pp +++ b/keystone/manifests/logging.pp @@ -6,23 +6,23 @@ # # [*verbose*] # (Optional) Should the daemons log verbose messages -# Defaults to 'false' +# Defaults to $::os_service_default # # [*debug*] # (Optional) Should the daemons log debug messages -# Defaults to 'false' +# Defaults to $::os_service_default # # [*use_syslog*] # (Optional) Use syslog for logging. -# Defaults to 'false' +# Defaults to $::os_service_default # # [*use_stderr*] # (optional) Use stderr for logging -# Defaults to 'true' +# Defaults to $::os_service_default # # [*log_facility*] # (Optional) Syslog facility to receive log lines. -# Defaults to 'LOG_USER' +# Defaults to $::os_service_default # # [*log_dir*] # (optional) Directory where logs should be stored. @@ -31,38 +31,38 @@ # # [*log_file*] # (optional) File where logs should be stored. -# Defaults to false. +# Defaults to $::os_service_default # # [*logging_context_format_string*] # (optional) Format string to use for log messages with context. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [%(request_id)s %(user_identity)s] %(instance)s%(message)s' # # [*logging_default_format_string*] # (optional) Format string to use for log messages without context. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [-] %(instance)s%(message)s' # # [*logging_debug_format_suffix*] # (optional) Formatted data to append to log format when level is DEBUG. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(funcName)s %(pathname)s:%(lineno)d' # # [*logging_exception_prefix*] # (optional) Prefix each line of exception output with this format. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s' # # [*log_config_append*] # The name of an additional logging configuration file. -# Defaults to undef. +# Defaults to $::os_service_default # See https://docs.python.org/2/howto/logging.html # # [*default_log_levels*] # (optional) Hash of logger (keys) and level (values) pairs. -# Defaults to undef. +# Defaults to $::os_service_default # Example: # { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', # 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', @@ -74,48 +74,48 @@ # # [*publish_errors*] # (optional) Publish error events (boolean value). -# Defaults to undef (false if unconfigured). +# Defaults to $::os_service_default # # [*fatal_deprecations*] # (optional) Make deprecations fatal (boolean value) -# Defaults to undef (false if unconfigured). +# Defaults to $::os_service_default # # [*instance_format*] # (optional) If an instance is passed with the log message, format it # like this (string value). -# Defaults to undef. +# Defaults to $::os_service_default # Example: '[instance: %(uuid)s] ' # # [*instance_uuid_format*] # (optional) If an instance UUID is passed with the log message, format # it like this (string value). -# Defaults to undef. +# Defaults to $::os_service_default # Example: instance_uuid_format='[instance: %(uuid)s] ' - +# # [*log_date_format*] # (optional) Format string for %%(asctime)s in log records. -# Defaults to undef. 
+# Defaults to $::os_service_default # Example: 'Y-%m-%d %H:%M:%S' class keystone::logging( - $use_syslog = false, - $use_stderr = true, - $log_facility = 'LOG_USER', + $use_syslog = $::os_service_default, + $use_stderr = $::os_service_default, + $log_facility = $::os_service_default, $log_dir = '/var/log/keystone', - $log_file = false, - $verbose = false, - $debug = false, - $logging_context_format_string = undef, - $logging_default_format_string = undef, - $logging_debug_format_suffix = undef, - $logging_exception_prefix = undef, - $log_config_append = undef, - $default_log_levels = undef, - $publish_errors = undef, - $fatal_deprecations = undef, - $instance_format = undef, - $instance_uuid_format = undef, - $log_date_format = undef, + $log_file = $::os_service_default, + $verbose = $::os_service_default, + $debug = $::os_service_default, + $logging_context_format_string = $::os_service_default, + $logging_default_format_string = $::os_service_default, + $logging_debug_format_suffix = $::os_service_default, + $logging_exception_prefix = $::os_service_default, + $log_config_append = $::os_service_default, + $default_log_levels = $::os_service_default, + $publish_errors = $::os_service_default, + $fatal_deprecations = $::os_service_default, + $instance_format = $::os_service_default, + $instance_uuid_format = $::os_service_default, + $log_date_format = $::os_service_default, ) { # NOTE(spredzy): In order to keep backward compatibility we rely on the pick function @@ -128,151 +128,31 @@ $verbose_real = pick($::keystone::verbose,$verbose) $debug_real = pick($::keystone::debug,$debug) - keystone_config { - 'DEFAULT/debug' : value => $debug_real; - 'DEFAULT/verbose' : value => $verbose_real; - 'DEFAULT/use_stderr' : value => $use_stderr_real; - 'DEFAULT/use_syslog' : value => $use_syslog_real; - 'DEFAULT/log_dir' : value => $log_dir_real; - 'DEFAULT/syslog_log_facility': value => $log_facility_real; + if is_service_default($default_log_levels) { + $default_log_levels_real = $default_log_levels + } else { + $default_log_levels_real = join(sort(join_keys_to_values($default_log_levels, '=')), ',') } - if $log_file_real { - keystone_config { - 'DEFAULT/log_file' : - value => $log_file_real; - } - } - else { - keystone_config { - 'DEFAULT/log_file' : ensure => absent; - } - } - - if $logging_context_format_string { - keystone_config { - 'DEFAULT/logging_context_format_string' : - value => $logging_context_format_string; - } - } - else { - keystone_config { - 'DEFAULT/logging_context_format_string' : ensure => absent; - } - } - - if $logging_default_format_string { - keystone_config { - 'DEFAULT/logging_default_format_string' : - value => $logging_default_format_string; - } - } - else { - keystone_config { - 'DEFAULT/logging_default_format_string' : ensure => absent; - } - } - - if $logging_debug_format_suffix { - keystone_config { - 'DEFAULT/logging_debug_format_suffix' : - value => $logging_debug_format_suffix; - } - } - else { - keystone_config { - 'DEFAULT/logging_debug_format_suffix' : ensure => absent; - } - } - - if $logging_exception_prefix { - keystone_config { - 'DEFAULT/logging_exception_prefix' : value => $logging_exception_prefix; - } - } - else { - keystone_config { - 'DEFAULT/logging_exception_prefix' : ensure => absent; - } - } - - if $log_config_append { - keystone_config { - 'DEFAULT/log_config_append' : value => $log_config_append; - } - } - else { - keystone_config { - 'DEFAULT/log_config_append' : ensure => absent; - } - } - - if $default_log_levels { - keystone_config { - 
'DEFAULT/default_log_levels' : - value => join(sort(join_keys_to_values($default_log_levels, '=')), ','); - } - } - else { - keystone_config { - 'DEFAULT/default_log_levels' : ensure => absent; - } - } - - if $publish_errors { - keystone_config { - 'DEFAULT/publish_errors' : value => $publish_errors; - } - } - else { - keystone_config { - 'DEFAULT/publish_errors' : ensure => absent; - } - } - - if $fatal_deprecations { - keystone_config { - 'DEFAULT/fatal_deprecations' : value => $fatal_deprecations; - } - } - else { - keystone_config { - 'DEFAULT/fatal_deprecations' : ensure => absent; - } - } - - if $instance_format { - keystone_config { - 'DEFAULT/instance_format' : value => $instance_format; - } - } - else { - keystone_config { - 'DEFAULT/instance_format' : ensure => absent; - } - } - - if $instance_uuid_format { - keystone_config { - 'DEFAULT/instance_uuid_format' : value => $instance_uuid_format; - } - } - else { - keystone_config { - 'DEFAULT/instance_uuid_format' : ensure => absent; - } - } - - if $log_date_format { - keystone_config { - 'DEFAULT/log_date_format' : value => $log_date_format; - } - } - else { - keystone_config { - 'DEFAULT/log_date_format' : ensure => absent; - } - } - + keystone_config { + 'DEFAULT/use_syslog' : value => $use_syslog_real; + 'DEFAULT/use_stderr' : value => $use_stderr_real; + 'DEFAULT/syslog_log_facility' : value => $log_facility_real; + 'DEFAULT/log_dir' : value => $log_dir_real; + 'DEFAULT/log_file': value => $log_file_real; + 'DEFAULT/verbose' : value => $verbose_real; + 'DEFAULT/debug' : value => $debug_real; + 'DEFAULT/default_log_levels' : value => $default_log_levels_real; + 'DEFAULT/logging_context_format_string' : value => $logging_context_format_string; + 'DEFAULT/logging_default_format_string' : value => $logging_default_format_string; + 'DEFAULT/logging_debug_format_suffix' : value => $logging_debug_format_suffix; + 'DEFAULT/logging_exception_prefix' : value => $logging_exception_prefix; + 'DEFAULT/log_config_append' : value => $log_config_append; + 'DEFAULT/publish_errors' : value => $publish_errors; + 'DEFAULT/fatal_deprecations' : value => $fatal_deprecations; + 'DEFAULT/instance_format' : value => $instance_format; + 'DEFAULT/instance_uuid_format' : value => $instance_uuid_format; + 'DEFAULT/log_date_format' : value => $log_date_format; + } } diff --git a/keystone/manifests/params.pp b/keystone/manifests/params.pp index 0c8cc9b2c..d8c473702 100644 --- a/keystone/manifests/params.pp +++ b/keystone/manifests/params.pp @@ -32,7 +32,7 @@ $service_provider = undef $keystone_wsgi_script_source = '/usr/share/keystone/keystone.wsgi' $paste_config = '/usr/share/keystone/keystone-dist-paste.ini' - $pymysql_package_name = 'python2-PyMySQL' + $pymysql_package_name = undef } } } diff --git a/keystone/spec/classes/keystone_cron_token_flush_spec.rb b/keystone/spec/classes/keystone_cron_token_flush_spec.rb index 24ebcd8aa..3560e00a5 100644 --- a/keystone/spec/classes/keystone_cron_token_flush_spec.rb +++ b/keystone/spec/classes/keystone_cron_token_flush_spec.rb @@ -3,7 +3,7 @@ describe 'keystone::cron::token_flush' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :params do diff --git a/keystone/spec/classes/keystone_db_mysql_spec.rb b/keystone/spec/classes/keystone_db_mysql_spec.rb index a2c00eabd..3de968dab 100644 --- a/keystone/spec/classes/keystone_db_mysql_spec.rb +++ b/keystone/spec/classes/keystone_db_mysql_spec.rb @@ -10,7 +10,7 @@ end let :facts do - { :osfamily => 'Debian' } + 
@default_facts.merge({ :osfamily => 'Debian' }) end let :params do diff --git a/keystone/spec/classes/keystone_db_postgresql_spec.rb b/keystone/spec/classes/keystone_db_postgresql_spec.rb index ecdad5a28..388a9f41d 100644 --- a/keystone/spec/classes/keystone_db_postgresql_spec.rb +++ b/keystone/spec/classes/keystone_db_postgresql_spec.rb @@ -12,11 +12,11 @@ context 'on a RedHat osfamily' do let :facts do - { + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.0', :concat_basedir => '/var/lib/puppet/concat' - } + }) end context 'with only required parameters' do @@ -34,12 +34,12 @@ context 'on a Debian osfamily' do let :facts do - { + @default_facts.merge({ :operatingsystemrelease => '7.8', :operatingsystem => 'Debian', :osfamily => 'Debian', :concat_basedir => '/var/lib/puppet/concat' - } + }) end context 'with only required parameters' do diff --git a/keystone/spec/classes/keystone_db_spec.rb b/keystone/spec/classes/keystone_db_spec.rb index 1918c95d8..83e3ce895 100644 --- a/keystone/spec/classes/keystone_db_spec.rb +++ b/keystone/spec/classes/keystone_db_spec.rb @@ -7,12 +7,12 @@ context 'with default parameters' do it { is_expected.to contain_keystone_config('database/connection').with_value('sqlite:////var/lib/keystone/keystone.sqlite').with_secret(true) } - it { is_expected.to contain_keystone_config('database/idle_timeout').with_value('3600') } - it { is_expected.to contain_keystone_config('database/min_pool_size').with_value('1') } - it { is_expected.to contain_keystone_config('database/max_pool_size').with_value('10') } - it { is_expected.to contain_keystone_config('database/max_overflow').with_value('20') } - it { is_expected.to contain_keystone_config('database/max_retries').with_value('10') } - it { is_expected.to contain_keystone_config('database/retry_interval').with_value('10') } + it { is_expected.to contain_keystone_config('database/idle_timeout').with_value('') } + it { is_expected.to contain_keystone_config('database/min_pool_size').with_value('') } + it { is_expected.to contain_keystone_config('database/max_pool_size').with_value('') } + it { is_expected.to contain_keystone_config('database/max_overflow').with_value('') } + it { is_expected.to contain_keystone_config('database/max_retries').with_value('') } + it { is_expected.to contain_keystone_config('database/retry_interval').with_value('') } end @@ -34,8 +34,6 @@ it { is_expected.to contain_keystone_config('database/max_pool_size').with_value('21') } it { is_expected.to contain_keystone_config('database/max_overflow').with_value('21') } it { is_expected.to contain_keystone_config('database/retry_interval').with_value('11') } - it { is_expected.to contain_package('keystone-backend-package').with({ :ensure => 'present', :name => platform_params[:pymysql_package_name] }) } - end context 'with MySQL-python library as backend package' do @@ -77,31 +75,44 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian', + @default_facts.merge({ :osfamily => 'Debian', :operatingsystem => 'Debian', :operatingsystemrelease => 'jessie', - } - end - - let :platform_params do - { :pymysql_package_name => 'python-pymysql' } + }) end it_configures 'keystone::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://keystone:keystone@localhost/keystone', } + end + + it 'install the proper backend package' do + is_expected.to contain_package('keystone-backend-package').with( + :ensure => 'present', + :name => 'python-pymysql', + :tag => 'openstack' + 
) + end + end end context 'on Redhat platforms' do let :facts do - { :osfamily => 'RedHat', + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.1', - } - end - - let :platform_params do - { :pymysql_package_name => 'python2-PyMySQL' } + }) end it_configures 'keystone::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://keystone:keystone@localhost/keystone', } + end + it { is_expected.not_to contain_package('keystone-backend-package') } + end end end diff --git a/keystone/spec/classes/keystone_logging_spec.rb b/keystone/spec/classes/keystone_logging_spec.rb index 36ee9b2c8..d82558d47 100644 --- a/keystone/spec/classes/keystone_logging_spec.rb +++ b/keystone/spec/classes/keystone_logging_spec.rb @@ -15,7 +15,7 @@ :logging_exception_prefix => '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s', :log_config_append => '/etc/keystone/logging.conf', :publish_errors => true, - :default_log_levels => { + :default_log_levels => { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', 'iso8601' => 'WARN', @@ -36,12 +36,12 @@ shared_examples 'basic default logging settings' do it 'configures glance logging settins with default values' do - is_expected.to contain_keystone_config('DEFAULT/use_syslog').with(:value => 'false') - is_expected.to contain_keystone_config('DEFAULT/use_stderr').with(:value => 'true') + is_expected.to contain_keystone_config('DEFAULT/use_syslog').with(:value => '') + is_expected.to contain_keystone_config('DEFAULT/use_stderr').with(:value => '') is_expected.to contain_keystone_config('DEFAULT/log_dir').with(:value => '/var/log/keystone') - is_expected.to contain_keystone_config('DEFAULT/log_file').with(:ensure => :absent) - is_expected.to contain_keystone_config('DEFAULT/verbose').with(:value => 'false') - is_expected.to contain_keystone_config('DEFAULT/debug').with(:value => 'false') + is_expected.to contain_keystone_config('DEFAULT/log_file').with(:value => '') + is_expected.to contain_keystone_config('DEFAULT/verbose').with(:value => '') + is_expected.to contain_keystone_config('DEFAULT/debug').with(:value => '') end end @@ -123,13 +123,13 @@ :default_log_levels, :fatal_deprecations, :instance_format, :instance_uuid_format, :log_date_format, ].each { |param| - it { is_expected.to contain_keystone_config("DEFAULT/#{param}").with_ensure('absent') } + it { is_expected.to contain_keystone_config("DEFAULT/#{param}").with_value('') } } end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'keystone-logging' @@ -137,7 +137,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'keystone-logging' diff --git a/keystone/spec/classes/keystone_policy_spec.rb b/keystone/spec/classes/keystone_policy_spec.rb index 56c8f8bc4..1414df6fb 100644 --- a/keystone/spec/classes/keystone_policy_spec.rb +++ b/keystone/spec/classes/keystone_policy_spec.rb @@ -25,7 +25,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'keystone policies' @@ -33,7 +33,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'keystone policies' diff --git a/keystone/spec/classes/keystone_python_spec.rb 
b/keystone/spec/classes/keystone_python_spec.rb index 500413e96..e65a13105 100644 --- a/keystone/spec/classes/keystone_python_spec.rb +++ b/keystone/spec/classes/keystone_python_spec.rb @@ -3,7 +3,7 @@ describe 'keystone::python' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it { is_expected.to contain_package('python-keystone').with_ensure("present") } diff --git a/keystone/spec/classes/keystone_spec.rb b/keystone/spec/classes/keystone_spec.rb index 8b1247004..c4707fc81 100644 --- a/keystone/spec/classes/keystone_spec.rb +++ b/keystone/spec/classes/keystone_spec.rb @@ -11,12 +11,12 @@ end let :facts do - global_facts.merge({ + @default_facts.merge(global_facts.merge({ :osfamily => 'Debian', :operatingsystem => 'Debian', :operatingsystemrelease => '7.0', :processorcount => '1' - }) + })) end default_params = { @@ -52,11 +52,11 @@ 'signing_keyfile' => '/etc/keystone/ssl/private/signing_key.pem', 'signing_ca_certs' => '/etc/keystone/ssl/certs/ca.pem', 'signing_ca_key' => '/etc/keystone/ssl/private/cakey.pem', - 'rabbit_host' => 'localhost', - 'rabbit_password' => 'guest', - 'rabbit_userid' => 'guest', - 'rabbit_heartbeat_timeout_threshold' => 0, - 'rabbit_heartbeat_rate' => 2, + 'rabbit_host' => '', + 'rabbit_password' => '', + 'rabbit_userid' => '', + 'rabbit_heartbeat_timeout_threshold' => '', + 'rabbit_heartbeat_rate' => '', 'admin_workers' => 20, 'public_workers' => 20, 'sync_db' => true, @@ -196,12 +196,12 @@ if param_hash['admin_endpoint'] is_expected.to contain_keystone_config('DEFAULT/admin_endpoint').with_value(param_hash['admin_endpoint']) else - is_expected.to contain_keystone_config('DEFAULT/admin_endpoint').with_ensure('absent') + is_expected.to contain_keystone_config('DEFAULT/admin_endpoint').with_value('') end if param_hash['public_endpoint'] is_expected.to contain_keystone_config('DEFAULT/public_endpoint').with_value(param_hash['public_endpoint']) else - is_expected.to contain_keystone_config('DEFAULT/public_endpoint').with_ensure('absent') + is_expected.to contain_keystone_config('DEFAULT/public_endpoint').with_value('') end end @@ -215,7 +215,7 @@ end it 'should remove max_token_size param by default' do - is_expected.to contain_keystone_config('DEFAULT/max_token_size').with_ensure('absent') + is_expected.to contain_keystone_config('DEFAULT/max_token_size').with_value('') end it 'should ensure proper setting of admin_workers and public_workers' do @@ -515,7 +515,7 @@ it { is_expected.to contain_keystone_config("memcache/servers").with_value('SERVER1:11211,SERVER2:11211') } it { is_expected.to contain_keystone_config('cache/enabled').with_value(true) } - it { is_expected.to contain_keystone_config('token/caching').with_value(true) } + it { is_expected.to contain_keystone_config('token/caching').with_value('') } it { is_expected.to contain_keystone_config('cache/backend').with_value('dogpile.cache.memcached') } it { is_expected.to contain_keystone_config('cache/backend_argument').with_value('url:SERVER1:12211') } it { is_expected.to contain_keystone_config('memcache/dead_retry').with_value('60') } @@ -538,18 +538,18 @@ end it { is_expected.to contain_keystone_config("cache/enabled").with_ensure('absent') } - it { is_expected.to contain_keystone_config("token/caching").with_ensure('absent') } - it { is_expected.to contain_keystone_config("cache/backend").with_ensure('absent') } + it { is_expected.to contain_keystone_config("token/caching").with_value('') } + it { is_expected.to 
contain_keystone_config("cache/backend").with_value('') } it { is_expected.to contain_keystone_config("cache/backend_argument").with_ensure('absent') } - it { is_expected.to contain_keystone_config("cache/debug_cache_backend").with_ensure('absent') } + it { is_expected.to contain_keystone_config("cache/debug_cache_backend").with_value('') } it { is_expected.to contain_keystone_config("memcache/servers").with_ensure('absent') } - it { is_expected.to contain_keystone_config('memcache/dead_retry').with_ensure('absent') } - it { is_expected.to contain_keystone_config('memcache/pool_maxsize').with_ensure('absent') } - it { is_expected.to contain_keystone_config('memcache/pool_unused_timeout').with_ensure('absent') } - it { is_expected.to contain_keystone_config('cache/memcache_dead_retry').with_ensure('absent') } - it { is_expected.to contain_keystone_config('cache/memcache_socket_timeout').with_ensure('absent') } - it { is_expected.to contain_keystone_config('cache/memcache_pool_maxsize').with_ensure('absent') } - it { is_expected.to contain_keystone_config('cache/memcache_pool_unused_timeout').with_ensure('absent') } + it { is_expected.to contain_keystone_config('memcache/dead_retry').with_value('') } + it { is_expected.to contain_keystone_config('memcache/pool_maxsize').with_value('') } + it { is_expected.to contain_keystone_config('memcache/pool_unused_timeout').with_value('') } + it { is_expected.to contain_keystone_config('cache/memcache_dead_retry').with_value('') } + it { is_expected.to contain_keystone_config('cache/memcache_socket_timeout').with_value('') } + it { is_expected.to contain_keystone_config('cache/memcache_pool_maxsize').with_value('') } + it { is_expected.to contain_keystone_config('cache/memcache_pool_unused_timeout').with_value('') } end describe 'raise error if memcache_servers is not an array' do @@ -590,18 +590,18 @@ } end it {is_expected.to contain_keystone_config('ssl/enable').with_value(false)} - it {is_expected.to contain_keystone_config('DEFAULT/public_endpoint').with_ensure('absent')} - it {is_expected.to contain_keystone_config('DEFAULT/admin_endpoint').with_ensure('absent')} + it {is_expected.to contain_keystone_config('DEFAULT/public_endpoint').with_value('')} + it {is_expected.to contain_keystone_config('DEFAULT/admin_endpoint').with_value('')} end describe 'not setting notification settings by default' do let :params do default_params end - it { is_expected.to contain_keystone_config('DEFAULT/notification_driver').with_value(nil) } - it { is_expected.to contain_keystone_config('DEFAULT/notification_topics').with_value(nil) } - it { is_expected.to contain_keystone_config('DEFAULT/notification_format').with_value(nil) } - it { is_expected.to contain_keystone_config('DEFAULT/control_exchange').with_value(nil) } + it { is_expected.to contain_keystone_config('DEFAULT/notification_driver').with_value('') } + it { is_expected.to contain_keystone_config('DEFAULT/notification_topics').with_value('') } + it { is_expected.to contain_keystone_config('DEFAULT/notification_format').with_value('') } + it { is_expected.to contain_keystone_config('DEFAULT/control_exchange').with_value('') } end describe 'with RabbitMQ communication SSLed' do @@ -627,20 +627,20 @@ describe 'with RabbitMQ communication not SSLed' do let :params do default_params.merge!({ - :rabbit_use_ssl => false, - :kombu_ssl_ca_certs => 'undef', - :kombu_ssl_certfile => 'undef', - :kombu_ssl_keyfile => 'undef', - :kombu_ssl_version => 'TLSv1' + :rabbit_use_ssl => '', + :kombu_ssl_ca_certs => '', + 
:kombu_ssl_certfile => '', + :kombu_ssl_keyfile => '', + :kombu_ssl_version => '' }) end it do - is_expected.to contain_keystone_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value('false') - is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_ensure('absent') - is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') - is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') - is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_version').with_ensure('absent') + is_expected.to contain_keystone_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value('') + is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_value('') + is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_value('') + is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_value('') + is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('') end end @@ -668,6 +668,14 @@ it { is_expected.to contain_keystone_config('DEFAULT/control_exchange').with_value('keystone') } end + describe 'setting sql policy driver' do + let :params do + default_params.merge({:policy_driver => 'sql' }) + end + + it { is_expected.to contain_keystone_config('policy/driver').with_value('sql') } + end + describe 'setting sql (default) catalog' do let :params do default_params @@ -735,10 +743,10 @@ describe 'setting service_provider' do let :facts do - global_facts.merge({ + @default_facts.merge(global_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '6.0' - }) + })) end describe 'with default service_provider' do @@ -802,15 +810,15 @@ default_params end - it { is_expected.to contain_keystone_config('paste_deploy/config_file').with_ensure('absent')} + it { is_expected.to contain_keystone_config('paste_deploy/config_file').with_ensure('absent') } end describe 'with default paste config on RedHat' do let :facts do - global_facts.merge({ + @default_facts.merge(global_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '6.0' - }) + })) end let :params do default_params @@ -881,10 +889,10 @@ context 'on RedHat platforms' do let :facts do - global_facts.merge({ + @default_facts.merge(global_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.0' - }) + })) end let :platform_parameters do @@ -900,11 +908,11 @@ context 'on Debian platforms' do let :facts do - global_facts.merge({ + @default_facts.merge(global_facts.merge({ :osfamily => 'Debian', :operatingsystem => 'Debian', :operatingsystemrelease => '7.0' - }) + })) end let :platform_parameters do diff --git a/keystone/spec/classes/keystone_wsgi_apache_spec.rb b/keystone/spec/classes/keystone_wsgi_apache_spec.rb index bad957613..dcffe5d99 100644 --- a/keystone/spec/classes/keystone_wsgi_apache_spec.rb +++ b/keystone/spec/classes/keystone_wsgi_apache_spec.rb @@ -269,10 +269,10 @@ context 'on RedHat platforms' do let :facts do - global_facts.merge({ + @default_facts.merge(global_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '6.0' - }) + })) end let :platform_parameters do @@ -289,11 +289,11 @@ context 'on Debian platforms' do let :facts do - global_facts.merge({ + @default_facts.merge(global_facts.merge({ :osfamily => 'Debian', :operatingsystem => 'Debian', :operatingsystemrelease => '7.0' - }) + })) end let :platform_parameters do diff --git 
a/keystone/spec/defines/keystone_resource_authtoken_spec.rb b/keystone/spec/defines/keystone_resource_authtoken_spec.rb index 06894070b..83673ed2c 100644 --- a/keystone/spec/defines/keystone_resource_authtoken_spec.rb +++ b/keystone/spec/defines/keystone_resource_authtoken_spec.rb @@ -182,7 +182,7 @@ context 'on a Debian osfamily' do let :facts do - { :osfamily => "Debian" } + @default_facts.merge({ :osfamily => "Debian" }) end include_examples 'shared examples' @@ -190,7 +190,7 @@ context 'on a RedHat osfamily' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end include_examples 'shared examples' diff --git a/keystone/spec/defines/keystone_resource_service_identity_spec.rb b/keystone/spec/defines/keystone_resource_service_identity_spec.rb index ae4dad9c6..b729b546f 100644 --- a/keystone/spec/defines/keystone_resource_service_identity_spec.rb +++ b/keystone/spec/defines/keystone_resource_service_identity_spec.rb @@ -141,7 +141,7 @@ context 'on a Debian osfamily' do let :facts do - { :osfamily => "Debian" } + @default_facts.merge({ :osfamily => "Debian" }) end include_examples 'keystone::resource::service_identity examples' @@ -149,7 +149,7 @@ context 'on a RedHat osfamily' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end include_examples 'keystone::resource::service_identity examples' diff --git a/keystone/spec/spec_helper.rb b/keystone/spec/spec_helper.rb index 54865b87d..8d455e4ce 100644 --- a/keystone/spec/spec_helper.rb +++ b/keystone/spec/spec_helper.rb @@ -7,6 +7,9 @@ RSpec.configure do |c| c.alias_it_should_behave_like_to :it_configures, 'configures' c.alias_it_should_behave_like_to :it_raises, 'raises' + c.before :each do + @default_facts = { :os_service_default => '' } + end end at_exit { RSpec::Puppet::Coverage.report! } diff --git a/keystone/spec/spec_helper_acceptance.rb b/keystone/spec/spec_helper_acceptance.rb index fb03b6c16..ca8676a37 100644 --- a/keystone/spec/spec_helper_acceptance.rb +++ b/keystone/spec/spec_helper_acceptance.rb @@ -38,7 +38,7 @@ zuul_clone_cmd += "git://git.openstack.org #{repo}" on host, zuul_clone_cmd else - on host, "git clone https://git.openstack.org/#{repo} -b stable/liberty #{repo}" + on host, "git clone https://git.openstack.org/#{repo} #{repo}" end on host, "ZUUL_REF=#{zuul_ref} ZUUL_BRANCH=#{zuul_branch} ZUUL_URL=#{zuul_url} bash #{repo}/install_modules.sh" diff --git a/manila/CHANGELOG.md b/manila/CHANGELOG.md index 3f93342c0..5d7165874 100644 --- a/manila/CHANGELOG.md +++ b/manila/CHANGELOG.md @@ -1,3 +1,37 @@ +##2015-11-25 - 7.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Liberty. 
+ +####Backwards-incompatible changes +- rabbitmq: do not manage rabbitmq service anymore +- move qpid settings to oslo_messaging_qpid section + +####Features +- keystone/auth: make service description configurable +- add related parameters to manila::quota +- add tag to package and service resources +- add support to configure GlusterFS drivers with Manila shares +- reflect provider change in puppet-openstacklib +- put all the logging related parameters to the logging class +- simplify rpc_backend parameter +- add options to enable Manila to run with NFS-Ganesha backend +- introduce manila::db class +- db: Use postgresql lib class for psycopg package +- add related parameters to oslo_messaging_amqp section + +####Bugfixes +- rely on autorequire for config resource ordering +- api: require ::keystone::python + +####Maintenance +- acceptance: enable debug & verbosity for OpenStack logs +- initial msync run for all Puppet OpenStack modules +- fix rspec 3.x syntax +- try to use zuul-cloner to prepare fixtures +- remove class_parameter_defaults puppet-lint check +- acceptance: use common bits from puppet-openstack-integration + ##2015-10-10 - 6.1.0 ### Summary diff --git a/manila/README.md b/manila/README.md index 4cd3b6b68..403b3d96f 100644 --- a/manila/README.md +++ b/manila/README.md @@ -1,7 +1,7 @@ manila ======= -6.1.0 - 2015.1.0 - Kilo +7.0.0 - 2015.2 - Liberty #### Table of Contents diff --git a/manila/manifests/db.pp b/manila/manifests/db.pp index 823a98ac1..dfdc5b19e 100644 --- a/manila/manifests/db.pp +++ b/manila/manifests/db.pp @@ -10,39 +10,41 @@ # # [*database_idle_timeout*] # Timeout when db connections should be reaped. -# (Optional) Defaults to 3600. +# (Optional) Defaults to $::os_service_default # # [*database_min_pool_size*] # Minimum number of SQL connections to keep open in a pool. -# (Optional) Defaults to 1. +# (Optional) Defaults to $::os_service_default # # [*database_max_pool_size*] # Maximum number of SQL connections to keep open in a pool. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default # # [*database_max_retries*] # Maximum db connection retries during startup. # Setting -1 implies an infinite retry count. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default # # [*database_retry_interval*] # Interval between retries of opening a sql connection. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default # # [*database_max_overflow*] # If set, use this value for max_overflow with sqlalchemy. -# (Optional) Defaults to 20. +# (Optional) Defaults to $::os_service_default # class manila::db ( $database_connection = 'sqlite:////var/lib/manila/manila.sqlite', - $database_idle_timeout = 3600, - $database_min_pool_size = 1, - $database_max_pool_size = 10, - $database_max_retries = 10, - $database_retry_interval = 10, - $database_max_overflow = 20, + $database_idle_timeout = $::os_service_default, + $database_min_pool_size = $::os_service_default, + $database_max_pool_size = $::os_service_default, + $database_max_retries = $::os_service_default, + $database_retry_interval = $::os_service_default, + $database_max_overflow = $::os_service_default, ) { + include ::manila::params + # NOTE(spredzy): In order to keep backward compatibility we rely on the pick function # to use manila:: if manila::db:: isn't specified. 
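# Illustrative sketch only, not part of the patch: how the pick() fallback described
# in the NOTE above behaves, assuming stdlib's pick() returns its first argument that
# is neither undef nor an empty string. A legacy setting on the manila class still
# wins over the corresponding manila::db default:
class { '::manila':
  sql_connection  => 'mysql+pymysql://manila:a_big_secret@127.0.0.1/manila?charset=utf8',
  rabbit_userid   => 'manila',
  rabbit_password => 'an_even_bigger_secret',
}
# manila::db is included by the manila class; because pick($::manila::sql_connection,
# $database_connection) prefers the legacy value, database/connection is written with
# the mysql+pymysql URL above rather than the sqlite default.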
$database_connection_real = pick($::manila::sql_connection, $database_connection) @@ -54,44 +56,46 @@ $database_max_overflow_real = pick($::manila::database_max_overflow, $database_max_overflow) validate_re($database_connection_real, - '(sqlite|mysql|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') + '^(sqlite|mysql(\+pymysql)?|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') - if $database_connection_real { - case $database_connection_real { - /^mysql:\/\//: { - $backend_package = false - require 'mysql::bindings' - require 'mysql::bindings::python' - } - /^postgresql:\/\//: { + case $database_connection_real { + /^mysql(\+pymysql)?:\/\//: { + require 'mysql::bindings' + require 'mysql::bindings::python' + if $database_connection_real =~ /^mysql\+pymysql/ { + $backend_package = $::manila::params::pymysql_package_name + } else { $backend_package = false - require 'postgresql::lib::python' - } - /^sqlite:\/\//: { - $backend_package = $::manila::params::sqlite_package_name - } - default: { - fail('Unsupported backend configured') } } - - if $backend_package and !defined(Package[$backend_package]) { - package {'manila-backend-package': - ensure => present, - name => $backend_package, - tag => 'openstack', - } + /^postgresql:\/\//: { + $backend_package = false + require 'postgresql::lib::python' + } + /^sqlite:\/\//: { + $backend_package = $::manila::params::sqlite_package_name + } + default: { + fail('Unsupported backend configured') } + } - manila_config { - 'database/connection': value => $database_connection_real, secret => true; - 'database/idle_timeout': value => $database_idle_timeout_real; - 'database/min_pool_size': value => $database_min_pool_size_real; - 'database/max_retries': value => $database_max_retries_real; - 'database/retry_interval': value => $database_retry_interval_real; - 'database/max_pool_size': value => $database_max_pool_size_real; - 'database/max_overflow': value => $database_max_overflow_real; + if $backend_package and !defined(Package[$backend_package]) { + package {'manila-backend-package': + ensure => present, + name => $backend_package, + tag => 'openstack', } } + manila_config { + 'database/connection': value => $database_connection_real, secret => true; + 'database/idle_timeout': value => $database_idle_timeout_real; + 'database/min_pool_size': value => $database_min_pool_size_real; + 'database/max_retries': value => $database_max_retries_real; + 'database/retry_interval': value => $database_retry_interval_real; + 'database/max_pool_size': value => $database_max_pool_size_real; + 'database/max_overflow': value => $database_max_overflow_real; + } + } diff --git a/manila/manifests/init.pp b/manila/manifests/init.pp index 2232e4fde..1c821c0e5 100644 --- a/manila/manifests/init.pp +++ b/manila/manifests/init.pp @@ -102,69 +102,6 @@ # Use durable queues in amqp. # (Optional) Defaults to false. # -# ==== Various QPID options (Optional) -# -# [*qpid_hostname*] -# (Optional) hostname of the qpid server. -# Defaults to 'localhost' -# -# [*qpid_port*] -# (Optional) Port of the qpid server. -# Defaults to 5672. -# -# [*qpid_username*] -# (Optional) User to connect to the qpid server. -# Defaults to 'guest' -# -# [*qpid_password*] -# (Optional) Password to connect to the qpid_server. -# Defaults to empty. -# -# [*qpid_heartbeat*] -# (Optional) Seconds between connection keepalive heartbeats. -# Defaults to 60s. -# -# [*qpid_protocol*] -# (Optional) Transport to use, either 'tcp' or 'ssl'. -# Defaults to tcp. -# -# [*qpid_tcp_nodelay*] -# (Optional) Whether to disable the Nagle algorithm. 
-# Defaults to true. -# -# [*qpid_reconnect*] -# (Optional) Enable the experimental use of reconnect on connection -# lost. -# Defaults to true. -# -# [*qpid_reconnect_timeout*] -# (Optional) How long to wait before considering a reconnect attempt -# to have failed. This value should not be longer than rpc_response_timeout. -# Defaults to 0. -# -# [*qpid_reconnect_limit*] -# (Optional) Limit of reconnect on connection lost. -# Defaults to 0. -# -# [*qpid_reconnect_interval*] -# (Optional) Interval between retries of opening a qpid connection. (integer -# value) -# Defaults to 0. -# -# [*qpid_reconnect_interval_min*] -# (Optional) Minimal interval between retries of opening a qpid connection. (integer -# value) -# Defaults to 0. -# -# [*qpid_reconnect_interval_max*] -# (Optional) Miximal interval between retries of opening a qpid connection. (integer -# value) -# Defaults to 0. -# -# [*qpid_sasl_mechanisms*] -# (Optional) ENable one or more SASL mechanisms. -# Defaults to false. -# # [*use_stderr*] # (optional) Use stderr for logging # Defaults to undef @@ -265,8 +202,69 @@ # (optional) Accept clients using either SSL or plain TCP # Defaults to false # - - +# DEPRECATED PARAMETERS +# +# [*qpid_hostname*] +# (Optional) hostname of the qpid server. +# Defaults to undef. +# +# [*qpid_port*] +# (Optional) Port of the qpid server. +# Defaults to undef. +# +# [*qpid_username*] +# (Optional) User to connect to the qpid server. +# Defaults to undef. +# +# [*qpid_password*] +# (Optional) Password to connect to the qpid_server. +# Defaults to undef. +# +# [*qpid_heartbeat*] +# (Optional) Seconds between connection keepalive heartbeats. +# Defaults to undef. +# +# [*qpid_protocol*] +# (Optional) Transport to use, either 'tcp' or 'ssl'. +# Defaults to undef. +# +# [*qpid_tcp_nodelay*] +# (Optional) Whether to disable the Nagle algorithm. +# Defaults to undef. +# +# [*qpid_reconnect*] +# (Optional) Enable the experimental use of reconnect on connection +# lost. +# Defaults to undef. +# +# [*qpid_reconnect_timeout*] +# (Optional) How long to wait before considering a reconnect attempt +# to have failed. This value should not be longer than rpc_response_timeout. +# Defaults to undef. +# +# [*qpid_reconnect_limit*] +# (Optional) Limit of reconnect on connection lost. +# Defaults to undef. +# +# [*qpid_reconnect_interval*] +# (Optional) Interval between retries of opening a qpid connection. (integer +# value) +# Defaults to undef. +# +# [*qpid_reconnect_interval_min*] +# (Optional) Minimal interval between retries of opening a qpid connection. (integer +# value) +# Defaults to undef. +# +# [*qpid_reconnect_interval_max*] +# (Optional) Maximal interval between retries of opening a qpid connection. (integer +# value) +# Defaults to undef. +# +# [*qpid_sasl_mechanisms*] +# (Optional) Enable one or more SASL mechanisms. +# Defaults to undef.
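# Illustrative sketch only, not part of the patch: with the qpid_* options now deprecated
# no-ops, selecting the qpid backend no longer writes any oslo_messaging_qpid settings;
# the class only emits the deprecation warning added further below (assumed behaviour,
# based on the warning() branch introduced by this patch).
class { '::manila':
  sql_connection => 'mysql+pymysql://manila:a_big_secret@127.0.0.1/manila',
  rpc_backend    => 'qpid',
  qpid_hostname  => 'qpid.example.com',  # hypothetical host; still accepted, but unused
}
# => Warning: Qpidd's driver was removed from Oslo.messaging in Mitaka release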
+# class manila ( $sql_connection = undef, $sql_idle_timeout = undef, @@ -290,20 +288,6 @@ $kombu_ssl_keyfile = undef, $kombu_ssl_version = 'TLSv1', $amqp_durable_queues = false, - $qpid_hostname = 'localhost', - $qpid_port = '5672', - $qpid_username = 'guest', - $qpid_password = false, - $qpid_sasl_mechanisms = false, - $qpid_reconnect = true, - $qpid_reconnect_timeout = 0, - $qpid_reconnect_limit = 0, - $qpid_reconnect_interval_min = 0, - $qpid_reconnect_interval_max = 0, - $qpid_reconnect_interval = 0, - $qpid_heartbeat = 60, - $qpid_protocol = 'tcp', - $qpid_tcp_nodelay = true, $package_ensure = 'present', $use_ssl = false, $ca_file = false, @@ -331,6 +315,21 @@ $amqp_ssl_cert_file = undef, $amqp_ssl_key_file = undef, $amqp_ssl_key_password = undef, + # DEPRECATED PARAMETERS + $qpid_hostname = undef, + $qpid_port = undef, + $qpid_username = undef, + $qpid_password = undef, + $qpid_sasl_mechanisms = undef, + $qpid_reconnect = undef, + $qpid_reconnect_timeout = undef, + $qpid_reconnect_limit = undef, + $qpid_reconnect_interval_min = undef, + $qpid_reconnect_interval_max = undef, + $qpid_reconnect_interval = undef, + $qpid_heartbeat = undef, + $qpid_protocol = undef, + $qpid_tcp_nodelay = undef, ) { include ::manila::db @@ -449,41 +448,7 @@ } if $rpc_backend == 'manila.openstack.common.rpc.impl_qpid' or $rpc_backend == 'qpid' { - - if ! $qpid_password { - fail('Please specify a qpid_password parameter.') - } - - manila_config { - 'oslo_messaging_qpid/qpid_hostname': value => $qpid_hostname; - 'oslo_messaging_qpid/qpid_port': value => $qpid_port; - 'oslo_messaging_qpid/qpid_username': value => $qpid_username; - 'oslo_messaging_qpid/qpid_password': value => $qpid_password, secret => true; - 'oslo_messaging_qpid/qpid_heartbeat': value => $qpid_heartbeat; - 'oslo_messaging_qpid/qpid_protocol': value => $qpid_protocol; - 'oslo_messaging_qpid/qpid_tcp_nodelay': value => $qpid_tcp_nodelay; - 'oslo_messaging_qpid/amqp_durable_queues': value => $amqp_durable_queues; - 'oslo_messaging_qpid/qpid_reconnect': value => $qpid_reconnect; - 'oslo_messaging_qpid/qpid_reconnect_timeout': value => $qpid_reconnect_timeout; - 'oslo_messaging_qpid/qpid_reconnect_limit': value => $qpid_reconnect_limit; - 'oslo_messaging_qpid/qpid_reconnect_interval_min': value => $qpid_reconnect_interval_min; - 'oslo_messaging_qpid/qpid_reconnect_interval_max': value => $qpid_reconnect_interval_max; - 'oslo_messaging_qpid/qpid_reconnect_interval': value => $qpid_reconnect_interval; - } - - if is_array($qpid_sasl_mechanisms) { - manila_config { - 'oslo_messaging_qpid/qpid_sasl_mechanisms': value => join($qpid_sasl_mechanisms, ' '); - } - } elsif $qpid_sasl_mechanisms { - manila_config { - 'oslo_messaging_qpid/qpid_sasl_mechanisms': value => $qpid_sasl_mechanisms; - } - } else { - manila_config { - 'oslo_messaging_qpid/qpid_sasl_mechanisms': ensure => absent; - } - } + warning('Qpidd\'s driver was removed from Oslo.messaging in Mitaka release') } diff --git a/manila/manifests/keystone/auth.pp b/manila/manifests/keystone/auth.pp index dcc4be1cf..677ebd6ef 100644 --- a/manila/manifests/keystone/auth.pp +++ b/manila/manifests/keystone/auth.pp @@ -41,6 +41,36 @@ # (optional) The endpoint's internal url. (Defaults to 'http://127.0.0.1:8786/v1/%(tenant_id)s') # This url should *not* contain any trailing '/'. # +# [*password_v2*] +# Password for Manila v2 user. Optional. Defaults to undef. +# +# [*email_v2*] +# Email for Manila v2 user. Optional. Defaults to 'manilav2@localhost'. +# +# [*auth_name_v2*] +# Username for Manila v2 service. 
Optional. Defaults to 'manilav2'. +# +# [*configure_endpoint_v2*] +# Should Manila v2 endpoint be configured? Optional. Defaults to 'true'. +# +# [*service_type_v2*] +# Type of service v2. Optional. Defaults to 'sharev2'. +# +# [*service_description_v2*] +# Description for keystone service v2. Optional. Defaults to 'Manila Service v2'. +# +# [*public_url_v2*] +# (optional) The v2 endpoint's public url. (Defaults to 'http://127.0.0.1:8786/v2/%(tenant_id)s') +# This url should *not* contain any trailing '/'. +# +# [*admin_url_v2*] +# (optional) The endpoint's admin url. (Defaults to 'http://127.0.0.1:8786/v2/%(tenant_id)s') +# This url should *not* contain any trailing '/'. +# +# [*internal_url_v2*] +# (optional) The endpoint's internal url. (Defaults to 'http://127.0.0.1:8786/v2/%(tenant_id)s') +# This url should *not* contain any trailing '/'. +# # [*share_version*] # (optional) DEPRECATED: Use public_url, internal_url and admin_url instead. # API version endpoint. (Defaults to 'v1') @@ -96,27 +126,44 @@ # class manila::keystone::auth ( $password, - $auth_name = 'manila', - $email = 'manila@localhost', - $tenant = 'services', - $configure_endpoint = true, - $service_type = 'share', - $service_description = 'Manila Service', - $region = 'RegionOne', - $public_url = 'http://127.0.0.1:8786/v1/%(tenant_id)s', - $admin_url = 'http://127.0.0.1:8786/v1/%(tenant_id)s', - $internal_url = 'http://127.0.0.1:8786/v1/%(tenant_id)s', + $password_v2 = undef, + $auth_name_v2 = 'manilav2', + $auth_name = 'manila', + $email = 'manila@localhost', + $email_v2 = 'manilav2@localhost', + $tenant = 'services', + $configure_endpoint = true, + $configure_endpoint_v2 = true, + $service_type = 'share', + $service_type_v2 = 'sharev2', + $service_description = 'Manila Service', + $service_description_v2 = 'Manila Service v2', + $region = 'RegionOne', + $public_url = 'http://127.0.0.1:8786/v1/%(tenant_id)s', + $public_url_v2 = 'http://127.0.0.1:8786/v2/%(tenant_id)s', + $admin_url = 'http://127.0.0.1:8786/v1/%(tenant_id)s', + $admin_url_v2 = 'http://127.0.0.1:8786/v2/%(tenant_id)s', + $internal_url = 'http://127.0.0.1:8786/v1/%(tenant_id)s', + $internal_url_v2 = 'http://127.0.0.1:8786/v2/%(tenant_id)s', # DEPRECATED PARAMETERS - $share_version = undef, - $port = undef, - $public_protocol = undef, - $public_address = undef, - $internal_protocol = undef, - $internal_address = undef, - $admin_protocol = undef, - $admin_address = undef, + $share_version = undef, + $port = undef, + $public_protocol = undef, + $public_address = undef, + $internal_protocol = undef, + $internal_address = undef, + $admin_protocol = undef, + $admin_address = undef, ) { + # for interface backward compatibility, we can't enforce to set a new parameter + # so we take 'password' parameter by default but allow to override it. + if ! 
$password_v2 { + $password_v2_real = $password + } else { + $password_v2_real = $password_v2 + } + if $share_version { warning('The share_version parameter is deprecated, use public_url, internal_url and admin_url instead.') } @@ -180,6 +227,7 @@ } Keystone_user_role["${auth_name}@${tenant}"] ~> Service <| name == 'manila-api' |> + Keystone_user_role["${auth_name_v2}@${tenant}"] ~> Service <| name == 'manila-api' |> keystone::resource::service_identity { $auth_name: configure_user => true, @@ -196,4 +244,18 @@ internal_url => $internal_url_real, } + keystone::resource::service_identity { $auth_name_v2: + configure_user => true, + configure_user_role => true, + configure_endpoint => $configure_endpoint_v2, + service_type => $service_type_v2, + service_description => $service_description_v2, + region => $region, + password => $password_v2_real, + email => $email_v2, + tenant => $tenant, + public_url => $public_url_v2, + admin_url => $admin_url_v2, + internal_url => $internal_url_v2, + } } diff --git a/manila/manifests/logging.pp b/manila/manifests/logging.pp index 6b3136014..9fd0a6f7e 100644 --- a/manila/manifests/logging.pp +++ b/manila/manifests/logging.pp @@ -6,23 +6,23 @@ # # [*verbose*] # (Optional) Should the daemons log verbose messages -# Defaults to 'false' +# Defaults to $::os_service_default # # [*debug*] # (Optional) Should the daemons log debug messages -# Defaults to 'false' +# Defaults to $::os_service_default # # [*use_syslog*] # (Optional) Use syslog for logging. -# Defaults to 'false' +# Defaults to $::os_service_default # # [*use_stderr*] # (optional) Use stderr for logging -# Defaults to 'true' +# Defaults to $::os_service_default # # [*log_facility*] # (Optional) Syslog facility to receive log lines. -# Defaults to 'LOG_USER' +# Defaults to $::os_service_default # # [*log_dir*] # (optional) Directory where logs should be stored. @@ -31,34 +31,34 @@ # # [*logging_context_format_string*] # (optional) Format string to use for log messages with context. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [%(request_id)s %(user_identity)s] %(instance)s%(message)s' # # [*logging_default_format_string*] # (optional) Format string to use for log messages without context. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [-] %(instance)s%(message)s' # # [*logging_debug_format_suffix*] # (optional) Formatted data to append to log format when level is DEBUG. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(funcName)s %(pathname)s:%(lineno)d' # # [*logging_exception_prefix*] # (optional) Prefix each line of exception output with this format. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s' # # [*log_config_append*] # The name of an additional logging configuration file. -# Defaults to undef. +# Defaults to $::os_service_default # See https://docs.python.org/2/howto/logging.html # # [*default_log_levels*] # (optional) Hash of logger (keys) and level (values) pairs. -# Defaults to undef. +# Defaults to $::os_service_default # Example: # { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', # 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', @@ -67,47 +67,47 @@ # # [*publish_errors*] # (optional) Publish error events (boolean value). -# Defaults to undef (false if unconfigured). 
+# Defaults to $::os_service_default # # [*fatal_deprecations*] # (optional) Make deprecations fatal (boolean value) -# Defaults to undef (false if unconfigured). +# Defaults to $::os_service_default # # [*instance_format*] # (optional) If an instance is passed with the log message, format it # like this (string value). -# Defaults to undef. +# Defaults to $::os_service_default # Example: '[instance: %(uuid)s] ' # # [*instance_uuid_format*] # (optional) If an instance UUID is passed with the log message, format # it like this (string value). -# Defaults to undef. +# Defaults to $::os_service_default # Example: instance_uuid_format='[instance: %(uuid)s] ' - +# # [*log_date_format*] # (optional) Format string for %%(asctime)s in log records. -# Defaults to undef. +# Defaults to $::os_service_default # Example: 'Y-%m-%d %H:%M:%S' class manila::logging( - $use_syslog = false, - $use_stderr = true, - $log_facility = 'LOG_USER', + $use_syslog = $::os_service_default, + $use_stderr = $::os_service_default, + $log_facility = $::os_service_default, $log_dir = '/var/log/manila', - $verbose = false, - $debug = false, - $logging_context_format_string = undef, - $logging_default_format_string = undef, - $logging_debug_format_suffix = undef, - $logging_exception_prefix = undef, - $log_config_append = undef, - $default_log_levels = undef, - $publish_errors = undef, - $fatal_deprecations = undef, - $instance_format = undef, - $instance_uuid_format = undef, - $log_date_format = undef, + $verbose = $::os_service_default, + $debug = $::os_service_default, + $logging_context_format_string = $::os_service_default, + $logging_default_format_string = $::os_service_default, + $logging_debug_format_suffix = $::os_service_default, + $logging_exception_prefix = $::os_service_default, + $log_config_append = $::os_service_default, + $default_log_levels = $::os_service_default, + $publish_errors = $::os_service_default, + $fatal_deprecations = $::os_service_default, + $instance_format = $::os_service_default, + $instance_uuid_format = $::os_service_default, + $log_date_format = $::os_service_default, ) { # NOTE(spredzy): In order to keep backward compatibility we rely on the pick function @@ -119,6 +119,12 @@ $verbose_real = pick($::manila::verbose,$verbose) $debug_real = pick($::manila::debug,$debug) + if is_service_default($default_log_levels) { + $default_log_levels_real = $default_log_levels + } else { + $default_log_levels_real = join(sort(join_keys_to_values($default_log_levels, '=')), ',') + } + manila_config { 'DEFAULT/debug' : value => $debug_real; 'DEFAULT/verbose' : value => $verbose_real; @@ -128,130 +134,19 @@ 'DEFAULT/syslog_log_facility': value => $log_facility_real; } - if $logging_context_format_string { - manila_config { - 'DEFAULT/logging_context_format_string' : - value => $logging_context_format_string; - } - } - else { - manila_config { - 'DEFAULT/logging_context_format_string' : ensure => absent; - } - } - - if $logging_default_format_string { - manila_config { - 'DEFAULT/logging_default_format_string' : - value => $logging_default_format_string; - } - } - else { - manila_config { - 'DEFAULT/logging_default_format_string' : ensure => absent; - } - } - - if $logging_debug_format_suffix { - manila_config { - 'DEFAULT/logging_debug_format_suffix' : - value => $logging_debug_format_suffix; - } - } - else { - manila_config { - 'DEFAULT/logging_debug_format_suffix' : ensure => absent; - } - } - - if $logging_exception_prefix { - manila_config { - 'DEFAULT/logging_exception_prefix' : value => 
$logging_exception_prefix; - } - } - else { - manila_config { - 'DEFAULT/logging_exception_prefix' : ensure => absent; - } - } - - if $log_config_append { - manila_config { - 'DEFAULT/log_config_append' : value => $log_config_append; - } - } - else { - manila_config { - 'DEFAULT/log_config_append' : ensure => absent; - } - } - - if $default_log_levels { - manila_config { - 'DEFAULT/default_log_levels' : - value => join(sort(join_keys_to_values($default_log_levels, '=')), ','); - } - } - else { - manila_config { - 'DEFAULT/default_log_levels' : ensure => absent; - } - } - - if $publish_errors { - manila_config { - 'DEFAULT/publish_errors' : value => $publish_errors; - } - } - else { - manila_config { - 'DEFAULT/publish_errors' : ensure => absent; - } - } - - if $fatal_deprecations { - manila_config { - 'DEFAULT/fatal_deprecations' : value => $fatal_deprecations; - } - } - else { - manila_config { - 'DEFAULT/fatal_deprecations' : ensure => absent; - } - } - - if $instance_format { - manila_config { - 'DEFAULT/instance_format' : value => $instance_format; - } - } - else { - manila_config { - 'DEFAULT/instance_format' : ensure => absent; - } - } - - if $instance_uuid_format { - manila_config { - 'DEFAULT/instance_uuid_format' : value => $instance_uuid_format; - } - } - else { - manila_config { - 'DEFAULT/instance_uuid_format' : ensure => absent; - } - } - - if $log_date_format { - manila_config { - 'DEFAULT/log_date_format' : value => $log_date_format; - } - } - else { - manila_config { - 'DEFAULT/log_date_format' : ensure => absent; - } - } + manila_config { + 'DEFAULT/logging_context_format_string': value => $logging_context_format_string; + 'DEFAULT/logging_default_format_string': value => $logging_default_format_string; + 'DEFAULT/logging_debug_format_suffix': value => $logging_debug_format_suffix; + 'DEFAULT/logging_exception_prefix': value => $logging_exception_prefix; + 'DEFAULT/log_config_append': value => $log_config_append; + 'DEFAULT/default_log_levels': value => $default_log_levels_real; + 'DEFAULT/publish_errors': value => $publish_errors; + 'DEFAULT/fatal_deprecations': value => $fatal_deprecations; + 'DEFAULT/instance_format': value => $instance_format; + 'DEFAULT/instance_uuid_format': value => $instance_uuid_format; + 'DEFAULT/log_date_format': value => $log_date_format; + } } diff --git a/manila/manifests/params.pp b/manila/manifests/params.pp index d92d83075..ef3c92b8f 100644 --- a/manila/manifests/params.pp +++ b/manila/manifests/params.pp @@ -22,6 +22,7 @@ $gluster_client_package_name = 'glusterfs-client' $gluster_package_name = 'glusterfs-common' $sqlite_package_name = 'python-pysqlite2' + $pymysql_package_name = 'python-pymysql' } elsif($::osfamily == 'RedHat') { @@ -41,6 +42,7 @@ $gluster_client_package_name = 'glusterfs-fuse' $gluster_package_name = 'glusterfs' $sqlite_package_name = undef + $pymysql_package_name = undef if $::operatingsystem == 'RedHat' and (versioncmp($::operatingsystemmajrelease, '7') >= 0) { $iscsi_helper = 'lioadm' diff --git a/manila/manifests/qpid.pp b/manila/manifests/qpid.pp index c3da33d80..6bc7bd461 100644 --- a/manila/manifests/qpid.pp +++ b/manila/manifests/qpid.pp @@ -1,57 +1,35 @@ # == Class: manila::qpid # -# Class for installing qpid server for manila +# Deprecated class for installing qpid server for manila # # === Parameters # # [*enabled*] # (Optional) Whether to enable the service -# Defaults to true. +# Defaults to undef. # # [*user*] # (Optional) The user to create in qpid -# Defaults to 'guest'. +# Defaults to undef. 
# # [*password*] # (Optional) The password to create for the user -# Defaults to 'guest'. +# Defaults to undef. # # [*file*] # (Optional) Sasl file for the user -# Defaults to '/var/lib/qpidd/qpidd.sasldb'. +# Defaults to undef. # # [*realm*] # (Optional) Realm for the user -# Defaults to 'OPENSTACK'. -# +# Defaults to undef. # class manila::qpid ( - $enabled = true, - $user = 'guest', - $password = 'guest', - $file = '/var/lib/qpidd/qpidd.sasldb', - $realm = 'OPENSTACK' + $enabled = undef, + $user = undef, + $password = undef, + $file = undef, + $realm = undef ) { - - # only configure manila after the queue is up - Class['qpid::server'] -> Package<| title == 'manila' |> - - if ($enabled) { - $service_ensure = 'running' - - qpid_user { $user: - password => $password, - file => $file, - realm => $realm, - provider => 'saslpasswd2', - require => Class['qpid::server'], - } - - } else { - $service_ensure = 'stopped' - } - - class { '::qpid::server': - service_ensure => $service_ensure, - } + warning('Qpid driver is removed from Oslo.messaging in the Mitaka release') } diff --git a/manila/metadata.json b/manila/metadata.json index 48a597472..38b340dda 100644 --- a/manila/metadata.json +++ b/manila/metadata.json @@ -1,6 +1,6 @@ { "name": "openstack-manila", - "version": "6.1.0", + "version": "7.0.0", "author": "NetApp and OpenStack Contributors", "summary": "Puppet module for OpenStack Manila", "license": "Apache-2.0", @@ -33,10 +33,10 @@ "dependencies": [ { "name": "dprince/qpid", "version_requirement": ">=1.0.0 <2.0.0" }, { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, - { "name": "openstack/glance", "version_requirement": ">=6.0.0 <7.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=7.0.0 <8.0.0" }, + { "name": "openstack/glance", "version_requirement": ">=7.0.0 <8.0.0" }, { "name": "puppetlabs/rabbitmq", "version_requirement": ">=2.0.2 <6.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=7.0.0 <8.0.0" } ] } diff --git a/manila/spec/acceptance/basic_manila_spec.rb b/manila/spec/acceptance/basic_manila_spec.rb index 905746dc1..7fb6b3fde 100644 --- a/manila/spec/acceptance/basic_manila_spec.rb +++ b/manila/spec/acceptance/basic_manila_spec.rb @@ -6,59 +6,11 @@ it 'should work with no errors' do pp= <<-EOS - Exec { logoutput => 'on_failure' } - - # Common resources - case $::osfamily { - 'Debian': { - include ::apt - class { '::openstack_extras::repo::debian::ubuntu': - release => 'liberty', - repo => 'proposed', - package_require => true, - } - $package_provider = 'apt' - } - 'RedHat': { - class { '::openstack_extras::repo::redhat::redhat': - manage_rdo => false, - repo_hash => { - 'openstack-common-testing' => { - 'baseurl' => 'http://cbs.centos.org/repos/cloud7-openstack-common-testing/x86_64/os/', - 'descr' => 'openstack-common-testing', - 'gpgcheck' => 'no', - }, - 'openstack-liberty-testing' => { - 'baseurl' => 'http://cbs.centos.org/repos/cloud7-openstack-liberty-testing/x86_64/os/', - 'descr' => 'openstack-liberty-testing', - 'gpgcheck' => 'no', - }, - 'openstack-liberty-trunk' => { - 'baseurl' => 'http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/', - 'descr' => 'openstack-liberty-trunk', - 'gpgcheck' => 'no', - }, - }, - } - package { 'openstack-selinux': ensure => 'latest' } - 
$package_provider = 'yum' - } - default: { - fail("Unsupported osfamily (${::osfamily})") - } - } - - class { '::mysql::server': } - - class { '::rabbitmq': - delete_guest_user => true, - package_provider => $package_provider, - } - - rabbitmq_vhost { '/': - provider => 'rabbitmqctl', - require => Class['rabbitmq'], - } + include ::openstack_integration + include ::openstack_integration::repos + include ::openstack_integration::rabbitmq + include ::openstack_integration::mysql + include ::openstack_integration::keystone rabbitmq_user { 'manila': admin => true, @@ -75,29 +27,9 @@ class { '::rabbitmq': require => Class['rabbitmq'], } - # Keystone resources, needed by Manila to run - class { '::keystone::db::mysql': - password => 'keystone', - } - class { '::keystone': - verbose => true, - debug => true, - database_connection => 'mysql://keystone:keystone@127.0.0.1/keystone', - admin_token => 'admin_token', - enabled => true, - } - class { '::keystone::roles::admin': - email => 'test@example.tld', - password => 'a_big_secret', - } - class { '::keystone::endpoint': - public_url => "https://${::fqdn}:5000/", - admin_url => "https://${::fqdn}:35357/", - } - # Manila resources class { '::manila': - sql_connection => 'mysql://manila:a_big_secret@127.0.0.1/manila?charset=utf8', + sql_connection => 'mysql+pymysql://manila:a_big_secret@127.0.0.1/manila?charset=utf8', rabbit_userid => 'manila', rabbit_password => 'an_even_bigger_secret', rabbit_host => '127.0.0.1', @@ -108,7 +40,8 @@ class { '::manila::db::mysql': password => 'a_big_secret', } class { '::manila::keystone::auth': - password => 'a_big_secret', + password => 'a_big_secret', + password_v2 => 'a_big_secret', } class { '::manila::client': } class { '::manila::compute::nova': } diff --git a/manila/spec/classes/manila_api_spec.rb b/manila/spec/classes/manila_api_spec.rb index 086ec4b0a..d0c19529f 100644 --- a/manila/spec/classes/manila_api_spec.rb +++ b/manila/spec/classes/manila_api_spec.rb @@ -6,7 +6,7 @@ {:keystone_password => 'foo'} end let :facts do - {:osfamily => 'Debian'} + @default_facts.merge({:osfamily => 'Debian'}) end describe 'with only required params' do diff --git a/manila/spec/classes/manila_backends_spec.rb b/manila/spec/classes/manila_backends_spec.rb index fc5cb4df8..0e5310f3a 100644 --- a/manila/spec/classes/manila_backends_spec.rb +++ b/manila/spec/classes/manila_backends_spec.rb @@ -52,7 +52,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'manila backends' @@ -60,7 +60,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'manila backends' diff --git a/manila/spec/classes/manila_client_spec.rb b/manila/spec/classes/manila_client_spec.rb index d8732704d..e5d0a223b 100644 --- a/manila/spec/classes/manila_client_spec.rb +++ b/manila/spec/classes/manila_client_spec.rb @@ -3,7 +3,7 @@ describe 'manila::client' do it { is_expected.to contain_package('python-manilaclient').with_ensure('present') } let :facts do - {:osfamily => 'Debian'} + @default_facts.merge({:osfamily => 'Debian'}) end context 'with params' do let :params do diff --git a/manila/spec/classes/manila_db_mysql_spec.rb b/manila/spec/classes/manila_db_mysql_spec.rb index 5ed2ddba8..c4eeb43be 100644 --- a/manila/spec/classes/manila_db_mysql_spec.rb +++ b/manila/spec/classes/manila_db_mysql_spec.rb @@ -8,7 +8,7 @@ end let :facts do - {:osfamily => 'Debian'} + 
@default_facts.merge({:osfamily => 'Debian'}) end let :pre_condition do diff --git a/manila/spec/classes/manila_db_postgresql_spec.rb b/manila/spec/classes/manila_db_postgresql_spec.rb index 0c8f6a8aa..5bdb3091a 100644 --- a/manila/spec/classes/manila_db_postgresql_spec.rb +++ b/manila/spec/classes/manila_db_postgresql_spec.rb @@ -12,11 +12,11 @@ context 'on a RedHat osfamily' do let :facts do - { + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.0', :concat_basedir => '/var/lib/puppet/concat' - } + }) end context 'with only required parameters' do @@ -34,12 +34,12 @@ context 'on a Debian osfamily' do let :facts do - { + @default_facts.merge({ :operatingsystemrelease => '7.8', :operatingsystem => 'Debian', :osfamily => 'Debian', :concat_basedir => '/var/lib/puppet/concat' - } + }) end context 'with only required parameters' do diff --git a/manila/spec/classes/manila_db_spec.rb b/manila/spec/classes/manila_db_spec.rb index 7f75fac1c..d9a1bad8f 100644 --- a/manila/spec/classes/manila_db_spec.rb +++ b/manila/spec/classes/manila_db_spec.rb @@ -7,18 +7,18 @@ context 'with default parameters' do it { is_expected.to contain_manila_config('database/connection').with_value('sqlite:////var/lib/manila/manila.sqlite').with_secret(true) } - it { is_expected.to contain_manila_config('database/idle_timeout').with_value('3600') } - it { is_expected.to contain_manila_config('database/min_pool_size').with_value('1') } - it { is_expected.to contain_manila_config('database/max_pool_size').with_value('10') } - it { is_expected.to contain_manila_config('database/max_overflow').with_value('20') } - it { is_expected.to contain_manila_config('database/max_retries').with_value('10') } - it { is_expected.to contain_manila_config('database/retry_interval').with_value('10') } + it { is_expected.to contain_manila_config('database/idle_timeout').with_value('') } + it { is_expected.to contain_manila_config('database/min_pool_size').with_value('') } + it { is_expected.to contain_manila_config('database/max_pool_size').with_value('') } + it { is_expected.to contain_manila_config('database/max_overflow').with_value('') } + it { is_expected.to contain_manila_config('database/max_retries').with_value('') } + it { is_expected.to contain_manila_config('database/retry_interval').with_value('') } end context 'with specific parameters' do let :params do - { :database_connection => 'mysql://manila:manila@localhost/manila', + { :database_connection => 'mysql+pymysql://manila:manila@localhost/manila', :database_idle_timeout => '3601', :database_min_pool_size => '2', :database_max_pool_size => '21', @@ -27,7 +27,7 @@ :database_retry_interval => '11', } end - it { is_expected.to contain_manila_config('database/connection').with_value('mysql://manila:manila@localhost/manila').with_secret(true) } + it { is_expected.to contain_manila_config('database/connection').with_value('mysql+pymysql://manila:manila@localhost/manila').with_secret(true) } it { is_expected.to contain_manila_config('database/idle_timeout').with_value('3601') } it { is_expected.to contain_manila_config('database/min_pool_size').with_value('2') } it { is_expected.to contain_manila_config('database/max_retries').with_value('11') } @@ -37,6 +37,14 @@ end + context 'with MySQL-python library as backend package' do + let :params do + { :database_connection => 'mysql://manila:manila@localhost/manila' } + end + + it { is_expected.to contain_manila_config('database/connection').with_value('mysql://manila:manila@localhost/manila').with_secret(true) } + 
end + context 'with postgresql backend' do let :params do { :database_connection => 'postgresql://manila:manila@localhost/manila', } @@ -56,27 +64,51 @@ it_raises 'a Puppet::Error', /validate_re/ end + context 'with incorrect database_connection string' do + let :params do + { :database_connection => 'foo+pymysql://manila:manila@localhost/manila', } + end + + it_raises 'a Puppet::Error', /validate_re/ + end + end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian', + @default_facts.merge({ :osfamily => 'Debian', :operatingsystem => 'Debian', :operatingsystemrelease => 'jessie', - } + }) end it_configures 'manila::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://manila:manila@localhost/manila' } + end + + it { is_expected.to contain_package('manila-backend-package').with({ :ensure => 'present', :name => 'python-pymysql' }) } + end end context 'on Redhat platforms' do let :facts do - { :osfamily => 'RedHat', + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.1', - } + }) end it_configures 'manila::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://manila:manila@localhost/manila' } + end + + it { is_expected.not_to contain_package('manila-backend-package') } + end end end diff --git a/manila/spec/classes/manila_db_sync_spec.rb b/manila/spec/classes/manila_db_sync_spec.rb index bbdb6fffe..36790f816 100644 --- a/manila/spec/classes/manila_db_sync_spec.rb +++ b/manila/spec/classes/manila_db_sync_spec.rb @@ -3,7 +3,7 @@ describe 'manila::db::sync' do let :facts do - {:osfamily => 'Debian'} + @default_facts.merge({:osfamily => 'Debian'}) end it { is_expected.to contain_exec('manila-manage db_sync').with( :command => 'manila-manage db sync', diff --git a/manila/spec/classes/manila_ganesha_spec.rb b/manila/spec/classes/manila_ganesha_spec.rb index 478688fd3..6d5c29448 100644 --- a/manila/spec/classes/manila_ganesha_spec.rb +++ b/manila/spec/classes/manila_ganesha_spec.rb @@ -27,7 +27,7 @@ context 'on Red Hat platforms' do let :facts do - {:osfamily => 'RedHat'} + @default_facts.merge({:osfamily => 'RedHat'}) end it_configures 'manila NFS Ganesha options for share drivers' end diff --git a/manila/spec/classes/manila_keystone_auth_spec.rb b/manila/spec/classes/manila_keystone_auth_spec.rb index 7047b5d1d..27acfb6db 100644 --- a/manila/spec/classes/manila_keystone_auth_spec.rb +++ b/manila/spec/classes/manila_keystone_auth_spec.rb @@ -3,7 +3,8 @@ describe 'manila::keystone::auth' do let :params do - {:password => 'pw'} + {:password => 'pw', + :password_v2 => 'pw2'} end describe 'with only required params' do @@ -25,6 +26,21 @@ :description => 'Manila Service' ) + is_expected.to contain_keystone_user('manilav2').with( + :ensure => 'present', + :password => 'pw2', + :email => 'manilav2@localhost', + ) + is_expected.to contain_keystone_user_role('manilav2@services').with( + :ensure => 'present', + :roles => ['admin'] + ) + is_expected.to contain_keystone_service('manilav2').with( + :ensure => 'present', + :type => 'sharev2', + :description => 'Manila Service v2' + ) + end it { is_expected.to contain_keystone_endpoint('RegionOne/manila').with( :ensure => 'present', @@ -32,16 +48,25 @@ :admin_url => 'http://127.0.0.1:8786/v1/%(tenant_id)s', :internal_url => 'http://127.0.0.1:8786/v1/%(tenant_id)s' ) } + it { is_expected.to contain_keystone_endpoint('RegionOne/manilav2').with( + :ensure => 'present', + :public_url => 
'http://127.0.0.1:8786/v2/%(tenant_id)s', + :admin_url => 'http://127.0.0.1:8786/v2/%(tenant_id)s', + :internal_url => 'http://127.0.0.1:8786/v2/%(tenant_id)s' + ) } end context 'when overriding endpoint parameters' do before do params.merge!( - :region => 'RegionThree', - :public_url => 'https://10.0.42.1:4242/v42/%(tenant_id)s', - :admin_url => 'https://10.0.42.2:4242/v42/%(tenant_id)s', - :internal_url => 'https://10.0.42.3:4242/v42/%(tenant_id)s' + :region => 'RegionThree', + :public_url => 'https://10.0.42.1:4242/v42/%(tenant_id)s', + :admin_url => 'https://10.0.42.2:4242/v42/%(tenant_id)s', + :internal_url => 'https://10.0.42.3:4242/v42/%(tenant_id)s', + :public_url_v2 => 'https://10.0.42.1:4242/v43/%(tenant_id)s', + :admin_url_v2 => 'https://10.0.42.2:4242/v43/%(tenant_id)s', + :internal_url_v2 => 'https://10.0.42.3:4242/v43/%(tenant_id)s' ) end @@ -51,6 +76,12 @@ :admin_url => 'https://10.0.42.2:4242/v42/%(tenant_id)s', :internal_url => 'https://10.0.42.3:4242/v42/%(tenant_id)s' )} + it { is_expected.to contain_keystone_endpoint('RegionThree/manilav2').with( + :ensure => 'present', + :public_url => 'https://10.0.42.1:4242/v43/%(tenant_id)s', + :admin_url => 'https://10.0.42.2:4242/v43/%(tenant_id)s', + :internal_url => 'https://10.0.42.3:4242/v43/%(tenant_id)s' + )} end context 'when deprecated endpoint parameters' do @@ -79,9 +110,11 @@ describe 'when endpoint should not be configured' do before do params.merge!( - :configure_endpoint => false + :configure_endpoint => false, + :configure_endpoint_v2 => false ) end it { is_expected.to_not contain_keystone_endpoint('RegionOne/manila') } + it { is_expected.to_not contain_keystone_endpoint('RegionOne/manilav2') } end end diff --git a/manila/spec/classes/manila_logging_spec.rb b/manila/spec/classes/manila_logging_spec.rb index 0abc76644..f26b968cc 100644 --- a/manila/spec/classes/manila_logging_spec.rb +++ b/manila/spec/classes/manila_logging_spec.rb @@ -57,11 +57,11 @@ shared_examples 'basic default logging settings' do it 'configures manila logging settins with default values' do - is_expected.to contain_manila_config('DEFAULT/use_syslog').with(:value => 'false') - is_expected.to contain_manila_config('DEFAULT/use_stderr').with(:value => 'true') + is_expected.to contain_manila_config('DEFAULT/use_syslog').with(:value => '') + is_expected.to contain_manila_config('DEFAULT/use_stderr').with(:value => '') is_expected.to contain_manila_config('DEFAULT/log_dir').with(:value => '/var/log/manila') - is_expected.to contain_manila_config('DEFAULT/verbose').with(:value => 'false') - is_expected.to contain_manila_config('DEFAULT/debug').with(:value => 'false') + is_expected.to contain_manila_config('DEFAULT/verbose').with(:value => '') + is_expected.to contain_manila_config('DEFAULT/debug').with(:value => '') end end @@ -120,13 +120,13 @@ :default_log_levels, :fatal_deprecations, :instance_format, :instance_uuid_format, :log_date_format, ].each { |param| - it { is_expected.to contain_manila_config("DEFAULT/#{param}").with_ensure('absent') } + it { is_expected.to contain_manila_config("DEFAULT/#{param}").with_value('') } } end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'manila-logging' @@ -134,7 +134,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'manila-logging' diff --git a/manila/spec/classes/manila_params_spec.rb 
b/manila/spec/classes/manila_params_spec.rb index 32a73c624..28cc30d3d 100644 --- a/manila/spec/classes/manila_params_spec.rb +++ b/manila/spec/classes/manila_params_spec.rb @@ -3,7 +3,7 @@ describe 'manila::params' do let :facts do - {:osfamily => 'Debian'} + @default_facts.merge({:osfamily => 'Debian'}) end it 'should compile' do subject diff --git a/manila/spec/classes/manila_qpid_spec.rb b/manila/spec/classes/manila_qpid_spec.rb deleted file mode 100644 index f3fec931e..000000000 --- a/manila/spec/classes/manila_qpid_spec.rb +++ /dev/null @@ -1,51 +0,0 @@ -require 'spec_helper' - -describe 'manila::qpid' do - - let :facts do - {:puppetversion => '2.7', - :osfamily => 'RedHat'} - end - - describe 'with defaults' do - - it 'should contain all of the default resources' do - - is_expected.to contain_class('qpid::server').with( - :service_ensure => 'running', - :port => '5672' - ) - - end - - it 'should contain user' do - - is_expected.to contain_qpid_user('guest').with( - :password => 'guest', - :file => '/var/lib/qpidd/qpidd.sasldb', - :realm => 'OPENSTACK', - :provider => 'saslpasswd2' - ) - - end - - end - - describe 'when disabled' do - let :params do - { - :enabled => false - } - end - - it 'should be disabled' do - - is_expected.to_not contain_qpid_user('guest') - is_expected.to contain_class('qpid::server').with( - :service_ensure => 'stopped' - ) - - end - end - -end diff --git a/manila/spec/classes/manila_rabbitmq_spec.rb b/manila/spec/classes/manila_rabbitmq_spec.rb index d9a47d3f5..c96b5f08e 100644 --- a/manila/spec/classes/manila_rabbitmq_spec.rb +++ b/manila/spec/classes/manila_rabbitmq_spec.rb @@ -3,9 +3,9 @@ describe 'manila::rabbitmq' do let :facts do - { :puppetversion => '2.7', + @default_facts.merge({ :puppetversion => '2.7', :osfamily => 'Debian', - } + }) end describe 'with defaults' do diff --git a/manila/spec/classes/manila_scheduler_spec.rb b/manila/spec/classes/manila_scheduler_spec.rb index 9d3080be3..d5cb523d6 100644 --- a/manila/spec/classes/manila_scheduler_spec.rb +++ b/manila/spec/classes/manila_scheduler_spec.rb @@ -5,7 +5,7 @@ describe 'on debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end describe 'with default parameters' do @@ -56,7 +56,7 @@ describe 'on rhel platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end describe 'with default parameters' do diff --git a/manila/spec/classes/manila_share_glusterfs_spec.rb b/manila/spec/classes/manila_share_glusterfs_spec.rb index e63221ffe..70d194d21 100644 --- a/manila/spec/classes/manila_share_glusterfs_spec.rb +++ b/manila/spec/classes/manila_share_glusterfs_spec.rb @@ -23,7 +23,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'glusterfs share driver' @@ -31,7 +31,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'glusterfs share driver' diff --git a/manila/spec/classes/manila_share_spec.rb b/manila/spec/classes/manila_share_spec.rb index c25f52ec1..fea08001b 100644 --- a/manila/spec/classes/manila_share_spec.rb +++ b/manila/spec/classes/manila_share_spec.rb @@ -29,7 +29,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -41,7 +41,7 @@ context 'on RedHat platforms' do let :facts do - { 
:osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/manila/spec/classes/manila_spec.rb b/manila/spec/classes/manila_spec.rb index 19e6b9c76..efc10b113 100644 --- a/manila/spec/classes/manila_spec.rb +++ b/manila/spec/classes/manila_spec.rb @@ -1,11 +1,11 @@ require 'spec_helper' describe 'manila' do let :req_params do - {:rabbit_password => 'guest', :sql_connection => 'mysql://user:password@host/database'} + {:rabbit_password => 'guest', :sql_connection => 'mysql+pymysql://user:password@host/database'} end let :facts do - {:osfamily => 'Debian'} + @default_facts.merge({:osfamily => 'Debian'}) end describe 'with only required params' do @@ -49,10 +49,10 @@ :value => 'guest' ) is_expected.to contain_manila_config('DEFAULT/verbose').with( - :value => false + :value => '' ) is_expected.to contain_manila_config('DEFAULT/debug').with( - :value => false + :value => '' ) is_expected.to contain_manila_config('DEFAULT/storage_availability_zone').with( :value => 'nova' @@ -121,69 +121,6 @@ end end - describe 'with qpid rpc supplied' do - - let :params do - { - :sql_connection => 'mysql://user:password@host/database', - :qpid_password => 'guest', - :rpc_backend => 'qpid' - } - end - - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost') } - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_port').with_value('5672') } - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_username').with_value('guest') } - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_password').with_value('guest').with_secret(true) } - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_reconnect').with_value(true) } - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_reconnect_timeout').with_value('0') } - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_reconnect_limit').with_value('0') } - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_reconnect_interval_min').with_value('0') } - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_reconnect_interval_max').with_value('0') } - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_reconnect_interval').with_value('0') } - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_heartbeat').with_value('60') } - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_protocol').with_value('tcp') } - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_tcp_nodelay').with_value(true) } - end - - describe 'with qpid rpc and no qpid_sasl_mechanisms' do - let :params do - { - :sql_connection => 'mysql://user:password@host/database', - :qpid_password => 'guest', - :rpc_backend => 'qpid' - } - end - - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_sasl_mechanisms').with_ensure('absent') } - end - - describe 'with qpid rpc and qpid_sasl_mechanisms string' do - let :params do - { - :sql_connection => 'mysql://user:password@host/database', - :qpid_password => 'guest', - :qpid_sasl_mechanisms => 'PLAIN', - :rpc_backend => 'qpid' - } - end - - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_sasl_mechanisms').with_value('PLAIN') } - end - - describe 'with qpid rpc and qpid_sasl_mechanisms array' do - let :params do - { - :sql_connection => 'mysql://user:password@host/database', - :qpid_password => 'guest', - :qpid_sasl_mechanisms => [ 'DIGEST-MD5', 
'GSSAPI', 'PLAIN' ], - :rpc_backend => 'qpid' - } - end - - it { is_expected.to contain_manila_config('oslo_messaging_qpid/qpid_sasl_mechanisms').with_value('DIGEST-MD5 GSSAPI PLAIN') } - end - describe 'with SSL enabled' do let :params do req_params.merge!({ @@ -317,7 +254,7 @@ let :params do { - :sql_connection => 'mysql://user:password@host/database', + :sql_connection => 'mysql+pymysql://user:password@host/database', :rpc_backend => 'zmq', } end diff --git a/manila/spec/defines/manila_backend_glusterfs_spec.rb b/manila/spec/defines/manila_backend_glusterfs_spec.rb index 447c300ea..10d1f8bf5 100644 --- a/manila/spec/defines/manila_backend_glusterfs_spec.rb +++ b/manila/spec/defines/manila_backend_glusterfs_spec.rb @@ -26,7 +26,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'glusterfs share driver' @@ -34,7 +34,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'glusterfs share driver' diff --git a/manila/spec/defines/manila_backend_glusternative_spec.rb b/manila/spec/defines/manila_backend_glusternative_spec.rb index 0ad65ab05..982776340 100644 --- a/manila/spec/defines/manila_backend_glusternative_spec.rb +++ b/manila/spec/defines/manila_backend_glusternative_spec.rb @@ -28,7 +28,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'glusternative volume driver' @@ -36,7 +36,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'glusternative volume driver' diff --git a/manila/spec/defines/manila_backend_glusternfs_spec.rb b/manila/spec/defines/manila_backend_glusternfs_spec.rb index 57a5ae594..8681e4eb3 100644 --- a/manila/spec/defines/manila_backend_glusternfs_spec.rb +++ b/manila/spec/defines/manila_backend_glusternfs_spec.rb @@ -30,7 +30,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'glusternfs volume driver' @@ -38,7 +38,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'glusternfs volume driver' diff --git a/manila/spec/spec_helper.rb b/manila/spec/spec_helper.rb index 3df4cede1..9bc7bcf96 100644 --- a/manila/spec/spec_helper.rb +++ b/manila/spec/spec_helper.rb @@ -5,6 +5,9 @@ RSpec.configure do |c| c.alias_it_should_behave_like_to :it_configures, 'configures' c.alias_it_should_behave_like_to :it_raises, 'raises' + c.before :each do + @default_facts = { :os_service_default => '' } + end end at_exit { RSpec::Puppet::Coverage.report! } diff --git a/midonet/.gitreview b/midonet/.gitreview index 0aaf6104e..e3a441674 100644 --- a/midonet/.gitreview +++ b/midonet/.gitreview @@ -2,4 +2,3 @@ host=review.gerrithub.io port=29418 project=midonet/puppet-midonet.git -defaultbranch=stable/v2015.06 diff --git a/midonet/README.md b/midonet/README.md index 8a20d8ffe..7b47843e2 100644 --- a/midonet/README.md +++ b/midonet/README.md @@ -22,8 +22,8 @@ Puppet module for install MidoNet components. MidoNet is an Apache licensed production grade network virtualization software for Infrastructure-as-a-Service (IaaS) clouds. 
This module provides the puppet -manifests to install all the components to deploy easily MidoNet in a production -environment. +manifests to install all the components to deploy easily MidoNet in a +production environment. To know all the components and how they relate each other, check out [midonet reference architecture @@ -52,11 +52,11 @@ be useful. ## Usage To use this module in a more advanced way, please check out the -[reference](#reference) section of this document. It is worth to highlight that all -the input variables have already a default input value, in a yaml document. +[reference](#reference) section of this document. It is worth to highlight that +all the input variables have already a default input value, in a yaml document. (We use R.I.Piennar [module data](https://www.devco.net/archives/2013/12/08/better-puppet-modules-using-hiera-data.php)) -To leverage this feature, please add the following in your `/etc/hiera.yaml` (or -the Hiera configuration file that you are using): +To leverage this feature, please add the following in your `/etc/puppet/hiera.yaml` +(or the Hiera configuration file that you are using): --- :backends: diff --git a/midonet/metadata.json b/midonet/metadata.json index 2da22a95c..6701551a5 100644 --- a/midonet/metadata.json +++ b/midonet/metadata.json @@ -1,6 +1,6 @@ { "name": "midonet-midonet", - "version": "2015.6.7", + "version": "2015.6.0", "author": "MidoNet", "summary": "Configure and install MidoNet components", "license": "Apache-2.0", @@ -14,7 +14,8 @@ { "name":"midonet-cassandra","version_requirement":">=1.0.0" }, { "name":"puppetlabs-apt","version_requirement":">=1.7.0 <2.0.0" }, { "name":"puppetlabs-java","version_requirement":">=1.3.0" }, - { "name":"puppetlabs-tomcat","version_requirement":">=1.2.0" } + { "name":"puppetlabs-tomcat","version_requirement":">=1.2.0" }, + { "name":"puppetlabs-stdlib","version_requirement":">= 4.2.0 < 5.0.0" } ], "tags": ["openstack", "sdn", "midonet"], "operatingsystem_support": [ diff --git a/midonet/test/init.pp b/midonet/test/init.pp deleted file mode 100644 index 3a01941c0..000000000 --- a/midonet/test/init.pp +++ /dev/null @@ -1,19 +0,0 @@ -# The baseline for module testing used by Puppet Labs is that each manifest -# should have a corresponding test manifest that declares that class or defined -# type. -# -# Tests are then run by using puppet apply --noop (to check for compilation -# errors and view a log of events) or by fully applying the test in a virtual -# environment (to compare the resulting system state to the desired state). -# -# Learn more about module testing here: -# http://docs.puppetlabs.com/guides/tests_smoke.html -# - -# Fake the facter when it compiles. The augeas version that will be installed -# will be this one -if empty($::augeasversion) { - $augeasversion = '1.0.0' -} - -class {'midonet':} diff --git a/midonet/test/integration/default/bats/verify_server.bats b/midonet/test/integration/default/bats/verify_server.bats deleted file mode 100644 index adefa79ec..000000000 --- a/midonet/test/integration/default/bats/verify_server.bats +++ /dev/null @@ -1,160 +0,0 @@ -# Test verify methods for midonet_repository - -command_exists() { - command -v "$@" > /dev/null 2>&1 -} - -# Code copied unashamedly from http://get.docker.io -get_distro() { - lsb_dist='' - if command_exists lsb_release; then - lsb_dist="$(lsb_release -si)" - fi - if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then - lsb_dist="$(. 
/etc/lsb-release && echo "$DISTRIB_ID")" - fi - if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then - lsb_dist='debian' - fi - if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then - lsb_dist='red-hat' - fi - if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then - lsb_dist="$(. /etc/os-release && echo "$ID")" - fi - - distro=$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]') -} - -get_distro - -@test 'midonet repo is set' { - - case $distro in - ubuntu) - run file /etc/apt/sources.list.d/midonet.list - [ "$status" -eq 0 ] - run file /etc/apt/sources.list.d/midonet-openstack-integration.list - [ "$status" -eq 0 ] - ;; - centos|red-hat) - run ls /etc/yum.repos.d/midonet.repo - [ "$status" -eq 0 ] - run ls /etc/yum.repos.d/midonet-openstack-integration.repo - [ "$status" -eq 0 ] - ;; - *) - exit 1; - esac -} - -@test 'midonet packages are available' { - case $distro in - ubuntu) - run bash -c "apt-cache search mido | grep midolman" - [ "$status" -eq 0 ] - run bash -c "apt-cache search mido | grep midonet-api" - [ "$status" -eq 0 ] - run bash -c "apt-cache search mido | grep python-midonetclient" - [ "$status" -eq 0 ] - run bash -c "apt-cache search mido | grep python-neutron-plugin-midonet" - [ "$status" -eq 0 ] - run bash -c "apt-cache search dsc20" - [ "$status" -eq 0 ] - ;; - centos|red-hat) - run bash -c "yum search mido | grep midolman" - [ "$status" -eq 0 ] - run bash -c "yum search mido | grep midonet-api" - [ "$status" -eq 0 ] - run bash -c "yum search mido | grep python-midonetclient" - [ "$status" -eq 0 ] - run bash -c "yum search mido | grep python-neutron-plugin-midonet" - [ "$status" -eq 0 ] - run bash -c "yum search dsc20-2.0.10-1" - [ "$status" -eq 0 ] - ;; - *) - exit 1; - esac -} - -@test 'zookeeper is running' { - case $distro in - ubuntu) - run bash -c "sudo /usr/share/zookeeper/bin/zkServer.sh status || sudo /usr/sbin/zkServer.sh status" - [ "$status" -eq 0 ] - ;; - centos|red-hat) - run sudo /usr/sbin/zkServer.sh status - [ "$status" -eq 0 ] - ;; - *) - exit 1; - esac -} - -@test 'cassandra is running' { - case $distro in - ubuntu) - run sudo service cassandra status - [ "$status" -eq 0 ] - ;; - centos|red-hat) - run sudo service cassandra status - [ "$status" -eq 0 ] - ;; - *) - exit 1; - esac -} - -@test 'midonet-agent is running' { - case $distro in - ubuntu) - run sudo service midolman status - [ "$status" -eq 0 ] - ;; - centos|red-hat) - run sudo service midolman status - [ "$status" -eq 0 ] - ;; - *) - exit 1; - esac -} - -@test 'midonet-api is running' { - case $distro in - ubuntu) - run sudo service tomcat7 status - [ "$status" -eq 0 ] - ;; - centos|red-hat) - run sudo service tomcat status - [ "$status" -eq 0 ] - ;; - *) - exit 1; - esac -} - -@test 'midonet-cli is installed' { - case $distro in - ubuntu) - run bash -c "dpkg -l | grep python-midonetclient" - [ "$status" -eq 0 ] - ;; - centos|red-hat) - run bash -c "rpm -qa | grep python-midonetclient" - [ "$status" -eq 0 ] - ;; - *) - exit 1; - esac -} - -@test 'midonetrc file is created' { - run bash -c "file /root/.midonetrc" - [ "$status" -eq 0 ] -} diff --git a/neutron/CHANGELOG.md b/neutron/CHANGELOG.md index 600bae1c2..1f9add36f 100644 --- a/neutron/CHANGELOG.md +++ b/neutron/CHANGELOG.md @@ -1,3 +1,74 @@ +##2015-11-25 - 7.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Liberty. 
+ +####Backwards-incompatible changes +- change section name for AMQP qpid parameters +- remove deprecated mysql options +- delete namespaces by default +- filename change for config-dir for ml2 cisco nexus mech driver +- unset deprecated parameter use_namespaces by default +- drop neutron_plugin_ovs type/provider + +####Features +- add driver option for metering agent +- split SR-IOV configuration file into driver and agent pieces +- disable third-party software management +- jumbo frames between instances +- remove POSIX users, groups, and file modes +- add support for RabbitMQ connection heartbeat +- keystone/auth: make service description configurable +- add tag to package and service resources +- allow neutron-dhcp-agent broadcast reply +- add neutron::db::sync and neutron::db +- add an ability to manage use_stderr parameter +- reflect provider change in puppet-openstacklib +- make vswitch optional for ovs agent configuration +- allow greater flexibility in the kind of parameter passed to ml2.pp +- deprecate nova_admin_* options +- drop nova_admin_tenant_id_setter +- db: Use postgresql lib class for psycopg package +- allow customization of db sync command line +- support for Nuage Neutron plugin +- support for Big Switch ML2 plugin +- add an ability to set up drop_flows_on_start option +- use json output instead of plain-text +- use json instead of regexp to parse subnet data +- simplify rpc_backend parameter +- add support for prevent_arp_spoofing option +- add support for rpc_response_timeout option +- remove use of template based config from cisco ml2 nexus plugin +- update Cisco ML2 configuration +- make package change run the db_sync +- accommodate for VPNaaS package name change in RDO + +####Bugfixes +- rely on autorequire for config resource ordering +- make sure Neutron_config is applied before Neutron_network +- make sure Nova_admin_tenant_id_setter is executed after Keystone_endpoint +- add workaround for v2 tenant names +- swap Facter[].value with Facter.value() +- dbsync should only notify services that use the db +- map allocation_pool property to set +- restart Neutron agents if packages are updated +- remove unnecessary constraint between Package and Config resource +- fix 'shared' parameter check in neutron_network provider + +####Maintenance +- acceptance: enable debug & verbosity for OpenStack logs +- remove unused variable in params.pp +- initial msync run for all Puppet OpenStack modules +- lint: Add and fix missing parameter documentation +- try to use zuul-cloner to prepare fixtures +- remove class_parameter_defaults puppet-lint check +- add test coverage for neutron::db::mysql +- fix rspec 3.x syntax +- acceptance: use common bits from puppet-openstack-integration +- fix unit tests against Puppet 4.3.0 +- acceptance: enable loadbalancer service_plugin +- acceptance: test OVS bridge listing + ##2015-10-10 - 6.1.0 ### Summary diff --git a/neutron/README.md b/neutron/README.md index 85cc5f0d8..cb80c9ee6 100644 --- a/neutron/README.md +++ b/neutron/README.md @@ -1,7 +1,7 @@ neutron =================================== -6.1.0 - 2015.1 - Kilo +7.0.0 - 2015.2.0 - Liberty #### Table of Contents @@ -23,7 +23,7 @@ Module Description The neutron module is an attempt to make Puppet capable of managing the entirety of neutron. This includes manifests to provision such things as keystone endpoints, RPC configurations specific to neutron, database connections, and network driver plugins.
Types are shipped as part of the neutron module to assist in manipulation of the Openstack configuration files. -This module is tested in combination with other modules needed to build and leverage an entire Openstack installation. These modules can be found, all pulled together in the [openstack module](https://github.com/stackforge/puppet-openstack). +This module is tested in combination with other modules needed to build and leverage an entire Openstack installation. Setup ----- @@ -38,7 +38,7 @@ Setup ### Beginning with neutron -To utilize the neutron module's functionality you will need to declare multiple resources. The following is a modified excerpt from the [openstack module](httpd://github.com/stackforge/puppet-openstack). It provides an example of setting up an Open vSwitch neutron installation. This is not an exhaustive list of all the components needed. We recommend that you consult and understand the [openstack module](https://github.com/stackforge/puppet-openstack) and the [core openstack](http://docs.openstack.org) documentation to assist you in understanding the available deployment options. +To utilize the neutron module's functionality you will need to declare multiple resources. The following example displays the setting up of an Open vSwitch neutron installation. This is not an exhaustive list of all the components needed. We recommend that you consult and understand the [core openstack](http://docs.openstack.org) documentation to assist you in understanding the available deployment options. ```puppet # enable the neutron service diff --git a/neutron/examples/neutron.pp b/neutron/examples/neutron.pp index f9648c051..7e5afc7c2 100644 --- a/neutron/examples/neutron.pp +++ b/neutron/examples/neutron.pp @@ -13,8 +13,8 @@ # The API server talks to keystone for authorisation class { '::neutron::server': - keystone_password => 'password', - connection => 'mysql://neutron:password@192.168.1.1/neutron', + auth_password => 'password', + database_connection => 'mysql://neutron:password@192.168.1.1/neutron', } # Configure nova notifications system diff --git a/neutron/manifests/agents/dhcp.pp b/neutron/manifests/agents/dhcp.pp index 0c1049205..b359748e5 100644 --- a/neutron/manifests/agents/dhcp.pp +++ b/neutron/manifests/agents/dhcp.pp @@ -15,7 +15,7 @@ # Defaults to true # # [*debug*] -# (optional) Show debugging output in log. Defaults to false. +# (optional) Show debugging output in log. Defaults to $::os_service_default. # # [*state_path*] # (optional) Where to store dnsmasq state files. This directory must be @@ -31,7 +31,7 @@ # # [*dhcp_domain*] # (optional) domain to use for building the hostnames -# Defaults to 'openstacklocal' +# Defaults to $::os_service_default # # [*dhcp_driver*] # (optional) Defaults to 'neutron.agent.linux.dhcp.Dnsmasq'. @@ -43,7 +43,7 @@ # # [*dnsmasq_config_file*] # (optional) Override the default dnsmasq settings with this file. -# Defaults to undef +# Defaults to $::os_service_default # # [*dhcp_delete_namespaces*] # (optional) Delete namespace after removing a dhcp server @@ -62,7 +62,7 @@ # # [*dhcp_broadcast_reply*] # (optional) Use broadcast in DHCP replies -# Defaults to false. +# Defaults to $::os_service_default. # # === Deprecated Parameters # @@ -70,26 +70,26 @@ # (optional) Deprecated. 'True' value will be enforced in future releases. # Allow overlapping IP (Must have kernel build with # CONFIG_NET_NS=y and iproute2 package that supports namespaces). -# Defaults to undef. +# Defaults to $::os_service_default. 
# class neutron::agents::dhcp ( $package_ensure = present, $enabled = true, $manage_service = true, - $debug = false, + $debug = $::os_service_default, $state_path = '/var/lib/neutron', $resync_interval = 30, $interface_driver = 'neutron.agent.linux.interface.OVSInterfaceDriver', - $dhcp_domain = 'openstacklocal', + $dhcp_domain = $::os_service_default, $dhcp_driver = 'neutron.agent.linux.dhcp.Dnsmasq', $root_helper = 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf', - $dnsmasq_config_file = undef, + $dnsmasq_config_file = $::os_service_default, $dhcp_delete_namespaces = true, $enable_isolated_metadata = false, $enable_metadata_network = false, - $dhcp_broadcast_reply = false, + $dhcp_broadcast_reply = $::os_service_default, # DEPRECATED PARAMETERS - $use_namespaces = undef, + $use_namespaces = $::os_service_default, ) { include ::neutron::params @@ -132,25 +132,16 @@ 'DEFAULT/root_helper': value => $root_helper; 'DEFAULT/dhcp_delete_namespaces': value => $dhcp_delete_namespaces; 'DEFAULT/dhcp_broadcast_reply': value => $dhcp_broadcast_reply; + 'DEFAULT/dnsmasq_config_file': value => $dnsmasq_config_file; } - if $use_namespaces != undef { + if ! is_service_default ($use_namespaces) { warning('The use_namespaces parameter is deprecated and will be removed in future releases') neutron_dhcp_agent_config { 'DEFAULT/use_namespaces': value => $use_namespaces; } } - if $dnsmasq_config_file { - neutron_dhcp_agent_config { - 'DEFAULT/dnsmasq_config_file': value => $dnsmasq_config_file; - } - } else { - neutron_dhcp_agent_config { - 'DEFAULT/dnsmasq_config_file': ensure => absent; - } - } - if $::neutron::params::dhcp_agent_package { Package['neutron'] -> Package['neutron-dhcp-agent'] Package['neutron-dhcp-agent'] -> Neutron_config<||> diff --git a/neutron/manifests/agents/l3.pp b/neutron/manifests/agents/l3.pp index d8a2602a8..ab9ee0587 100644 --- a/neutron/manifests/agents/l3.pp +++ b/neutron/manifests/agents/l3.pp @@ -32,40 +32,40 @@ # # [*router_id*] # (optional) The ID of the external router in neutron -# Defaults to blank +# Defaults to $::os_service_default # # [*gateway_external_network_id*] # (optional) The ID of the external network in neutron -# Defaults to blank +# Defaults to $::os_service_default # # [*handle_internal_only_routers*] # (optional) L3 Agent will handle non-external routers -# Defaults to true +# Defaults to $::os_service_default # # [*metadata_port*] # (optional) The port of the metadata server -# Defaults to 9697 +# Defaults to $::os_service_default # # [*send_arp_for_ha*] # (optional) Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 # to disable this feature. -# Defaults to 3 +# Defaults to $::os_service_default # # [*periodic_interval*] # (optional) seconds between re-sync routers' data if needed -# Defaults to 40 +# Defaults to $::os_service_default # # [*periodic_fuzzy_delay*] # (optional) seconds to start to sync routers' data after starting agent -# Defaults to 5 +# Defaults to $::os_service_default # # [*enable_metadata_proxy*] # (optional) can be set to False if the Nova metadata server is not available -# Defaults to True +# Defaults to $::os_service_default # # [*network_device_mtu*] # (optional) The MTU size for the interfaces managed by the L3 agent -# Defaults to undef +# Defaults to $::os_service_default # Should be deprecated in the next major release in favor of a global parameter # # [*router_delete_namespaces*] @@ -82,7 +82,7 @@ # # [*ha_vrrp_auth_password*] # (optional) VRRP authentication password. 
Required if ha_enabled = true. -# Defaults to undef +# Defaults to $::os_service_default # # [*ha_vrrp_advert_int*] # (optional) The advertisement interval in seconds. @@ -104,7 +104,7 @@ # (optional) Deprecated. 'True' value will be enforced in future releases. # Allow overlapping IP (Must have kernel build with # CONFIG_NET_NS=y and iproute2 package that supports namespaces). -# Defaults to undef. +# Defaults to $::os_service_default. # class neutron::agents::l3 ( $package_ensure = 'present', @@ -113,24 +113,24 @@ $debug = false, $external_network_bridge = 'br-ex', $interface_driver = 'neutron.agent.linux.interface.OVSInterfaceDriver', - $router_id = undef, - $gateway_external_network_id = undef, - $handle_internal_only_routers = true, - $metadata_port = '9697', - $send_arp_for_ha = '3', - $periodic_interval = '40', - $periodic_fuzzy_delay = '5', - $enable_metadata_proxy = true, - $network_device_mtu = undef, + $router_id = $::os_service_default, + $gateway_external_network_id = $::os_service_default, + $handle_internal_only_routers = $::os_service_default, + $metadata_port = $::os_service_default, + $send_arp_for_ha = $::os_service_default, + $periodic_interval = $::os_service_default, + $periodic_fuzzy_delay = $::os_service_default, + $enable_metadata_proxy = $::os_service_default, + $network_device_mtu = $::os_service_default, $router_delete_namespaces = true, $ha_enabled = false, $ha_vrrp_auth_type = 'PASS', - $ha_vrrp_auth_password = undef, + $ha_vrrp_auth_password = $::os_service_default, $ha_vrrp_advert_int = '3', $agent_mode = 'legacy', # DEPRECATED PARAMETERS $allow_automatic_l3agent_failover = false, - $use_namespaces = undef, + $use_namespaces = $::os_service_default, ) { include ::neutron::params @@ -164,24 +164,16 @@ 'DEFAULT/enable_metadata_proxy': value => $enable_metadata_proxy; 'DEFAULT/router_delete_namespaces': value => $router_delete_namespaces; 'DEFAULT/agent_mode': value => $agent_mode; + 'DEFAULT/network_device_mtu': value => $network_device_mtu; + 'DEFAULT/use_namespaces': value => $use_namespaces; } - if $use_namespaces != undef { + if ! is_service_default ($use_namespaces) { warning('The use_namespaces parameter is deprecated and will be removed in future releases') - neutron_l3_agent_config { - 'DEFAULT/use_namespaces': value => $use_namespaces; - } } - if $network_device_mtu { + if ! is_service_default ($network_device_mtu) { warning('The neutron::agents::l3::network_device_mtu parameter is deprecated, use neutron::network_device_mtu instead.') - neutron_l3_agent_config { - 'DEFAULT/network_device_mtu': value => $network_device_mtu; - } - } else { - neutron_l3_agent_config { - 'DEFAULT/network_device_mtu': ensure => absent; - } } if $::neutron::params::l3_agent_package { diff --git a/neutron/manifests/agents/lbaas.pp b/neutron/manifests/agents/lbaas.pp index 32d40c597..25ea6024f 100644 --- a/neutron/manifests/agents/lbaas.pp +++ b/neutron/manifests/agents/lbaas.pp @@ -15,7 +15,7 @@ # Defaults to true # # [*debug*] -# (optional) Show debugging output in log. Defaults to false. +# (optional) Show debugging output in log. Defaults to $::os_service_default. # # [*interface_driver*] # (optional) Defaults to 'neutron.agent.linux.interface.OVSInterfaceDriver'. @@ -38,19 +38,19 @@ # (optional) Deprecated. 'True' value will be enforced in future releases. # Allow overlapping IP (Must have kernel build with # CONFIG_NET_NS=y and iproute2 package that supports namespaces). -# Defaults to undef. +# Defaults to $::os_service_default. 
# class neutron::agents::lbaas ( $package_ensure = present, $enabled = true, $manage_service = true, - $debug = false, + $debug = $::os_service_default, $interface_driver = 'neutron.agent.linux.interface.OVSInterfaceDriver', $device_driver = 'neutron_lbaas.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver', $user_group = $::neutron::params::nobody_user_group, $manage_haproxy_package = true, # DEPRECATED PARAMETERS - $use_namespaces = undef, + $use_namespaces = $::os_service_default, ) { include ::neutron::params @@ -80,7 +80,7 @@ 'haproxy/user_group': value => $user_group; } - if $use_namespaces != undef { + if ! is_service_default ($use_namespaces) { warning('The use_namespaces parameter is deprecated and will be removed in future releases') neutron_lbaas_agent_config { 'DEFAULT/use_namespaces': value => $use_namespaces; diff --git a/neutron/manifests/agents/metadata.pp b/neutron/manifests/agents/metadata.pp index f4a7f13d3..06608c1d7 100644 --- a/neutron/manifests/agents/metadata.pp +++ b/neutron/manifests/agents/metadata.pp @@ -37,19 +37,19 @@ # turn off verification of the certificate for ssl (Defaults to false) # # [*auth_ca_cert*] -# CA cert to check against with for ssl keystone. (Defaults to undef) +# CA cert to check against with for ssl keystone. (Defaults to $::os_service_default) # # [*auth_region*] -# The authentication region. (Defaults to undef) +# The authentication region. (Defaults to $::os_service_default) # # [*metadata_ip*] -# The IP address of the metadata service. Defaults to '127.0.0.1'. +# The IP address of the metadata service. Defaults to $::os_service_default. # # [*metadata_port*] -# The TCP port of the metadata service. Defaults to 8775. +# The TCP port of the metadata service. Defaults to $::os_service_default. # # [*metadata_protocol*] -# The protocol to use for requests to Nova metadata server. Defaults to 'http'. +# The protocol to use for requests to Nova metadata server. Defaults to $::os_service_default. # # [*metadata_workers*] # (optional) Number of separate worker processes to spawn. @@ -61,14 +61,13 @@ # # [*metadata_backlog*] # (optional) Number of backlog requests to configure the metadata server socket with. -# Defaults to 4096 +# Defaults to $::os_service_default # # [*metadata_memory_cache_ttl*] # (optional) Specifies time in seconds a metadata cache entry is valid in # memory caching backend. # Set to 0 will cause cache entries to never expire. -# Set to undef or false to disable cache. -# Defaults to 5 +# Set to $::os_service_default or false to disable cache. 
# class neutron::agents::metadata ( @@ -82,14 +81,14 @@ $auth_user = 'neutron', $auth_url = 'http://localhost:35357/v2.0', $auth_insecure = false, - $auth_ca_cert = undef, - $auth_region = undef, - $metadata_ip = '127.0.0.1', - $metadata_port = '8775', - $metadata_protocol = 'http', + $auth_ca_cert = $::os_service_default, + $auth_region = $::os_service_default, + $metadata_ip = $::os_service_default, + $metadata_port = $::os_service_default, + $metadata_protocol = $::os_service_default, $metadata_workers = $::processorcount, - $metadata_backlog = '4096', - $metadata_memory_cache_ttl = 5, + $metadata_backlog = $::os_service_default, + $metadata_memory_cache_ttl = $::os_service_default, ) { include ::neutron::params @@ -99,8 +98,10 @@ neutron_metadata_agent_config { 'DEFAULT/debug': value => $debug; + 'DEFAULT/auth_ca_cert': value => $auth_ca_cert; 'DEFAULT/auth_url': value => $auth_url; 'DEFAULT/auth_insecure': value => $auth_insecure; + 'DEFAULT/auth_region': value => $auth_region; 'DEFAULT/admin_tenant_name': value => $auth_tenant; 'DEFAULT/admin_user': value => $auth_user; 'DEFAULT/admin_password': value => $auth_password, secret => true; @@ -112,17 +113,7 @@ 'DEFAULT/metadata_backlog': value => $metadata_backlog; } - if $auth_region { - neutron_metadata_agent_config { - 'DEFAULT/auth_region': value => $auth_region; - } - } else { - neutron_metadata_agent_config { - 'DEFAULT/auth_region': ensure => absent; - } - } - - if $metadata_memory_cache_ttl { + if ! is_service_default ($metadata_memory_cache_ttl) and ($metadata_memory_cache_ttl) { neutron_metadata_agent_config { 'DEFAULT/cache_url': value => "memory://?default_ttl=${metadata_memory_cache_ttl}"; } @@ -132,16 +123,6 @@ } } - if $auth_ca_cert { - neutron_metadata_agent_config { - 'DEFAULT/auth_ca_cert': value => $auth_ca_cert; - } - } else { - neutron_metadata_agent_config { - 'DEFAULT/auth_ca_cert': ensure => absent; - } - } - if $::neutron::params::metadata_agent_package { Package['neutron-metadata'] -> Service['neutron-metadata'] package { 'neutron-metadata': diff --git a/neutron/manifests/agents/metering.pp b/neutron/manifests/agents/metering.pp index ef94a01ca..b86751a7c 100644 --- a/neutron/manifests/agents/metering.pp +++ b/neutron/manifests/agents/metering.pp @@ -54,7 +54,7 @@ # (optional) Deprecated. 'True' value will be enforced in future releases. # Allow overlapping IP (Must have kernel build with # CONFIG_NET_NS=y and iproute2 package that supports namespaces). -# Defaults to undef. +# Defaults to $::os_service_default. # class neutron::agents::metering ( @@ -64,10 +64,10 @@ $debug = false, $interface_driver = 'neutron.agent.linux.interface.OVSInterfaceDriver', $driver = 'neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver', - $measure_interval = '30', - $report_interval = '300', + $measure_interval = $::os_service_default, + $report_interval = $::os_service_default, # DEPRECATED PARAMETERS - $use_namespaces = undef, + $use_namespaces = $::os_service_default, ) { include ::neutron::params @@ -86,7 +86,7 @@ 'DEFAULT/report_interval': value => $report_interval; } - if $use_namespaces != undef { + if ! 
is_service_default ($use_namespaces) { warning('The use_namespaces parameter is deprecated and will be removed in future releases') neutron_metering_agent_config { 'DEFAULT/use_namespaces': value => $use_namespaces; diff --git a/neutron/manifests/agents/ml2/linuxbridge.pp b/neutron/manifests/agents/ml2/linuxbridge.pp index 0770d13d5..03a5925f4 100644 --- a/neutron/manifests/agents/ml2/linuxbridge.pp +++ b/neutron/manifests/agents/ml2/linuxbridge.pp @@ -64,11 +64,11 @@ $manage_service = true, $tunnel_types = [], $local_ip = false, - $vxlan_group = '224.0.0.1', - $vxlan_ttl = false, - $vxlan_tos = false, - $polling_interval = 2, - $l2_population = false, + $vxlan_group = $::os_service_default, + $vxlan_ttl = $::os_service_default, + $vxlan_tos = $::os_service_default, + $polling_interval = $::os_service_default, + $l2_population = $::os_service_default, $physical_interface_mappings = [], $firewall_driver = 'neutron.agent.linux.iptables_firewall.IptablesFirewallDriver' ) { @@ -86,25 +86,10 @@ fail('The local_ip parameter is required when vxlan tunneling is enabled') } - if $vxlan_group { - neutron_agent_linuxbridge { 'vxlan/vxlan_group': value => $vxlan_group } - } else { - neutron_agent_linuxbridge { 'vxlan/vxlan_group': ensure => absent } - } - - if $vxlan_ttl { - neutron_agent_linuxbridge { 'vxlan/vxlan_ttl': value => $vxlan_ttl } - } else { - neutron_agent_linuxbridge { 'vxlan/vxlan_ttl': ensure => absent } - } - - if $vxlan_tos { - neutron_agent_linuxbridge { 'vxlan/vxlan_tos': value => $vxlan_tos } - } else { - neutron_agent_linuxbridge { 'vxlan/vxlan_tos': ensure => absent } - } - neutron_agent_linuxbridge { + 'vxlan/vxlan_ttl': value => $vxlan_ttl; + 'vxlan/vxlan_group': value => $vxlan_group; + 'vxlan/vxlan_tos': value => $vxlan_tos; 'vxlan/enable_vxlan': value => true; 'vxlan/local_ip': value => $local_ip; 'vxlan/l2_population': value => $l2_population; @@ -113,8 +98,6 @@ neutron_agent_linuxbridge { 'vxlan/enable_vxlan': value => false; 'vxlan/local_ip': ensure => absent; - 'vxlan/vxlan_group': ensure => absent; - 'vxlan/l2_population': ensure => absent; } } diff --git a/neutron/manifests/agents/ml2/ovs.pp b/neutron/manifests/agents/ml2/ovs.pp index 5540f3b32..3549b55f8 100644 --- a/neutron/manifests/agents/ml2/ovs.pp +++ b/neutron/manifests/agents/ml2/ovs.pp @@ -71,17 +71,17 @@ # [*polling_interval*] # (optional) The number of seconds the agent will wait between # polling for local device changes. -# Defaults to '2" +# Defaults to $::os_service_default # # [*l2_population*] # (optional) Extension to use alongside ml2 plugin's l2population # mechanism driver. -# Defaults to false +# Defaults to $::os_service_default # # [*arp_responder*] # (optional) Enable or not the ARP responder. # Recommanded when using l2 population mechanism driver. -# Defaults to false +# Defaults to $::os_service_default # # [*firewall_driver*] # (optional) Firewall driver for realizing neutron security group function. @@ -90,7 +90,7 @@ # [*enable_distributed_routing*] # (optional) Set to True on L2 agents to enable support # for distributed virtual routing. 
-# Defaults to false +# Defaults to $::os_service_default # # [*drop_flows_on_start*] # (optional) Set to True to drop all flows during agent start for a clean @@ -107,12 +107,17 @@ # # [*prevent_arp_spoofing*] # (optional) Enable or not ARP Spoofing Protection -# Defaults to true +# Defaults to $::os_service_default +# +# [*extensions*] +# (optional) Extensions list to use +# Defaults to $::os_service_default # class neutron::agents::ml2::ovs ( $package_ensure = 'present', $enabled = true, $manage_service = true, + $extensions = $::os_service_default, $bridge_uplinks = [], $bridge_mappings = [], $integration_bridge = 'br-int', @@ -121,14 +126,14 @@ $local_ip = false, $tunnel_bridge = 'br-tun', $vxlan_udp_port = 4789, - $polling_interval = 2, - $l2_population = false, - $arp_responder = false, + $polling_interval = $::os_service_default, + $l2_population = $::os_service_default, + $arp_responder = $::os_service_default, $firewall_driver = 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver', - $enable_distributed_routing = false, + $enable_distributed_routing = $::os_service_default, $drop_flows_on_start = false, $manage_vswitch = true, - $prevent_arp_spoofing = true, + $prevent_arp_spoofing = $::os_service_default, ) { include ::neutron::params @@ -140,8 +145,10 @@ fail('Local ip for ovs agent must be set when tunneling is enabled') } - if $enable_tunneling and $enable_distributed_routing and ! $l2_population { - fail('L2 population must be enabled when DVR and tunneling are enabled') + if ($enable_tunneling) and (!is_service_default($enable_distributed_routing)) and (!is_service_default($l2_population)) { + if $enable_distributed_routing and ! $l2_population { + fail('L2 population must be enabled when DVR and tunneling are enabled') + } } Neutron_agent_ovs<||> ~> Service['neutron-ovs-agent-service'] @@ -182,6 +189,7 @@ 'agent/enable_distributed_routing': value => $enable_distributed_routing; 'agent/drop_flows_on_start': value => $drop_flows_on_start; 'agent/prevent_arp_spoofing': value => $prevent_arp_spoofing; + 'agent/extensions': value => join(any2array($extensions), ','); 'ovs/integration_bridge': value => $integration_bridge; } diff --git a/neutron/manifests/agents/ml2/sriov.pp b/neutron/manifests/agents/ml2/sriov.pp index 7c5a24868..ca32ed858 100644 --- a/neutron/manifests/agents/ml2/sriov.pp +++ b/neutron/manifests/agents/ml2/sriov.pp @@ -52,7 +52,10 @@ # semicolon separated list of virtual functions to exclude from network_device. # The network_device in the mapping should appear in the physical_device_mappings list. 
# - +# [*extensions*] +# (optional) Extensions list to use +# Defaults to $::os_service_default +# class neutron::agents::ml2::sriov ( $package_ensure = 'present', $enabled = true, @@ -60,6 +63,7 @@ $physical_device_mappings = [], $polling_interval = 2, $exclude_devices = [], + $extensions = $::os_service_default, ) { include ::neutron::params @@ -70,6 +74,7 @@ 'sriov_nic/polling_interval': value => $polling_interval; 'sriov_nic/exclude_devices': value => join($exclude_devices, ','); 'sriov_nic/physical_device_mappings': value => join($physical_device_mappings, ','); + 'agent/extensions': value => join(any2array($extensions), ','); } Package['neutron-sriov-nic-agent'] -> Neutron_sriov_agent_config <||> diff --git a/neutron/manifests/agents/vpnaas.pp b/neutron/manifests/agents/vpnaas.pp index 69636b505..ad659fe0d 100644 --- a/neutron/manifests/agents/vpnaas.pp +++ b/neutron/manifests/agents/vpnaas.pp @@ -38,10 +38,10 @@ # (optional) Defaults to 'neutron.agent.linux.interface.OVSInterfaceDriver'. # # [*external_network_bridge*] -# (optional) Defaults to undef +# (optional) Defaults to $::os_service_default # # [*ipsec_status_check_interval*] -# (optional) Status check interval. Defaults to '60'. +# (optional) Status check interval. Defaults to $::os_service_default. # class neutron::agents::vpnaas ( $package_ensure = present, @@ -49,8 +49,8 @@ $manage_service = true, $vpn_device_driver = 'neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver', $interface_driver = 'neutron.agent.linux.interface.OVSInterfaceDriver', - $external_network_bridge = undef, - $ipsec_status_check_interval = '60' + $external_network_bridge = $::os_service_default, + $ipsec_status_check_interval = $::os_service_default ) { include ::neutron::params @@ -80,14 +80,8 @@ 'DEFAULT/interface_driver': value => $interface_driver; } - if ($external_network_bridge) { - neutron_vpnaas_agent_config { - 'DEFAULT/external_network_bridge': value => $external_network_bridge; - } - } else { - neutron_vpnaas_agent_config { - 'DEFAULT/external_network_bridge': ensure => absent; - } + neutron_vpnaas_agent_config { + 'DEFAULT/external_network_bridge': value => $external_network_bridge; } if $::neutron::params::vpnaas_agent_package { diff --git a/neutron/manifests/db.pp b/neutron/manifests/db.pp index 1b013e88b..d7184fd06 100644 --- a/neutron/manifests/db.pp +++ b/neutron/manifests/db.pp @@ -10,39 +10,41 @@ # # [*database_idle_timeout*] # Timeout when db connections should be reaped. -# (Optional) Defaults to 3600. +# (Optional) Defaults to $::os_service_default. # # [*database_min_pool_size*] # Minimum number of SQL connections to keep open in a pool. -# (Optional) Defaults to 1. +# (Optional) Defaults to $::os_service_default. # # [*database_max_pool_size*] # Maximum number of SQL connections to keep open in a pool. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default. # # [*database_max_retries*] # Maximum db connection retries during startup. # Setting -1 implies an infinite retry count. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default. # # [*database_retry_interval*] # Interval between retries of opening a sql connection. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default. # # [*database_max_overflow*] # If set, use this value for max_overflow with sqlalchemy. -# (Optional) Defaults to 20. +# (Optional) Defaults to $::os_service_default. 
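[Editor's note, not part of the patch: throughout these hunks, $::os_service_default comes from puppet-openstacklib and resolves to the sentinel string '<SERVICE DEFAULT>', and is_service_default() simply tests for that sentinel, so options left at the sentinel fall back to the service's own default instead of a module-chosen value. A minimal sketch of the idiom, reusing the vpnaas setting from the hunk above; the notice() is only for illustration:]

$external_network_bridge = $::os_service_default

if is_service_default($external_network_bridge) {
  # value stays '<SERVICE DEFAULT>': the option is left to the neutron service default
  notice('external_network_bridge is left to the neutron service default')
}

neutron_vpnaas_agent_config {
  'DEFAULT/external_network_bridge': value => $external_network_bridge;
}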
# class neutron::db ( $database_connection = 'sqlite:////var/lib/neutron/ovs.sqlite', - $database_idle_timeout = 3600, - $database_min_pool_size = 1, - $database_max_pool_size = 10, - $database_max_retries = 10, - $database_retry_interval = 10, - $database_max_overflow = 20, + $database_idle_timeout = $::os_service_default, + $database_min_pool_size = $::os_service_default, + $database_max_pool_size = $::os_service_default, + $database_max_retries = $::os_service_default, + $database_retry_interval = $::os_service_default, + $database_max_overflow = $::os_service_default, ) { + include ::neutron::params + # NOTE(spredzy): In order to keep backward compatibility we rely on the pick function # to use neutron:: if neutron::db:: isn't specified. $database_connection_real = pick($::neutron::server::database_connection, $database_connection) @@ -54,14 +56,18 @@ $database_max_overflow_real = pick($::neutron::server::database_max_overflow, $database_max_overflow) validate_re($database_connection_real, - '(sqlite|mysql|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') + '^(sqlite|mysql(\+pymysql)?|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') if $database_connection_real { case $database_connection_real { - /^mysql:\/\//: { - $backend_package = false + /^mysql(\+pymysql)?:\/\//: { require 'mysql::bindings' require 'mysql::bindings::python' + if $database_connection_real =~ /^mysql\+pymysql/ { + $backend_package = $::neutron::params::pymysql_package_name + } else { + $backend_package = false + } } /^postgresql:\/\//: { $backend_package = false diff --git a/neutron/manifests/init.pp b/neutron/manifests/init.pp index 5a6e7c151..879176304 100644 --- a/neutron/manifests/init.pp +++ b/neutron/manifests/init.pp @@ -15,19 +15,19 @@ # # [*verbose*] # (optional) Verbose logging -# Defaults to False +# Defaults to $::os_service_default # # [*debug*] # (optional) Print debug messages in the logs -# Defaults to False +# Defaults to $::os_service_default # # [*bind_host*] # (optional) The IP/interface to bind to -# Defaults to 0.0.0.0 (all interfaces) +# Defaults to $::os_service_default # # [*bind_port*] # (optional) The port to use -# Defaults to 9696 +# Defaults to $::os_service_default # # [*core_plugin*] # (optional) Neutron plugin provider @@ -43,62 +43,62 @@ # [*service_plugins*] # (optional) Advanced service modules. # Could be an array that can have these elements: -# router, firewall, lbaas, vpnaas, metering -# Defaults to empty +# router, firewall, lbaas, vpnaas, metering, qos +# Defaults to $::os_service_default # # [*auth_strategy*] # (optional) How to authenticate -# Defaults to 'keystone'. 'noauth' is the only other valid option +# Defaults to 'keystone'. 'noauth' and 'keystone' are the only valid options # # [*base_mac*] # (optional) The MAC address pattern to use. -# Defaults to fa:16:3e:00:00:00 +# Defaults to $::os_service_default # # [*mac_generation_retries*] # (optional) How many times to try to generate a unique mac -# Defaults to 16 +# Defaults to $::os_service_default # # [*dhcp_lease_duration*] # (optional) DHCP lease -# Defaults to 86400 seconds +# Defaults to $::os_service_default # # [*dhcp_agents_per_network*] # (optional) Number of DHCP agents scheduled to host a network. # This enables redundant DHCP agents for configured networks. 
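[Editor's note, illustrative only: the neutron::db hunk above relaxes validate_re to accept mysql+pymysql:// URLs and, for that case, selects $::neutron::params::pymysql_package_name as the backend package. A profile using the new driver might look like the sketch below; host and credentials are placeholders matching the acceptance test later in this patch:]

class { '::neutron::db':
  database_connection => 'mysql+pymysql://neutron:a_big_secret@127.0.0.1/neutron?charset=utf8',
}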
-# Defaults to 1 +# Defaults to $::os_service_default # # [*network_device_mtu*] # (optional) The MTU size for the interfaces managed by neutron -# Defaults to undef +# Defaults to $::os_service_default # # [*dhcp_agent_notification*] # (optional) Allow sending resource operation notification to DHCP agent. -# Defaults to true +# Defaults to $::os_service_default # # [*advertise_mtu*] # (optional) VMs will receive DHCP and RA MTU option when the network's preferred MTU is known -# Defaults to false +# Defaults to $::os_service_default # # [*allow_bulk*] # (optional) Enable bulk crud operations -# Defaults to true +# Defaults to $::os_service_default # # [*allow_pagination*] # (optional) Enable pagination -# Defaults to false +# Defaults to $::os_service_default # # [*allow_sorting*] # (optional) Enable sorting -# Defaults to false +# Defaults to $::os_service_default # # [*allow_overlapping_ips*] # (optional) Enables network namespaces -# Defaults to false +# Defaults to $::os_service_default # # [*api_extensions_path*] # (optional) Specify additional paths for API extensions that the # module in use needs to load. -# Defaults to undef +# Defaults to $::os_service_default # # [*root_helper*] # (optional) Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real @@ -111,7 +111,7 @@ # agent_down_time, best if it is half or less than agent_down_time. # agent_down_time is a config for neutron-server, set by class neutron::server # report_interval is a config for neutron agents, set by class neutron -# Defaults to: 30 +# Defaults to: $::os_service_default # # [*memcache_servers*] # List of memcache servers in format of server:port. @@ -127,7 +127,7 @@ # # [*rpc_response_timeout*] # (optional) Seconds to wait for a response from a call -# Defaults to 60 +# Defaults to $::os_service_default # # [*rabbit_password*] # [*rabbit_host*] @@ -137,7 +137,7 @@ # # [*rabbit_virtual_host*] # (optional) virtualhost to use. -# Defaults to '/' +# Defaults to $::os_service_default # # [*rabbit_hosts*] # (optional) array of rabbitmq servers for HA. @@ -162,19 +162,19 @@ # # [*rabbit_use_ssl*] # (optional) Connect over SSL for RabbitMQ -# Defaults to false +# Defaults to $::os_service_default # # [*kombu_ssl_ca_certs*] # (optional) SSL certification authority file (valid only if SSL enabled). -# Defaults to undef +# Defaults to $::os_service_default # # [*kombu_ssl_certfile*] # (optional) SSL cert file (valid only if SSL enabled). -# Defaults to undef +# Defaults to $::os_service_default # # [*kombu_ssl_keyfile*] # (optional) SSL key file (valid only if SSL enabled). -# Defaults to undef +# Defaults to $::os_service_default # # [*kombu_ssl_version*] # (optional) SSL version to use (valid only if SSL enabled). @@ -187,50 +187,35 @@ # to MQ provider. This is used in some cases where you may need to wait # for the provider to propery premote the master before attempting to # reconnect. 
See https://review.openstack.org/#/c/76686 -# Defaults to '1.0' -# -# [*qpid_hostname*] -# [*qpid_port*] -# [*qpid_username*] -# [*qpid_password*] -# [*qpid_heartbeat*] -# [*qpid_protocol*] -# [*qpid_tcp_nodelay*] -# [*qpid_reconnect*] -# [*qpid_reconnect_timeout*] -# [*qpid_reconnect_limit*] -# [*qpid_reconnect_interval*] -# [*qpid_reconnect_interval_min*] -# [*qpid_reconnect_interval_max*] -# (optional) various QPID options +# Defaults to $::os_service_default # # [*use_ssl*] # (optinal) Enable SSL on the API server -# Defaults to false, not set +# Defaults to $::os_service_default # # [*cert_file*] # (optinal) certificate file to use when starting api server securely -# defaults to false, not set +# defaults to $::os_service_default # # [*key_file*] # (optional) Private key file to use when starting API server securely -# Defaults to false, not set +# Defaults to $::os_service_default # # [*ca_file*] # (optional) CA certificate file to use to verify connecting clients -# Defaults to false, not set +# Defaults to $::os_service_default # # [*use_syslog*] # (optional) Use syslog for logging -# Defaults to false +# Defaults to $::os_service_default # # [*use_stderr*] # (optional) Use stderr for logging -# Defaults to true +# Defaults to $::os_service_default # # [*log_facility*] # (optional) Syslog facility to receive log lines -# Defaults to LOG_USER +# Defaults to $::os_service_default # # [*log_file*] # (optional) Where to log @@ -244,106 +229,127 @@ # [*state_path*] # (optional) Where to store state files. This directory must be writable # by the user executing the agent -# Defaults to: /var/lib/neutron +# Defaults to: $::os_service_default # # [*lock_path*] # (optional) Where to store lock files. This directory must be writeable # by the user executing the agent -# Defaults to: /var/lib/neutron/lock +# Defaults to: $::os_service_default +# +# DEPRECATED PARAMETERS +# +# [*qpid_hostname*] +# [*qpid_port*] +# [*qpid_username*] +# [*qpid_password*] +# [*qpid_heartbeat*] +# [*qpid_protocol*] +# [*qpid_tcp_nodelay*] +# [*qpid_reconnect*] +# [*qpid_reconnect_timeout*] +# [*qpid_reconnect_limit*] +# [*qpid_reconnect_interval*] +# [*qpid_reconnect_interval_min*] +# [*qpid_reconnect_interval_max*] # class neutron ( $enabled = true, $package_ensure = 'present', - $verbose = false, - $debug = false, - $bind_host = '0.0.0.0', - $bind_port = '9696', + $verbose = $::os_service_default, + $debug = $::os_service_default, + $bind_host = $::os_service_default, + $bind_port = $::os_service_default, $core_plugin = 'openvswitch', - $service_plugins = undef, + $service_plugins = $::os_service_default, $auth_strategy = 'keystone', - $base_mac = 'fa:16:3e:00:00:00', - $mac_generation_retries = 16, - $dhcp_lease_duration = 86400, - $dhcp_agents_per_network = 1, - $network_device_mtu = undef, - $dhcp_agent_notification = true, - $advertise_mtu = false, - $allow_bulk = true, - $allow_pagination = false, - $allow_sorting = false, - $allow_overlapping_ips = false, - $api_extensions_path = undef, + $base_mac = $::os_service_default, + $mac_generation_retries = $::os_service_default, + $dhcp_lease_duration = $::os_service_default, + $dhcp_agents_per_network = $::os_service_default, + $network_device_mtu = $::os_service_default, + $dhcp_agent_notification = $::os_service_default, + $advertise_mtu = $::os_service_default, + $allow_bulk = $::os_service_default, + $allow_pagination = $::os_service_default, + $allow_sorting = $::os_service_default, + $allow_overlapping_ips = $::os_service_default, + $api_extensions_path 
= $::os_service_default, $root_helper = 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf', - $report_interval = '30', + $report_interval = $::os_service_default, $memcache_servers = false, $control_exchange = 'neutron', $rpc_backend = 'rabbit', - $rpc_response_timeout = 60, + $rpc_response_timeout = $::os_service_default, $rabbit_password = false, $rabbit_host = 'localhost', $rabbit_hosts = false, - $rabbit_port = '5672', + $rabbit_port = 5672, $rabbit_user = 'guest', - $rabbit_virtual_host = '/', + $rabbit_virtual_host = $::os_service_default, $rabbit_heartbeat_timeout_threshold = 0, $rabbit_heartbeat_rate = 2, - $rabbit_use_ssl = false, - $kombu_ssl_ca_certs = undef, - $kombu_ssl_certfile = undef, - $kombu_ssl_keyfile = undef, + $rabbit_use_ssl = $::os_service_default, + $kombu_ssl_ca_certs = $::os_service_default, + $kombu_ssl_certfile = $::os_service_default, + $kombu_ssl_keyfile = $::os_service_default, $kombu_ssl_version = 'TLSv1', - $kombu_reconnect_delay = '1.0', - $qpid_hostname = 'localhost', - $qpid_port = '5672', - $qpid_username = 'guest', - $qpid_password = 'guest', - $qpid_heartbeat = 60, - $qpid_protocol = 'tcp', - $qpid_tcp_nodelay = true, - $qpid_reconnect = true, - $qpid_reconnect_timeout = 0, - $qpid_reconnect_limit = 0, - $qpid_reconnect_interval_min = 0, - $qpid_reconnect_interval_max = 0, - $qpid_reconnect_interval = 0, - $use_ssl = false, - $cert_file = false, - $key_file = false, - $ca_file = false, - $use_syslog = false, - $use_stderr = true, - $log_facility = 'LOG_USER', + $kombu_reconnect_delay = $::os_service_default, + $use_ssl = $::os_service_default, + $cert_file = $::os_service_default, + $key_file = $::os_service_default, + $ca_file = $::os_service_default, + $use_syslog = $::os_service_default, + $use_stderr = $::os_service_default, + $log_facility = $::os_service_default, $log_file = false, $log_dir = '/var/log/neutron', - $state_path = '/var/lib/neutron', - $lock_path = '/var/lib/neutron/lock', + $state_path = $::os_service_default, + $lock_path = $::os_service_default, + # DEPRECATED PARAMETERS + $qpid_hostname = undef, + $qpid_port = undef, + $qpid_username = undef, + $qpid_password = undef, + $qpid_heartbeat = undef, + $qpid_protocol = undef, + $qpid_tcp_nodelay = undef, + $qpid_reconnect = undef, + $qpid_reconnect_timeout = undef, + $qpid_reconnect_limit = undef, + $qpid_reconnect_interval_min = undef, + $qpid_reconnect_interval_max = undef, + $qpid_reconnect_interval = undef, ) { include ::neutron::params - if $use_ssl { - if !$cert_file { + if ! is_service_default($use_ssl) and ($use_ssl) { + if is_service_default($cert_file) { fail('The cert_file parameter is required when use_ssl is set to true') } - if !$key_file { + if is_service_default($key_file) { fail('The key_file parameter is required when use_ssl is set to true') } } - if $ca_file and !$use_ssl { - fail('The ca_file parameter requires that use_ssl to be set to true') + if ! is_service_default($use_ssl) and !($use_ssl) { + if ! is_service_default($ca_file) and ($ca_file) { + fail('The ca_file parameter requires that use_ssl to be set to true') + } } - if $kombu_ssl_ca_certs and !$rabbit_use_ssl { - fail('The kombu_ssl_ca_certs parameter requires rabbit_use_ssl to be set to true') - } - if $kombu_ssl_certfile and !$rabbit_use_ssl { - fail('The kombu_ssl_certfile parameter requires rabbit_use_ssl to be set to true') - } - if $kombu_ssl_keyfile and !$rabbit_use_ssl { - fail('The kombu_ssl_keyfile parameter requires rabbit_use_ssl to be set to true') + if ! 
is_service_default($rabbit_use_ssl) and !($rabbit_use_ssl) { + if ! is_service_default($kombu_ssl_ca_certs) and ($kombu_ssl_ca_certs) { + fail('The kombu_ssl_ca_certs parameter requires rabbit_use_ssl to be set to true') + } + if ! is_service_default($kombu_ssl_certfile) and ($kombu_ssl_certfile) { + fail('The kombu_ssl_certfile parameter requires rabbit_use_ssl to be set to true') + } + if ! is_service_default($kombu_ssl_keyfile) and ($kombu_ssl_keyfile) { + fail('The kombu_ssl_keyfile parameter requires rabbit_use_ssl to be set to true') + } } - if ($kombu_ssl_certfile and !$kombu_ssl_keyfile) or ($kombu_ssl_keyfile and !$kombu_ssl_certfile) { + if (is_service_default($kombu_ssl_certfile) and ! is_service_default($kombu_ssl_keyfile)) or (is_service_default($kombu_ssl_keyfile) and ! is_service_default($kombu_ssl_certfile)) { fail('The kombu_ssl_certfile and kombu_ssl_keyfile parameters must be used together') } @@ -364,6 +370,8 @@ 'DEFAULT/verbose': value => $verbose; 'DEFAULT/debug': value => $debug; 'DEFAULT/use_stderr': value => $use_stderr; + 'DEFAULT/use_syslog': value => $use_syslog; + 'DEFAULT/syslog_log_facility': value => $log_facility; 'DEFAULT/bind_host': value => $bind_host; 'DEFAULT/bind_port': value => $bind_port; 'DEFAULT/auth_strategy': value => $auth_strategy; @@ -384,6 +392,7 @@ 'DEFAULT/state_path': value => $state_path; 'DEFAULT/lock_path': value => $lock_path; 'DEFAULT/rpc_response_timeout': value => $rpc_response_timeout; + 'DEFAULT/network_device_mtu': value => $network_device_mtu; 'agent/root_helper': value => $root_helper; 'agent/report_interval': value => $report_interval; } @@ -407,18 +416,7 @@ } } - if $network_device_mtu { - neutron_config { - 'DEFAULT/network_device_mtu': value => $network_device_mtu; - } - } else { - neutron_config { - 'DEFAULT/network_device_mtu': ensure => absent; - } - } - - - if $service_plugins { + if ! is_service_default ($service_plugins) and ($service_plugins) { if is_array($service_plugins) { neutron_config { 'DEFAULT/service_plugins': value => join($service_plugins, ',') } } else { @@ -459,27 +457,12 @@ 'oslo_messaging_rabbit/heartbeat_rate': value => $rabbit_heartbeat_rate; 'oslo_messaging_rabbit/rabbit_use_ssl': value => $rabbit_use_ssl; 'oslo_messaging_rabbit/kombu_reconnect_delay': value => $kombu_reconnect_delay; + 'oslo_messaging_rabbit/kombu_ssl_ca_certs': value => $kombu_ssl_ca_certs; + 'oslo_messaging_rabbit/kombu_ssl_certfile': value => $kombu_ssl_certfile; + 'oslo_messaging_rabbit/kombu_ssl_keyfile': value => $kombu_ssl_keyfile; } - if $rabbit_use_ssl { - - if $kombu_ssl_ca_certs { - neutron_config { 'oslo_messaging_rabbit/kombu_ssl_ca_certs': value => $kombu_ssl_ca_certs; } - } else { - neutron_config { 'oslo_messaging_rabbit/kombu_ssl_ca_certs': ensure => absent; } - } - - if $kombu_ssl_certfile or $kombu_ssl_keyfile { - neutron_config { - 'oslo_messaging_rabbit/kombu_ssl_certfile': value => $kombu_ssl_certfile; - 'oslo_messaging_rabbit/kombu_ssl_keyfile': value => $kombu_ssl_keyfile; - } - } else { - neutron_config { - 'oslo_messaging_rabbit/kombu_ssl_certfile': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_keyfile': ensure => absent; - } - } + if ! 
is_service_default($rabbit_use_ssl) and ($rabbit_use_ssl) { if $kombu_ssl_version { neutron_config { 'oslo_messaging_rabbit/kombu_ssl_version': value => $kombu_ssl_version; } @@ -489,9 +472,6 @@ } else { neutron_config { - 'oslo_messaging_rabbit/kombu_ssl_ca_certs': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_certfile': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_keyfile': ensure => absent; 'oslo_messaging_rabbit/kombu_ssl_version': ensure => absent; } } @@ -499,51 +479,15 @@ } if $rpc_backend == 'qpid' or $rpc_backend == 'neutron.openstack.common.rpc.impl_qpid' { - neutron_config { - 'oslo_messaging_qpid/qpid_hostname': value => $qpid_hostname; - 'oslo_messaging_qpid/qpid_port': value => $qpid_port; - 'oslo_messaging_qpid/qpid_username': value => $qpid_username; - 'oslo_messaging_qpid/qpid_password': value => $qpid_password, secret => true; - 'oslo_messaging_qpid/qpid_heartbeat': value => $qpid_heartbeat; - 'oslo_messaging_qpid/qpid_protocol': value => $qpid_protocol; - 'oslo_messaging_qpid/qpid_tcp_nodelay': value => $qpid_tcp_nodelay; - 'oslo_messaging_qpid/qpid_reconnect': value => $qpid_reconnect; - 'oslo_messaging_qpid/qpid_reconnect_timeout': value => $qpid_reconnect_timeout; - 'oslo_messaging_qpid/qpid_reconnect_limit': value => $qpid_reconnect_limit; - 'oslo_messaging_qpid/qpid_reconnect_interval_min': value => $qpid_reconnect_interval_min; - 'oslo_messaging_qpid/qpid_reconnect_interval_max': value => $qpid_reconnect_interval_max; - 'oslo_messaging_qpid/qpid_reconnect_interval': value => $qpid_reconnect_interval; - } + warning('Qpid driver is removed from Oslo.messaging in the Mitaka release') } # SSL Options - neutron_config { 'DEFAULT/use_ssl' : value => $use_ssl; } - if $use_ssl { - neutron_config { - 'DEFAULT/ssl_cert_file' : value => $cert_file; - 'DEFAULT/ssl_key_file' : value => $key_file; - } - if $ca_file { - neutron_config { 'DEFAULT/ssl_ca_file' : value => $ca_file; } - } else { - neutron_config { 'DEFAULT/ssl_ca_file' : ensure => absent; } - } - } else { - neutron_config { - 'DEFAULT/ssl_cert_file': ensure => absent; - 'DEFAULT/ssl_key_file': ensure => absent; - 'DEFAULT/ssl_ca_file': ensure => absent; - } + neutron_config { + 'DEFAULT/use_ssl': value => $use_ssl; + 'DEFAULT/ssl_cert_file': value => $cert_file; + 'DEFAULT/ssl_key_file': value => $key_file; + 'DEFAULT/ssl_ca_file': value => $ca_file; } - if $use_syslog { - neutron_config { - 'DEFAULT/use_syslog': value => true; - 'DEFAULT/syslog_log_facility': value => $log_facility; - } - } else { - neutron_config { - 'DEFAULT/use_syslog': value => false; - } - } } diff --git a/neutron/manifests/params.pp b/neutron/manifests/params.pp index 47266c216..7f7f9a146 100644 --- a/neutron/manifests/params.pp +++ b/neutron/manifests/params.pp @@ -78,6 +78,7 @@ $kernel_headers = "linux-headers-${::kernelrelease}" $sqlite_package_name = undef + $pymysql_package_name = undef } elsif($::osfamily == 'Debian') { @@ -159,6 +160,7 @@ $kernel_headers = "linux-headers-${::kernelrelease}" $sqlite_package_name = 'python-pysqlite2' + $pymysql_package_name = 'python-pymysql' } else { fail("Unsupported osfamily ${::osfamily}") diff --git a/neutron/manifests/plugins/cisco.pp b/neutron/manifests/plugins/cisco.pp index c8d9394e3..07c6a8847 100644 --- a/neutron/manifests/plugins/cisco.pp +++ b/neutron/manifests/plugins/cisco.pp @@ -42,7 +42,7 @@ # # [*nexus_plugin*] # (optional) The nexus plugin to use -# Defaults to undef. This will not set a nexus plugin to use +# Defaults to $::os_service_default. 
This will not set a nexus plugin to use # Can be set to neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.NexusPlugin # # [*vlan_start*] @@ -101,7 +101,7 @@ $keystone_auth_url = 'http://127.0.0.1:35357/v2.0/', $vswitch_plugin = 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', - $nexus_plugin = undef, + $nexus_plugin = $::os_service_default, # Plugin minimum configuration $vlan_start = '100', @@ -162,10 +162,8 @@ } - if $nexus_plugin { - neutron_plugin_cisco { - 'PLUGINS/nexus_plugin' : value => $nexus_plugin; - } + neutron_plugin_cisco { + 'PLUGINS/nexus_plugin' : value => $nexus_plugin; } if $vswitch_plugin { diff --git a/neutron/manifests/plugins/midonet.pp b/neutron/manifests/plugins/midonet.pp index cee2ccddb..fce255820 100644 --- a/neutron/manifests/plugins/midonet.pp +++ b/neutron/manifests/plugins/midonet.pp @@ -17,6 +17,7 @@ # service is desirable and defaulted) # [*keystone_password*] # Password from which midonet api will authenticate against Keystone +# Defaults to $::os_service_default # [*keystone_tenant*] # Tenant from which midonet api will authenticate against Keystone (services # tenant is desirable and defaulted) @@ -69,7 +70,7 @@ $midonet_api_ip = '127.0.0.1', $midonet_api_port = '8080', $keystone_username = 'neutron', - $keystone_password = undef, + $keystone_password = $::os_service_default, $keystone_tenant = 'services', $sync_db = false ) { diff --git a/neutron/manifests/plugins/ml2.pp b/neutron/manifests/plugins/ml2.pp index 2ddff7799..59bca943a 100644 --- a/neutron/manifests/plugins/ml2.pp +++ b/neutron/manifests/plugins/ml2.pp @@ -29,6 +29,11 @@ # local, flat, vlan, gre, vxlan # Defaults to ['local', 'flat', 'vlan', 'gre', 'vxlan']. # +# [*extension_drivers*] +# (optional) Ordered list of extension driver entrypoints to be loaded +# from the neutron.ml2.extension_drivers namespace. +# Defaults to $::os_service_default +# # [*tenant_network_types*] # (optional) Ordered list of network_types to allocate as tenant networks. # The value 'local' is only useful for single-box testing @@ -107,7 +112,7 @@ # [*physical_network_mtus*] # (optional) For L2 mechanism drivers, per-physical network MTU setting. # Should be an array with 'physnetX1:9000'. -# Defaults to undef. +# Defaults to $::os_service_default. 
# # [*path_mtu*] # (optional) For L3 mechanism drivers, determines the maximum permissible @@ -118,6 +123,7 @@ class neutron::plugins::ml2 ( $type_drivers = ['local', 'flat', 'vlan', 'gre', 'vxlan'], + $extension_drivers = $::os_service_default, $tenant_network_types = ['local', 'flat', 'vlan', 'gre', 'vxlan'], $mechanism_drivers = ['openvswitch', 'linuxbridge'], $flat_networks = '*', @@ -129,7 +135,7 @@ $package_ensure = 'present', $supported_pci_vendor_devs = ['15b3:1004', '8086:10ca'], $sriov_agent_required = false, - $physical_network_mtus = undef, + $physical_network_mtus = $::os_service_default, $path_mtu = 0, ) { @@ -198,10 +204,11 @@ 'ml2/tenant_network_types': value => join(any2array($tenant_network_types), ','); 'ml2/mechanism_drivers': value => join(any2array($mechanism_drivers), ','); 'ml2/path_mtu': value => $path_mtu; + 'ml2/extension_drivers': value => join(any2array($extension_drivers), ','); 'securitygroup/enable_security_group': value => $enable_security_group; } - if empty($physical_network_mtus) { + if is_service_default($physical_network_mtus) { neutron_plugin_ml2 { 'ml2/physical_network_mtus': ensure => absent; } diff --git a/neutron/manifests/plugins/ml2/cisco/nexus1000v.pp b/neutron/manifests/plugins/ml2/cisco/nexus1000v.pp index aeac6c59f..07d5f0f28 100644 --- a/neutron/manifests/plugins/ml2/cisco/nexus1000v.pp +++ b/neutron/manifests/plugins/ml2/cisco/nexus1000v.pp @@ -72,9 +72,9 @@ # enable_vif_type_n1kv = False # class neutron::plugins::ml2::cisco::nexus1000v ( - $n1kv_vsm_ip = undef, - $n1kv_vsm_username = undef, - $n1kv_vsm_password = undef, + $n1kv_vsm_ip = $::os_service_default, + $n1kv_vsm_username = $::os_service_default, + $n1kv_vsm_password = $::os_service_default, $default_policy_profile = 'default-pp', $default_vlan_network_profile = 'default-vlan-np', $default_vxlan_network_profile = 'default-vxlan-np', diff --git a/neutron/manifests/plugins/ml2/cisco/nexus_switch.pp b/neutron/manifests/plugins/ml2/cisco/nexus_switch.pp index e18fe5a3c..450dc3d6a 100644 --- a/neutron/manifests/plugins/ml2/cisco/nexus_switch.pp +++ b/neutron/manifests/plugins/ml2/cisco/nexus_switch.pp @@ -65,7 +65,7 @@ # in the transport network. (NB: If no nve_src_intf is defined then a # default setting of 0 (creates "loopback0") will be used.) # -# Defaults to undef. +# Defaults to $::os_service_default. # # [*physnet*] # (optional) Only valid if VXLAN overlay is configured. @@ -76,7 +76,7 @@ # a physical network are allocated dynamically and are unique per physical # network. These dynamic vlans may be reused across physical networks. # -# Defaults to undef. +# Defaults to $::os_service_default. 
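[Editor's note, illustrative only and not part of the patch: the ml2 hunk above adds extension_drivers (joined into ml2/extension_drivers) and moves physical_network_mtus to the service-default sentinel, now checked with is_service_default(). A declaration exercising both could look like this; the driver names and the MTU mapping are placeholders:]

class { '::neutron::plugins::ml2':
  mechanism_drivers     => ['openvswitch', 'l2population'],
  extension_drivers     => ['port_security', 'qos'],
  physical_network_mtus => ['physnet1:9000'],
}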
# define neutron::plugins::ml2::cisco::nexus_switch( $username, @@ -85,8 +85,8 @@ $ssh_port, $servers, $switchname = $title, - $nve_src_intf = undef, - $physnet = undef + $nve_src_intf = $::os_service_default, + $physnet = $::os_service_default ) { $section = "ML2_MECH_CISCO_NEXUS:${ip_address}" neutron_plugin_ml2 { diff --git a/neutron/manifests/plugins/ml2/cisco/ucsm.pp b/neutron/manifests/plugins/ml2/cisco/ucsm.pp index 0a3ed131b..4b2254f7e 100644 --- a/neutron/manifests/plugins/ml2/cisco/ucsm.pp +++ b/neutron/manifests/plugins/ml2/cisco/ucsm.pp @@ -23,7 +23,7 @@ # [*supported_pci_devs*] # (optional) SR-IOV and VM-FEX vendors supported by this plugin # xxxx:yyyy represents vendor_id:product_id -# Defaults to undef +# Defaults to $::os_service_default # Example: # [ '2222:3333', '4444:5555' ] # @@ -33,7 +33,7 @@ $ucsm_username, $ucsm_password, $ucsm_host_list, - $supported_pci_devs = undef, + $supported_pci_devs = $::os_service_default, ) { include ::neutron::plugins::ml2::cisco diff --git a/neutron/manifests/plugins/nuage.pp b/neutron/manifests/plugins/nuage.pp index 7fae1636c..9ed703a1a 100644 --- a/neutron/manifests/plugins/nuage.pp +++ b/neutron/manifests/plugins/nuage.pp @@ -49,19 +49,14 @@ include ::neutron::params + File['/etc/neutron/plugins/nuage/plugin.ini'] -> Neutron_plugin_nuage<||> Neutron_plugin_nuage<||> ~> Service['neutron-server'] + Neutron_plugin_nuage<||> ~> Exec<| title == 'neutron-db-sync' |> file { '/etc/neutron/plugins/nuage': - ensure => 'directory', + ensure => directory, } - ensure_resource('file', $::neutron::params::nuage_config_file, { - ensure => file, - owner => 'root', - group => 'neutron', - mode => '0640'} - ) - if $::osfamily == 'Debian' { file_line { '/etc/default/neutron-server:NEUTRON_PLUGIN_CONFIG': path => '/etc/default/neutron-server', @@ -72,12 +67,22 @@ } if $::osfamily == 'Redhat' { + File['/etc/neutron/plugin.ini'] ~> Exec<| title == 'neutron-db-sync' |> file { '/etc/neutron/plugin.ini': - ensure => link, - target => $::neutron::params::nuage_config_file, + ensure => link, + require => File['/etc/neutron/plugins/nuage/plugin.ini'], + target => $::neutron::params::nuage_config_file, } } + file { '/etc/neutron/plugins/nuage/plugin.ini': + ensure => file, + owner => 'root', + group => 'neutron', + require => File['/etc/neutron/plugins/nuage'], + mode => '0640' + } + $nuage_base_uri_base = '/nuage/api' neutron_plugin_nuage { 'RESTPROXY/default_net_partition_name': value => $nuage_net_partition_name; @@ -94,8 +99,4 @@ ($::neutron::core_plugin != 'nuage') { fail('Nuage plugin should be the core_plugin in neutron.conf') } - - Neutron_plugin_nuage<||> ~> Exec<| title == 'neutron-db-sync' |> - } - diff --git a/neutron/manifests/plugins/nvp.pp b/neutron/manifests/plugins/nvp.pp index 4186427bc..6db315526 100644 --- a/neutron/manifests/plugins/nvp.pp +++ b/neutron/manifests/plugins/nvp.pp @@ -19,7 +19,7 @@ # [*default_l3_gw_service_uuid*] # (Optional) UUID for the default l3 gateway service to use with this cluster. # To be specified if planning to use logical routers with external gateways. -# Defaults to None. +# Defaults to $::os_service_default. # # [*package_ensure*] # (optional) Ensure state for package. 
@@ -30,7 +30,7 @@ $nvp_controllers, $nvp_user, $nvp_password, - $default_l3_gw_service_uuid = undef, + $default_l3_gw_service_uuid = $::os_service_default, $package_ensure = 'present' ) { @@ -49,17 +49,12 @@ validate_array($nvp_controllers) neutron_plugin_nvp { - 'DEFAULT/default_tz_uuid': value => $default_tz_uuid; - 'DEFAULT/nvp_controllers': value => join($nvp_controllers, ','); - 'DEFAULT/nvp_user': value => $nvp_user; - 'DEFAULT/nvp_password': value => $nvp_password, secret => true; - 'nvp/metadata_mode': value => 'access_network'; - } - - if($default_l3_gw_service_uuid) { - neutron_plugin_nvp { - 'DEFAULT/default_l3_gw_service_uuid': value => $default_l3_gw_service_uuid; - } + 'DEFAULT/default_tz_uuid': value => $default_tz_uuid; + 'DEFAULT/nvp_controllers': value => join($nvp_controllers, ','); + 'DEFAULT/nvp_user': value => $nvp_user; + 'DEFAULT/nvp_password': value => $nvp_password, secret => true; + 'DEFAULT/default_l3_gw_service_uuid': value => $default_l3_gw_service_uuid; + 'nvp/metadata_mode': value => 'access_network'; } if $::neutron::core_plugin != 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2' { diff --git a/neutron/manifests/plugins/opencontrail.pp b/neutron/manifests/plugins/opencontrail.pp index e71ca9e14..47d949032 100644 --- a/neutron/manifests/plugins/opencontrail.pp +++ b/neutron/manifests/plugins/opencontrail.pp @@ -4,19 +4,19 @@ # # [*api_server_ip*] # IP address of the API Server -# Defaults to undef +# Defaults to $::os_service_default # # [*api_server_port*] # Port of the API Server. -# Defaults to undef +# Defaults to $::os_service_default # # [*multi_tenancy*] # Whether to enable multi-tenancy -# Default to undef +# Default to $::os_service_default # # [*contrail_extensions*] # Array of OpenContrail extensions to be supported -# Defaults to undef +# Defaults to $::os_service_default # Example: # # class {'neutron::plugins::opencontrail' : @@ -25,38 +25,38 @@ # # [*keystone_auth_url*] # Url of the keystone auth server -# Defaults to undef +# Defaults to $::os_service_default # # [*keystone_admin_user*] # Admin user name -# Defaults to undef +# Defaults to $::os_service_default # # [*keystone_admin_tenant_name*] # Admin_tenant_name -# Defaults to undef +# Defaults to $::os_service_default # # [*keystone_admin_password*] # Admin password -# Defaults to undef +# Defaults to $::os_service_default # # [*keystone_admin_token*] # Admin token -# Defaults to undef +# Defaults to $::os_service_default # # [*package_ensure*] # (optional) Ensure state for package. # Defaults to 'present'. 
# class neutron::plugins::opencontrail ( - $api_server_ip = undef, - $api_server_port = undef, - $multi_tenancy = undef, - $contrail_extensions = undef, - $keystone_auth_url = undef, - $keystone_admin_user = undef, - $keystone_admin_tenant_name = undef, - $keystone_admin_password = undef, - $keystone_admin_token = undef, + $api_server_ip = $::os_service_default, + $api_server_port = $::os_service_default, + $multi_tenancy = $::os_service_default, + $contrail_extensions = $::os_service_default, + $keystone_auth_url = $::os_service_default, + $keystone_admin_user = $::os_service_default, + $keystone_admin_tenant_name = $::os_service_default, + $keystone_admin_password = $::os_service_default, + $keystone_admin_token = $::os_service_default, $package_ensure = 'present', ) { diff --git a/neutron/manifests/plugins/plumgrid.pp b/neutron/manifests/plugins/plumgrid.pp index 07376ce8c..043a07221 100644 --- a/neutron/manifests/plugins/plumgrid.pp +++ b/neutron/manifests/plugins/plumgrid.pp @@ -12,9 +12,11 @@ # # [*username*] # PLUMgrid platform username +# Defaults to $::os_service_default # # [*password*] # PLUMgrid platform password +# Defaults to $::os_service_default # # [*servertimeout*] # Request timeout duration (seconds) to PLUMgrid paltform @@ -26,6 +28,7 @@ # # [*admin_password*] # Keystone admin password +# Defaults to $::os_service_default # # [*controller_priv_host*] # Controller private host IP @@ -45,6 +48,7 @@ # # [*metadata_proxy_shared_secret*] # Neutron metadata shared secret key +# Defaults to $::os_service_default # # [*package_ensure*] # (optional) Ensure state for package. @@ -53,16 +57,16 @@ class neutron::plugins::plumgrid ( $director_server = '127.0.0.1', $director_server_port = '443', - $username = undef, - $password = undef, + $username = $::os_service_default, + $password = $::os_service_default, $servertimeout = '99', $connection = 'http://127.0.0.1:35357/v2.0', - $admin_password = undef, + $admin_password = $::os_service_default, $controller_priv_host = '127.0.0.1', $auth_protocol = 'http', $nova_metadata_ip = '127.0.0.1', $nova_metadata_port = '8775', - $metadata_proxy_shared_secret = undef, + $metadata_proxy_shared_secret = $::os_service_default, $package_ensure = 'present' ) { diff --git a/neutron/manifests/quota.pp b/neutron/manifests/quota.pp index 531214fa9..d46868f65 100644 --- a/neutron/manifests/quota.pp +++ b/neutron/manifests/quota.pp @@ -6,49 +6,49 @@ # # [*default_quota*] # (optional) Default number of resources allowed per tenant, -# minus for unlimited. Defaults to -1. +# minus for unlimited. Defaults to $::os_service_default. # # [*quota_network*] # (optional) Number of networks allowed per tenant, and minus means unlimited. -# Defaults to 10. +# Defaults to $::os_service_default. # # [*quota_subnet*] # (optional) Number of subnets allowed per tenant, and minus means unlimited. -# Defaults to 10. +# Defaults to $::os_service_default. # # [*quota_port*] # (optional) Number of ports allowed per tenant, and minus means unlimited. -# Defaults to 50. +# Defaults to $::os_service_default. # # [*quota_router*] # (optional) Number of routers allowed per tenant, and minus means unlimited. -# Requires L3 extension. Defaults to 10. +# Requires L3 extension. Defaults to $::os_service_default. # # [*quota_floatingip*] # (optional) Number of floating IPs allowed per tenant, -# and minus means unlimited. Requires L3 extension. Defaults to 50. +# and minus means unlimited. Requires L3 extension. Defaults to $::os_service_default. 
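[Editor's note, illustrative only: in the neutron::quota hunk that follows, nearly every limit moves to $::os_service_default, so a manifest only needs to state deliberate overrides. Sketch with placeholder numbers, not recommended values:]

class { '::neutron::quota':
  quota_network => 100,
  quota_subnet  => 100,
  quota_port    => 500,
}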
# # [*quota_security_group*] # (optional) Number of security groups allowed per tenant, # and minus means unlimited. Requires securitygroup extension. -# Defaults to 10. +# Defaults to $::os_service_default. # # [*quota_security_group_rule*] # (optional) Number of security rules allowed per tenant, # and minus means unlimited. Requires securitygroup extension. -# Defaults to 100. +# Defaults to $::os_service_default. # # [*quota_driver*] # (optional) Default driver to use for quota checks. -# Defaults to 'neutron.db.quota_db.DbQuotaDriver'. +# Defaults to $::os_service_default. # # [*quota_firewall*] # (optional) Number of firewalls allowed per tenant, -1 for unlimited. -# Defaults to '1'. +# Defaults to $::os_service_default. # # [*quota_firewall_policy*] # (optional) Number of firewalls policies allowed per tenant, -1 for unlimited. -# Defaults to '1'. +# Defaults to $::os_service_default. # # [*quota_firewall_rule*] # (optional) Number of firewalls rules allowed per tenant, -1 for unlimited. @@ -57,16 +57,16 @@ # [*quota_health_monitor*] # (optional) Number of health monitors allowed per tenant. # A negative value means unlimited. -# Defaults to '-1'. +# Defaults to $::os_service_default. # # [*quota_items*] # (optional) Resource name(s) that are supported in quota features. -# Defaults to 'network,subnet,port'. +# Defaults to $::os_service_default. # # [*quota_member*] # (optional) Number of pool members allowed per tenant. # A negative value means unlimited -# Defaults to '-1'. +# Defaults to $::os_service_default. # # [*quota_network_gateway*] # (optional) Number of network gateways allowed per tenant, -1 for unlimited. @@ -79,35 +79,35 @@ # [*quota_pool*] # (optional) Number of pools allowed per tenant. # A negative value means unlimited. -# Defaults to '10'. +# Defaults to $::os_service_default. # # [*quota_vip*] # (optional) Number of vips allowed per tenant. # A negative value means unlimited. -# Defaults to '10'. +# Defaults to $::os_service_default. # class neutron::quota ( - $default_quota = -1, - $quota_network = 10, - $quota_subnet = 10, - $quota_port = 50, + $default_quota = $::os_service_default, + $quota_network = $::os_service_default, + $quota_subnet = $::os_service_default, + $quota_port = $::os_service_default, # l3 extension - $quota_router = 10, - $quota_floatingip = 50, + $quota_router = $::os_service_default, + $quota_floatingip = $::os_service_default, # securitygroup extension - $quota_security_group = 10, - $quota_security_group_rule = 100, - $quota_driver = 'neutron.db.quota_db.DbQuotaDriver', - $quota_firewall = 1, - $quota_firewall_policy = 1, + $quota_security_group = $::os_service_default, + $quota_security_group_rule = $::os_service_default, + $quota_driver = $::os_service_default, + $quota_firewall = $::os_service_default, + $quota_firewall_policy = $::os_service_default, $quota_firewall_rule = -1, - $quota_health_monitor = -1, - $quota_items = 'network,subnet,port', - $quota_member = -1, + $quota_health_monitor = $::os_service_default, + $quota_items = $::os_service_default, + $quota_member = $::os_service_default, $quota_network_gateway = 5, $quota_packet_filter = 100, - $quota_pool = 10, - $quota_vip = 10 + $quota_pool = $::os_service_default, + $quota_vip = $::os_service_default ) { neutron_config { diff --git a/neutron/manifests/server.pp b/neutron/manifests/server.pp index ad13f8e4d..7ec1bd820 100644 --- a/neutron/manifests/server.pp +++ b/neutron/manifests/server.pp @@ -55,7 +55,7 @@ # [*auth_region*] # (optional) The authentication region. 
Note this value is case-sensitive and # must match the endpoint region defined in Keystone. -# Defaults to undef +# Defaults to $::os_service_default # # [*auth_tenant*] # (optional) The tenant of the auth user @@ -151,7 +151,7 @@ # report_interval, to be sure the agent is down for good. # agent_down_time is a config for neutron-server, set by class neutron::server # report_interval is a config for neutron agents, set by class neutron -# Defaults to: 75 +# Defaults to: $::os_service_default # # [*state_path*] # (optional) Deprecated. Use state_path parameter on base neutron class instead. @@ -169,12 +169,12 @@ # (optional) Setting the "router_distributed" flag to "True" will default to the creation # of distributed tenant routers. # Also can be the type of the router on the create request (admin-only attribute). -# Defaults to false +# Defaults to $::os_service_default # # [*allow_automatic_l3agent_failover*] # (optional) Allow automatic rescheduling of routers from dead L3 agents with # admin_state_up set to True to alive agents. -# Defaults to false +# Defaults to $::os_service_default # # [*l3_ha*] # (optional) Enable high availability for virtual routers. @@ -190,19 +190,23 @@ # # [*l3_ha_net_cidr*] # (optional) CIDR of the administrative network if HA mode is enabled. -# Defaults to '169.254.192.0/18' +# Defaults to $::os_service_default # # [*report_interval*] # (optional) Deprecated, does nothing. # Defaults to 'undef'. # +# [*qos_notification_drivers*] +# (optional) Drivers list to use to send the update notification +# Defaults to $::os_service_default. +# class neutron::server ( $package_ensure = 'present', $enabled = true, $manage_service = true, $service_name = $::neutron::params::server_service, $auth_password = false, - $auth_region = undef, + $auth_region = $::os_service_default, $auth_type = 'keystone', $auth_tenant = 'services', $auth_user = 'neutron', @@ -218,14 +222,15 @@ $sync_db = false, $api_workers = $::processorcount, $rpc_workers = $::processorcount, - $agent_down_time = '75', + $agent_down_time = $::os_service_default, $router_scheduler_driver = 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler', - $router_distributed = false, - $allow_automatic_l3agent_failover = false, + $router_distributed = $::os_service_default, + $allow_automatic_l3agent_failover = $::os_service_default, $l3_ha = false, $max_l3_agents_per_router = 3, $min_l3_agents_per_router = 2, - $l3_ha_net_cidr = '169.254.192.0/18', + $l3_ha_net_cidr = $::os_service_default, + $qos_notification_drivers = $::os_service_default, # DEPRECATED PARAMETERS $auth_host = 'localhost', $auth_port = '35357', @@ -301,6 +306,8 @@ } } + neutron_config { 'qos/notification_drivers': value => join(any2array($qos_notification_drivers), ',') } + if ($::neutron::params::server_package) { Package['neutron-server'] -> Neutron_api_config<||> Package['neutron-server'] -> Neutron_config<||> @@ -435,10 +442,8 @@ 'filter:authtoken/auth_uri': value => $auth_uri_real; } - if $auth_region { - neutron_config { - 'keystone_authtoken/auth_region': value => $auth_region; - } + neutron_config { + 'keystone_authtoken/auth_region': value => $auth_region; } if $identity_uri { diff --git a/neutron/manifests/server/notifications.pp b/neutron/manifests/server/notifications.pp index cb90a4dbe..e26d9703d 100644 --- a/neutron/manifests/server/notifications.pp +++ b/neutron/manifests/server/notifications.pp @@ -28,7 +28,7 @@ # [*send_events_interval*] # (optional) Number of seconds between sending events to nova if there are # any events to 
send. -# Defaults to '2' +# Defaults to $::os_service_default # # [*nova_url*] # (optional) URL for connection to nova (Only supports one nova region @@ -76,7 +76,7 @@ # [*region_name*] # (optional) Name of nova region to use. Useful if keystone manages more than # one region. -# Defaults to undef +# Defaults to $::os_service_default # # === Deprecated Parameters # @@ -109,31 +109,31 @@ # Deprecated. region_name parameter should be used instead # Name of nova region to use. Useful if keystone manages more than # one region. -# Defaults to undef +# Defaults to $::os_service_default # class neutron::server::notifications ( $notify_nova_on_port_status_changes = true, $notify_nova_on_port_data_changes = true, - $send_events_interval = '2', + $send_events_interval = $::os_service_default, $nova_url = 'http://127.0.0.1:8774/v2', $auth_plugin = 'password', $username = 'nova', $password = false, - $tenant_id = undef, + $tenant_id = $::os_service_default, $tenant_name = 'services', $project_domain_id = 'default', $project_name = 'services', $user_domain_id = 'default', $auth_url = 'http://127.0.0.1:35357', - $region_name = undef, + $region_name = $::os_service_default, # DEPRECATED PARAMETERS $nova_admin_auth_url = 'http://127.0.0.1:35357/v2.0', $nova_admin_username = 'nova', $nova_admin_tenant_name = 'services', - $nova_admin_tenant_id = undef, + $nova_admin_tenant_id = $::os_service_default, $nova_admin_password = false, - $nova_region_name = undef, + $nova_region_name = $::os_service_default, ) { # Depend on the specified keystone_user resource, if it exists. @@ -143,25 +143,28 @@ fail('nova_admin_password or password must be set.') } - if $nova_admin_password and !($nova_admin_tenant_id or $nova_admin_tenant_name) { + if $nova_admin_password and is_service_default($nova_admin_tenant_id) and (! $nova_admin_tenant_name) { fail('You must provide either nova_admin_tenant_name or nova_admin_tenant_id.') } - if $password and !($tenant_id or $tenant_name) { + if $password and is_service_default($tenant_id) and (! $tenant_name) { fail('You must provide either tenant_name or tenant_id.') } if $nova_admin_password { - warning('nova_admin-* parameters are deprecated and will be removed in a future release') + warning('nova_admin-* and nova_region_name parameters are deprecated and will be removed in a future release') neutron_config { 'DEFAULT/nova_admin_auth_url': value => $nova_admin_auth_url; 'DEFAULT/nova_admin_username': value => $nova_admin_username; 'DEFAULT/nova_admin_password': value => $nova_admin_password, secret => true; + 'DEFAULT/nova_region_name': value => $nova_region_name; } - if $nova_admin_tenant_id { - neutron_config { - 'DEFAULT/nova_admin_tenant_id': value => $nova_admin_tenant_id; + if ! is_service_default ($nova_admin_tenant_id) { + if $nova_admin_tenant_id { + neutron_config { + 'DEFAULT/nova_admin_tenant_id': value => $nova_admin_tenant_id; + } } } else { neutron_config { @@ -179,11 +182,17 @@ 'nova/project_domain_id': value => $project_domain_id; 'nova/project_name': value => $project_name; 'nova/user_domain_id': value => $user_domain_id; - 'nova/tenant_name': value => $tenant_name; + 'nova/region_name': value => $region_name; } - if $tenant_id { + if ! 
is_service_default ($tenant_id) { + if $tenant_id { + neutron_config { + 'nova/tenant_id': value => $tenant_id; + } + } + } else { neutron_config { - 'nova/tenant_id': value => $tenant_id; + 'nova/tenant_name': value => $tenant_name; } } } @@ -194,26 +203,4 @@ 'DEFAULT/send_events_interval': value => $send_events_interval; 'DEFAULT/nova_url': value => $nova_url; } - - if $nova_region_name { - warning('nova_region_name parameter is deprecated and will be removed in a future release') - neutron_config { - 'DEFAULT/nova_region_name': value => $nova_region_name; - } - } else { - neutron_config { - 'DEFAULT/nova_region_name': ensure => absent; - } - } - - if $region_name { - neutron_config { - 'nova/region_name': value => $region_name; - } - } else { - neutron_config { - 'nova/region_name': ensure => absent; - } - } - } diff --git a/neutron/manifests/services/fwaas.pp b/neutron/manifests/services/fwaas.pp index acd933b8c..c87b9c1f7 100644 --- a/neutron/manifests/services/fwaas.pp +++ b/neutron/manifests/services/fwaas.pp @@ -23,12 +23,12 @@ # === Parameters: # # [*enabled*] -# (required) Whether or not to enable the FWaaS neutron plugin Service -# true/false +# (optional) Whether or not to enable the FWaaS neutron plugin Service +# Defaults to $::os_service_default # # [*driver*] # (optional) FWaaS Driver to use -# Defaults to 'neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver' +# Defaults to $::os_service_default # # [*vpnaas_agent_package*] # (optional) Use VPNaaS agent package instead of L3 agent package on debian platforms @@ -38,13 +38,16 @@ # class neutron::services::fwaas ( - $enabled = true, - $driver = 'neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver', + $enabled = $::os_service_default, + $driver = $::os_service_default, $vpnaas_agent_package = false ) { include ::neutron::params + # FWaaS needs to be enabled before starting Neutron L3 agent + Neutron_fwaas_service_config<||> ~> Service['neutron-l3'] + if ($::osfamily == 'Debian') { # Debian platforms if $vpnaas_agent_package { diff --git a/neutron/metadata.json b/neutron/metadata.json index 55bf1b074..ffac354ad 100644 --- a/neutron/metadata.json +++ b/neutron/metadata.json @@ -1,6 +1,6 @@ { "name": "openstack-neutron", - "version": "6.1.0", + "version": "7.0.0", "author": "OpenStack Contributors", "summary": "Puppet module for OpenStack Neutron", "license": "Apache-2.0", @@ -32,11 +32,11 @@ "description": "Installs and configures OpenStack Neutron (Networking).", "dependencies": [ { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, - { "name": "openstack/nova", "version_requirement": ">=6.0.0 <7.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=7.0.0 <8.0.0" }, + { "name": "openstack/nova", "version_requirement": ">=7.0.0 <8.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { "name": "openstack/vswitch", "version_requirement": ">=2.0.0 <3.0.0" }, + { "name": "openstack/vswitch", "version_requirement": ">=3.0.0 <4.0.0" }, { "name": "duritong/sysctl", "version_requirement": ">=0.0.1 <1.0.0" }, - { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=7.0.0 <8.0.0" } ] } diff --git a/neutron/spec/acceptance/basic_neutron_spec.rb b/neutron/spec/acceptance/basic_neutron_spec.rb index cdec97e72..2b17e4cd5 100644 --- 
a/neutron/spec/acceptance/basic_neutron_spec.rb +++ b/neutron/spec/acceptance/basic_neutron_spec.rb @@ -49,7 +49,7 @@ class { '::neutron::keystone::auth': password => 'a_big_secret', } class { '::neutron::server': - database_connection => 'mysql://neutron:a_big_secret@127.0.0.1/neutron?charset=utf8', + database_connection => 'mysql+pymysql://neutron:a_big_secret@127.0.0.1/neutron?charset=utf8', auth_password => 'a_big_secret', identity_uri => 'http://127.0.0.1:35357/', sync_db => true, diff --git a/neutron/spec/classes/neutron_agents_dhcp_spec.rb b/neutron/spec/classes/neutron_agents_dhcp_spec.rb index 3c38c4615..d16ca803f 100644 --- a/neutron/spec/classes/neutron_agents_dhcp_spec.rb +++ b/neutron/spec/classes/neutron_agents_dhcp_spec.rb @@ -11,24 +11,19 @@ end let :default_params do - { :package_ensure => 'present', - :enabled => true, - :debug => false, - :state_path => '/var/lib/neutron', - :resync_interval => 30, - :interface_driver => 'neutron.agent.linux.interface.OVSInterfaceDriver', - :dhcp_domain => 'openstacklocal', - :dhcp_driver => 'neutron.agent.linux.dhcp.Dnsmasq', - :root_helper => 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf', - :use_namespaces => nil, - :dnsmasq_config_file => nil, - :dhcp_delete_namespaces => true, + { :package_ensure => 'present', + :enabled => true, + :state_path => '/var/lib/neutron', + :resync_interval => 30, + :interface_driver => 'neutron.agent.linux.interface.OVSInterfaceDriver', + :dhcp_driver => 'neutron.agent.linux.dhcp.Dnsmasq', + :root_helper => 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf', :enable_isolated_metadata => false, :enable_metadata_network => false, - :dhcp_broadcast_reply => false } + :dhcp_delete_namespaces => true } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -44,17 +39,17 @@ it_configures 'dnsmasq dhcp_driver' it 'configures dhcp_agent.ini' do - is_expected.to contain_neutron_dhcp_agent_config('DEFAULT/debug').with_value(p[:debug]); + is_expected.to contain_neutron_dhcp_agent_config('DEFAULT/debug').with_value(''); is_expected.to contain_neutron_dhcp_agent_config('DEFAULT/state_path').with_value(p[:state_path]); is_expected.to contain_neutron_dhcp_agent_config('DEFAULT/resync_interval').with_value(p[:resync_interval]); is_expected.to contain_neutron_dhcp_agent_config('DEFAULT/interface_driver').with_value(p[:interface_driver]); - is_expected.to contain_neutron_dhcp_agent_config('DEFAULT/dhcp_domain').with_value(p[:dhcp_domain]); + is_expected.to contain_neutron_dhcp_agent_config('DEFAULT/dhcp_domain').with_value(''); is_expected.to contain_neutron_dhcp_agent_config('DEFAULT/dhcp_driver').with_value(p[:dhcp_driver]); is_expected.to contain_neutron_dhcp_agent_config('DEFAULT/root_helper').with_value(p[:root_helper]); is_expected.to contain_neutron_dhcp_agent_config('DEFAULT/dhcp_delete_namespaces').with_value(p[:dhcp_delete_namespaces]); is_expected.to contain_neutron_dhcp_agent_config('DEFAULT/enable_isolated_metadata').with_value(p[:enable_isolated_metadata]); is_expected.to contain_neutron_dhcp_agent_config('DEFAULT/enable_metadata_network').with_value(p[:enable_metadata_network]); - is_expected.to contain_neutron_dhcp_agent_config('DEFAULT/dhcp_broadcast_reply').with_value(p[:dhcp_broadcast_reply]); + is_expected.to contain_neutron_dhcp_agent_config('DEFAULT/dhcp_broadcast_reply').with_value(''); end it 'installs neutron dhcp agent package' do @@ -161,7 +156,9 @@ context 'on Debian platforms' do let :facts do - default_facts.merge({ 
:osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end let :platform_params do @@ -180,7 +177,10 @@ context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end let :platform_params do diff --git a/neutron/spec/classes/neutron_agents_l3_spec.rb b/neutron/spec/classes/neutron_agents_l3_spec.rb index d939deec0..741284734 100644 --- a/neutron/spec/classes/neutron_agents_l3_spec.rb +++ b/neutron/spec/classes/neutron_agents_l3_spec.rb @@ -11,26 +11,15 @@ :enabled => true, :debug => false, :external_network_bridge => 'br-ex', - :use_namespaces => nil, :interface_driver => 'neutron.agent.linux.interface.OVSInterfaceDriver', - :router_id => nil, - :gateway_external_network_id => nil, - :handle_internal_only_routers => true, - :metadata_port => '9697', - :send_arp_for_ha => '3', - :periodic_interval => '40', - :periodic_fuzzy_delay => '5', - :enable_metadata_proxy => true, - :network_device_mtu => nil, :router_delete_namespaces => true, :ha_enabled => false, :ha_vrrp_auth_type => 'PASS', - :ha_vrrp_auth_password => nil, :ha_vrrp_advert_int => '3', :agent_mode => 'legacy' } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -51,15 +40,15 @@ is_expected.to contain_neutron_l3_agent_config('DEFAULT/debug').with_value(p[:debug]) is_expected.to contain_neutron_l3_agent_config('DEFAULT/external_network_bridge').with_value(p[:external_network_bridge]) is_expected.to contain_neutron_l3_agent_config('DEFAULT/interface_driver').with_value(p[:interface_driver]) - is_expected.to contain_neutron_l3_agent_config('DEFAULT/router_id').with_value(p[:router_id]) - is_expected.to contain_neutron_l3_agent_config('DEFAULT/gateway_external_network_id').with_value(p[:gateway_external_network_id]) - is_expected.to contain_neutron_l3_agent_config('DEFAULT/handle_internal_only_routers').with_value(p[:handle_internal_only_routers]) - is_expected.to contain_neutron_l3_agent_config('DEFAULT/metadata_port').with_value(p[:metadata_port]) - is_expected.to contain_neutron_l3_agent_config('DEFAULT/send_arp_for_ha').with_value(p[:send_arp_for_ha]) - is_expected.to contain_neutron_l3_agent_config('DEFAULT/periodic_interval').with_value(p[:periodic_interval]) - is_expected.to contain_neutron_l3_agent_config('DEFAULT/periodic_fuzzy_delay').with_value(p[:periodic_fuzzy_delay]) - is_expected.to contain_neutron_l3_agent_config('DEFAULT/enable_metadata_proxy').with_value(p[:enable_metadata_proxy]) - is_expected.to contain_neutron_l3_agent_config('DEFAULT/network_device_mtu').with_ensure('absent') + is_expected.to contain_neutron_l3_agent_config('DEFAULT/router_id').with_value('') + is_expected.to contain_neutron_l3_agent_config('DEFAULT/gateway_external_network_id').with_value('') + is_expected.to contain_neutron_l3_agent_config('DEFAULT/handle_internal_only_routers').with_value('') + is_expected.to contain_neutron_l3_agent_config('DEFAULT/metadata_port').with_value('') + is_expected.to contain_neutron_l3_agent_config('DEFAULT/send_arp_for_ha').with_value('') + is_expected.to contain_neutron_l3_agent_config('DEFAULT/periodic_interval').with_value('') + is_expected.to contain_neutron_l3_agent_config('DEFAULT/periodic_fuzzy_delay').with_value('') + is_expected.to contain_neutron_l3_agent_config('DEFAULT/enable_metadata_proxy').with_value('') + is_expected.to 
contain_neutron_l3_agent_config('DEFAULT/network_device_mtu').with_value('') is_expected.to contain_neutron_l3_agent_config('DEFAULT/router_delete_namespaces').with_value(p[:router_delete_namespaces]) end @@ -141,7 +130,9 @@ context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end let :platform_params do @@ -158,7 +149,10 @@ context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end let :platform_params do diff --git a/neutron/spec/classes/neutron_agents_lbaas_spec.rb b/neutron/spec/classes/neutron_agents_lbaas_spec.rb index e7ec0c630..bfc4cc8be 100644 --- a/neutron/spec/classes/neutron_agents_lbaas_spec.rb +++ b/neutron/spec/classes/neutron_agents_lbaas_spec.rb @@ -13,7 +13,6 @@ let :default_params do { :package_ensure => 'present', :enabled => true, - :debug => false, :interface_driver => 'neutron.agent.linux.interface.OVSInterfaceDriver', :device_driver => 'neutron_lbaas.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver', :use_namespaces => nil, @@ -21,7 +20,7 @@ } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -39,7 +38,7 @@ it_configures 'haproxy lbaas_driver without package' it 'configures lbaas_agent.ini' do - is_expected.to contain_neutron_lbaas_agent_config('DEFAULT/debug').with_value(p[:debug]); + is_expected.to contain_neutron_lbaas_agent_config('DEFAULT/debug').with_value(''); is_expected.to contain_neutron_lbaas_agent_config('DEFAULT/interface_driver').with_value(p[:interface_driver]); is_expected.to contain_neutron_lbaas_agent_config('DEFAULT/device_driver').with_value(p[:device_driver]); is_expected.to contain_neutron_lbaas_agent_config('haproxy/user_group').with_value(platform_params[:nobody_user_group]); @@ -114,11 +113,11 @@ class { 'neutron': rabbit_password => 'passw0rd' }" context 'on Debian platforms' do let :facts do - default_facts.merge( + @default_facts.merge(test_facts.merge( { :osfamily => 'Debian', :concat_basedir => '/dne' } - ) + )) end let :platform_params do @@ -133,11 +132,12 @@ class { 'neutron': rabbit_password => 'passw0rd' }" context 'on RedHat platforms' do let :facts do - default_facts.merge( - { :osfamily => 'RedHat', - :concat_basedir => '/dne' - } - ) + @default_facts.merge(test_facts.merge( + { :osfamily => 'RedHat', + :operatingsystemrelease => '7', + :concat_basedir => '/dne' + } + )) end let :platform_params do diff --git a/neutron/spec/classes/neutron_agents_metadata_spec.rb b/neutron/spec/classes/neutron_agents_metadata_spec.rb index 9ddb044ed..4cbea6957 100644 --- a/neutron/spec/classes/neutron_agents_metadata_spec.rb +++ b/neutron/spec/classes/neutron_agents_metadata_spec.rb @@ -16,15 +16,11 @@ :auth_tenant => 'services', :auth_user => 'neutron', :auth_password => 'password', - :metadata_ip => '127.0.0.1', - :metadata_port => '8775', - :metadata_protocol => 'http', - :metadata_backlog => '4096', - :shared_secret => 'metadata-secret' + :shared_secret => 'metadata-secret', } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default', :processorcount => '2' @@ -59,19 +55,19 @@ is_expected.to contain_neutron_metadata_agent_config('DEFAULT/debug').with(:value => params[:debug]) is_expected.to 
contain_neutron_metadata_agent_config('DEFAULT/auth_url').with(:value => params[:auth_url]) is_expected.to contain_neutron_metadata_agent_config('DEFAULT/auth_insecure').with(:value => params[:auth_insecure]) - is_expected.to contain_neutron_metadata_agent_config('DEFAULT/auth_ca_cert').with_ensure('absent') + is_expected.to contain_neutron_metadata_agent_config('DEFAULT/auth_ca_cert').with(:value => '') is_expected.to contain_neutron_metadata_agent_config('DEFAULT/auth_region').with(:value => params[:auth_region]) is_expected.to contain_neutron_metadata_agent_config('DEFAULT/admin_tenant_name').with(:value => params[:auth_tenant]) is_expected.to contain_neutron_metadata_agent_config('DEFAULT/admin_user').with(:value => params[:auth_user]) is_expected.to contain_neutron_metadata_agent_config('DEFAULT/admin_password').with(:value => params[:auth_password]) is_expected.to contain_neutron_metadata_agent_config('DEFAULT/admin_password').with_secret( true ) - is_expected.to contain_neutron_metadata_agent_config('DEFAULT/nova_metadata_ip').with(:value => params[:metadata_ip]) - is_expected.to contain_neutron_metadata_agent_config('DEFAULT/nova_metadata_port').with(:value => params[:metadata_port]) - is_expected.to contain_neutron_metadata_agent_config('DEFAULT/nova_metadata_protocol').with(:value => params[:metadata_protocol]) + is_expected.to contain_neutron_metadata_agent_config('DEFAULT/nova_metadata_ip').with(:value => '') + is_expected.to contain_neutron_metadata_agent_config('DEFAULT/nova_metadata_port').with(:value => '') + is_expected.to contain_neutron_metadata_agent_config('DEFAULT/nova_metadata_protocol').with(:value => '') is_expected.to contain_neutron_metadata_agent_config('DEFAULT/metadata_workers').with(:value => facts[:processorcount]) - is_expected.to contain_neutron_metadata_agent_config('DEFAULT/metadata_backlog').with(:value => params[:metadata_backlog]) + is_expected.to contain_neutron_metadata_agent_config('DEFAULT/metadata_backlog').with(:value => '') is_expected.to contain_neutron_metadata_agent_config('DEFAULT/metadata_proxy_shared_secret').with(:value => params[:shared_secret]) - is_expected.to contain_neutron_metadata_agent_config('DEFAULT/cache_url').with(:value => 'memory://?default_ttl=5') + is_expected.to contain_neutron_metadata_agent_config('DEFAULT/cache_url').with(:ensure => 'absent') end end @@ -92,9 +88,9 @@ context 'on Debian platforms' do let :facts do - default_facts.merge( + @default_facts.merge(test_facts.merge( { :osfamily => 'Debian' } - ) + )) end let :platform_params do @@ -119,9 +115,10 @@ context 'on Red Hat platforms' do let :facts do - default_facts.merge( - { :osfamily => 'RedHat' } - ) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end let :platform_params do diff --git a/neutron/spec/classes/neutron_agents_metering_spec.rb b/neutron/spec/classes/neutron_agents_metering_spec.rb index da2fbace7..0bd0a8fa9 100644 --- a/neutron/spec/classes/neutron_agents_metering_spec.rb +++ b/neutron/spec/classes/neutron_agents_metering_spec.rb @@ -39,12 +39,10 @@ :interface_driver => 'neutron.agent.linux.interface.OVSInterfaceDriver', :driver => 'neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver', :use_namespaces => nil, - :measure_interval => '30', - :report_interval => '300' } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -61,8 +59,8 @@ is_expected.to 
contain_neutron_metering_agent_config('DEFAULT/debug').with_value(p[:debug]); is_expected.to contain_neutron_metering_agent_config('DEFAULT/interface_driver').with_value(p[:interface_driver]); is_expected.to contain_neutron_metering_agent_config('DEFAULT/driver').with_value(p[:driver]); - is_expected.to contain_neutron_metering_agent_config('DEFAULT/measure_interval').with_value(p[:measure_interval]); - is_expected.to contain_neutron_metering_agent_config('DEFAULT/report_interval').with_value(p[:report_interval]); + is_expected.to contain_neutron_metering_agent_config('DEFAULT/measure_interval').with_value(''); + is_expected.to contain_neutron_metering_agent_config('DEFAULT/report_interval').with_value(''); end it 'installs neutron metering agent package' do @@ -117,7 +115,9 @@ context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end let :platform_params do @@ -133,7 +133,10 @@ context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end let :platform_params do diff --git a/neutron/spec/classes/neutron_agents_ml2_linuxbridge_spec.rb b/neutron/spec/classes/neutron_agents_ml2_linuxbridge_spec.rb index 6f36adfa5..71e80c9d6 100644 --- a/neutron/spec/classes/neutron_agents_ml2_linuxbridge_spec.rb +++ b/neutron/spec/classes/neutron_agents_ml2_linuxbridge_spec.rb @@ -7,21 +7,16 @@ end let :default_params do - { :package_ensure => 'present', - :enabled => true, - :manage_service => true, - :tunnel_types => [], - :local_ip => false, - :vxlan_group => '224.0.0.1', - :vxlan_ttl => false, - :vxlan_tos => false, - :polling_interval => 2, - :l2_population => false, + { :package_ensure => 'present', + :enabled => true, + :manage_service => true, + :tunnel_types => [], + :local_ip => false, :physical_interface_mappings => [], - :firewall_driver => 'neutron.agent.linux.iptables_firewall.IptablesFirewallDriver' } + :firewall_driver => 'neutron.agent.linux.iptables_firewall.IptablesFirewallDriver' } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -37,7 +32,7 @@ it { is_expected.to contain_class('neutron::params') } it 'configures ml2_conf.ini' do - is_expected.to contain_neutron_agent_linuxbridge('agent/polling_interval').with_value(default_params[:polling_interval]) + is_expected.to contain_neutron_agent_linuxbridge('agent/polling_interval').with_value('') is_expected.to contain_neutron_agent_linuxbridge('linux_bridge/physical_interface_mappings').with_value(default_params[:physical_interface_mappings].join(',')) is_expected.to contain_neutron_agent_linuxbridge('securitygroup/firewall_driver').with_value(default_params[:firewall_driver]) end @@ -80,8 +75,8 @@ it 'does not configre VXLAN tunneling' do is_expected.to contain_neutron_agent_linuxbridge('vxlan/enable_vxlan').with_value(false) is_expected.to contain_neutron_agent_linuxbridge('vxlan/local_ip').with_ensure('absent') - is_expected.to contain_neutron_agent_linuxbridge('vxlan/vxlan_group').with_ensure('absent') - is_expected.to contain_neutron_agent_linuxbridge('vxlan/l2_population').with_ensure('absent') + is_expected.not_to contain_neutron_agent_linuxbridge('vxlan/vxlan_group') + is_expected.not_to contain_neutron_agent_linuxbridge('vxlan/l2_population') end end @@ -97,10 +92,10 @@ it 'configures ml2_conf.ini' do 
is_expected.to contain_neutron_agent_linuxbridge('vxlan/enable_vxlan').with_value(true) is_expected.to contain_neutron_agent_linuxbridge('vxlan/local_ip').with_value(params[:local_ip]) - is_expected.to contain_neutron_agent_linuxbridge('vxlan/vxlan_group').with_value(default_params[:vxlan_group]) - is_expected.to contain_neutron_agent_linuxbridge('vxlan/vxlan_ttl').with_ensure('absent') - is_expected.to contain_neutron_agent_linuxbridge('vxlan/vxlan_tos').with_ensure('absent') - is_expected.to contain_neutron_agent_linuxbridge('vxlan/l2_population').with_value(default_params[:l2_population]) + is_expected.to contain_neutron_agent_linuxbridge('vxlan/vxlan_group').with_value('') + is_expected.to contain_neutron_agent_linuxbridge('vxlan/vxlan_ttl').with_value('') + is_expected.to contain_neutron_agent_linuxbridge('vxlan/vxlan_tos').with_value('') + is_expected.to contain_neutron_agent_linuxbridge('vxlan/l2_population').with_value('') end end @@ -149,7 +144,9 @@ context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end let :platform_params do @@ -162,7 +159,10 @@ context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end let :platform_params do diff --git a/neutron/spec/classes/neutron_agents_ml2_ovs_spec.rb b/neutron/spec/classes/neutron_agents_ml2_ovs_spec.rb index c1591eaed..64c1c0d49 100644 --- a/neutron/spec/classes/neutron_agents_ml2_ovs_spec.rb +++ b/neutron/spec/classes/neutron_agents_ml2_ovs_spec.rb @@ -15,17 +15,12 @@ :enable_tunneling => false, :local_ip => false, :tunnel_bridge => 'br-tun', - :polling_interval => 2, - :l2_population => false, - :arp_responder => false, :drop_flows_on_start => false, - :enable_distributed_routing => false, :firewall_driver => 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver', - :manage_vswitch => true, - :prevent_arp_spoofing => true } + :manage_vswitch => true } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -43,11 +38,12 @@ it { is_expected.to contain_class('neutron::params') } it 'configures plugins/ml2/openvswitch_agent.ini' do - is_expected.to contain_neutron_agent_ovs('agent/polling_interval').with_value(p[:polling_interval]) - is_expected.to contain_neutron_agent_ovs('agent/l2_population').with_value(p[:l2_population]) - is_expected.to contain_neutron_agent_ovs('agent/arp_responder').with_value(p[:arp_responder]) - is_expected.to contain_neutron_agent_ovs('agent/prevent_arp_spoofing').with_value(p[:prevent_arp_spoofing]) + is_expected.to contain_neutron_agent_ovs('agent/polling_interval').with_value('') + is_expected.to contain_neutron_agent_ovs('agent/l2_population').with_value('') + is_expected.to contain_neutron_agent_ovs('agent/arp_responder').with_value('') + is_expected.to contain_neutron_agent_ovs('agent/prevent_arp_spoofing').with_value('') is_expected.to contain_neutron_agent_ovs('agent/drop_flows_on_start').with_value(p[:drop_flows_on_start]) + is_expected.to contain_neutron_agent_ovs('agent/extensions').with_value(['']) is_expected.to contain_neutron_agent_ovs('ovs/integration_bridge').with_value(p[:integration_bridge]) is_expected.to contain_neutron_agent_ovs('securitygroup/firewall_driver').\ with_value(p[:firewall_driver]) @@ -176,6 +172,16 @@ end end + context 'when 
supplying extensions for ML2 plugin' do + before :each do + params.merge!(:extensions => ['qos']) + end + + it 'configures extensions' do + is_expected.to contain_neutron_agent_ovs('agent/extensions').with_value(params[:extensions].join(',')) + end + end + context 'when enabling tunneling' do context 'without local ip address' do before :each do @@ -237,7 +243,9 @@ context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end let :platform_params do @@ -250,7 +258,10 @@ context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end let :platform_params do diff --git a/neutron/spec/classes/neutron_agents_ml2_sriov_spec.rb b/neutron/spec/classes/neutron_agents_ml2_sriov_spec.rb index 4711a09d2..ec00f82e9 100644 --- a/neutron/spec/classes/neutron_agents_ml2_sriov_spec.rb +++ b/neutron/spec/classes/neutron_agents_ml2_sriov_spec.rb @@ -17,7 +17,7 @@ } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -38,6 +38,7 @@ is_expected.to contain_neutron_sriov_agent_config('sriov_nic/polling_interval').with_value(p[:polling_interval]) is_expected.to contain_neutron_sriov_agent_config('sriov_nic/exclude_devices').with_value(p[:exclude_devices].join(',')) is_expected.to contain_neutron_sriov_agent_config('sriov_nic/physical_device_mappings').with_value(p[:physical_device_mappings].join(',')) + is_expected.to contain_neutron_sriov_agent_config('agent/extensions').with_value(['']) end @@ -82,11 +83,23 @@ is_expected.to contain_neutron_sriov_agent_config('sriov_nic/physical_device_mappings').with_value(['physnet1:eth1']) end end + + context 'when supplying extensions for ML2 SR-IOV agent' do + before :each do + params.merge!(:extensions => ['qos']) + end + + it 'configures extensions' do + is_expected.to contain_neutron_sriov_agent_config('agent/extensions').with_value(params[:extensions].join(',')) + end + end end context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end let :platform_params do @@ -99,7 +112,10 @@ context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end let :platform_params do diff --git a/neutron/spec/classes/neutron_agents_vpnaas_spec.rb b/neutron/spec/classes/neutron_agents_vpnaas_spec.rb index 3d40b6a91..6983dde51 100644 --- a/neutron/spec/classes/neutron_agents_vpnaas_spec.rb +++ b/neutron/spec/classes/neutron_agents_vpnaas_spec.rb @@ -35,11 +35,10 @@ :enabled => true, :vpn_device_driver => 'neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver', :interface_driver => 'neutron.agent.linux.interface.OVSInterfaceDriver', - :ipsec_status_check_interval => '60' } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -56,9 +55,9 @@ it 'configures vpnaas_agent.ini' do is_expected.to contain_neutron_vpnaas_agent_config('vpnagent/vpn_device_driver').with_value(p[:vpn_device_driver]); - is_expected.to contain_neutron_vpnaas_agent_config('ipsec/ipsec_status_check_interval').with_value(p[:ipsec_status_check_interval]); + is_expected.to 
contain_neutron_vpnaas_agent_config('ipsec/ipsec_status_check_interval').with_value(''); is_expected.to contain_neutron_vpnaas_agent_config('DEFAULT/interface_driver').with_value(p[:interface_driver]); - is_expected.to contain_neutron_vpnaas_agent_config('DEFAULT/external_network_bridge').with_ensure('absent'); + is_expected.to contain_neutron_vpnaas_agent_config('DEFAULT/external_network_bridge').with_value(''); end context 'with external_network_bridge as br-ex' do @@ -119,7 +118,9 @@ context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end let :platform_params do @@ -136,10 +137,10 @@ context 'on RedHat 6 platforms' do let :facts do - default_facts.merge( + @default_facts.merge(test_facts.merge( { :osfamily => 'RedHat', :operatingsystemrelease => '6.5', - :operatingsystemmajrelease => 6 }) + :operatingsystemmajrelease => 6 })) end let :platform_params do @@ -153,10 +154,10 @@ context 'on RedHat 7 platforms' do let :facts do - default_facts.merge( + @default_facts.merge(test_facts.merge( { :osfamily => 'RedHat', :operatingsystemrelease => '7.1.2', - :operatingsystemmajrelease => 7 }) + :operatingsystemmajrelease => 7 })) end let :platform_params do diff --git a/neutron/spec/classes/neutron_client_spec.rb b/neutron/spec/classes/neutron_client_spec.rb index 80de68b3d..82e5c60d5 100644 --- a/neutron/spec/classes/neutron_client_spec.rb +++ b/neutron/spec/classes/neutron_client_spec.rb @@ -2,7 +2,7 @@ describe 'neutron::client' do - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -10,7 +10,9 @@ context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end it { is_expected.to contain_class('neutron::client') } @@ -18,7 +20,10 @@ context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end it { is_expected.to contain_class('neutron::client') } diff --git a/neutron/spec/classes/neutron_db_postgresql_spec.rb b/neutron/spec/classes/neutron_db_postgresql_spec.rb index d38037392..6d2bd56aa 100644 --- a/neutron/spec/classes/neutron_db_postgresql_spec.rb +++ b/neutron/spec/classes/neutron_db_postgresql_spec.rb @@ -10,7 +10,7 @@ 'include postgresql::server' end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -18,11 +18,11 @@ context 'on a RedHat osfamily' do let :facts do - default_facts.merge({ + @default_facts.merge(test_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.0', :concat_basedir => '/var/lib/puppet/concat' - }) + })) end context 'with only required parameters' do @@ -40,12 +40,12 @@ context 'on a Debian osfamily' do let :facts do - default_facts.merge({ + @default_facts.merge(test_facts.merge({ :operatingsystemrelease => '7.8', :operatingsystem => 'Debian', :osfamily => 'Debian', :concat_basedir => '/var/lib/puppet/concat' - }) + })) end context 'with only required parameters' do diff --git a/neutron/spec/classes/neutron_db_spec.rb b/neutron/spec/classes/neutron_db_spec.rb index 1672b6e81..2c5a14c0a 100644 --- a/neutron/spec/classes/neutron_db_spec.rb +++ b/neutron/spec/classes/neutron_db_spec.rb @@ -7,23 +7,23 @@ context 'with default parameters' do it { is_expected.to 
contain_neutron_config('database/connection').with_value('sqlite:////var/lib/neutron/ovs.sqlite').with_secret(true) } - it { is_expected.to contain_neutron_config('database/idle_timeout').with_value('3600') } - it { is_expected.to contain_neutron_config('database/min_pool_size').with_value('1') } - it { is_expected.to contain_neutron_config('database/max_retries').with_value('10') } - it { is_expected.to contain_neutron_config('database/retry_interval').with_value('10') } + it { is_expected.to contain_neutron_config('database/idle_timeout').with_value('') } + it { is_expected.to contain_neutron_config('database/min_pool_size').with_value('') } + it { is_expected.to contain_neutron_config('database/max_retries').with_value('') } + it { is_expected.to contain_neutron_config('database/retry_interval').with_value('') } end context 'with specific parameters' do let :params do - { :database_connection => 'mysql://neutron:neutron@localhost/neutron', + { :database_connection => 'mysql+pymysql://neutron:neutron@localhost/neutron', :database_idle_timeout => '3601', :database_min_pool_size => '2', :database_max_retries => '11', :database_retry_interval => '11', } end - it { is_expected.to contain_neutron_config('database/connection').with_value('mysql://neutron:neutron@localhost/neutron').with_secret(true) } + it { is_expected.to contain_neutron_config('database/connection').with_value('mysql+pymysql://neutron:neutron@localhost/neutron').with_secret(true) } it { is_expected.to contain_neutron_config('database/idle_timeout').with_value('3601') } it { is_expected.to contain_neutron_config('database/min_pool_size').with_value('2') } it { is_expected.to contain_neutron_config('database/max_retries').with_value('11') } @@ -31,6 +31,14 @@ end + context 'with MySQL-python library as backend package' do + let :params do + { :database_connection => 'mysql://neutron:neutron@localhost/neutron' } + end + + it { is_expected.to contain_neutron_config('database/connection').with_value('mysql://neutron:neutron@localhost/neutron').with_secret(true) } + end + context 'with postgresql backend' do let :params do { :database_connection => 'postgresql://neutron:neutron@localhost/neutron', } @@ -50,27 +58,55 @@ it_raises 'a Puppet::Error', /validate_re/ end + context 'with incorrect database_connection string' do + let :params do + { :database_connection => 'foo+pymysql://neutron:neutron@localhost/neutron', } + end + + it_raises 'a Puppet::Error', /validate_re/ + end + end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian', - :operatingsystem => 'Debian', - :operatingsystemrelease => 'jessie', - } + @default_facts.merge({ + :osfamily => 'Debian', + :operatingsystem => 'Debian', + :operatingsystemrelease => 'jessie', + }) end it_configures 'neutron::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://neutron:neutron@localhost/neutron' } + end + + it { is_expected.to contain_package('neutron-backend-package').with({ :ensure => 'present', :name => 'python-pymysql' }) } + end + end context 'on Redhat platforms' do let :facts do - { :osfamily => 'RedHat', - :operatingsystemrelease => '7.1', - } + @default_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7.1', + }) end it_configures 'neutron::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://neutron:neutron@localhost/neutron' } + end + + it { is_expected.not_to contain_package('neutron-backend-package') } + end + end end diff --git 
a/neutron/spec/classes/neutron_init_spec.rb b/neutron/spec/classes/neutron_init_spec.rb index d51f47346..3171a4d83 100644 --- a/neutron/spec/classes/neutron_init_spec.rb +++ b/neutron/spec/classes/neutron_init_spec.rb @@ -4,24 +4,18 @@ let :params do { :package_ensure => 'present', - :verbose => false, - :debug => false, - :use_stderr => true, :core_plugin => 'linuxbridge', + :auth_strategy => 'keystone', + :rabbit_hosts => false, :rabbit_host => '127.0.0.1', :rabbit_port => 5672, - :rabbit_hosts => false, :rabbit_user => 'guest', :rabbit_password => 'guest', - :rabbit_virtual_host => '/', - :kombu_reconnect_delay => '1.0', :log_dir => '/var/log/neutron', - :report_interval => '30', - :rpc_response_timeout => '60', } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -54,7 +48,7 @@ it 'configures logging' do is_expected.to contain_neutron_config('DEFAULT/log_file').with_ensure('absent') is_expected.to contain_neutron_config('DEFAULT/log_dir').with_value(params[:log_dir]) - is_expected.to contain_neutron_config('DEFAULT/use_stderr').with_value(params[:use_stderr]) + is_expected.to contain_neutron_config('DEFAULT/use_stderr').with_value('') end end @@ -71,7 +65,7 @@ it_configures 'with SSL and kombu wrongly configured' it_configures 'with SSL socket options set' it_configures 'with SSL socket options set with wrong parameters' - it_configures 'with SSL socket options set to false' + it_configures 'with SSL socket options left by default' it_configures 'with syslog disabled' it_configures 'with syslog enabled' it_configures 'with syslog enabled and custom settings' @@ -81,7 +75,6 @@ it_configures 'with service_plugins' it_configures 'without memcache_servers' it_configures 'with memcache_servers' - it_configures 'with qpid rpc backend' end shared_examples_for 'a neutron base installation' do @@ -100,36 +93,36 @@ is_expected.to contain_neutron_config('oslo_messaging_rabbit/rabbit_userid').with_value( params[:rabbit_user] ) is_expected.to contain_neutron_config('oslo_messaging_rabbit/rabbit_password').with_value( params[:rabbit_password] ) is_expected.to contain_neutron_config('oslo_messaging_rabbit/rabbit_password').with_secret( true ) - is_expected.to contain_neutron_config('oslo_messaging_rabbit/rabbit_virtual_host').with_value( params[:rabbit_virtual_host] ) + is_expected.to contain_neutron_config('oslo_messaging_rabbit/rabbit_virtual_host').with_value( '' ) is_expected.to contain_neutron_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('0') is_expected.to contain_neutron_config('oslo_messaging_rabbit/heartbeat_rate').with_value('2') - is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_reconnect_delay').with_value( params[:kombu_reconnect_delay] ) + is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_reconnect_delay').with_value( '' ) end it 'configures neutron.conf' do - is_expected.to contain_neutron_config('DEFAULT/verbose').with_value( params[:verbose] ) - is_expected.to contain_neutron_config('DEFAULT/bind_host').with_value('0.0.0.0') - is_expected.to contain_neutron_config('DEFAULT/bind_port').with_value('9696') + is_expected.to contain_neutron_config('DEFAULT/verbose').with_value( '' ) + is_expected.to contain_neutron_config('DEFAULT/bind_host').with_value('') + is_expected.to contain_neutron_config('DEFAULT/bind_port').with_value('') is_expected.to contain_neutron_config('DEFAULT/auth_strategy').with_value('keystone') is_expected.to 
contain_neutron_config('DEFAULT/core_plugin').with_value( params[:core_plugin] ) - is_expected.to contain_neutron_config('DEFAULT/base_mac').with_value('fa:16:3e:00:00:00') - is_expected.to contain_neutron_config('DEFAULT/mac_generation_retries').with_value(16) - is_expected.to contain_neutron_config('DEFAULT/dhcp_lease_duration').with_value(86400) - is_expected.to contain_neutron_config('DEFAULT/dhcp_agents_per_network').with_value(1) - is_expected.to contain_neutron_config('DEFAULT/network_device_mtu').with_ensure('absent') - is_expected.to contain_neutron_config('DEFAULT/dhcp_agent_notification').with_value(true) - is_expected.to contain_neutron_config('DEFAULT/advertise_mtu').with_value(false) - is_expected.to contain_neutron_config('DEFAULT/allow_bulk').with_value(true) - is_expected.to contain_neutron_config('DEFAULT/allow_pagination').with_value(false) - is_expected.to contain_neutron_config('DEFAULT/allow_sorting').with_value(false) - is_expected.to contain_neutron_config('DEFAULT/allow_overlapping_ips').with_value(false) - is_expected.to contain_neutron_config('DEFAULT/api_extensions_path').with_value(nil) + is_expected.to contain_neutron_config('DEFAULT/base_mac').with_value('') + is_expected.to contain_neutron_config('DEFAULT/mac_generation_retries').with_value('') + is_expected.to contain_neutron_config('DEFAULT/dhcp_lease_duration').with_value('') + is_expected.to contain_neutron_config('DEFAULT/dhcp_agents_per_network').with_value('') + is_expected.to contain_neutron_config('DEFAULT/network_device_mtu').with_value('') + is_expected.to contain_neutron_config('DEFAULT/dhcp_agent_notification').with_value('') + is_expected.to contain_neutron_config('DEFAULT/advertise_mtu').with_value('') + is_expected.to contain_neutron_config('DEFAULT/allow_bulk').with_value('') + is_expected.to contain_neutron_config('DEFAULT/allow_pagination').with_value('') + is_expected.to contain_neutron_config('DEFAULT/allow_sorting').with_value('') + is_expected.to contain_neutron_config('DEFAULT/allow_overlapping_ips').with_value('') + is_expected.to contain_neutron_config('DEFAULT/api_extensions_path').with_value('') is_expected.to contain_neutron_config('DEFAULT/control_exchange').with_value('neutron') - is_expected.to contain_neutron_config('DEFAULT/state_path').with_value('/var/lib/neutron') - is_expected.to contain_neutron_config('DEFAULT/lock_path').with_value('/var/lib/neutron/lock') - is_expected.to contain_neutron_config('DEFAULT/rpc_response_timeout').with_value( params[:rpc_response_timeout] ) + is_expected.to contain_neutron_config('DEFAULT/state_path').with_value('') + is_expected.to contain_neutron_config('DEFAULT/lock_path').with_value('') + is_expected.to contain_neutron_config('DEFAULT/rpc_response_timeout').with_value( '' ) is_expected.to contain_neutron_config('agent/root_helper').with_value('sudo neutron-rootwrap /etc/neutron/rootwrap.conf') - is_expected.to contain_neutron_config('agent/report_interval').with_value('30') + is_expected.to contain_neutron_config('agent/report_interval').with_value('') end end @@ -158,55 +151,6 @@ end end - shared_examples_for 'with qpid rpc backend' do - before do - params.merge!({ :rpc_backend => 'qpid' }) - end - - it { is_expected.to contain_neutron_config('DEFAULT/rpc_backend').with_value('qpid') } - - context 'when default params' do - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_username').with_value('guest') } - it { is_expected.to 
contain_neutron_config('oslo_messaging_qpid/qpid_password').with_value('guest').with_secret(true) } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_port').with_value('5672') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_protocol').with_value('tcp') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_heartbeat').with_value('60') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_tcp_nodelay').with_value('true') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_reconnect').with_value('true') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_reconnect_timeout').with_value('0') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_reconnect_limit').with_value('0') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_reconnect_interval_min').with_value('0') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_reconnect_interval_max').with_value('0') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_reconnect_interval').with_value('0') } - end - - context 'when passing params' do - before do - params.merge!({ - :qpid_password => 'pass', - :qpid_username => 'guest2', - :qpid_hostname => 'localhost2', - :qpid_port => '5673', - :qpid_protocol => 'udp', - :qpid_heartbeat => '89', - :qpid_tcp_nodelay => 'false', - :qpid_reconnect => 'false', - }) - end - - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_username').with_value('guest2') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_password').with_value('pass').with_secret(true) } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost2') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_port').with_value('5673') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_protocol').with_value('udp') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_heartbeat').with_value('89') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_tcp_nodelay').with_value('false') } - it { is_expected.to contain_neutron_config('oslo_messaging_qpid/qpid_reconnect').with_value('false') } - end - - end - shared_examples_for 'with SSL socket options set' do before do params.merge!( @@ -235,20 +179,12 @@ it_raises 'a Puppet::Error', /The cert_file parameter is required when use_ssl is set to true/ end - shared_examples_for 'with SSL socket options set to false' do - before do - params.merge!( - :use_ssl => false, - :cert_file => false, - :key_file => false, - :ca_file => false - ) - end + shared_examples_for 'with SSL socket options left by default' do - it { is_expected.to contain_neutron_config('DEFAULT/use_ssl').with_value('false') } - it { is_expected.to contain_neutron_config('DEFAULT/ssl_cert_file').with_ensure('absent') } - it { is_expected.to contain_neutron_config('DEFAULT/ssl_key_file').with_ensure('absent') } - it { is_expected.to contain_neutron_config('DEFAULT/ssl_ca_file').with_ensure('absent') } + it { is_expected.to contain_neutron_config('DEFAULT/use_ssl').with_value('') } + it { is_expected.to contain_neutron_config('DEFAULT/ssl_cert_file').with_value('') } + it { is_expected.to contain_neutron_config('DEFAULT/ssl_key_file').with_value('') } + it { is_expected.to 
contain_neutron_config('DEFAULT/ssl_ca_file').with_value('') } end shared_examples_for 'with SSL socket options set and no ca_file' do @@ -278,6 +214,11 @@ end shared_examples_for 'with syslog disabled' do + before do + params.merge!( + :use_syslog => false, + ) + end it { is_expected.to contain_neutron_config('DEFAULT/use_syslog').with_value(false) } end @@ -310,9 +251,9 @@ it do is_expected.to contain_neutron_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value('true') - is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_ensure('absent') - is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') - is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') + is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_value('') + is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_value('') + is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_value('') is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('TLSv1') end end @@ -320,16 +261,15 @@ shared_examples_for 'with SSL disabled' do before do params.merge!( - :rabbit_use_ssl => false, :kombu_ssl_version => 'TLSv1' ) end it do - is_expected.to contain_neutron_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value('false') - is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_ensure('absent') - is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') - is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') + is_expected.to contain_neutron_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value('') + is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_value('') + is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_value('') + is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_value('') is_expected.to contain_neutron_config('oslo_messaging_rabbit/kombu_ssl_version').with_ensure('absent') end end @@ -337,7 +277,7 @@ shared_examples_for 'with SSL wrongly configured' do before do params.merge!( - :rabbit_use_ssl => false + :rabbit_use_ssl => false ) end @@ -389,7 +329,7 @@ it do is_expected.to contain_neutron_config('DEFAULT/use_syslog').with_value(true) - is_expected.to contain_neutron_config('DEFAULT/syslog_log_facility').with_value('LOG_USER') + is_expected.to contain_neutron_config('DEFAULT/syslog_log_facility').with_value('') end end @@ -449,12 +389,12 @@ shared_examples_for 'with service_plugins' do before do params.merge!( - :service_plugins => ['router','firewall','lbaas','vpnaas','metering'] + :service_plugins => ['router','firewall','lbaas','vpnaas','metering','qos'] ) end it do - is_expected.to contain_neutron_config('DEFAULT/service_plugins').with_value('router,firewall,lbaas,vpnaas,metering') + is_expected.to contain_neutron_config('DEFAULT/service_plugins').with_value('router,firewall,lbaas,vpnaas,metering,qos') end end @@ -502,7 +442,9 @@ context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end let :platform_params do @@ -514,7 +456,10 @@ context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily 
=> 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end let :platform_params do diff --git a/neutron/spec/classes/neutron_keystone_auth_spec.rb b/neutron/spec/classes/neutron_keystone_auth_spec.rb index 8da3fcbb6..33c62edc9 100644 --- a/neutron/spec/classes/neutron_keystone_auth_spec.rb +++ b/neutron/spec/classes/neutron_keystone_auth_spec.rb @@ -2,7 +2,7 @@ describe 'neutron::keystone::auth' do - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -47,7 +47,9 @@ end let :facts do - default_facts.merge({ :osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end let :params do diff --git a/neutron/spec/classes/neutron_plugins_cisco_spec.rb b/neutron/spec/classes/neutron_plugins_cisco_spec.rb index 2ac2f1666..ed31f8b8c 100644 --- a/neutron/spec/classes/neutron_plugins_cisco_spec.rb +++ b/neutron/spec/classes/neutron_plugins_cisco_spec.rb @@ -33,7 +33,7 @@ class { 'neutron': rabbit_password => 'passw0rd' }" } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -137,7 +137,9 @@ class { 'neutron': rabbit_password => 'passw0rd' }" end context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end context 'on Ubuntu operating systems' do @@ -168,7 +170,10 @@ class { 'neutron': rabbit_password => 'passw0rd' }" context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end it_configures 'default cisco plugin' diff --git a/neutron/spec/classes/neutron_plugins_midonet_spec.rb b/neutron/spec/classes/neutron_plugins_midonet_spec.rb index d0cd08b2f..347c51deb 100644 --- a/neutron/spec/classes/neutron_plugins_midonet_spec.rb +++ b/neutron/spec/classes/neutron_plugins_midonet_spec.rb @@ -18,7 +18,7 @@ class { 'neutron': rabbit_password => 'passw0rd' } } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -53,7 +53,9 @@ class { 'neutron': rabbit_password => 'passw0rd' } context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end it 'configures /etc/default/neutron-server' do is_expected.to contain_file_line('/etc/default/neutron-server:NEUTRON_PLUGIN_CONFIG').with( @@ -69,7 +71,10 @@ class { 'neutron': rabbit_password => 'passw0rd' } context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end it_configures 'neutron midonet plugin' end diff --git a/neutron/spec/classes/neutron_plugins_ml2_bigswitch_restproxy_spec.rb b/neutron/spec/classes/neutron_plugins_ml2_bigswitch_restproxy_spec.rb index 42fc79e78..cce07f738 100644 --- a/neutron/spec/classes/neutron_plugins_ml2_bigswitch_restproxy_spec.rb +++ b/neutron/spec/classes/neutron_plugins_ml2_bigswitch_restproxy_spec.rb @@ -24,7 +24,7 @@ class { 'neutron': required_params end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default', } @@ -70,7 +70,10 @@ class { 'neutron': context 'on RedHat 
platforms' do let :facts do - default_facts.merge({:osfamily => 'RedHat'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end it_configures 'neutron bigswitch ml2 restproxy' diff --git a/neutron/spec/classes/neutron_plugins_ml2_bigswitch_spec.rb b/neutron/spec/classes/neutron_plugins_ml2_bigswitch_spec.rb index 80ace6c08..7d1dc92e3 100644 --- a/neutron/spec/classes/neutron_plugins_ml2_bigswitch_spec.rb +++ b/neutron/spec/classes/neutron_plugins_ml2_bigswitch_spec.rb @@ -19,7 +19,7 @@ class { 'neutron': {} end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default', @@ -44,7 +44,10 @@ class { 'neutron': context 'on RedHat platforms' do let :facts do - default_facts.merge({:osfamily => 'RedHat'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end it_configures 'neutron plugin bigswitch ml2' @@ -52,7 +55,9 @@ class { 'neutron': context 'on Debian platforms' do let :facts do - default_facts.merge({:osfamily => 'Debian'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end it { is_expected.to raise_error(Puppet::Error, /Unsupported osfamily Debian/) } diff --git a/neutron/spec/classes/neutron_plugins_ml2_cisco_nexus_spec.rb b/neutron/spec/classes/neutron_plugins_ml2_cisco_nexus_spec.rb index bacdd5f21..2153004ce 100644 --- a/neutron/spec/classes/neutron_plugins_ml2_cisco_nexus_spec.rb +++ b/neutron/spec/classes/neutron_plugins_ml2_cisco_nexus_spec.rb @@ -47,7 +47,7 @@ class { 'neutron': {} end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default', :concat_basedir => '/', @@ -89,7 +89,10 @@ class { 'neutron': begin context 'on RedHat platforms' do let :facts do - default_facts.merge({:osfamily => 'RedHat'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end let :platform_params do diff --git a/neutron/spec/classes/neutron_plugins_ml2_cisco_spec.rb b/neutron/spec/classes/neutron_plugins_ml2_cisco_spec.rb index d6cdfd78c..5b227c7d7 100644 --- a/neutron/spec/classes/neutron_plugins_ml2_cisco_spec.rb +++ b/neutron/spec/classes/neutron_plugins_ml2_cisco_spec.rb @@ -19,7 +19,7 @@ class { 'neutron': {} end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default', @@ -46,7 +46,10 @@ class { 'neutron': begin context 'on RedHat platforms' do let :facts do - default_facts.merge({:osfamily => 'RedHat'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end it_configures 'neutron plugin cisco ml2' diff --git a/neutron/spec/classes/neutron_plugins_ml2_cisco_type_nexus_vxlan_spec.rb b/neutron/spec/classes/neutron_plugins_ml2_cisco_type_nexus_vxlan_spec.rb index affac4ba1..89765fc73 100644 --- a/neutron/spec/classes/neutron_plugins_ml2_cisco_type_nexus_vxlan_spec.rb +++ b/neutron/spec/classes/neutron_plugins_ml2_cisco_type_nexus_vxlan_spec.rb @@ -24,7 +24,7 @@ class { 'neutron': {} end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default', :concat_basedir => '/', @@ -49,7 +49,10 @@ class { 'neutron': begin context 'on RedHat platforms' do let :facts do - default_facts.merge({:osfamily => 'RedHat'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end it_configures 
'neutron cisco ml2 type nexus vxlan plugin' diff --git a/neutron/spec/classes/neutron_plugins_ml2_cisco_ucsm_spec.rb b/neutron/spec/classes/neutron_plugins_ml2_cisco_ucsm_spec.rb index 26c211072..33da6eb59 100644 --- a/neutron/spec/classes/neutron_plugins_ml2_cisco_ucsm_spec.rb +++ b/neutron/spec/classes/neutron_plugins_ml2_cisco_ucsm_spec.rb @@ -27,7 +27,7 @@ class { 'neutron': {} end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default', :concat_basedir => '/', @@ -55,7 +55,11 @@ class { 'neutron': begin context 'on RedHat platforms' do let :facts do - default_facts.merge({:osfamily => 'RedHat'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7', + :osfamily => 'RedHat' + })) end it_configures 'neutron cisco ml2 ucsm plugin' diff --git a/neutron/spec/classes/neutron_plugins_ml2_spec.rb b/neutron/spec/classes/neutron_plugins_ml2_spec.rb index 9cdd08c23..eab645945 100644 --- a/neutron/spec/classes/neutron_plugins_ml2_spec.rb +++ b/neutron/spec/classes/neutron_plugins_ml2_spec.rb @@ -42,7 +42,7 @@ :package_ensure => 'present' } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -67,6 +67,7 @@ is_expected.to contain_neutron_plugin_ml2('ml2/type_drivers').with_value(p[:type_drivers].join(',')) is_expected.to contain_neutron_plugin_ml2('ml2/tenant_network_types').with_value(p[:tenant_network_types].join(',')) is_expected.to contain_neutron_plugin_ml2('ml2/mechanism_drivers').with_value(p[:mechanism_drivers].join(',')) + is_expected.to contain_neutron_plugin_ml2('ml2/extension_drivers').with_value('') is_expected.to contain_neutron_plugin_ml2('ml2/path_mtu').with_value(p[:path_mtu]) is_expected.to contain_neutron_plugin_ml2('ml2/physical_network_mtus').with_ensure('absent') end @@ -88,6 +89,16 @@ end end + context 'when using extension drivers for ML2 plugin' do + before :each do + params.merge!(:extension_drivers => ['port_security','qos']) + end + + it 'configures extension drivers' do + is_expected.to contain_neutron_plugin_ml2('ml2/extension_drivers').with_value(p[:extension_drivers].join(',')) + end + end + context 'configure ml2 with bad driver value' do before :each do params.merge!(:type_drivers => ['foobar']) @@ -239,7 +250,9 @@ context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end let :platform_params do @@ -266,7 +279,10 @@ context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end let :platform_params do diff --git a/neutron/spec/classes/neutron_plugins_nuage_spec.rb b/neutron/spec/classes/neutron_plugins_nuage_spec.rb index 223422333..8d38c3fbe 100644 --- a/neutron/spec/classes/neutron_plugins_nuage_spec.rb +++ b/neutron/spec/classes/neutron_plugins_nuage_spec.rb @@ -9,7 +9,7 @@ class { 'neutron::server': auth_password => 'password' }" end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -30,6 +30,15 @@ class { 'neutron::server': auth_password => 'password' }" it { is_expected.to contain_class('neutron::params') } + it 'should have a nuage plugin ini file' do + is_expected.to contain_file('/etc/neutron/plugins/nuage/plugin.ini').with( + :ensure => 'file', + :owner => 
'root', + :group => 'neutron', + :mode => '0640' + ) + end + it 'should configure neutron.conf' do is_expected.to contain_neutron_config('DEFAULT/core_plugin').with_value('neutron.plugins.nuage.plugin.NuagePlugin') end @@ -54,7 +63,9 @@ class { 'neutron::server': auth_password => 'password' }" begin context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end it_configures 'neutron plugin nuage' @@ -62,7 +73,10 @@ class { 'neutron::server': auth_password => 'password' }" context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end it_configures 'neutron plugin nuage' @@ -71,7 +85,9 @@ class { 'neutron::server': auth_password => 'password' }" begin context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end it 'configures /etc/default/neutron-server' do @@ -88,7 +104,10 @@ class { 'neutron::server': auth_password => 'password' }" context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end it 'should create plugin symbolic link' do diff --git a/neutron/spec/classes/neutron_plugins_nvp_spec.rb b/neutron/spec/classes/neutron_plugins_nvp_spec.rb index 48a15d3ca..af46dee76 100644 --- a/neutron/spec/classes/neutron_plugins_nvp_spec.rb +++ b/neutron/spec/classes/neutron_plugins_nvp_spec.rb @@ -14,7 +14,7 @@ :package_ensure => 'present'} end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -93,7 +93,9 @@ begin context 'on Debian platforms' do let :facts do - default_facts.merge({:osfamily => 'Debian'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end let :platform_params do @@ -105,7 +107,10 @@ context 'on RedHat platforms' do let :facts do - default_facts.merge({:osfamily => 'RedHat'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end let :platform_params do diff --git a/neutron/spec/classes/neutron_plugins_opencontrail_spec.rb b/neutron/spec/classes/neutron_plugins_opencontrail_spec.rb index 975e89b21..e453b32ba 100644 --- a/neutron/spec/classes/neutron_plugins_opencontrail_spec.rb +++ b/neutron/spec/classes/neutron_plugins_opencontrail_spec.rb @@ -20,7 +20,7 @@ class { 'neutron': rabbit_password => 'passw0rd' }" } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -52,7 +52,9 @@ class { 'neutron': rabbit_password => 'passw0rd' }" context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end let :params do @@ -73,7 +75,10 @@ class { 'neutron': rabbit_password => 'passw0rd' }" context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end let :params do diff --git a/neutron/spec/classes/neutron_plugins_plumgrid_spec.rb b/neutron/spec/classes/neutron_plugins_plumgrid_spec.rb index 398568d49..209d4fe2a 100644 --- 
a/neutron/spec/classes/neutron_plugins_plumgrid_spec.rb +++ b/neutron/spec/classes/neutron_plugins_plumgrid_spec.rb @@ -20,7 +20,7 @@ class { 'neutron': rabbit_password => 'passw0rd' }" } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -51,15 +51,15 @@ class { 'neutron': rabbit_password => 'passw0rd' }" it 'should perform default configuration of plumgrid plugin' do is_expected.to contain_neutron_plugin_plumgrid('PLUMgridDirector/director_server').with_value(params[:director_server]) is_expected.to contain_neutron_plugin_plumgrid('PLUMgridDirector/director_server_port').with_value(params[:director_server_port]) - is_expected.to contain_neutron_plugin_plumgrid('PLUMgridDirector/username').with_value(params[:username]) - is_expected.to contain_neutron_plugin_plumgrid('PLUMgridDirector/password').with_value(params[:password]) + is_expected.to contain_neutron_plugin_plumgrid('PLUMgridDirector/username').with_value('') + is_expected.to contain_neutron_plugin_plumgrid('PLUMgridDirector/password').with_value('') is_expected.to contain_neutron_plugin_plumgrid('PLUMgridDirector/servertimeout').with_value(params[:servertimeout]) is_expected.to contain_neutron_plugin_plumgrid('database/connection').with_value(params[:connection]) end it 'should perform default configuration of plumgrid plumlib' do is_expected.to contain_neutron_plumlib_plumgrid('keystone_authtoken/admin_user').with_value('admin') - is_expected.to contain_neutron_plumlib_plumgrid('keystone_authtoken/admin_password').with_value(params[:admin_password]) + is_expected.to contain_neutron_plumlib_plumgrid('keystone_authtoken/admin_password').with_value('') is_expected.to contain_neutron_plumlib_plumgrid('keystone_authtoken/admin_tenant_name').with_value('admin') auth_uri = params[:auth_protocol] + "://" + params[:controller_priv_host] + ":" + "35357/v2.0"; is_expected.to contain_neutron_plumlib_plumgrid('keystone_authtoken/auth_uri').with_value(auth_uri) @@ -67,14 +67,16 @@ class { 'neutron': rabbit_password => 'passw0rd' }" is_expected.to contain_neutron_plumlib_plumgrid('PLUMgridMetadata/metadata_mode').with_value('local') is_expected.to contain_neutron_plumlib_plumgrid('PLUMgridMetadata/nova_metadata_ip').with_value(params[:nova_metadata_ip]) is_expected.to contain_neutron_plumlib_plumgrid('PLUMgridMetadata/nova_metadata_port').with_value(params[:nova_metadata_port]) - is_expected.to contain_neutron_plumlib_plumgrid('PLUMgridMetadata/metadata_proxy_shared_secret').with_value(params[:metadata_proxy_shared_secret]) + is_expected.to contain_neutron_plumlib_plumgrid('PLUMgridMetadata/metadata_proxy_shared_secret').with_value('') end end context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end it 'configures /etc/default/neutron-server' do @@ -92,7 +94,10 @@ class { 'neutron': rabbit_password => 'passw0rd' }" context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat'}) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end it 'should create plugin symbolic link' do diff --git a/neutron/spec/classes/neutron_policy_spec.rb b/neutron/spec/classes/neutron_policy_spec.rb index df9016565..1e3e734e2 100644 --- a/neutron/spec/classes/neutron_policy_spec.rb +++ b/neutron/spec/classes/neutron_policy_spec.rb @@ -2,7 +2,7 @@ describe 'neutron::policy' do - let :default_facts do + 
let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -31,7 +31,9 @@ context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end it_configures 'neutron policies' @@ -39,7 +41,10 @@ context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end it_configures 'neutron policies' diff --git a/neutron/spec/classes/neutron_quota_spec.rb b/neutron/spec/classes/neutron_quota_spec.rb index 449c7cca0..c05644479 100644 --- a/neutron/spec/classes/neutron_quota_spec.rb +++ b/neutron/spec/classes/neutron_quota_spec.rb @@ -7,25 +7,9 @@ end let :default_params do - { :default_quota => -1, - :quota_network => 10, - :quota_subnet => 10, - :quota_port => 50, - :quota_router => 10, - :quota_floatingip => 50, - :quota_security_group => 10, - :quota_security_group_rule => 100, - :quota_driver => 'neutron.db.quota_db.DbQuotaDriver', - :quota_firewall => 1, - :quota_firewall_policy => 1, - :quota_firewall_rule => -1, - :quota_health_monitor => -1, - :quota_items => 'network,subnet,port', - :quota_member => -1, + { :quota_firewall_rule => -1, :quota_network_gateway => 5, - :quota_packet_filter => 100, - :quota_pool => 10, - :quota_vip => 10 } + :quota_packet_filter => 100 } end let :facts do diff --git a/neutron/spec/classes/neutron_server_notifications_spec.rb b/neutron/spec/classes/neutron_server_notifications_spec.rb index 93ed7a23a..e511312bc 100644 --- a/neutron/spec/classes/neutron_server_notifications_spec.rb +++ b/neutron/spec/classes/neutron_server_notifications_spec.rb @@ -24,7 +24,6 @@ { :notify_nova_on_port_status_changes => true, :notify_nova_on_port_data_changes => true, - :send_events_interval => '2', :nova_url => 'http://127.0.0.1:8774/v2', :auth_plugin => 'password', :username => 'nova', @@ -36,11 +35,10 @@ :nova_admin_auth_url => 'http://127.0.0.1:35357/v2.0', :nova_admin_username => 'nova', :nova_admin_tenant_name => 'services', - :nova_region_name => nil, } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -48,8 +46,7 @@ let :params do { - :password => 'secrete', - :tenant_id => 'UUID' + :password => 'secrete' } end @@ -61,14 +58,15 @@ it 'configure neutron.conf' do is_expected.to contain_neutron_config('DEFAULT/notify_nova_on_port_status_changes').with_value(true) is_expected.to contain_neutron_config('DEFAULT/notify_nova_on_port_data_changes').with_value(true) - is_expected.to contain_neutron_config('DEFAULT/send_events_interval').with_value('2') + is_expected.to contain_neutron_config('DEFAULT/send_events_interval').with_value('') is_expected.to contain_neutron_config('DEFAULT/nova_url').with_value('http://127.0.0.1:8774/v2') is_expected.to contain_neutron_config('nova/auth_url').with_value('http://127.0.0.1:35357') is_expected.to contain_neutron_config('nova/username').with_value('nova') is_expected.to contain_neutron_config('nova/password').with_value('secrete') is_expected.to contain_neutron_config('nova/password').with_secret( true ) - is_expected.to contain_neutron_config('nova/tenant_id').with_value('UUID') - is_expected.to contain_neutron_config('nova/region_name').with_ensure('absent') + is_expected.to contain_neutron_config('nova/tenant_name').with_value('services') + is_expected.to 
contain_neutron_config('nova/region_name').with_value('') + is_expected.not_to contain_neutron_config('DEFAULT/nova_region_name') is_expected.not_to contain_neutron_config('DEFAULT/nova_admin_auth_url') is_expected.not_to contain_neutron_config('DEFAULT/nova_admin_username') is_expected.not_to contain_neutron_config('DEFAULT/nova_admin_password') @@ -144,7 +142,6 @@ before :each do params.merge!({ :nova_admin_tenant_name => false, - :nova_admin_tenant_id => false, :nova_admin_password => 'secrete', }) end @@ -156,7 +153,6 @@ before :each do params.merge!({ :tenant_name => false, - :tenant_id => false, :password => 'secrete', }) end @@ -168,7 +164,6 @@ before :each do params.merge!({ :nova_admin_tenant_name => 'services', - :nova_admin_tenant_id => false, :nova_admin_password => 'secrete', :password => false }) @@ -181,7 +176,9 @@ context 'on Debian platforms' do let :facts do - default_facts.merge({ :osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end let :platform_params do @@ -193,7 +190,10 @@ context 'on RedHat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end let :platform_params do diff --git a/neutron/spec/classes/neutron_server_spec.rb b/neutron/spec/classes/neutron_server_spec.rb index acb40efdb..090193761 100644 --- a/neutron/spec/classes/neutron_server_spec.rb +++ b/neutron/spec/classes/neutron_server_spec.rb @@ -27,18 +27,14 @@ :database_max_pool_size => 10, :database_max_overflow => 20, :sync_db => false, - :agent_down_time => 75, :router_scheduler_driver => 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler', - :router_distributed => false, :l3_ha => false, :max_l3_agents_per_router => 3, :min_l3_agents_per_router => 2, - :l3_ha_net_cidr => '169.254.192.0/18', - :allow_automatic_l3agent_failover => false } end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -94,8 +90,9 @@ is_expected.to contain_service('neutron-server').with_name('neutron-server') is_expected.to contain_neutron_config('DEFAULT/api_workers').with_value(facts[:processorcount]) is_expected.to contain_neutron_config('DEFAULT/rpc_workers').with_value(facts[:processorcount]) - is_expected.to contain_neutron_config('DEFAULT/agent_down_time').with_value(p[:agent_down_time]) + is_expected.to contain_neutron_config('DEFAULT/agent_down_time').with_value('') is_expected.to contain_neutron_config('DEFAULT/router_scheduler_driver').with_value(p[:router_scheduler_driver]) + is_expected.to contain_neutron_config('qos/notification_drivers').with_value('') end context 'with manage_service as false' do @@ -124,7 +121,7 @@ is_expected.to contain_neutron_config('DEFAULT/l3_ha').with_value(true) is_expected.to contain_neutron_config('DEFAULT/max_l3_agents_per_router').with_value(3) is_expected.to contain_neutron_config('DEFAULT/min_l3_agents_per_router').with_value(2) - is_expected.to contain_neutron_config('DEFAULT/l3_ha_net_cidr').with_value('169.254.192.0/18') + is_expected.to contain_neutron_config('DEFAULT/l3_ha_net_cidr').with_value('') end end @@ -179,7 +176,16 @@ context 'with allow_automatic_l3agent_failover in neutron.conf' do it 'should configure allow_automatic_l3agent_failover' do - is_expected.to contain_neutron_config('DEFAULT/allow_automatic_l3agent_failover').with_value(p[:allow_automatic_l3agent_failover]) + is_expected.to 
contain_neutron_config('DEFAULT/allow_automatic_l3agent_failover').with_value('') + end + end + + context 'with qos_notification_drivers parameter' do + before :each do + params.merge!(:qos_notification_drivers => 'message_queue') + end + it 'should configure qos_notification_drivers' do + is_expected.to contain_neutron_config('qos/notification_drivers').with_value('message_queue') end end end @@ -234,7 +240,10 @@ describe "with custom keystone auth_uri" do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end before do params.merge!({ @@ -253,7 +262,10 @@ describe "with custom keystone identity_uri" do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end before do params.merge!({ @@ -272,7 +284,10 @@ describe "with custom keystone identity_uri and auth_uri" do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end before do params.merge!({ @@ -292,7 +307,10 @@ describe "with custom auth region" do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end before do params.merge!({ @@ -306,9 +324,10 @@ context 'on Debian platforms' do let :facts do - default_facts.merge( - { :osfamily => 'Debian', - :processorcount => '2' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian', + :processorcount => '2' + })) end let :platform_params do @@ -325,9 +344,11 @@ context 'on RedHat platforms' do let :facts do - default_facts.merge( - { :osfamily => 'RedHat', - :processorcount => '2' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7', + :processorcount => '2' + })) end let :platform_params do diff --git a/neutron/spec/classes/neutron_services_fwaas_spec.rb b/neutron/spec/classes/neutron_services_fwaas_spec.rb index 39fc01695..c876729f2 100644 --- a/neutron/spec/classes/neutron_services_fwaas_spec.rb +++ b/neutron/spec/classes/neutron_services_fwaas_spec.rb @@ -22,10 +22,11 @@ describe 'neutron::services::fwaas' do let :pre_condition do - "class { 'neutron': rabbit_password => 'passw0rd' }" + "class { 'neutron': rabbit_password => 'passw0rd' } + include ::neutron::agents::l3" end - let :default_facts do + let :test_facts do { :operatingsystem => 'default', :operatingsystemrelease => 'default' } @@ -36,9 +37,7 @@ end let :default_params do - { :driver => 'neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver', - :enabled => true, - :vpnaas_agent_package => false } + { :vpnaas_agent_package => false } end shared_examples_for 'neutron fwaas service plugin' do @@ -47,16 +46,18 @@ end it 'configures driver in fwaas_driver.ini' do - is_expected.to contain_neutron_fwaas_service_config('fwaas/driver').with_value('neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver') - is_expected.to contain_neutron_fwaas_service_config('fwaas/enabled').with_value('true') + is_expected.to contain_neutron_fwaas_service_config('fwaas/driver').with_value('') + is_expected.to contain_neutron_fwaas_service_config('fwaas/enabled').with_value('') end end context 'on Ubuntu platforms' do let :facts do - default_facts.merge( + 
@default_facts.merge(test_facts.merge( { :osfamily => 'Debian', - :operatingsystem => 'Ubuntu' }) + :operatingsystem => 'Ubuntu' + } + )) end it_configures 'neutron fwaas service plugin' @@ -72,9 +73,11 @@ context 'on Debian platforms without VPNaaS' do let :facts do - default_facts.merge( + @default_facts.merge(test_facts.merge( { :osfamily => 'Debian', - :operatingsystem => 'Debian' }) + :operatingsystem => 'Debian' + } + )) end it_configures 'neutron fwaas service plugin' @@ -90,7 +93,9 @@ context 'on Debian platforms with VPNaaS' do let :facts do - default_facts.merge({ :osfamily => 'Debian' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'Debian' + })) end let :params do @@ -109,7 +114,10 @@ context 'on Red Hat platforms' do let :facts do - default_facts.merge({ :osfamily => 'RedHat' }) + @default_facts.merge(test_facts.merge({ + :osfamily => 'RedHat', + :operatingsystemrelease => '7' + })) end it_configures 'neutron fwaas service plugin' diff --git a/neutron/spec/spec_helper.rb b/neutron/spec/spec_helper.rb index 3df4cede1..9bc7bcf96 100644 --- a/neutron/spec/spec_helper.rb +++ b/neutron/spec/spec_helper.rb @@ -5,6 +5,9 @@ RSpec.configure do |c| c.alias_it_should_behave_like_to :it_configures, 'configures' c.alias_it_should_behave_like_to :it_raises, 'raises' + c.before :each do + @default_facts = { :os_service_default => '' } + end end at_exit { RSpec::Puppet::Coverage.report! } diff --git a/nova/CHANGELOG.md b/nova/CHANGELOG.md index f415b3360..70592cd14 100644 --- a/nova/CHANGELOG.md +++ b/nova/CHANGELOG.md @@ -1,3 +1,63 @@ +##2015-11-25 - 7.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Liberty. + +####Backwards-incompatible changes +- change section name for AMQP qpid parameters +- change section name for AMQP rabbit parameters +- remove deprecated mysql_module +- do not manage python-greenlet anymore +- rabbitmq: do not manage rabbitmq service anymore +- remove openstackocci installation feature +- enable nova service by default + +####Features +- add tag to package and service resources +- add nova::db::sync +- add an ability to manage use_stderr parameter +- reflect provider change in puppet-openstacklib +- add nameservers (dns) parameters +- move os_region_name config option +- use auth_nova method to create nova network +- api: add default_floating_pool parameter +- db: Use postgresql lib class for psycopg package +- add support for RabbitMQ connection heartbeat +- move cinder_catalog_info to init +- don't add non-existent hosts to host aggregates +- make libvirt migration security configurable +- add region support to nova providers +- floating IP range support in Nova network +- rename neuton/url_timeout to neutron/timeout +- add upgrade_levels configuration options +- switch nova to leverage os_package_type fact +- use os_package_type for libvirt service name +- making instance_name_template configurable +- remove POSIX users, groups, and file modes +- allows the modification of the nova-api ports +- put all the logging related parameters to the logging class +- add kombu_reconnect_delay option +- update nova::db class to match other module pattern +- volume: allow to change catalog_info +- add config_drive_format option to nova_compute +- handle libvirt/cpu_model option +- add ability to set default baremetal filters + +####Bugfixes +- rely on autorequire for config resource ordering +- avoid empty notification driver +- fixed issue with rabbit_hosts parameter +- docfix: update default image_service param in doc + 
+####Maintenance +- fix rspec 3.x syntax +- acceptance: enable debug & verbosity for OpenStack logs +- initial msync run for all Puppet OpenStack modules +- try to use zuul-cloner to prepare fixtures +- remove class_parameter_defaults puppet-lint check +- acceptance: use common bits from puppet-openstack-integration +- spec: enable webmock connect to IPv4 link-local + ##2015-10-10 - 6.1.0 ###Summary diff --git a/nova/README.md b/nova/README.md index 6ebbc18d3..5f3354b01 100644 --- a/nova/README.md +++ b/nova/README.md @@ -1,7 +1,7 @@ nova ==== -6.1.0 - 2015.1 - Kilo +7.0.0 - 2015.2.0 - Liberty #### Table of Contents @@ -23,7 +23,7 @@ Module Description The nova module is a thorough attempt to make Puppet capable of managing the entirety of nova. This includes manifests to provision such things as keystone endpoints, RPC configurations specific to nova, and database connections. Types are shipped as part of the nova module to assist in manipulation of configuration files. -This module is tested in combination with other modules needed to build and leverage an entire Openstack software stack. These modules can be found, all pulled together in the [openstack module](https://github.com/stackforge/puppet-openstack). +This module is tested in combination with other modules needed to build and leverage an entire Openstack software stack. Setup ----- @@ -38,7 +38,7 @@ Setup ### Beginning with nova -To utilize the nova module's functionality you will need to declare multiple resources. The following is a modified excerpt from the [openstack module](https://github.com/stackforge/puppet-openstack). This is not an exhaustive list of all the components needed, we recommend you consult and understand the [openstack module](https://github.com/stackforge/puppet-openstack) and the [core openstack](http://docs.openstack.org) documentation. +To utilize the nova module's functionality you will need to declare multiple resources. This is not an exhaustive list of all the components needed, we recommend you consult and understand the [core openstack](http://docs.openstack.org) documentation. 
```puppet class { 'nova': diff --git a/nova/lib/puppet/provider/nova.rb b/nova/lib/puppet/provider/nova.rb index 06b90c5a8..1faa51bb7 100644 --- a/nova/lib/puppet/provider/nova.rb +++ b/nova/lib/puppet/provider/nova.rb @@ -39,8 +39,8 @@ def nova_credentials def self.get_nova_credentials #needed keys for authentication - auth_keys = ['auth_host', 'auth_port', 'auth_protocol', - 'admin_tenant_name', 'admin_user', 'admin_password'] + auth_keys = ['auth_uri', 'admin_tenant_name', 'admin_user', + 'admin_password'] conf = nova_conf if conf and conf['keystone_authtoken'] and auth_keys.all?{|k| !conf['keystone_authtoken'][k].nil?} @@ -59,7 +59,7 @@ def self.get_nova_credentials def self.get_auth_endpoint q = nova_credentials - "#{q['auth_protocol']}://#{q['auth_host']}:#{q['auth_port']}/v2.0/" + "#{q['auth_uri']}" end def self.auth_endpoint diff --git a/nova/lib/puppet/provider/nova_floating/nova_manage.rb b/nova/lib/puppet/provider/nova_floating/nova_manage.rb index 37a4a6257..53da6b775 100644 --- a/nova/lib/puppet/provider/nova_floating/nova_manage.rb +++ b/nova/lib/puppet/provider/nova_floating/nova_manage.rb @@ -1,5 +1,3 @@ -require 'netaddr' - Puppet::Type.type(:nova_floating).provide(:nova_manage) do desc "Manage nova floating" @@ -48,6 +46,7 @@ def destroy # Create range in cidr, including first and last ip # Nova will create this range, excluding network and broadcast IPs def mixed_range + require 'netaddr' range = [] NetAddr.merge(operate_range).each do |cidr| tmp_range = NetAddr::CIDR.create(cidr).enumerate @@ -74,6 +73,7 @@ def operate_range end def ip_range + require 'netaddr' ip_range = [] Array(@resource[:network]).each do |rng| ip = rng.split('-') diff --git a/nova/lib/puppet/provider/nova_network/nova.rb b/nova/lib/puppet/provider/nova_network/nova.rb new file mode 100644 index 000000000..64e194b9c --- /dev/null +++ b/nova/lib/puppet/provider/nova_network/nova.rb @@ -0,0 +1,43 @@ +require File.join(File.dirname(__FILE__), '..','..','..', 'puppet/provider/nova') + +Puppet::Type.type(:nova_network).provide(:nova, :parent => Puppet::Provider::Nova) do + + desc "Manage nova network" + + optional_commands :nova => 'nova' + + def create + optional_opts = [] + { + # this needs to be converted from a project name to an id + :project => '--project_id', + :dns1 => '--dns1', + :dns2 => '--dns2', + :gateway => '--gateway', + :bridge => '--bridge', + :vlan_start => '--vlan-start', + :allowed_start => '--allowed-start', + :allowed_end => '--allowed-end', + }.each do |param, opt| + if resource[param] + optional_opts.push(opt).push(resource[param]) + end + end + + opts = [resource[:label], "--fixed-range-v4", resource[:name]] + + auth_nova('network-create', opts + optional_opts) + end + + def exists? 
+ instances = auth_nova('network-list') + return instances.split('\n')[1..-1].detect do |n| + n =~ /(\S+)\s+(#{resource[:network]})\s+(\S+)/ + end + end + + def destroy + auth_nova("network-delete", resource[:network]) + end + +end diff --git a/nova/lib/puppet/provider/nova_network/nova_manage.rb b/nova/lib/puppet/provider/nova_network/nova_manage.rb deleted file mode 100644 index b89f61ef0..000000000 --- a/nova/lib/puppet/provider/nova_network/nova_manage.rb +++ /dev/null @@ -1,71 +0,0 @@ -require File.join(File.dirname(__FILE__), '..','..','..', - 'puppet/provider/nova') - -Puppet::Type.type(:nova_network).provide(:nova_manage, :parent => Puppet::Provider::Nova) do - - desc "Manage nova network" - - optional_commands :nova_manage => 'nova-manage', :nova => 'nova' - - # I need to setup caching and what-not to make this lookup performance not suck - def self.instances - begin - network_list = nova_manage("network", "list") - rescue Exception => e - if e.message =~ /No networks defined/ - return [] - else - raise(e) - end - end - network_list.split("\n")[1..-1].collect do |net| - if net =~ /^(\S+)\s+(\S+)/ - new(:name => $2 ) - end - end.compact - end - - def create - optional_opts = [] - { - # this needs to be converted from a project name to an id - :project => '--project_id', - :dns1 => '--dns1', - :dns2 => '--dns2', - :gateway => '--gateway', - :bridge => '--bridge', - :vlan_start => '--vlan-start', - :allowed_start => '--allowed-start', - :allowed_end => '--allowed-end', - }.each do |param, opt| - if resource[param] - optional_opts.push(opt).push(resource[param]) - end - end - - auth_nova('network-create', - resource[:label], - '--fixed-range-v4', - resource[:name], - optional_opts - ) - end - - def exists? - begin - network_list = nova_manage("network", "list") - return network_list.split("\n")[1..-1].detect do |n| - # TODO - this does not take the CIDR into accont. Does it matter? - n =~ /^(\S+)\s+(#{resource[:network].split('/').first})/ - end - rescue - return false - end - end - - - def destroy - nova_manage("network", "delete", resource[:network]) - end - -end diff --git a/nova/manifests/api.pp b/nova/manifests/api.pp index e438cd957..6f6edc460 100644 --- a/nova/manifests/api.pp +++ b/nova/manifests/api.pp @@ -217,20 +217,13 @@ $conductor_workers = undef, ) { + include ::nova::deps include ::nova::db include ::nova::params include ::nova::policy require ::keystone::python include ::cinder::client - Package<| title == 'nova-common' |> -> Class['nova::api'] - Package<| title == 'nova-common' |> -> Class['nova::policy'] - - Nova_paste_api_ini<| |> ~> Exec['post-nova_config'] - - Nova_paste_api_ini<| |> ~> Service['nova-api'] - Class['nova::policy'] ~> Service['nova-api'] - if $conductor_workers { warning('The conductor_workers parameter is deprecated and has no effect. 
Use workers parameter of nova::conductor class instead.') } diff --git a/nova/manifests/cells.pp b/nova/manifests/cells.pp index b1f3d9207..0f4cc5ebe 100644 --- a/nova/manifests/cells.pp +++ b/nova/manifests/cells.pp @@ -172,6 +172,7 @@ $weight_scale = '1.0' ) { + include ::nova::deps include ::nova::params case $cell_type { diff --git a/nova/manifests/cert.pp b/nova/manifests/cert.pp index 1d3a6ff17..11657cab3 100644 --- a/nova/manifests/cert.pp +++ b/nova/manifests/cert.pp @@ -22,6 +22,7 @@ $ensure_package = 'present' ) { + include ::nova::deps include ::nova::params nova::generic_service { 'cert': diff --git a/nova/manifests/client.pp b/nova/manifests/client.pp index 024dd300a..90152e671 100644 --- a/nova/manifests/client.pp +++ b/nova/manifests/client.pp @@ -11,10 +11,11 @@ class nova::client( $ensure = 'present' ) { + include ::nova::deps package { 'python-novaclient': ensure => $ensure, - tag => ['openstack'], + tag => ['openstack', 'nova-support-package'], } } diff --git a/nova/manifests/compute.pp b/nova/manifests/compute.pp index 8417d4a0d..6d1175a2a 100644 --- a/nova/manifests/compute.pp +++ b/nova/manifests/compute.pp @@ -150,6 +150,7 @@ $allow_resize_to_same_host = false, ) { + include ::nova::deps include ::nova::params nova_config { @@ -173,7 +174,7 @@ # Install bridge-utils if we use nova-network package { 'bridge-utils': ensure => present, - before => Nova::Generic_service['compute'], + tag => ['openstack', 'nova-support-package'], } } @@ -221,6 +222,7 @@ package { 'pm-utils': ensure => present, + tag => ['openstack', 'nova-support-package'], } nova_config { diff --git a/nova/manifests/compute/ironic.pp b/nova/manifests/compute/ironic.pp index ae5c8800f..ced522ca0 100644 --- a/nova/manifests/compute/ironic.pp +++ b/nova/manifests/compute/ironic.pp @@ -46,6 +46,8 @@ $compute_driver = 'ironic.IronicDriver' ) { + include ::nova::deps + if $admin_user { warning('The admin_user parameter is deprecated, use admin_username instead.') } diff --git a/nova/manifests/compute/libvirt.pp b/nova/manifests/compute/libvirt.pp index 34d17cce6..c4594fa3e 100644 --- a/nova/manifests/compute/libvirt.pp +++ b/nova/manifests/compute/libvirt.pp @@ -106,6 +106,7 @@ $compute_driver = 'libvirt.LibvirtDriver' ) inherits nova::params { + include ::nova::deps include ::nova::params Service['libvirt'] -> Service['nova-compute'] @@ -126,10 +127,8 @@ if($::osfamily == 'Debian') { package { "nova-compute-${libvirt_virt_type}": - ensure => present, - before => Package['nova-compute'], - require => Package['nova-common'], - tag => ['openstack'], + ensure => present, + tag => ['openstack', 'nova-package'], } } @@ -156,6 +155,7 @@ ensure => present, name => $::nova::params::libvirt_nwfilter_package_name, before => Service['libvirt'], + tag => ['openstack', 'nova-support-package'], } case $libvirt_virt_type { 'qemu': { @@ -172,6 +172,7 @@ package { 'libvirt': ensure => present, name => $libvirt_package_name_real, + tag => ['openstack', 'nova-support-package'], } service { 'libvirt' : @@ -179,7 +180,7 @@ enable => true, name => $libvirt_service_name, provider => $::nova::params::special_service_provider, - require => Package['libvirt'], + require => Anchor['nova::config::end'], } nova_config { diff --git a/nova/manifests/compute/neutron.pp b/nova/manifests/compute/neutron.pp index 91ee91cfc..43d6eeed9 100644 --- a/nova/manifests/compute/neutron.pp +++ b/nova/manifests/compute/neutron.pp @@ -19,6 +19,7 @@ $libvirt_vif_driver = 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver', $force_snat_range = '0.0.0.0/0', ) { + 
include ::nova::deps if $libvirt_vif_driver == 'nova.virt.libvirt.vif.LibvirtOpenVswitchDriver' { fail('nova.virt.libvirt.vif.LibvirtOpenVswitchDriver as vif_driver is removed from Icehouse') diff --git a/nova/manifests/compute/rbd.pp b/nova/manifests/compute/rbd.pp index 0d03f0cc7..874ecb2cb 100644 --- a/nova/manifests/compute/rbd.pp +++ b/nova/manifests/compute/rbd.pp @@ -65,6 +65,7 @@ $ephemeral_storage = true, ) { + include ::nova::deps include ::nova::params nova_config { @@ -78,7 +79,7 @@ file { '/etc/nova/secret.xml': content => template('nova/secret.xml-compute.erb'), - require => Class['::nova'] + require => Anchor['nova::config::begin'], } exec { 'get-or-set virsh secret': @@ -95,9 +96,9 @@ exec { 'set-secret-value virsh': command => "/usr/bin/virsh secret-set-value --secret ${libvirt_rbd_secret_uuid} --base64 ${libvirt_key}", unless => "/usr/bin/virsh secret-get-value ${libvirt_rbd_secret_uuid}", - require => Exec['get-or-set virsh secret'] + require => Exec['get-or-set virsh secret'], + before => Anchor['nova::config::end'], } - } if $ephemeral_storage { diff --git a/nova/manifests/compute/serial.pp b/nova/manifests/compute/serial.pp index f71db8546..3f5bb4e83 100644 --- a/nova/manifests/compute/serial.pp +++ b/nova/manifests/compute/serial.pp @@ -28,6 +28,7 @@ $proxyclient_address = '127.0.0.1', ) { + include ::nova::deps nova_config { 'serial_console/enabled': value => true; diff --git a/nova/manifests/compute/spice.pp b/nova/manifests/compute/spice.pp index b6cf09f0e..cdfc7397f 100644 --- a/nova/manifests/compute/spice.pp +++ b/nova/manifests/compute/spice.pp @@ -48,6 +48,8 @@ $proxy_path = '/spice_auto.html' ) { + include ::nova::deps + if $proxy_host { $html5proxy_base_url = "${proxy_protocol}://${proxy_host}:${proxy_port}${proxy_path}" nova_config { diff --git a/nova/manifests/compute/vmware.pp b/nova/manifests/compute/vmware.pp index 40b2d460d..e72f78046 100644 --- a/nova/manifests/compute/vmware.pp +++ b/nova/manifests/compute/vmware.pp @@ -60,6 +60,8 @@ $compute_driver = 'vmwareapi.VMwareVCDriver' ) { + include ::nova::deps + nova_config { 'DEFAULT/compute_driver': value => $compute_driver; 'VMWARE/host_ip': value => $host_ip; @@ -79,6 +81,7 @@ } package { 'python-suds': - ensure => present + ensure => present, + tag => ['openstack', 'nova-support-package'], } } diff --git a/nova/manifests/compute/xenserver.pp b/nova/manifests/compute/xenserver.pp index cebd6d23e..a57179a69 100644 --- a/nova/manifests/compute/xenserver.pp +++ b/nova/manifests/compute/xenserver.pp @@ -29,6 +29,8 @@ $xenapi_inject_image = undef, ) { + include ::nova::deps + if $xenapi_inject_image != undef { warning('The xenapi_inject_image parameter is deprecated and has no effect.') } @@ -44,7 +46,8 @@ package { 'xenapi': ensure => present, - provider => pip + provider => pip, + tag => ['openstack', 'nova-support-package'], } Package['python-pip'] -> Package['xenapi'] diff --git a/nova/manifests/conductor.pp b/nova/manifests/conductor.pp index c5a67c086..7a8c76549 100644 --- a/nova/manifests/conductor.pp +++ b/nova/manifests/conductor.pp @@ -32,6 +32,7 @@ $use_local = false, ) { + include ::nova::deps include ::nova::db include ::nova::params diff --git a/nova/manifests/config.pp b/nova/manifests/config.pp index e9d13ba2d..d138b3307 100644 --- a/nova/manifests/config.pp +++ b/nova/manifests/config.pp @@ -32,6 +32,8 @@ $nova_paste_api_ini = {}, ) { + include ::nova::deps + validate_hash($nova_config) validate_hash($nova_paste_api_ini) diff --git a/nova/manifests/consoleauth.pp 
b/nova/manifests/consoleauth.pp index d5764873e..f5a3c7beb 100644 --- a/nova/manifests/consoleauth.pp +++ b/nova/manifests/consoleauth.pp @@ -25,6 +25,7 @@ $ensure_package = 'present' ) { + include ::nova::deps include ::nova::params nova::generic_service { 'consoleauth': @@ -33,7 +34,6 @@ package_name => $::nova::params::consoleauth_package_name, service_name => $::nova::params::consoleauth_service_name, ensure_package => $ensure_package, - require => Package['nova-common'], } } diff --git a/nova/manifests/cron/archive_deleted_rows.pp b/nova/manifests/cron/archive_deleted_rows.pp index 9574977f9..ccebeca39 100644 --- a/nova/manifests/cron/archive_deleted_rows.pp +++ b/nova/manifests/cron/archive_deleted_rows.pp @@ -60,6 +60,8 @@ $destination = '/var/log/nova/nova-rowsflush.log' ) { + include ::nova::deps + cron { 'nova-manage db archive_deleted_rows': command => "nova-manage db archive_deleted_rows --max_rows ${max_rows} >>${destination} 2>&1", environment => 'PATH=/bin:/usr/bin:/usr/sbin SHELL=/bin/sh', @@ -69,6 +71,6 @@ monthday => $monthday, month => $month, weekday => $weekday, - require => Package['nova-common'], + require => Anchor['nova::dbsync::end'] } } diff --git a/nova/manifests/db.pp b/nova/manifests/db.pp index 5fb320137..07fc8191c 100644 --- a/nova/manifests/db.pp +++ b/nova/manifests/db.pp @@ -23,48 +23,49 @@ # # [*database_connection*] # (optional) Connection url to connect to nova database. -# Defaults to false. +# Defaults to $::os_service_default # # [*slave_connection*] # (optional) Connection url to connect to nova slave database (read-only). -# Defaults to false. +# Defaults to $::os_service_default # # [*database_idle_timeout*] # Timeout when db connections should be reaped. -# (Optional) Defaults to 3600. +# (Optional) Defaults to $::os_service_default # # [*database_min_pool_size*] # Minimum number of SQL connections to keep open in a pool. -# (Optional) Defaults to 1. +# (Optional) Defaults to $::os_service_default # # [*database_max_pool_size*] # Maximum number of SQL connections to keep open in a pool. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default # # [*database_max_retries*] # Maximum db connection retries during startup. # Setting -1 implies an infinite retry count. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default # # [*database_retry_interval*] # Interval between retries of opening a sql connection. -# (Optional) Defaults to 10. +# (Optional) Defaults to $::os_service_default # # [*database_max_overflow*] # If set, use this value for max_overflow with sqlalchemy. -# (Optional) Defaults to 20. 
+# (Optional) Defaults to $::os_service_default # class nova::db ( - $database_connection = false, - $slave_connection = false, - $database_idle_timeout = 3600, - $database_min_pool_size = 1, - $database_max_pool_size = 10, - $database_max_retries = 10, - $database_retry_interval = 10, - $database_max_overflow = 20, + $database_connection = $::os_service_default, + $slave_connection = $::os_service_default, + $database_idle_timeout = $::os_service_default, + $database_min_pool_size = $::os_service_default, + $database_max_pool_size = $::os_service_default, + $database_max_retries = $::os_service_default, + $database_retry_interval = $::os_service_default, + $database_max_overflow = $::os_service_default, ) { + include ::nova::deps include ::nova::params # NOTE(spredzy): In order to keep backward compatibility we rely on the pick function @@ -78,16 +79,20 @@ $database_retry_interval_real = pick($::nova::database_retry_interval, $database_retry_interval) $database_max_overflow_real = pick($::nova::database_max_overflow, $database_max_overflow) - if $database_connection_real { + if !is_service_default($database_connection_real) { validate_re($database_connection_real, - '(sqlite|mysql|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') + '^(sqlite|mysql(\+pymysql)?|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') case $database_connection_real { - /^mysql:\/\//: { - $backend_package = false + /^mysql(\+pymysql)?:\/\//: { require 'mysql::bindings' require 'mysql::bindings::python' + if $database_connection_real =~ /^mysql\+pymysql/ { + $backend_package = $::nova::params::pymysql_package_name + } else { + $backend_package = false + } } /^postgresql:\/\//: { $backend_package = false @@ -105,29 +110,21 @@ package {'nova-backend-package': ensure => present, name => $backend_package, - tag => 'openstack', + tag => ['openstack', 'nova-package'], } } nova_config { - 'database/connection': value => $database_connection_real, secret => true; - 'database/idle_timeout': value => $database_idle_timeout_real; - 'database/min_pool_size': value => $database_min_pool_size_real; - 'database/max_retries': value => $database_max_retries_real; - 'database/retry_interval': value => $database_retry_interval_real; - 'database/max_pool_size': value => $database_max_pool_size_real; - 'database/max_overflow': value => $database_max_overflow_real; + 'database/connection': value => $database_connection_real, secret => true; + 'database/idle_timeout': value => $database_idle_timeout_real; + 'database/min_pool_size': value => $database_min_pool_size_real; + 'database/max_retries': value => $database_max_retries_real; + 'database/retry_interval': value => $database_retry_interval_real; + 'database/max_pool_size': value => $database_max_pool_size_real; + 'database/max_overflow': value => $database_max_overflow_real; + 'database/slave_connection': value => $slave_connection_real, secret => true; } - if $slave_connection_real { - nova_config { - 'database/slave_connection': value => $slave_connection_real, secret => true; - } - } else { - nova_config { - 'database/slave_connection': ensure => absent; - } - } } } diff --git a/nova/manifests/db/mysql.pp b/nova/manifests/db/mysql.pp index cf4c55dce..ee4652c3d 100644 --- a/nova/manifests/db/mysql.pp +++ b/nova/manifests/db/mysql.pp @@ -31,10 +31,6 @@ # (optional) Additional hosts that are allowed to access this DB # Defaults to undef # -# [*cluster_id*] -# (optional) Deprecated. 
Does nothing -# Defaults to 'localzone' -# class nova::db::mysql( $password, $dbname = 'nova', @@ -45,6 +41,8 @@ $allowed_hosts = undef, ) { + include ::nova::deps + ::openstacklib::db::mysql { 'nova': user => $user, password_hash => mysql_password($password), @@ -55,5 +53,7 @@ allowed_hosts => $allowed_hosts, } - ::Openstacklib::Db::Mysql['nova'] ~> Exec<| title == 'nova-db-sync' |> + Anchor['nova::db::begin'] + ~> Class['nova::db::mysql'] + ~> Anchor['nova::db::end'] } diff --git a/nova/manifests/db/postgresql.pp b/nova/manifests/db/postgresql.pp index 8eeb44a67..c037886f1 100644 --- a/nova/manifests/db/postgresql.pp +++ b/nova/manifests/db/postgresql.pp @@ -32,6 +32,8 @@ $privileges = 'ALL', ) { + include ::nova::deps + ::openstacklib::db::postgresql { 'nova': password_hash => postgresql_password($user, $password), dbname => $dbname, @@ -40,7 +42,7 @@ privileges => $privileges, } - ::Openstacklib::Db::Postgresql['nova'] ~> Exec<| title == 'nova-db-sync' |> - ::Openstacklib::Db::Postgresql['nova'] -> Anchor<| title == 'nova-start' |> - + Anchor['nova::db::begin'] + ~> Class['nova::db::postgresql'] + ~> Anchor['nova::db::end'] } diff --git a/nova/manifests/db/sync.pp b/nova/manifests/db/sync.pp index 3e2ddb22c..24dd40184 100644 --- a/nova/manifests/db/sync.pp +++ b/nova/manifests/db/sync.pp @@ -1,22 +1,30 @@ # # Class to execute nova dbsync # -class nova::db::sync { +# ==Parameters +# +# [*extra_params*] +# (optional) String of extra command line parameters to append +# to the nova-manage db sync command. These will be inserted in +# the command line between 'nova-manage' and 'db sync'. +# Defaults to undef +# +class nova::db::sync( + $extra_params = undef, +) { + include ::nova::deps include ::nova::params - Package<| tag =='nova-package' |> ~> Exec['nova-db-sync'] - Exec['nova-db-sync'] ~> Service <| tag == 'nova-service' |> - - Nova_config <||> -> Exec['nova-db-sync'] - Nova_config <| title == 'database/connection' |> ~> Exec['nova-db-sync'] - - Exec<| title == 'post-nova_config' |> ~> Exec['nova-db-sync'] - exec { 'nova-db-sync': - command => '/usr/bin/nova-manage db sync', + command => "/usr/bin/nova-manage ${extra_params} db sync", refreshonly => true, logoutput => on_failure, + subscribe => [ + Anchor['nova::install::end'], + Anchor['nova::config::end'], + Anchor['nova::dbsync::begin'] + ], + notify => Anchor['nova::dbsync::end'], } - } diff --git a/nova/manifests/deps.pp b/nova/manifests/deps.pp new file mode 100644 index 000000000..78c1fa3bb --- /dev/null +++ b/nova/manifests/deps.pp @@ -0,0 +1,63 @@ +# == Class: nova::deps +# +# Nova anchors and dependency management +# +class nova::deps { + # Setup anchors for install, config and service phases of the module. These + # anchors allow external modules to hook the begin and end of any of these + # phases. Package or service management can also be replaced by ensuring the + # package is absent or turning off service management and having the + # replacement depend on the appropriate anchors. When applicable, end tags + # should be notified so that subscribers can determine if installation, + # config or service state changed and act on that if needed. 
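To make the anchor interface described above concrete, here is a minimal, illustrative sketch of an external profile class hooking these phases. The `profile::nova_prep` class and the prepare script are hypothetical and not part of this patch; only the anchor titles (`nova::install::end`, `nova::config::begin`) come from the `nova::deps` class introduced here.

```puppet
# Hypothetical external profile; only the Anchor titles come from nova::deps.
class profile::nova_prep {
  include ::nova::deps

  # Runs after the Nova packages are installed but before any nova_config
  # resource is applied.
  exec { 'prepare-nova-host':
    command => '/usr/local/bin/prepare-nova-host.sh',
    path    => ['/bin', '/usr/bin', '/usr/local/bin'],
    require => Anchor['nova::install::end'],
    before  => Anchor['nova::config::begin'],
  }
}
```

Because the end anchors are notified (`~>`) by the resources they fence, an external resource could likewise subscribe to `Anchor['nova::config::end']` to react only when Nova configuration actually changed, which is the intent stated in the class comment below.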
+ anchor { 'nova::install::begin': } + -> Package<| tag == 'nova-package'|> + ~> anchor { 'nova::install::end': } + -> anchor { 'nova::config::begin': } + -> Nova_config<||> + ~> anchor { 'nova::config::end': } + -> anchor { 'nova::db::begin': } + -> anchor { 'nova::db::end': } + ~> anchor { 'nova::dbsync::begin': } + -> anchor { 'nova::dbsync::end': } + ~> anchor { 'nova::service::begin': } + ~> Service<| tag == 'nova-service' |> + ~> anchor { 'nova::service::end': } + + # paste-api.ini config should occur in the config block also. + Anchor['nova::config::begin'] + -> Nova_paste_api_ini<||> + ~> Anchor['nova::config::end'] + + # Support packages need to be installed in the install phase, but we don't + # put them in the chain above because we don't want any false dependencies + # between packages with the nova-package tag and the nova-support-package + # tag. Note: the package resources here will have a 'before' relationship on + # the nova::install::end anchor. The line between nova-support-package and + # nova-package should be whether or not Nova services would need to be + # restarted if the package state was changed. + Anchor['nova::install::begin'] + -> Package<| tag == 'nova-support-package'|> + -> Anchor['nova::install::end'] + + # The following resources are managed by calling 'nova manage' and so the + # database must be provisioned before they can be applied. + Anchor['nova::dbsync::end'] + -> Nova_cells<||> + Anchor['nova::dbsync::end'] + -> Nova_floating<||> + Anchor['nova::dbsync::end'] + -> Nova_network<||> + + # Installation or config changes will always restart services. + Anchor['nova::install::end'] ~> Anchor['nova::service::begin'] + Anchor['nova::config::end'] ~> Anchor['nova::service::begin'] + + # This is here for backwards compatibility for any external users of the
This should be considered deprecated and removed in the + # N cycle + anchor { 'nova-start': + require => Anchor['nova::install::end'], + before => Anchor['nova::config::begin'], + } +} diff --git a/nova/manifests/generic_service.pp b/nova/manifests/generic_service.pp index c566acff1..1893ecb11 100644 --- a/nova/manifests/generic_service.pp +++ b/nova/manifests/generic_service.pp @@ -38,16 +38,10 @@ $ensure_package = 'present' ) { + include ::nova::deps include ::nova::params $nova_title = "nova-${name}" - # ensure that the service is only started after - # all nova config entries have been set - Exec['post-nova_config'] ~> Service<| title == $nova_title |> - # ensure that the service has only been started - # after the initial db sync - Exec<| title == 'nova-db-sync' |> ~> Service<| title == $nova_title |> - # I need to mark that ths package should be # installed before nova_config @@ -56,18 +50,9 @@ package { $nova_title: ensure => $ensure_package, name => $package_name, - notify => Service[$nova_title], tag => ['openstack', 'nova-package'], } } - - if $service_name { - # Do the dependency relationship here in case the package - # has been defined elsewhere, either as Package[$nova_title] - # or Package[$package_name] - Package<| title == $nova_title |> -> Service[$nova_title] - Package<| title == $package_name |> -> Service[$nova_title] - } } if $service_name { @@ -84,7 +69,6 @@ name => $service_name, enable => $enabled, hasstatus => true, - require => [Package['nova-common']], tag => 'nova-service', } } diff --git a/nova/manifests/init.pp b/nova/manifests/init.pp index 7afb0b036..d59d44a26 100644 --- a/nova/manifests/init.pp +++ b/nova/manifests/init.pp @@ -134,38 +134,6 @@ # (optional) Define queues as "durable" to rabbitmq. # Defaults to false # -# [*qpid_hostname*] -# (optional) Location of qpid server -# Defaults to 'localhost' -# -# [*qpid_port*] -# (optional) Port for qpid server -# Defaults to '5672' -# -# [*qpid_username*] -# (optional) Username to use when connecting to qpid -# Defaults to 'guest' -# -# [*qpid_password*] -# (optional) Password to use when connecting to qpid -# Defaults to 'guest' -# -# [*qpid_heartbeat*] -# (optional) Seconds between connection keepalive heartbeats -# Defaults to 60 -# -# [*qpid_protocol*] -# (optional) Transport to use, either 'tcp' or 'ssl'' -# Defaults to 'tcp' -# -# [*qpid_sasl_mechanisms*] -# (optional) Enable one or more SASL mechanisms -# Defaults to false -# -# [*qpid_tcp_nodelay*] -# (optional) Disable Nagle algorithm -# Defaults to true -# # [*auth_strategy*] # (optional) The strategy to use for auth: noauth or keystone. 
# Defaults to 'keystone' @@ -328,7 +296,40 @@ # (optional) Sets a version cap for messages sent to scheduler services # Defaults to undef # - +# DEPRECATED PARAMETERS +# +# [*qpid_hostname*] +# (optional) Location of qpid server +# Defaults to undef +# +# [*qpid_port*] +# (optional) Port for qpid server +# Defaults to undef +# +# [*qpid_username*] +# (optional) Username to use when connecting to qpid +# Defaults to undef +# +# [*qpid_password*] +# (optional) Password to use when connecting to qpid +# Defaults to undef +# +# [*qpid_heartbeat*] +# (optional) Seconds between connection keepalive heartbeats +# Defaults to undef +# +# [*qpid_protocol*] +# (optional) Transport to use, either 'tcp' or 'ssl'' +# Defaults to undef +# +# [*qpid_sasl_mechanisms*] +# (optional) Enable one or more SASL mechanisms +# Defaults to undef +# +# [*qpid_tcp_nodelay*] +# (optional) Disable Nagle algorithm +# Defaults to undef +# class nova( $ensure_package = 'present', $database_connection = undef, @@ -361,14 +362,6 @@ $kombu_ssl_version = 'TLSv1', $kombu_reconnect_delay = '1.0', $amqp_durable_queues = false, - $qpid_hostname = 'localhost', - $qpid_port = '5672', - $qpid_username = 'guest', - $qpid_password = 'guest', - $qpid_sasl_mechanisms = false, - $qpid_heartbeat = 60, - $qpid_protocol = 'tcp', - $qpid_tcp_nodelay = true, $auth_strategy = 'keystone', $service_down_time = 60, $log_dir = undef, @@ -405,8 +398,19 @@ $upgrade_level_intercell = undef, $upgrade_level_network = undef, $upgrade_level_scheduler = undef, + # DEPRECATED PARAMETERS + $qpid_hostname = undef, + $qpid_port = undef, + $qpid_username = undef, + $qpid_password = undef, + $qpid_sasl_mechanisms = undef, + $qpid_heartbeat = undef, + $qpid_protocol = undef, + $qpid_tcp_nodelay = undef, ) inherits nova::params { + include ::nova::deps + # maintain backward compatibility include ::nova::db include ::nova::logging @@ -444,7 +448,8 @@ mode => '0700', owner => 'nova', group => 'nova', - require => Package['nova-common'], + require => Anchor['nova::config::begin'], + before => Anchor['nova::config::end'], } if $nova_public_key { @@ -482,37 +487,27 @@ mode => '0600', owner => 'nova', group => 'nova', - require => [ File['/var/lib/nova/.ssh'], Package['nova-common'] ], + require => File['/var/lib/nova/.ssh'], } } } - Nova_config<| |> ~> Exec['post-nova_config'] - if $install_utilities { class { '::nova::utilities': } } - # this anchor is used to simplify the graph between nova components by - # allowing a resource to serve as a point where the configuration of nova begins - anchor { 'nova-start': } - package { 'python-nova': ensure => $ensure_package, - tag => ['openstack'], + tag => ['openstack', 'nova-package'], } package { 'nova-common': ensure => $ensure_package, name => $::nova::params::common_package_name, - require => [Package['python-nova'], Anchor['nova-start']], + require => Package['python-nova'], tag => ['openstack', 'nova-package'], } - file { '/etc/nova/nova.conf': - require => Package['nova-common'], - } - # used by debian/ubuntu in nova::network_bridge to refresh # interfaces based on /etc/network/interfaces exec { 'networking-refresh': @@ -608,30 +603,7 @@ # we keep "nova.openstack.common.rpc.impl_qpid" for backward compatibility # but since Icehouse, "qpid" is enough. 
if $rpc_backend == 'nova.openstack.common.rpc.impl_qpid' or $rpc_backend == 'qpid' { - nova_config { - 'oslo_messaging_qpid/qpid_hostname': value => $qpid_hostname; - 'oslo_messaging_qpid/qpid_port': value => $qpid_port; - 'oslo_messaging_qpid/qpid_username': value => $qpid_username; - 'oslo_messaging_qpid/qpid_password': value => $qpid_password, secret => true; - 'oslo_messaging_qpid/qpid_heartbeat': value => $qpid_heartbeat; - 'oslo_messaging_qpid/qpid_protocol': value => $qpid_protocol; - 'oslo_messaging_qpid/qpid_tcp_nodelay': value => $qpid_tcp_nodelay; - } - if is_array($qpid_sasl_mechanisms) { - nova_config { - 'oslo_messaging_qpid/qpid_sasl_mechanisms': value => join($qpid_sasl_mechanisms, ' '); - } - } - elsif $qpid_sasl_mechanisms { - nova_config { - 'oslo_messaging_qpid/qpid_sasl_mechanisms': value => $qpid_sasl_mechanisms; - } - } - else { - nova_config { - 'oslo_messaging_qpid/qpid_sasl_mechanisms': ensure => absent; - } - } + warning('Qpid driver is removed from Oslo.messaging in the Mitaka release') } # SSL Options @@ -802,10 +774,4 @@ nova_config { 'DEFAULT/os_region_name': ensure => absent; } - - exec { 'post-nova_config': - command => '/bin/echo "Nova config has changed"', - refreshonly => true, - } - } diff --git a/nova/manifests/keystone/auth.pp b/nova/manifests/keystone/auth.pp index 25f7d2f11..85953e83d 100644 --- a/nova/manifests/keystone/auth.pp +++ b/nova/manifests/keystone/auth.pp @@ -188,6 +188,8 @@ $internal_address = undef, ) { + include ::nova::deps + if $compute_version { warning('The compute_version parameter is deprecated, use public_url, internal_url and admin_url instead.') } diff --git a/nova/manifests/logging.pp b/nova/manifests/logging.pp index 4145cdd6d..f50443598 100644 --- a/nova/manifests/logging.pp +++ b/nova/manifests/logging.pp @@ -6,23 +6,23 @@ # # [*verbose*] # (Optional) Should the daemons log verbose messages -# Defaults to 'false' +# Defaults to $::os_service_default # # [*debug*] # (Optional) Should the daemons log debug messages -# Defaults to 'false' +# Defaults to $::os_service_default # # [*use_syslog*] # (Optional) Use syslog for logging. -# Defaults to 'false' +# Defaults to $::os_service_default # # [*use_stderr*] # (optional) Use stderr for logging -# Defaults to 'true' +# Defaults to $::os_service_default # # [*log_facility*] # (Optional) Syslog facility to receive log lines. -# Defaults to 'LOG_USER' +# Defaults to $::os_service_default # # [*log_dir*] # (optional) Directory where logs should be stored. @@ -31,34 +31,34 @@ # # [*logging_context_format_string*] # (optional) Format string to use for log messages with context. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [%(request_id)s %(user_identity)s] %(instance)s%(message)s' # # [*logging_default_format_string*] # (optional) Format string to use for log messages without context. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [-] %(instance)s%(message)s' # # [*logging_debug_format_suffix*] # (optional) Formatted data to append to log format when level is DEBUG. -# Defaults to undef. +# Defaults to $::os_service_default # Example: '%(funcName)s %(pathname)s:%(lineno)d' # # [*logging_exception_prefix*] # (optional) Prefix each line of exception output with this format. -# Defaults to undef. 
+# Defaults to $::os_service_default # Example: '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s' # # [*log_config_append*] # The name of an additional logging configuration file. -# Defaults to undef. +# Defaults to $::os_service_default # See https://docs.python.org/2/howto/logging.html # # [*default_log_levels*] # (optional) Hash of logger (keys) and level (values) pairs. -# Defaults to undef. +# Defaults to $::os_service_default # Example: # { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', # 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', @@ -67,49 +67,51 @@ # # [*publish_errors*] # (optional) Publish error events (boolean value). -# Defaults to undef (false if unconfigured). +# Defaults to $::os_service_default # # [*fatal_deprecations*] # (optional) Make deprecations fatal (boolean value) -# Defaults to undef (false if unconfigured). +# Defaults to $::os_service_default # # [*instance_format*] # (optional) If an instance is passed with the log message, format it # like this (string value). -# Defaults to undef. +# Defaults to $::os_service_default # Example: '[instance: %(uuid)s] ' # # [*instance_uuid_format*] # (optional) If an instance UUID is passed with the log message, format # it like this (string value). -# Defaults to undef. +# Defaults to $::os_service_default # Example: instance_uuid_format='[instance: %(uuid)s] ' - +# # [*log_date_format*] # (optional) Format string for %%(asctime)s in log records. -# Defaults to undef. +# Defaults to $::os_service_default # Example: 'Y-%m-%d %H:%M:%S' class nova::logging( - $use_syslog = false, - $use_stderr = true, - $log_facility = 'LOG_USER', + $use_syslog = $::os_service_default, + $use_stderr = $::os_service_default, + $log_facility = $::os_service_default, $log_dir = '/var/log/nova', - $verbose = false, - $debug = false, - $logging_context_format_string = undef, - $logging_default_format_string = undef, - $logging_debug_format_suffix = undef, - $logging_exception_prefix = undef, - $log_config_append = undef, - $default_log_levels = undef, - $publish_errors = undef, - $fatal_deprecations = undef, - $instance_format = undef, - $instance_uuid_format = undef, - $log_date_format = undef, + $verbose = $::os_service_default, + $debug = $::os_service_default, + $logging_context_format_string = $::os_service_default, + $logging_default_format_string = $::os_service_default, + $logging_debug_format_suffix = $::os_service_default, + $logging_exception_prefix = $::os_service_default, + $log_config_append = $::os_service_default, + $default_log_levels = $::os_service_default, + $publish_errors = $::os_service_default, + $fatal_deprecations = $::os_service_default, + $instance_format = $::os_service_default, + $instance_uuid_format = $::os_service_default, + $log_date_format = $::os_service_default, ) { + include ::nova::deps + # NOTE(spredzy): In order to keep backward compatibility we rely on the pick function # to use nova:: first then nova::logging::. 
$use_syslog_real = pick($::nova::use_syslog,$use_syslog) @@ -119,139 +121,31 @@ $verbose_real = pick($::nova::verbose,$verbose) $debug_real = pick($::nova::debug,$debug) - nova_config { - 'DEFAULT/debug' : value => $debug_real; - 'DEFAULT/verbose' : value => $verbose_real; - 'DEFAULT/use_stderr' : value => $use_stderr_real; - 'DEFAULT/use_syslog' : value => $use_syslog_real; - 'DEFAULT/log_dir' : value => $log_dir_real; - 'DEFAULT/syslog_log_facility': value => $log_facility_real; + if is_service_default($default_log_levels) { + $default_log_levels_real = $default_log_levels + } else { + $default_log_levels_real = join(sort(join_keys_to_values($default_log_levels, '=')), ',') } - if $logging_context_format_string { - nova_config { - 'DEFAULT/logging_context_format_string' : - value => $logging_context_format_string; - } - } - else { - nova_config { - 'DEFAULT/logging_context_format_string' : ensure => absent; - } - } - - if $logging_default_format_string { - nova_config { - 'DEFAULT/logging_default_format_string' : - value => $logging_default_format_string; - } - } - else { - nova_config { - 'DEFAULT/logging_default_format_string' : ensure => absent; - } - } - - if $logging_debug_format_suffix { - nova_config { - 'DEFAULT/logging_debug_format_suffix' : - value => $logging_debug_format_suffix; - } - } - else { - nova_config { - 'DEFAULT/logging_debug_format_suffix' : ensure => absent; - } - } - - if $logging_exception_prefix { - nova_config { - 'DEFAULT/logging_exception_prefix' : value => $logging_exception_prefix; - } - } - else { - nova_config { - 'DEFAULT/logging_exception_prefix' : ensure => absent; - } - } - - if $log_config_append { - nova_config { - 'DEFAULT/log_config_append' : value => $log_config_append; - } - } - else { - nova_config { - 'DEFAULT/log_config_append' : ensure => absent; - } - } - - if $default_log_levels { - nova_config { - 'DEFAULT/default_log_levels' : - value => join(sort(join_keys_to_values($default_log_levels, '=')), ','); - } - } - else { - nova_config { - 'DEFAULT/default_log_levels' : ensure => absent; - } - } - - if $publish_errors { - nova_config { - 'DEFAULT/publish_errors' : value => $publish_errors; - } - } - else { - nova_config { - 'DEFAULT/publish_errors' : ensure => absent; - } - } - - if $fatal_deprecations { - nova_config { - 'DEFAULT/fatal_deprecations' : value => $fatal_deprecations; - } - } - else { - nova_config { - 'DEFAULT/fatal_deprecations' : ensure => absent; - } - } - - if $instance_format { - nova_config { - 'DEFAULT/instance_format' : value => $instance_format; - } - } - else { - nova_config { - 'DEFAULT/instance_format' : ensure => absent; - } - } - - if $instance_uuid_format { - nova_config { - 'DEFAULT/instance_uuid_format' : value => $instance_uuid_format; - } - } - else { - nova_config { - 'DEFAULT/instance_uuid_format' : ensure => absent; - } - } - - if $log_date_format { - nova_config { - 'DEFAULT/log_date_format' : value => $log_date_format; - } - } - else { - nova_config { - 'DEFAULT/log_date_format' : ensure => absent; - } - } + nova_config { + 'DEFAULT/debug': value => $debug_real; + 'DEFAULT/verbose': value => $verbose_real; + 'DEFAULT/use_stderr': value => $use_stderr_real; + 'DEFAULT/use_syslog': value => $use_syslog_real; + 'DEFAULT/log_dir': value => $log_dir_real; + 'DEFAULT/syslog_log_facility': value => $log_facility_real; + 'DEFAULT/logging_context_format_string': value => $logging_context_format_string; + 'DEFAULT/logging_default_format_string': value => $logging_default_format_string; + 
'DEFAULT/logging_debug_format_suffix': value => $logging_debug_format_suffix; + 'DEFAULT/logging_exception_prefix': value => $logging_exception_prefix; + 'DEFAULT/log_config_append': value => $log_config_append; + 'DEFAULT/default_log_levels': value => $default_log_levels_real; + 'DEFAULT/publish_errors': value => $publish_errors; + 'DEFAULT/fatal_deprecations': value => $fatal_deprecations; + 'DEFAULT/instance_format': value => $instance_format; + 'DEFAULT/instance_uuid_format': value => $instance_uuid_format; + 'DEFAULT/log_date_format': value => $log_date_format; + } } diff --git a/nova/manifests/manage/cells.pp b/nova/manifests/manage/cells.pp index b51455275..c2bad4a95 100644 --- a/nova/manifests/manage/cells.pp +++ b/nova/manifests/manage/cells.pp @@ -71,8 +71,7 @@ $weight_scale = '1.0' ) { - File['/etc/nova/nova.conf'] -> Nova_cells[$name] - Exec<| title == 'nova-db-sync' |> -> Nova_cells[$name] + include ::nova::deps nova_cells { $name: ensure => present, diff --git a/nova/manifests/manage/floating.pp b/nova/manifests/manage/floating.pp index 5268430cd..a9421b0f4 100644 --- a/nova/manifests/manage/floating.pp +++ b/nova/manifests/manage/floating.pp @@ -7,13 +7,11 @@ # define nova::manage::floating ( $network ) { - File['/etc/nova/nova.conf'] -> Nova_floating[$name] - Exec<| title == 'nova-db-sync' |> -> Nova_floating[$name] + include ::nova::deps nova_floating { $name: ensure => present, network => $network, provider => 'nova_manage', } - } diff --git a/nova/manifests/manage/network.pp b/nova/manifests/manage/network.pp index 4f3afac3b..cb66686b3 100644 --- a/nova/manifests/manage/network.pp +++ b/nova/manifests/manage/network.pp @@ -52,8 +52,7 @@ $dns2 = undef ) { - File['/etc/nova/nova.conf'] -> Nova_network[$name] - Exec<| title == 'nova-db-sync' |> -> Nova_network[$name] + include ::nova::deps nova_network { $name: ensure => present, diff --git a/nova/manifests/migration/libvirt.pp b/nova/manifests/migration/libvirt.pp index ff9e6d27c..e47c8c04b 100644 --- a/nova/manifests/migration/libvirt.pp +++ b/nova/manifests/migration/libvirt.pp @@ -27,6 +27,9 @@ $live_migration_flag = undef, $block_migration_flag = undef, ){ + + include ::nova::deps + if $use_tls { $listen_tls = '1' $listen_tcp = '0' @@ -52,89 +55,90 @@ validate_re($auth, [ '^sasl$', '^none$' ], 'Valid options for auth are none and sasl.') - Package['libvirt'] -> File_line<| path == '/etc/libvirt/libvirtd.conf' |> + Anchor['nova::config::begin'] + -> File_line<| tag == 'libvirt-file_line'|> + -> Anchor['nova::config::end'] + + File_line<| tag == 'libvirt-file_line' |> + ~> Service['libvirt'] case $::osfamily { 'RedHat': { file_line { '/etc/libvirt/libvirtd.conf listen_tls': - path => '/etc/libvirt/libvirtd.conf', - line => "listen_tls = ${listen_tls}", - match => 'listen_tls =', - notify => Service['libvirt'], + path => '/etc/libvirt/libvirtd.conf', + line => "listen_tls = ${listen_tls}", + match => 'listen_tls =', + tag => 'libvirt-file_line', } file_line { '/etc/libvirt/libvirtd.conf listen_tcp': - path => '/etc/libvirt/libvirtd.conf', - line => "listen_tcp = ${listen_tcp}", - match => 'listen_tcp =', - notify => Service['libvirt'], + path => '/etc/libvirt/libvirtd.conf', + line => "listen_tcp = ${listen_tcp}", + match => 'listen_tcp =', + tag => 'libvirt-file_line', } if $use_tls { file_line { '/etc/libvirt/libvirtd.conf auth_tls': - path => '/etc/libvirt/libvirtd.conf', - line => "auth_tls = \"${auth}\"", - match => 'auth_tls =', - notify => Service['libvirt'], + path => '/etc/libvirt/libvirtd.conf', + line => 
"auth_tls = \"${auth}\"", + match => 'auth_tls =', + tag => 'libvirt-file_line', } } else { file_line { '/etc/libvirt/libvirtd.conf auth_tcp': - path => '/etc/libvirt/libvirtd.conf', - line => "auth_tcp = \"${auth}\"", - match => 'auth_tcp =', - notify => Service['libvirt'], + path => '/etc/libvirt/libvirtd.conf', + line => "auth_tcp = \"${auth}\"", + match => 'auth_tcp =', + tag => 'libvirt-file_line', } } file_line { '/etc/sysconfig/libvirtd libvirtd args': - path => '/etc/sysconfig/libvirtd', - line => 'LIBVIRTD_ARGS="--listen"', - match => 'LIBVIRTD_ARGS=', - notify => Service['libvirt'], + path => '/etc/sysconfig/libvirtd', + line => 'LIBVIRTD_ARGS="--listen"', + match => 'LIBVIRTD_ARGS=', + tag => 'libvirt-file_line', } - - Package['libvirt'] -> File_line<| path == '/etc/sysconfig/libvirtd' |> } 'Debian': { file_line { '/etc/libvirt/libvirtd.conf listen_tls': - path => '/etc/libvirt/libvirtd.conf', - line => "listen_tls = ${listen_tls}", - match => 'listen_tls =', - notify => Service['libvirt'], + path => '/etc/libvirt/libvirtd.conf', + line => "listen_tls = ${listen_tls}", + match => 'listen_tls =', + tag => 'libvirt-file_line', } file_line { '/etc/libvirt/libvirtd.conf listen_tcp': - path => '/etc/libvirt/libvirtd.conf', - line => "listen_tcp = ${listen_tcp}", - match => 'listen_tcp =', - notify => Service['libvirt'], + path => '/etc/libvirt/libvirtd.conf', + line => "listen_tcp = ${listen_tcp}", + match => 'listen_tcp =', + tag => 'libvirt-file_line', } if $use_tls { file_line { '/etc/libvirt/libvirtd.conf auth_tls': - path => '/etc/libvirt/libvirtd.conf', - line => "auth_tls = \"${auth}\"", - match => 'auth_tls =', - notify => Service['libvirt'], + path => '/etc/libvirt/libvirtd.conf', + line => "auth_tls = \"${auth}\"", + match => 'auth_tls =', + tag => 'libvirt-file_line', } } else { file_line { '/etc/libvirt/libvirtd.conf auth_tcp': - path => '/etc/libvirt/libvirtd.conf', - line => "auth_tcp = \"${auth}\"", - match => 'auth_tcp =', - notify => Service['libvirt'], + path => '/etc/libvirt/libvirtd.conf', + line => "auth_tcp = \"${auth}\"", + match => 'auth_tcp =', + tag => 'libvirt-file_line', } } file_line { "/etc/default/${::nova::compute::libvirt::libvirt_service_name} libvirtd opts": - path => "/etc/default/${::nova::compute::libvirt::libvirt_service_name}", - line => 'libvirtd_opts="-d -l"', - match => 'libvirtd_opts=', - notify => Service['libvirt'], + path => "/etc/default/${::nova::compute::libvirt::libvirt_service_name}", + line => 'libvirtd_opts="-d -l"', + match => 'libvirtd_opts=', + tag => 'libvirt-file_line', } - - Package['libvirt'] -> File_line<| path == "/etc/default/${::nova::compute::libvirt::libvirt_service_name}" |> } default: { diff --git a/nova/manifests/network.pp b/nova/manifests/network.pp index a92685899..16e891779 100644 --- a/nova/manifests/network.pp +++ b/nova/manifests/network.pp @@ -105,6 +105,7 @@ $auto_assign_floating_ip = false, ) { + include ::nova::deps include ::nova::params # forward all ipv4 traffic @@ -141,22 +142,28 @@ ensure_package => $ensure_package, before => Exec['networking-refresh'] } - } - if $create_networks { - nova::manage::network { 'nova-vm-net': - network => $fixed_range, - num_networks => $num_networks, - network_size => $network_size, - vlan_start => $vlan_start, - allowed_start => $allowed_start, - allowed_end => $allowed_end, - dns1 => $dns1, - dns2 => $dns2, - } - if $floating_range { - nova::manage::floating { 'nova-vm-floating': - network => $floating_range, + # because nova_network provider uses nova client, so it 
assumes + # that the nova-network service is already running + if $create_networks { + if $enabled { + nova::manage::network { 'nova-vm-net': + network => $fixed_range, + num_networks => $num_networks, + network_size => $network_size, + vlan_start => $vlan_start, + allowed_start => $allowed_start, + allowed_end => $allowed_end, + dns1 => $dns1, + dns2 => $dns2, + } + if $floating_range { + nova::manage::floating { 'nova-vm-floating': + network => $floating_range, + } + } + } else { + warning('Cannot create networks when the nova-network service is disabled.') } } } diff --git a/nova/manifests/network/bridge.pp b/nova/manifests/network/bridge.pp index 885ee1b93..f01b17221 100644 --- a/nova/manifests/network/bridge.pp +++ b/nova/manifests/network/bridge.pp @@ -13,6 +13,7 @@ $ip, $netmask = '255.255.255.0' ) { + include ::nova::deps case $::osfamily { diff --git a/nova/manifests/network/flat.pp b/nova/manifests/network/flat.pp index 6bbebdc01..ad2870e14 100644 --- a/nova/manifests/network/flat.pp +++ b/nova/manifests/network/flat.pp @@ -26,6 +26,8 @@ $flat_network_bridge = 'br100' ) { + include ::nova::deps + if $public_interface { nova_config { 'DEFAULT/public_interface': value => $public_interface } } diff --git a/nova/manifests/network/flatdhcp.pp b/nova/manifests/network/flatdhcp.pp index 76869007f..73d89d76a 100644 --- a/nova/manifests/network/flatdhcp.pp +++ b/nova/manifests/network/flatdhcp.pp @@ -51,8 +51,13 @@ $dhcpbridge_flagfile = '/etc/nova/nova.conf' ) { + include ::nova::deps + if $::osfamily == 'RedHat' and $::operatingsystem != 'Fedora' { - package { 'dnsmasq-utils': ensure => present } + package { 'dnsmasq-utils': + ensure => present, + tag => ['openstack', 'nova-support-package'], + } } if $public_interface { diff --git a/nova/manifests/network/neutron.pp b/nova/manifests/network/neutron.pp index 031b7f4e7..6dfed9717 100644 --- a/nova/manifests/network/neutron.pp +++ b/nova/manifests/network/neutron.pp @@ -112,6 +112,8 @@ $dhcp_domain = 'novalocal', ) { + include ::nova::deps + nova_config { 'DEFAULT/dhcp_domain': value => $dhcp_domain; 'DEFAULT/firewall_driver': value => $firewall_driver; diff --git a/nova/manifests/network/vlan.pp b/nova/manifests/network/vlan.pp index 2328488a4..f3261b8b8 100644 --- a/nova/manifests/network/vlan.pp +++ b/nova/manifests/network/vlan.pp @@ -45,8 +45,13 @@ $dhcpbridge_flagfile = '/etc/nova/nova.conf' ) { + include ::nova::deps + if $::osfamily == 'RedHat' and $::operatingsystem != 'Fedora' { - package { 'dnsmasq-utils': ensure => present } + package { 'dnsmasq-utils': + ensure => present, + tag => ['openstack', 'nova-support-package'], + } } if $public_interface { diff --git a/nova/manifests/objectstore.pp b/nova/manifests/objectstore.pp index 78077688d..03a46731a 100644 --- a/nova/manifests/objectstore.pp +++ b/nova/manifests/objectstore.pp @@ -27,6 +27,7 @@ $bind_address = '0.0.0.0' ) { + include ::nova::deps include ::nova::params nova::generic_service { 'objectstore': @@ -35,7 +36,6 @@ package_name => $::nova::params::objectstore_package_name, service_name => $::nova::params::objectstore_service_name, ensure_package => $ensure_package, - require => Package['nova-common'], } nova_config { diff --git a/nova/manifests/params.pp b/nova/manifests/params.pp index 6c9628cd0..df14e7e8b 100644 --- a/nova/manifests/params.pp +++ b/nova/manifests/params.pp @@ -27,6 +27,7 @@ $serialproxy_package_name = 'openstack-nova-serialproxy' $spicehtml5proxy_package_name = 'openstack-nova-console' $sqlite_package_name = undef + $pymysql_package_name = undef #
service names $api_service_name = 'openstack-nova-api' $cells_service_name = 'openstack-nova-cells' @@ -82,6 +83,7 @@ $tgt_package_name = 'tgt' $serialproxy_package_name = 'nova-serialproxy' $sqlite_package_name = 'python-pysqlite2' + $pymysql_package_name = 'python-pymysql' # service names $api_service_name = 'nova-api' $cells_service_name = 'nova-cells' diff --git a/nova/manifests/policy.pp b/nova/manifests/policy.pp index 40934028c..c82ad6253 100644 --- a/nova/manifests/policy.pp +++ b/nova/manifests/policy.pp @@ -30,7 +30,11 @@ validate_hash($policies) - $policy_defaults = { 'file_path' => $policy_path } + $policy_defaults = { + 'file_path' => $policy_path, + 'require' => Anchor['nova::config::begin'], + 'notify' => Anchor['nova::config::end'], + } create_resources('openstacklib::policy::base', $policies, $policy_defaults) diff --git a/nova/manifests/qpid.pp b/nova/manifests/qpid.pp index 4aaae57bb..8e5d78403 100644 --- a/nova/manifests/qpid.pp +++ b/nova/manifests/qpid.pp @@ -1,6 +1,6 @@ # == Class: nova::qpid # -# Class for installing qpid server for nova +# Deprecated class for installing qpid server for nova # # === Parameters: # @@ -25,33 +25,12 @@ # Defaults to 'OPENSTACK' # class nova::qpid( - $enabled = true, - $user = 'guest', - $password = 'guest', - $file = '/var/lib/qpidd/qpidd.sasldb', - $realm = 'OPENSTACK' + $enabled = undef, + $user = undef, + $password = undef, + $file = undef, + $realm = undef ) { - # only configure nova after the queue is up - Class['qpid::server'] -> Package<| title == 'nova-common' |> - - if ($enabled) { - $service_ensure = 'running' - - qpid_user { $user: - password => $password, - file => $file, - realm => $realm, - provider => 'saslpasswd2', - require => Class['qpid::server'], - } - - } else { - $service_ensure = 'stopped' - } - - class { '::qpid::server': - service_ensure => $service_ensure - } - + warning('Qpid driver is removed from Oslo.messaging in the Mitaka release') } diff --git a/nova/manifests/quota.pp b/nova/manifests/quota.pp index a9f7fa4d9..ec623b6ec 100644 --- a/nova/manifests/quota.pp +++ b/nova/manifests/quota.pp @@ -76,6 +76,14 @@ # (optional) Number of key pairs # Defaults to 100 # +# [*quota_server_groups*] +# (optional) Number of server groups per project +# Defaults to 10 +# +# [*quota_server_group_members*] +# (optional) Number of servers per server group +# Defaults to 10 +# # [*reservation_expire*] # (optional) Time until reservations expire in seconds # Defaults to 86400 @@ -105,6 +113,8 @@ $quota_security_groups = 10, $quota_security_group_rules = 20, $quota_key_pairs = 100, + $quota_server_groups = 10, + $quota_server_group_members = 10, $reservation_expire = 86400, $until_refresh = 0, $max_age = 0, @@ -117,6 +127,7 @@ $quota_max_injected_file_content_bytes = undef, $quota_max_injected_file_path_bytes = undef ) { + include ::nova::deps if $quota_volumes { warning('The quota_volumes parameter is deprecated and has no effect.') @@ -164,6 +175,8 @@ 'DEFAULT/quota_security_groups': value => $quota_security_groups; 'DEFAULT/quota_security_group_rules': value => $quota_security_group_rules; 'DEFAULT/quota_key_pairs': value => $quota_key_pairs; + 'DEFAULT/quota_server_groups': value => $quota_server_groups; + 'DEFAULT/quota_server_group_members': value => $quota_server_group_members; 'DEFAULT/reservation_expire': value => $reservation_expire; 'DEFAULT/until_refresh': value => $until_refresh; 'DEFAULT/max_age': value => $max_age; diff --git a/nova/manifests/rabbitmq.pp b/nova/manifests/rabbitmq.pp index 2b53013f0..f6e291b44 
100644 --- a/nova/manifests/rabbitmq.pp +++ b/nova/manifests/rabbitmq.pp @@ -43,6 +43,8 @@ $port ='5672', ) { + include ::nova::deps + if ($enabled) { if $userid == 'guest' { $delete_guest_user = false @@ -59,10 +61,13 @@ write_permission => '.*', read_permission => '.*', provider => 'rabbitmqctl', - }->Anchor<| title == 'nova-start' |> + } } rabbitmq_vhost { $virtual_host: provider => 'rabbitmqctl', } } + + # Only start Nova after the queue is up + Class['nova::rabbitmq'] -> Anchor['nova::service::end'] } diff --git a/nova/manifests/scheduler.pp b/nova/manifests/scheduler.pp index 329eca0c3..64f66006d 100644 --- a/nova/manifests/scheduler.pp +++ b/nova/manifests/scheduler.pp @@ -27,6 +27,7 @@ $scheduler_driver = 'nova.scheduler.filter_scheduler.FilterScheduler', ) { + include ::nova::deps include ::nova::db include ::nova::params @@ -41,7 +42,4 @@ nova_config { 'DEFAULT/scheduler_driver': value => $scheduler_driver; } - - Nova_config['DEFAULT/scheduler_driver'] ~> Service <| title == 'nova-scheduler' |> - } diff --git a/nova/manifests/scheduler/filter.pp b/nova/manifests/scheduler/filter.pp index 03338d56b..061ee4f7d 100644 --- a/nova/manifests/scheduler/filter.pp +++ b/nova/manifests/scheduler/filter.pp @@ -82,6 +82,8 @@ $scheduler_use_baremetal_filters = false, ) { + include ::nova::deps + nova_config { 'DEFAULT/scheduler_host_manager': value => $scheduler_host_manager; 'DEFAULT/scheduler_max_attempts': value => $scheduler_max_attempts; diff --git a/nova/manifests/serialproxy.pp b/nova/manifests/serialproxy.pp index 7e237ddc6..5df372876 100644 --- a/nova/manifests/serialproxy.pp +++ b/nova/manifests/serialproxy.pp @@ -32,6 +32,7 @@ $ensure_package = 'present' ) { + include ::nova::deps include ::nova::params nova_config { diff --git a/nova/manifests/spicehtml5proxy.pp b/nova/manifests/spicehtml5proxy.pp index 6960a25cc..0156e5d16 100644 --- a/nova/manifests/spicehtml5proxy.pp +++ b/nova/manifests/spicehtml5proxy.pp @@ -36,6 +36,7 @@ $ensure_package = 'present' ) { + include ::nova::deps include ::nova::params nova_config { diff --git a/nova/manifests/vncproxy.pp b/nova/manifests/vncproxy.pp index d70e33225..161253963 100644 --- a/nova/manifests/vncproxy.pp +++ b/nova/manifests/vncproxy.pp @@ -43,6 +43,7 @@ $ensure_package = 'present' ) { + include ::nova::deps include ::nova::params # See http://nova.openstack.org/runnova/vncconsole.html for more details. 
@@ -58,6 +59,7 @@ package { 'python-numpy': ensure => present, name => $::nova::params::numpy_package_name, + tag => ['openstack', 'nova-support-package'], } } nova::generic_service { 'vncproxy': diff --git a/nova/manifests/vncproxy/common.pp b/nova/manifests/vncproxy/common.pp index 15b46330d..bf3bc1a05 100644 --- a/nova/manifests/vncproxy/common.pp +++ b/nova/manifests/vncproxy/common.pp @@ -23,6 +23,8 @@ $vncproxy_path = undef, ) { + include ::nova::deps + $vncproxy_host_real = pick( $vncproxy_host, $::nova::compute::vncproxy_host, diff --git a/nova/metadata.json b/nova/metadata.json index 0850cc0cd..97e9f4d81 100644 --- a/nova/metadata.json +++ b/nova/metadata.json @@ -1,6 +1,6 @@ { "name": "openstack-nova", - "version": "6.1.0", + "version": "7.0.0", "author": "Puppet Labs and OpenStack Contributors", "summary": "Puppet module for OpenStack Nova", "license": "Apache-2.0", @@ -33,12 +33,12 @@ "dependencies": [ { "name": "dprince/qpid", "version_requirement": ">=1.0.0 <2.0.0" }, { "name": "duritong/sysctl", "version_requirement": ">=0.0.1 <1.0.0" }, - { "name": "openstack/cinder", "version_requirement": ">=6.0.0 <7.0.0" }, - { "name": "openstack/glance", "version_requirement": ">=6.0.0 <7.0.0" }, + { "name": "openstack/cinder", "version_requirement": ">=7.0.0 <8.0.0" }, + { "name": "openstack/glance", "version_requirement": ">=7.0.0 <8.0.0" }, { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=7.0.0 <8.0.0" }, { "name": "puppetlabs/rabbitmq", "version_requirement": ">=2.0.2 <6.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=7.0.0 <8.0.0" } ] } diff --git a/nova/spec/acceptance/basic_nova_spec.rb b/nova/spec/acceptance/basic_nova_spec.rb index 0465bdfa9..644889852 100644 --- a/nova/spec/acceptance/basic_nova_spec.rb +++ b/nova/spec/acceptance/basic_nova_spec.rb @@ -29,7 +29,7 @@ # Nova resources class { '::nova': - database_connection => 'mysql://nova:a_big_secret@127.0.0.1/nova?charset=utf8', + database_connection => 'mysql+pymysql://nova:a_big_secret@127.0.0.1/nova?charset=utf8', rabbit_userid => 'nova', rabbit_password => 'an_even_bigger_secret', image_service => 'nova.image.glance.GlanceImageService', diff --git a/nova/spec/classes/nova_api_spec.rb b/nova/spec/classes/nova_api_spec.rb index 11c7e7379..07c04e678 100644 --- a/nova/spec/classes/nova_api_spec.rb +++ b/nova/spec/classes/nova_api_spec.rb @@ -11,7 +11,7 @@ end let :facts do - { :processorcount => 5 } + @default_facts.merge({ :processorcount => 5 }) end shared_examples 'nova-api' do @@ -31,7 +31,8 @@ :ensure => 'present', :tag => ['openstack', 'nova-package'], ) - is_expected.to contain_package('nova-api').that_notifies('Service[nova-api]') + is_expected.to contain_package('nova-api').that_requires('Anchor[nova::install::begin]') + is_expected.to contain_package('nova-api').that_notifies('Anchor[nova::install::end]') is_expected.to_not contain_exec('validate_nova_api') end @@ -256,7 +257,7 @@ it { is_expected.to_not contain_nova_config('database/connection') } it { is_expected.to_not contain_nova_config('database/slave_connection') } - it { is_expected.to_not contain_nova_config('database/idle_timeout').with_value('3600') } + it { is_expected.to_not 
contain_nova_config('database/idle_timeout').with_value('') } end context 'with overridden database parameters' do diff --git a/nova/spec/classes/nova_cells_spec.rb b/nova/spec/classes/nova_cells_spec.rb index a7307f6cd..b0d65037c 100644 --- a/nova/spec/classes/nova_cells_spec.rb +++ b/nova/spec/classes/nova_cells_spec.rb @@ -143,7 +143,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -160,7 +160,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/nova/spec/classes/nova_cert_spec.rb b/nova/spec/classes/nova_cert_spec.rb index 58998ee2a..71bc4ca5a 100644 --- a/nova/spec/classes/nova_cert_spec.rb +++ b/nova/spec/classes/nova_cert_spec.rb @@ -8,7 +8,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_behaves_like 'generic nova service', { @@ -19,7 +19,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_behaves_like 'generic nova service', { diff --git a/nova/spec/classes/nova_client_spec.rb b/nova/spec/classes/nova_client_spec.rb index 101cfe730..2db0be9b6 100644 --- a/nova/spec/classes/nova_client_spec.rb +++ b/nova/spec/classes/nova_client_spec.rb @@ -6,7 +6,7 @@ it { is_expected.to contain_package('python-novaclient').with( :ensure => 'present', - :tag => ['openstack'] + :tag => ['openstack', 'nova-support-package'] ) } end diff --git a/nova/spec/classes/nova_compute_libvirt_spec.rb b/nova/spec/classes/nova_compute_libvirt_spec.rb index 1e1116436..403dd5f79 100644 --- a/nova/spec/classes/nova_compute_libvirt_spec.rb +++ b/nova/spec/classes/nova_compute_libvirt_spec.rb @@ -10,25 +10,34 @@ it { is_expected.to contain_class('nova::params')} - it { is_expected.to contain_package('nova-compute-kvm').with( - :ensure => 'present', - :before => 'Package[nova-compute]', - :tag => ['openstack'] - ) } + it { + is_expected.to contain_package('nova-compute-kvm').with( + :ensure => 'present', + :tag => ['openstack', 'nova-package'] + ) + is_expected.to contain_package('nova-compute-kvm').that_requires('Anchor[nova::install::begin]') + is_expected.to contain_package('nova-compute-kvm').that_notifies('Anchor[nova::install::end]') + } - it { is_expected.to contain_package('libvirt').with( - :name => 'libvirt-bin', - :ensure => 'present' - ) } + it { + is_expected.to contain_package('libvirt').with( + :name => 'libvirt-bin', + :ensure => 'present' + ) + is_expected.to contain_package('libvirt').that_requires('Anchor[nova::install::begin]') + is_expected.to contain_package('libvirt').that_comes_before('Anchor[nova::install::end]') + } - it { is_expected.to contain_service('libvirt').with( - :name => 'libvirt-bin', - :enable => true, - :ensure => 'running', - :provider => 'upstart', - :require => 'Package[libvirt]', - :before => ['Service[nova-compute]'] - )} + it { + is_expected.to contain_service('libvirt').with( + :name => 'libvirt-bin', + :enable => true, + :ensure => 'running', + :provider => 'upstart', + ) + + is_expected.to contain_service('libvirt').that_requires('Anchor[nova::config::end]') + } it { is_expected.to contain_nova_config('DEFAULT/compute_driver').with_value('libvirt.LibvirtDriver')} it { is_expected.to contain_nova_config('libvirt/virt_type').with_value('kvm')} @@ -71,13 +80,15 @@ it { 
is_expected.to contain_nova_config('DEFAULT/remove_unused_original_minimum_age_seconds').with_value(3600)} it { is_expected.to contain_nova_config('libvirt/remove_unused_kernels').with_value(true)} it { is_expected.to contain_nova_config('libvirt/remove_unused_resized_minimum_age_seconds').with_value(3600)} - it { is_expected.to contain_service('libvirt').with( - :name => 'custom_service', - :enable => true, - :ensure => 'running', - :require => 'Package[libvirt]', - :before => ['Service[nova-compute]'] - )} + it { + is_expected.to contain_service('libvirt').with( + :name => 'custom_service', + :enable => true, + :ensure => 'running', + :before => ['Service[nova-compute]'] + ) + is_expected.to contain_service('libvirt').that_requires('Anchor[nova::config::end]') + } end describe 'with custom cpu_mode' do @@ -164,7 +175,7 @@ it { is_expected.to contain_package('libvirt-nwfilter').with( :name => 'libvirt-daemon-config-nwfilter', :ensure => 'present', - :before => 'Service[libvirt]', + :before => ['Service[libvirt]', 'Anchor[nova::install::end]'], ) } it { is_expected.to contain_service('libvirt').with( @@ -172,8 +183,8 @@ :enable => true, :ensure => 'running', :provider => 'init', - :require => 'Package[libvirt]', - :before => ['Service[nova-compute]'] + :require => 'Anchor[nova::config::end]', + :before => ['Service[nova-compute]'], )} it { is_expected.to contain_service('messagebus').with( :ensure => 'running', @@ -294,7 +305,7 @@ it { is_expected.to contain_package('libvirt-nwfilter').with( :name => 'libvirt-daemon-config-nwfilter', :ensure => 'present', - :before => 'Service[libvirt]', + :before => ['Service[libvirt]', 'Anchor[nova::install::end]'], ) } it { is_expected.to contain_service('libvirt').with( @@ -302,7 +313,7 @@ :enable => true, :ensure => 'running', :provider => nil, - :require => 'Package[libvirt]', + :require => 'Anchor[nova::config::end]', :before => ['Service[nova-compute]'] )} @@ -315,11 +326,11 @@ context 'on Debian platforms' do let (:facts) do - { + @default_facts.merge({ :osfamily => 'Debian', :operatingsystem => 'Debian', :os_package_family => 'debian' - } + }) end it_behaves_like 'debian-nova-compute-libvirt' @@ -327,11 +338,11 @@ context 'on Debian platforms' do let (:facts) do - { + @default_facts.merge({ :osfamily => 'Debian', :operatingsystem => 'Ubuntu', :os_package_family => 'ubuntu' - } + }) end it_behaves_like 'debian-nova-compute-libvirt' @@ -339,10 +350,10 @@ context 'on RedHat platforms' do let (:facts) do - { + @default_facts.merge({ :osfamily => 'RedHat', :os_package_type => 'rpm' - } + }) end it_behaves_like 'redhat-nova-compute-libvirt' diff --git a/nova/spec/classes/nova_compute_rbd_spec.rb b/nova/spec/classes/nova_compute_rbd_spec.rb index d98ee8cc1..25aa79723 100644 --- a/nova/spec/classes/nova_compute_rbd_spec.rb +++ b/nova/spec/classes/nova_compute_rbd_spec.rb @@ -146,7 +146,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'nova compute rbd' @@ -154,7 +154,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'nova compute rbd' diff --git a/nova/spec/classes/nova_compute_spec.rb b/nova/spec/classes/nova_compute_spec.rb index ecea146bb..dc4afe17e 100644 --- a/nova/spec/classes/nova_compute_spec.rb +++ b/nova/spec/classes/nova_compute_spec.rb @@ -31,7 +31,6 @@ it { is_expected.to_not contain_package('bridge-utils').with( :ensure => 'present', - :before => 
'Nova::Generic_service[compute]' ) } it { is_expected.to contain_package('pm-utils').with( @@ -126,8 +125,9 @@ it 'installs bridge-utils package for nova-network' do is_expected.to contain_package('bridge-utils').with( :ensure => 'present', - :before => 'Nova::Generic_service[compute]' ) + is_expected.to contain_package('bridge-utils').that_requires('Anchor[nova::install::begin]') + is_expected.to contain_package('bridge-utils').that_comes_before('Anchor[nova::install::end]') end end @@ -140,7 +140,6 @@ it 'does not install bridge-utils package for nova-network' do is_expected.to_not contain_package('bridge-utils').with( :ensure => 'present', - :before => 'Nova::Generic_service[compute]' ) end @@ -216,7 +215,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -229,7 +228,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/nova/spec/classes/nova_conductor_spec.rb b/nova/spec/classes/nova_conductor_spec.rb index 5b785f0fd..38897a59e 100644 --- a/nova/spec/classes/nova_conductor_spec.rb +++ b/nova/spec/classes/nova_conductor_spec.rb @@ -55,7 +55,7 @@ it { is_expected.to_not contain_nova_config('database/connection') } it { is_expected.to_not contain_nova_config('database/slave_connection') } - it { is_expected.to_not contain_nova_config('database/idle_timeout').with_value('3600') } + it { is_expected.to_not contain_nova_config('database/idle_timeout').with_value('') } end context 'with overridden database parameters' do @@ -77,7 +77,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -90,7 +90,7 @@ context 'on Redhat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/nova/spec/classes/nova_consoleauth_spec.rb b/nova/spec/classes/nova_consoleauth_spec.rb index ef739bcef..6d0c60232 100644 --- a/nova/spec/classes/nova_consoleauth_spec.rb +++ b/nova/spec/classes/nova_consoleauth_spec.rb @@ -8,7 +8,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_behaves_like 'generic nova service', { @@ -19,7 +19,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_behaves_like 'generic nova service', { diff --git a/nova/spec/classes/nova_cron_archive_deleted_rows_spec.rb b/nova/spec/classes/nova_cron_archive_deleted_rows_spec.rb index c1f89dc30..fd6823921 100644 --- a/nova/spec/classes/nova_cron_archive_deleted_rows_spec.rb +++ b/nova/spec/classes/nova_cron_archive_deleted_rows_spec.rb @@ -3,7 +3,7 @@ describe 'nova::cron::archive_deleted_rows' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :params do @@ -27,7 +27,7 @@ :monthday => params[:monthday], :month => params[:month], :weekday => params[:weekday], - :require => 'Package[nova-common]', + :require => 'Anchor[nova::dbsync::end]', ) end end diff --git a/nova/spec/classes/nova_db_mysql_spec.rb b/nova/spec/classes/nova_db_mysql_spec.rb index cd86ab06d..cff973c17 100644 --- a/nova/spec/classes/nova_db_mysql_spec.rb +++ b/nova/spec/classes/nova_db_mysql_spec.rb @@ -12,7 +12,7 @@ context 'on a Debian osfamily' do 
let :facts do - { :osfamily => "Debian" } + @default_facts.merge({ :osfamily => "Debian" }) end context 'with only required parameters' do @@ -39,7 +39,7 @@ context 'on a RedHat osfamily' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end context 'with only required parameters' do @@ -66,7 +66,7 @@ describe "overriding allowed_hosts param to array" do let :facts do - { :osfamily => "Debian" } + @default_facts.merge({ :osfamily => "Debian" }) end let :params do { @@ -79,7 +79,7 @@ describe "overriding allowed_hosts param to string" do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :params do { @@ -92,7 +92,7 @@ describe "overriding allowed_hosts param equals to host param " do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :params do { diff --git a/nova/spec/classes/nova_db_postgresql_spec.rb b/nova/spec/classes/nova_db_postgresql_spec.rb index 757189f58..3c0adcb43 100644 --- a/nova/spec/classes/nova_db_postgresql_spec.rb +++ b/nova/spec/classes/nova_db_postgresql_spec.rb @@ -12,11 +12,11 @@ context 'on a RedHat osfamily' do let :facts do - { + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.0', :concat_basedir => '/var/lib/puppet/concat' - } + }) end context 'with only required parameters' do @@ -34,12 +34,12 @@ context 'on a Debian osfamily' do let :facts do - { + @default_facts.merge({ :operatingsystemrelease => '7.8', :operatingsystem => 'Debian', :osfamily => 'Debian', :concat_basedir => '/var/lib/puppet/concat' - } + }) end context 'with only required parameters' do diff --git a/nova/spec/classes/nova_db_spec.rb b/nova/spec/classes/nova_db_spec.rb index 9c841cb58..de74c4ccc 100644 --- a/nova/spec/classes/nova_db_spec.rb +++ b/nova/spec/classes/nova_db_spec.rb @@ -20,17 +20,17 @@ context 'with overriden parameters' do before :each do params.merge!( - :database_connection => 'mysql://user:pass@db/db', - :slave_connection => 'mysql://user:pass@slave/db', + :database_connection => 'mysql+pymysql://user:pass@db/db', + :slave_connection => 'mysql+pymysql://user:pass@slave/db', ) end - it { is_expected.to contain_nova_config('database/connection').with_value('mysql://user:pass@db/db').with_secret(true) } - it { is_expected.to contain_nova_config('database/slave_connection').with_value('mysql://user:pass@slave/db').with_secret(true) } - it { is_expected.to contain_nova_config('database/idle_timeout').with_value('3600') } - it { is_expected.to contain_nova_config('database/min_pool_size').with_value('1') } - it { is_expected.to contain_nova_config('database/max_retries').with_value('10') } - it { is_expected.to contain_nova_config('database/retry_interval').with_value('10') } + it { is_expected.to contain_nova_config('database/connection').with_value('mysql+pymysql://user:pass@db/db').with_secret(true) } + it { is_expected.to contain_nova_config('database/slave_connection').with_value('mysql+pymysql://user:pass@slave/db').with_secret(true) } + it { is_expected.to contain_nova_config('database/idle_timeout').with_value('') } + it { is_expected.to contain_nova_config('database/min_pool_size').with_value('') } + it { is_expected.to contain_nova_config('database/max_retries').with_value('') } + it { is_expected.to contain_nova_config('database/retry_interval').with_value('') } end @@ -45,6 +45,14 @@ end + context 'with MySQL-python library as backend package' do + let :params do + { :database_connection => 
'mysql://user:pass@db/db', } + end + + it { is_expected.to contain_package('python-mysqldb').with(:ensure => 'present') } + end + context 'with incorrect database_connection string' do let :params do { :database_connection => 'redis://nova:nova@localhost/nova', } @@ -53,18 +61,39 @@ it_raises 'a Puppet::Error', /validate_re/ end + context 'with incorrect pymysql database_connection string' do + let :params do + { :database_connection => 'foo+pymysql://user:pass@db/db', } + end + + it_raises 'a Puppet::Error', /validate_re/ + end end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian', + @default_facts.merge({ + :osfamily => 'Debian', :operatingsystem => 'Debian', :operatingsystemrelease => 'jessie', - } + }) end it_configures 'nova::db' + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://user:pass@db/db', } + end + it 'install the proper backend package' do + is_expected.to contain_package('nova-backend-package').with( + :ensure => 'present', + :name => 'python-pymysql', + :tag => ['openstack', 'nova-package'], + ) + end + end + context 'with sqlite backend' do let :params do { :database_connection => 'sqlite:///var/lib/nova/nova.sqlite', } @@ -74,7 +103,7 @@ is_expected.to contain_package('nova-backend-package').with( :ensure => 'present', :name => 'python-pysqlite2', - :tag => 'openstack' + :tag => ['openstack', 'nova-package'], ) end @@ -83,12 +112,21 @@ context 'on Redhat platforms' do let :facts do - { :osfamily => 'RedHat', + @default_facts.merge({ + :osfamily => 'RedHat', :operatingsystemrelease => '7.1', - } + }) end it_configures 'nova::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://user:pass@db/db', } + end + + it { is_expected.not_to contain_package('nova-backend-package') } + end end end diff --git a/nova/spec/classes/nova_db_sync_spec.rb b/nova/spec/classes/nova_db_sync_spec.rb index 374b3da93..69c71a1ec 100644 --- a/nova/spec/classes/nova_db_sync_spec.rb +++ b/nova/spec/classes/nova_db_sync_spec.rb @@ -6,21 +6,38 @@ it 'runs nova-db-sync' do is_expected.to contain_exec('nova-db-sync').with( - :command => '/usr/bin/nova-manage db sync', + :command => '/usr/bin/nova-manage db sync', :refreshonly => 'true', :logoutput => 'on_failure' ) end + describe "overriding extra_params" do + let :params do + { + :extra_params => '--config-file /etc/nova/nova.conf', + } + end + + it { + is_expected.to contain_exec('nova-db-sync').with( + :command => '/usr/bin/nova-manage --config-file /etc/nova/nova.conf db sync', + :refreshonly => 'true', + :logoutput => 'on_failure' + ) + } + end + end + context 'on a RedHat osfamily' do let :facts do - { + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.0', :concat_basedir => '/var/lib/puppet/concat' - } + }) end it_configures 'nova-dbsync' @@ -28,12 +45,12 @@ context 'on a Debian osfamily' do let :facts do - { + @default_facts.merge({ :operatingsystemrelease => '7.8', :operatingsystem => 'Debian', :osfamily => 'Debian', :concat_basedir => '/var/lib/puppet/concat' - } + }) end it_configures 'nova-dbsync' diff --git a/nova/spec/classes/nova_init_spec.rb b/nova/spec/classes/nova_init_spec.rb index ec8066de8..dd98db699 100644 --- a/nova/spec/classes/nova_init_spec.rb +++ b/nova/spec/classes/nova_init_spec.rb @@ -13,7 +13,7 @@ it 'installs packages' do is_expected.to contain_package('python-nova').with( :ensure => 'present', - :tag => ['openstack'] + :tag => ['openstack', 'nova-package'] ) is_expected.to 
contain_package('nova-common').with( :name => platform_params[:nova_common_package], @@ -22,12 +22,6 @@ ) end - it 'creates various files and folders' do - is_expected.to contain_file('/etc/nova/nova.conf').with( - :require => 'Package[nova-common]' - ) - end - it 'configures rootwrap' do is_expected.to contain_nova_config('DEFAULT/rootwrap_config').with_value('/etc/nova/rootwrap.conf') end @@ -340,60 +334,6 @@ end end - context 'with qpid rpc_backend' do - let :params do - { :rpc_backend => 'qpid' } - end - - context 'with default parameters' do - it 'configures qpid' do - is_expected.to contain_nova_config('DEFAULT/rpc_backend').with_value('qpid') - is_expected.to contain_nova_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost') - is_expected.to contain_nova_config('oslo_messaging_qpid/qpid_port').with_value('5672') - is_expected.to contain_nova_config('oslo_messaging_qpid/qpid_username').with_value('guest') - is_expected.to contain_nova_config('oslo_messaging_qpid/qpid_password').with_value('guest').with_secret(true) - is_expected.to contain_nova_config('oslo_messaging_qpid/qpid_heartbeat').with_value('60') - is_expected.to contain_nova_config('oslo_messaging_qpid/qpid_protocol').with_value('tcp') - is_expected.to contain_nova_config('oslo_messaging_qpid/qpid_tcp_nodelay').with_value(true) - end - end - - context 'with qpid_password parameter (without qpid_sasl_mechanisms)' do - before do - params.merge!({ :qpid_password => 'guest' }) - end - it { is_expected.to contain_nova_config('oslo_messaging_qpid/qpid_sasl_mechanisms').with_ensure('absent') } - end - - context 'with qpid_password parameter (with qpid_sasl_mechanisms)' do - before do - params.merge!({ - :qpid_password => 'guest', - :qpid_sasl_mechanisms => 'A' - }) - end - it { is_expected.to contain_nova_config('oslo_messaging_qpid/qpid_sasl_mechanisms').with_value('A') } - end - - context 'with qpid_password parameter (with array of qpid_sasl_mechanisms)' do - before do - params.merge!({ - :qpid_password => 'guest', - :qpid_sasl_mechanisms => [ 'DIGEST-MD5', 'GSSAPI', 'PLAIN' ] - }) - end - it { is_expected.to contain_nova_config('oslo_messaging_qpid/qpid_sasl_mechanisms').with_value('DIGEST-MD5 GSSAPI PLAIN') } - end - end - - context 'with qpid rpc_backend with old parameter' do - let :params do - { :rpc_backend => 'nova.openstack.common.rpc.impl_qpid' } - end - - it { is_expected.to contain_nova_config('DEFAULT/rpc_backend').with_value('nova.openstack.common.rpc.impl_qpid') } - end - context 'with rabbitmq rpc_backend with old parameter' do let :params do { :rpc_backend => 'nova.openstack.common.rpc.impl_kombu' } @@ -568,8 +508,10 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian', - :operatingsystem => 'Debian' } + @default_facts.merge({ + :osfamily => 'Debian', + :operatingsystem => 'Debian' + }) end let :platform_params do @@ -582,8 +524,10 @@ context 'on Ubuntu platforms' do let :facts do - { :osfamily => 'Debian', - :operatingsystem => 'Ubuntu' } + @default_facts.merge({ + :osfamily => 'Debian', + :operatingsystem => 'Ubuntu' + }) end let :platform_params do @@ -596,7 +540,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/nova/spec/classes/nova_keystone_auth_spec.rb b/nova/spec/classes/nova_keystone_auth_spec.rb index 5da3568e5..9ec6c8355 100644 --- a/nova/spec/classes/nova_keystone_auth_spec.rb +++ b/nova/spec/classes/nova_keystone_auth_spec.rb @@ -275,7 +275,7 
@@ end let :facts do - { :osfamily => "Debian"} + @default_facts.merge({ :osfamily => "Debian"}) end let :params do diff --git a/nova/spec/classes/nova_logging_spec.rb b/nova/spec/classes/nova_logging_spec.rb index 6ee48b8af..b96669982 100644 --- a/nova/spec/classes/nova_logging_spec.rb +++ b/nova/spec/classes/nova_logging_spec.rb @@ -15,7 +15,7 @@ :logging_exception_prefix => '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s', :log_config_append => '/etc/nova/logging.conf', :publish_errors => true, - :default_log_levels => { + :default_log_levels => { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', 'iso8601' => 'WARN', @@ -57,11 +57,11 @@ shared_examples 'basic default logging settings' do it 'configures nova logging settins with default values' do - is_expected.to contain_nova_config('DEFAULT/use_syslog').with(:value => 'false') - is_expected.to contain_nova_config('DEFAULT/use_stderr').with(:value => 'true') + is_expected.to contain_nova_config('DEFAULT/use_syslog').with(:value => '') + is_expected.to contain_nova_config('DEFAULT/use_stderr').with(:value => '') is_expected.to contain_nova_config('DEFAULT/log_dir').with(:value => '/var/log/nova') - is_expected.to contain_nova_config('DEFAULT/verbose').with(:value => 'false') - is_expected.to contain_nova_config('DEFAULT/debug').with(:value => 'false') + is_expected.to contain_nova_config('DEFAULT/verbose').with(:value => '') + is_expected.to contain_nova_config('DEFAULT/debug').with(:value => '') end end @@ -120,13 +120,13 @@ :default_log_levels, :fatal_deprecations, :instance_format, :instance_uuid_format, :log_date_format, ].each { |param| - it { is_expected.to contain_nova_config("DEFAULT/#{param}").with_ensure('absent') } + it { is_expected.to contain_nova_config("DEFAULT/#{param}").with_value('') } } end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'nova-logging' @@ -134,7 +134,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'nova-logging' diff --git a/nova/spec/classes/nova_migration_libvirt_spec.rb b/nova/spec/classes/nova_migration_libvirt_spec.rb index 013965bab..5db587fa8 100644 --- a/nova/spec/classes/nova_migration_libvirt_spec.rb +++ b/nova/spec/classes/nova_migration_libvirt_spec.rb @@ -96,7 +96,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'nova migration with libvirt' @@ -105,7 +105,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'nova migration with libvirt' diff --git a/nova/spec/classes/nova_network_spec.rb b/nova/spec/classes/nova_network_spec.rb index 691caa656..2224c229d 100644 --- a/nova/spec/classes/nova_network_spec.rb +++ b/nova/spec/classes/nova_network_spec.rb @@ -20,7 +20,7 @@ describe 'on debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it { is_expected.to contain_sysctl__value('net.ipv4.ip_forward').with_value('1') } @@ -30,7 +30,7 @@ it { is_expected.to contain_package('nova-network').with( 'name' => 'nova-network', 'ensure' => 'present', - 'notify' => 'Service[nova-network]' + 'notify' => ['Anchor[nova::install::end]'], ) } describe 'with enabled as true' do @@ 
-96,6 +96,15 @@ it { is_expected.to contain_nova__manage__floating('nova-vm-floating').with_network('10.0.0.0/30') } end end + + describe 'when creating networks, but service nova-network is disabled' do + let :params do + default_params.merge(:enabled => false) + end + it { is_expected.to_not contain_nova__manage__network('nova-vm-net') } + it { is_expected.to_not contain_nova__manage__floating('nova-vm-floating') } + end + describe 'when configuring networks' do describe 'when configuring flatdhcpmanager' do let :params do @@ -224,7 +233,7 @@ end describe 'on rhel' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it { is_expected.to contain_service('nova-network').with( 'name' => 'openstack-nova-network', diff --git a/nova/spec/classes/nova_objectstore_spec.rb b/nova/spec/classes/nova_objectstore_spec.rb index 1591cac33..4be11e22a 100644 --- a/nova/spec/classes/nova_objectstore_spec.rb +++ b/nova/spec/classes/nova_objectstore_spec.rb @@ -8,7 +8,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_behaves_like 'generic nova service', { @@ -28,7 +28,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_behaves_like 'generic nova service', { diff --git a/nova/spec/classes/nova_policy_spec.rb b/nova/spec/classes/nova_policy_spec.rb index e8507c220..91f5f75f7 100644 --- a/nova/spec/classes/nova_policy_spec.rb +++ b/nova/spec/classes/nova_policy_spec.rb @@ -25,7 +25,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'nova policies' @@ -33,7 +33,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'nova policies' diff --git a/nova/spec/classes/nova_qpid_spec.rb b/nova/spec/classes/nova_qpid_spec.rb deleted file mode 100644 index c7e030338..000000000 --- a/nova/spec/classes/nova_qpid_spec.rb +++ /dev/null @@ -1,50 +0,0 @@ -require 'spec_helper' - -describe 'nova::qpid' do - - let :facts do - {:puppetversion => '2.7'} - end - - describe 'with defaults' do - - it 'should contain all of the default resources' do - - is_expected.to contain_class('qpid::server').with( - :service_ensure => 'running', - :port => '5672' - ) - - end - - it 'should contain user' do - - is_expected.to contain_qpid_user('guest').with( - :password => 'guest', - :file => '/var/lib/qpidd/qpidd.sasldb', - :realm => 'OPENSTACK', - :provider => 'saslpasswd2' - ) - - end - - end - - describe 'when disabled' do - let :params do - { - :enabled => false - } - end - - it 'should be disabled' do - - is_expected.to_not contain_qpid_user('guest') - is_expected.to contain_class('qpid::server').with( - :service_ensure => 'stopped' - ) - - end - end - -end diff --git a/nova/spec/classes/nova_quota_spec.rb b/nova/spec/classes/nova_quota_spec.rb index c5545055d..273362cf8 100644 --- a/nova/spec/classes/nova_quota_spec.rb +++ b/nova/spec/classes/nova_quota_spec.rb @@ -19,6 +19,8 @@ :quota_security_groups => 10, :quota_security_group_rules => 20, :quota_key_pairs => 100, + :quota_server_groups => 10, + :quota_server_group_members => 10, :reservation_expire => 86400, :until_refresh => 0, :max_age => 0, @@ -56,6 +58,8 @@ :quota_security_groups => 20, :quota_security_group_rules => 40, :quota_key_pairs => 200, + :quota_server_groups => 20, + 
:quota_server_group_members => 20, :reservation_expire => 6400, :until_refresh => 30, :max_age => 60 diff --git a/nova/spec/classes/nova_rabbitmq_spec.rb b/nova/spec/classes/nova_rabbitmq_spec.rb index 22fbb9d2b..4dc7fd8d6 100644 --- a/nova/spec/classes/nova_rabbitmq_spec.rb +++ b/nova/spec/classes/nova_rabbitmq_spec.rb @@ -3,10 +3,10 @@ describe 'nova::rabbitmq' do let :facts do - { + @default_facts.merge({ :puppetversion => '2.7', :osfamily => 'Debian' - } + }) end describe 'with defaults' do diff --git a/nova/spec/classes/nova_scheduler_spec.rb b/nova/spec/classes/nova_scheduler_spec.rb index 34e7f2c2a..fe9b22c64 100644 --- a/nova/spec/classes/nova_scheduler_spec.rb +++ b/nova/spec/classes/nova_scheduler_spec.rb @@ -56,7 +56,7 @@ it { is_expected.to_not contain_nova_config('database/connection') } it { is_expected.to_not contain_nova_config('database/slave_connection') } - it { is_expected.to_not contain_nova_config('database/idle_timeout').with_value('3600') } + it { is_expected.to_not contain_nova_config('database/idle_timeout').with_value('') } end context 'with overridden database parameters' do @@ -78,7 +78,7 @@ context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :platform_params do @@ -91,7 +91,7 @@ context 'on Redhat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/nova/spec/classes/nova_serial_proxy_spec.rb b/nova/spec/classes/nova_serial_proxy_spec.rb index 01ef94ce0..95bfcc22a 100644 --- a/nova/spec/classes/nova_serial_proxy_spec.rb +++ b/nova/spec/classes/nova_serial_proxy_spec.rb @@ -50,8 +50,10 @@ context 'on Ubuntu system' do let :facts do - { :osfamily => 'Debian', - :operatingsystem => 'Ubuntu' } + @default_facts.merge({ + :osfamily => 'Debian', + :operatingsystem => 'Ubuntu' + }) end let :platform_params do @@ -64,8 +66,10 @@ context 'on Debian system' do let :facts do - { :osfamily => 'Debian', - :operatingsystem => 'Debian' } + @default_facts.merge({ + :osfamily => 'Debian', + :operatingsystem => 'Debian' + }) end let :platform_params do @@ -78,7 +82,7 @@ context 'on Redhat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end let :platform_params do diff --git a/nova/spec/classes/nova_spicehtml5_proxy_spec.rb b/nova/spec/classes/nova_spicehtml5_proxy_spec.rb index 36db3df14..1681202a7 100644 --- a/nova/spec/classes/nova_spicehtml5_proxy_spec.rb +++ b/nova/spec/classes/nova_spicehtml5_proxy_spec.rb @@ -46,9 +46,11 @@ context 'on Ubuntu system' do let :facts do - { :osfamily => 'Debian', + @default_facts.merge({ + :osfamily => 'Debian', :operatingsystem => 'Ubuntu', - :os_package_type => 'ubuntu' } + :os_package_type => 'ubuntu' + }) end let :platform_params do @@ -61,9 +63,11 @@ context 'on Debian system' do let :facts do - { :osfamily => 'Debian', + @default_facts.merge({ + :osfamily => 'Debian', :operatingsystem => 'Debian', - :os_package_type => 'debian' } + :os_package_type => 'debian' + }) end let :platform_params do @@ -76,9 +80,11 @@ context 'on Ubuntu system with Debian packages' do let :facts do - { :osfamily => 'Debian', + @default_facts.merge({ + :osfamily => 'Debian', :operatingsystem => 'Ubuntu', - :os_package_type => 'debian' } + :os_package_type => 'debian' + }) end let :platform_params do @@ -92,7 +98,7 @@ context 'on Redhat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 
'RedHat' }) end let :platform_params do diff --git a/nova/spec/classes/nova_utilities_spec.rb b/nova/spec/classes/nova_utilities_spec.rb index 2b0ae2ce0..f91f46ecd 100644 --- a/nova/spec/classes/nova_utilities_spec.rb +++ b/nova/spec/classes/nova_utilities_spec.rb @@ -4,7 +4,7 @@ describe 'on debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it 'installes utilities' do diff --git a/nova/spec/classes/nova_vnc_proxy_spec.rb b/nova/spec/classes/nova_vnc_proxy_spec.rb index 302708850..714556447 100644 --- a/nova/spec/classes/nova_vnc_proxy_spec.rb +++ b/nova/spec/classes/nova_vnc_proxy_spec.rb @@ -12,7 +12,7 @@ describe 'on debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it { is_expected.to contain_package('python-numpy').with( @@ -56,9 +56,11 @@ describe 'on debian OS' do let :facts do - { :osfamily => 'Debian', + @default_facts.merge({ + :osfamily => 'Debian', :operatingsystem => 'Debian', - :os_package_type => 'debian' } + :os_package_type => 'debian' + }) end it { is_expected.to contain_package('nova-vncproxy').with( :name => "nova-consoleproxy", @@ -73,9 +75,11 @@ describe 'on Ubuntu OS with Debian packages' do let :facts do - { :osfamily => 'Debian', + @default_facts.merge({ + :osfamily => 'Debian', :operatingsystem => 'Ubuntu', - :os_package_type => 'debian' } + :os_package_type => 'debian' + }) end it { is_expected.to contain_package('nova-vncproxy').with( :name => "nova-consoleproxy", @@ -91,7 +95,7 @@ describe 'on Redhatish platforms' do let :facts do - { :osfamily => 'Redhat' } + @default_facts.merge({ :osfamily => 'Redhat' }) end it { is_expected.to contain_package('python-numpy').with( diff --git a/nova/spec/defines/nova_generic_service_spec.rb b/nova/spec/defines/nova_generic_service_spec.rb index 1c127bcff..c5ce7ce38 100644 --- a/nova/spec/defines/nova_generic_service_spec.rb +++ b/nova/spec/defines/nova_generic_service_spec.rb @@ -14,7 +14,7 @@ end let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end let :title do @@ -27,8 +27,11 @@ 'enable' => true )} - it { is_expected.to contain_service('nova-foo').that_requires( - ['Package[nova-common]', 'Package[nova-foo]'] + it { is_expected.to contain_service('nova-foo').that_subscribes_to( + 'Anchor[nova::service::begin]', + )} + it { is_expected.to contain_service('nova-foo').that_notifies( + 'Anchor[nova::service::end]', )} end end diff --git a/nova/spec/defines/nova_manage_networks_spec.rb b/nova/spec/defines/nova_manage_networks_spec.rb index b8ec36876..338a7e3c1 100644 --- a/nova/spec/defines/nova_manage_networks_spec.rb +++ b/nova/spec/defines/nova_manage_networks_spec.rb @@ -3,7 +3,7 @@ describe 'nova::manage::network' do let :facts do - {:osfamily => 'RedHat'} + @default_facts.merge({:osfamily => 'RedHat'}) end let :pre_condition do diff --git a/nova/spec/hosts/test-001_spec.rb b/nova/spec/hosts/test-001_spec.rb index 62ad5f2f9..6927a808c 100644 --- a/nova/spec/hosts/test-001_spec.rb +++ b/nova/spec/hosts/test-001_spec.rb @@ -4,7 +4,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end # Bug #1278452 diff --git a/nova/spec/shared_examples.rb b/nova/spec/shared_examples.rb index d4a8e85c2..75e292156 100644 --- a/nova/spec/shared_examples.rb +++ b/nova/spec/shared_examples.rb @@ -9,11 +9,12 @@ context 'with default parameters' do it 'installs package and service' do 
is_expected.to contain_package(service[:name]).with({ - :name => service[:package_name], - :ensure => 'present', - :notify => "Service[#{service[:name]}]", - :tag => ['openstack', 'nova-package'], + :name => service[:package_name], + :ensure => 'present', + :tag => ['openstack', 'nova-package'], }) + is_expected.to contain_package(service[:name]).that_requires('Anchor[nova::install::begin]') + is_expected.to contain_package(service[:name]).that_notifies('Anchor[nova::install::end]') is_expected.to contain_service(service[:name]).with({ :name => service[:service_name], :ensure => 'running', @@ -21,6 +22,8 @@ :enable => true, :tag => 'nova-service', }) + is_expected.to contain_service(service[:name]).that_subscribes_to('Anchor[nova::service::begin]') + is_expected.to contain_service(service[:name]).that_notifies('Anchor[nova::service::end]') end end @@ -34,9 +37,10 @@ is_expected.to contain_package(service[:name]).with({ :name => service[:package_name], :ensure => '2012.1-2', - :notify => "Service[#{service[:name]}]", :tag => ['openstack', 'nova-package'], }) + is_expected.to contain_package(service[:name]).that_requires('Anchor[nova::install::begin]') + is_expected.to contain_package(service[:name]).that_notifies('Anchor[nova::install::end]') is_expected.to contain_service(service[:name]).with({ :name => service[:service_name], :ensure => 'stopped', @@ -44,6 +48,8 @@ :enable => false, :tag => 'nova-service', }) + is_expected.to contain_service(service[:name]).that_subscribes_to('Anchor[nova::service::begin]') + is_expected.to contain_service(service[:name]).that_notifies('Anchor[nova::service::end]') end end diff --git a/nova/spec/spec_helper.rb b/nova/spec/spec_helper.rb index 3df4cede1..9bc7bcf96 100644 --- a/nova/spec/spec_helper.rb +++ b/nova/spec/spec_helper.rb @@ -5,6 +5,9 @@ RSpec.configure do |c| c.alias_it_should_behave_like_to :it_configures, 'configures' c.alias_it_should_behave_like_to :it_raises, 'raises' + c.before :each do + @default_facts = { :os_service_default => '' } + end end at_exit { RSpec::Puppet::Coverage.report! } diff --git a/nova/spec/unit/provider/nova_network/nova_spec.rb b/nova/spec/unit/provider/nova_network/nova_spec.rb new file mode 100644 index 000000000..091a0982c --- /dev/null +++ b/nova/spec/unit/provider/nova_network/nova_spec.rb @@ -0,0 +1,63 @@ +require 'puppet' +require 'puppet/provider/nova_network/nova' +require 'tempfile' + +provider_class = Puppet::Type.type(:nova_network).provider(:nova) + +describe provider_class do + + let :net_attrs do + { + :network => '10.20.0.0/16', + :label => 'novanetwork', + :ensure => 'present', + } + end + + let :resource do + Puppet::Type::Nova_network.new(net_attrs) + end + + let :provider do + provider_class.new(resource) + end + + shared_examples 'nova_network' do + describe '#exists?' 
do + it 'should check non-existsing network' do + provider.expects(:auth_nova).with("network-list") + .returns('"+--------------------------------------+-------------+-------------+\n| ID | Label | Cidr |\n+--------------------------------------+-------------+-------------+\n| 703edc62-36ab-4c41-9d73-884b30e9acbd | novanetwork | 10.0.0.0/16 |\n+--------------------------------------+-------------+-------------+\n" +') + expect(provider.exists?).to be_falsey + end + + it 'should check existsing network' do + provider.expects(:auth_nova).with("network-list") + .returns('"+--------------------------------------+-------------+-------------+\n| ID | Label | Cidr |\n+--------------------------------------+-------------+-------------+\n| 703edc62-36ab-4c41-9d73-884b30e9acbd | novanetwork | 10.20.0.0/16 |\n+--------------------------------------+-------------+-------------+\n" +') + expect(provider.exists?).to be_truthy + end + end + + describe '#create' do + it 'should create network' do + provider.expects(:auth_nova).with("network-create", ['novanetwork', '--fixed-range-v4', '10.20.0.0/16'] ) + .returns('"+--------------------------------------+-------------+-------------+\n| ID | Label | Cidr |\n+--------------------------------------+-------------+-------------+\n| 703edc62-36ab-4c41-9d73-88sdfsdfsdfsd | nova-network | 10.20.0.0/16 |\n+--------------------------------------+-------------+-------------+\n" +') + provider.create + end + end + + describe '#destroy' do + it 'should destroy network' do + resource[:ensure] = :absent + provider.expects(:auth_nova).with("network-delete", "10.20.0.0/16") + .returns('"+--------------------------------------+-------------+-------------+\n| ID | Label | Cidr |\n+--------------------------------------+-------------+-------------+\n +') + provider.destroy + end + end + end + + it_behaves_like('nova_network') +end diff --git a/nova/spec/unit/provider/nova_spec.rb b/nova/spec/unit/provider/nova_spec.rb index 46608a0df..acd3bd9ac 100644 --- a/nova/spec/unit/provider/nova_spec.rb +++ b/nova/spec/unit/provider/nova_spec.rb @@ -11,9 +11,7 @@ def klass let :credential_hash do { - 'auth_host' => '192.168.56.210', - 'auth_port' => '35357', - 'auth_protocol' => 'https', + 'auth_uri' => 'https://192.168.56.210:35357/v2.0/', 'admin_tenant_name' => 'admin_tenant', 'admin_user' => 'admin', 'admin_password' => 'password', @@ -58,7 +56,7 @@ def klass end.to raise_error(Puppet::Error, credential_error) end - it 'should use specified host/port/protocol in the auth endpoint' do + it 'should use specified uri in the auth endpoint' do conf = {'keystone_authtoken' => credential_hash} klass.expects(:nova_conf).returns(conf) expect(klass.get_auth_endpoint).to eq(auth_endpoint) diff --git a/opendaylight/CONTRIBUTING.markdown b/opendaylight/CONTRIBUTING.markdown index 7da349105..085948f69 100644 --- a/opendaylight/CONTRIBUTING.markdown +++ b/opendaylight/CONTRIBUTING.markdown @@ -101,7 +101,8 @@ The testing tools have a number of dependencies. We use [Bundler][10] to make installing them easy. 
``` -[~/puppet-opendaylight]$ sudo yum install -y rubygems ruby-devel gcc-c++ zlib-devel patch +[~/puppet-opendaylight]$ sudo yum install -y rubygems ruby-devel gcc-c++ zlib-devel \ + patch redhat-rpm-config make [~/puppet-opendaylight]$ gem install bundler [~/puppet-opendaylight]$ bundle install [~/puppet-opendaylight]$ bundle update diff --git a/opendaylight/Rakefile b/opendaylight/Rakefile index 6cbc73988..a36f79c39 100644 --- a/opendaylight/Rakefile +++ b/opendaylight/Rakefile @@ -57,6 +57,9 @@ task :centos_tarball do sh "RS_SET=centos-7 INSTALL_METHOD=tarball bundle exec rake beaker" end +# NB: The centos:7.0.1406 and centos:7.1.1503 tags have fakesytemd, not +# the actually-functional systemd-container installed on centos:7 +# https://github.com/CentOS/sig-cloud-instance-build/commit/3bf1e7bbf14deaa8c047c1dfbead6d0e8d0665f2 desc "Run Beaker tests against CentOS 7 Docker node." task :centos_7_docker do sh "RS_SET=centos-7-docker INSTALL_METHOD=rpm bundle exec rake beaker" @@ -82,19 +85,17 @@ task :ubuntu_1404 do sh "RS_SET=ubuntu-1404 INSTALL_METHOD=tarball bundle exec rake beaker" end -desc "Run Beaker tests against Ubuntu 15.04 node." -task :ubuntu_1504 do - sh "RS_SET=ubuntu-1504 INSTALL_METHOD=tarball bundle exec rake beaker" +desc "Run Beaker tests against Ubuntu 14.04 Docker node." +task :ubuntu_1404_docker do + sh "RS_SET=ubuntu-1404-docker INSTALL_METHOD=tarball bundle exec rake beaker" end +# Note: Puppet currently doesn't support Ubuntu versions newer than 14.04 +# https://docs.puppetlabs.com/guides/install_puppet/install_debian_ubuntu.html + desc "All tests, including Beaker tests against all nodes." task :acceptance => [ :test, - :centos, - :centos_tarball, - :fedora_20, - :fedora_21, - :fedora_22, - :ubuntu_1404, - :ubuntu_1504, + :centos_7_docker, + :ubuntu_1404_docker, ] diff --git a/opendaylight/Vagrantfile b/opendaylight/Vagrantfile new file mode 100644 index 000000000..015762ffc --- /dev/null +++ b/opendaylight/Vagrantfile @@ -0,0 +1,47 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +Vagrant.configure(2) do |config| + + # Not doing this causes `puppet apply` to fail at catalog compile + config.vm.synced_folder ".", "/home/vagrant/puppet-opendaylight", type: "rsync" + config.vm.synced_folder ".", "/vagrant", disabled: true + + # We run out of RAM once ODL starts with default 500MB + config.vm.provider :libvirt do |libvirt| + libvirt.memory = 4096 + libvirt.cpus = 2 + end + + config.vm.define "f23" do |f23| + f23.vm.box = "fedora/23-cloud-base" + + f23.vm.provision "shell", inline: "dnf update -y" + + # Install required gems via Bundler + f23.vm.provision "shell", inline: "dnf install -y rubygems ruby-devel gcc-c++ zlib-devel patch redhat-rpm-config make" + f23.vm.provision "shell", inline: "gem install bundler" + f23.vm.provision "shell", inline: "echo export PATH=$PATH:/usr/local/bin >> /home/vagrant/.bashrc" + f23.vm.provision "shell", inline: "echo export PATH=$PATH:/usr/local/bin >> /root/.bashrc" + f23.vm.provision "shell", inline: 'su -c "cd /home/vagrant/puppet-opendaylight; bundle install" vagrant' + f23.vm.provision "shell", inline: 'su -c "cd /home/vagrant/puppet-opendaylight; bundle update" vagrant' + + # Git is required for cloning Puppet module deps in `rake test` + f23.vm.provision "shell", inline: "dnf install -y git" + + # Install Docker for Docker-based Beaker tests + f23.vm.provision "shell", inline: "tee /etc/yum.repos.d/docker.repo <<-'EOF' +[dockerrepo] +name=Docker Repository +baseurl=https://yum.dockerproject.org/repo/main/fedora/$releasever/ 
+enabled=1 +gpgcheck=1 +gpgkey=https://yum.dockerproject.org/gpg +EOF +" + f23.vm.provision "shell", inline: "dnf install -y docker-engine xfsprogs" + f23.vm.provision "shell", inline: "usermod -a -G docker vagrant" + f23.vm.provision "shell", inline: "systemctl start docker" + f23.vm.provision "shell", inline: "systemctl enable docker" + end +end diff --git a/opendaylight/spec/acceptance/nodesets/ubuntu-1404-docker.yml b/opendaylight/spec/acceptance/nodesets/ubuntu-1404-docker.yml new file mode 100644 index 000000000..9b1c3d4bf --- /dev/null +++ b/opendaylight/spec/acceptance/nodesets/ubuntu-1404-docker.yml @@ -0,0 +1,7 @@ +HOSTS: + ubuntu-1404-docker: + platform: ubuntu-14.04-x64 + image: ubuntu:14.04 + hypervisor: docker +CONFIG: + type: foss diff --git a/opendaylight/spec/acceptance/nodesets/ubuntu-1504.yml b/opendaylight/spec/acceptance/nodesets/ubuntu-1504.yml deleted file mode 100644 index 25bee91ef..000000000 --- a/opendaylight/spec/acceptance/nodesets/ubuntu-1504.yml +++ /dev/null @@ -1,12 +0,0 @@ -HOSTS: - ubuntu-1504: - roles: - - master - platform: ubuntu-1504-amd64 - box: boxcutter/ubuntu1504 - box_url: https://atlas.hashicorp.com/boxcutter/boxes/ubuntu1504 - hypervisor: vagrant - -CONFIG: - log_level: verbose - type: foss diff --git a/opendaylight/spec/spec_helper_acceptance.rb b/opendaylight/spec/spec_helper_acceptance.rb index f7f64a04d..69e297f7d 100644 --- a/opendaylight/spec/spec_helper_acceptance.rb +++ b/opendaylight/spec/spec_helper_acceptance.rb @@ -158,7 +158,7 @@ def generic_validations() describe package('java-1.8.0-openjdk') do it { should be_installed } end - elsif ENV['RS_SET'] == 'ubuntu-1404' + elsif ['ubuntu-1404', 'ubuntu-1404-docker'].include? ENV['RS_SET'] # Ubuntu-specific validations # Verify ODL Upstart config file diff --git a/openstack_extras/CHANGELOG.md b/openstack_extras/CHANGELOG.md index e9bb37bb0..6c0a4b710 100644 --- a/openstack_extras/CHANGELOG.md +++ b/openstack_extras/CHANGELOG.md @@ -1,3 +1,22 @@ +##2015-11-24 - 7.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Liberty. 
+ +####Backwards-incompatible changes +- repo: bump to Liberty by default + +####Features +- repo/ubuntu: allow to change uca repo name + +####Maintenance +- implement acceptance tests +- try to use zuul-cloner to prepare fixtures +- remove class_parameter_defaults puppet-lint check +- acceptance: use common bits from puppet-openstack-integration +- fix RSpec 3.x syntax +- initial msync run for all Puppet OpenStack modules + ##2015-07-08 - 6.0.0 ###Summary diff --git a/openstack_extras/README.md b/openstack_extras/README.md index ec351fcc2..9dde5f551 100644 --- a/openstack_extras/README.md +++ b/openstack_extras/README.md @@ -1,7 +1,7 @@ openstack_extras ============ -6.0.0 - 2015.1.0 - Kilo +7.0.0 - 2015.2.0 - Liberty #### Table of Contents @@ -39,7 +39,7 @@ Setup ### Installing openstack_extras - example% puppet module install puppetlabs/openstack_extras + example% puppet module install openstack/openstack_extras ### Beginning with openstack_extras diff --git a/openstack_extras/metadata.json b/openstack_extras/metadata.json index 979433203..8d18f496f 100644 --- a/openstack_extras/metadata.json +++ b/openstack_extras/metadata.json @@ -1,6 +1,6 @@ { "name": "openstack-openstack_extras", - "version": "6.0.0", + "version": "7.0.0", "author": "OpenStack Contributors", "summary": "Puppet OpenStack Extras Module", "license": "Apache-2.0", diff --git a/openstacklib/Rakefile b/openstacklib/Rakefile index 9692ffdd2..ed79bead4 100644 --- a/openstacklib/Rakefile +++ b/openstacklib/Rakefile @@ -58,7 +58,7 @@ task :spec_prep do zuul_clone_cmd += ['git://git.openstack.org', "#{repo}"] sh(*zuul_clone_cmd) else - sh("git clone https://git.openstack.org/#{repo} -b stable/liberty #{repo}") + sh("git clone https://git.openstack.org/#{repo} #{repo}") end script = ['env'] script += ["PUPPETFILE_DIR=#{Dir.pwd}/spec/fixtures/modules"] diff --git a/openstacklib/lib/puppet/provider/openstack.rb b/openstacklib/lib/puppet/provider/openstack.rb index 155e5b919..f71019937 100644 --- a/openstacklib/lib/puppet/provider/openstack.rb +++ b/openstacklib/lib/puppet/provider/openstack.rb @@ -50,7 +50,7 @@ def self.request(service, action, properties, credentials=nil) end break rescue Puppet::ExecutionFailure => e - if e.message =~ /HTTP 401/ + if e.message =~ /HTTP 40[13]/ raise(Puppet::Error::OpenstackUnauthorizedError, 'Could not authenticate.') elsif e.message =~ /Unable to establish connection/ current_time = Time.now.to_i diff --git a/openstacklib/lib/puppet/provider/openstack_config/ruby.rb b/openstacklib/lib/puppet/provider/openstack_config/ruby.rb new file mode 100644 index 000000000..c70a09ca8 --- /dev/null +++ b/openstacklib/lib/puppet/provider/openstack_config/ruby.rb @@ -0,0 +1,79 @@ + +require File.expand_path('../../../util/openstackconfig', __FILE__) + + +Puppet::Type.type(:openstack_config).provide(:ruby) do + + def self.instances + if self.respond_to?(:file_path) + config = Puppet::Util::OpenStackConfig.new(file_path) + resources = [] + config.section_names.each do |section_name| + config.get_settings(section_name).each do |setting, value| + resources.push( + new( + :name => namevar(section_name, setting), + :value => value, + :ensure => :present + ) + ) + end + end + resources + else + raise(Puppet::Error, + 'OpenStackConfig only supports collecting instances when a file path ' + + 'is hard coded' + ) + end + end + + def exists? + if resource[:value] == ensure_absent_val + resource[:ensure] = :absent + end + !config.get_value(section, setting).nil?
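+    # Editorial note (comment added here, not part of the upstream change):
+    # when the desired value equals ensure_absent_val, ensure is flipped to
+    # :absent above, so a setting that currently exists is removed via
+    # destroy rather than updated in place.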
+ end + + def create + config.set_value(section, setting, resource[:value]) + config.save + @config = nil + end + + def destroy + config.remove_setting(section, setting) + config.save + @config = nil + end + + def value=(value) + config.set_value(section, setting, resource[:value]) + config.save + end + + def value + config.get_value(section, setting) + end + + def section + resource[:name].split('/', 2).first + end + + def setting + resource[:name].split('/', 2).last + end + + def ensure_absent_val + resource[:ensure_absent_val] + end + + def file_path + self.class.file_path + end + + private + def config + @config ||= Puppet::Util::OpenStackConfig.new(file_path) + end +end diff --git a/openstacklib/lib/puppet/util/openstackconfig.rb b/openstacklib/lib/puppet/util/openstackconfig.rb new file mode 100644 index 000000000..4ba0ce264 --- /dev/null +++ b/openstacklib/lib/puppet/util/openstackconfig.rb @@ -0,0 +1,117 @@ +# +# Author: Martin Magr +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Forked from https://github.com/puppetlabs/puppetlabs-inifile . + +require File.expand_path('../openstackconfig/section', __FILE__) + + +module Puppet +module Util + class OpenStackConfig + + @@SECTION_REGEX = /^\s*\[(.*)\]\s*$/ + + def initialize(path) + @path = path + @order = [] + @sections = {} + parse_file + end + + attr_reader :path + + def section_names + @sections.keys + end + + def get_settings(section_name) + @sections[section_name].settings + end + + def get_value(section_name, setting_name) + if @sections.has_key?(section_name) + @sections[section_name].settings[setting_name] + end + end + + def set_value(section_name, setting_name, value) + unless @sections.has_key?(section_name) + add_section(section_name) + end + if @sections[section_name].settings.has_key?(setting_name) + @sections[section_name].update_setting(setting_name, value) + else + @sections[section_name].add_setting(setting_name, value) + end + end + + def remove_setting(section_name, setting_name, value=nil) + @sections[section_name].remove_setting(setting_name, value) + end + + def save + File.open(@path, 'w') do |fh| + @order.each do |section_name| + if section_name.length > 0 + fh.puts("[#{section_name}]") + end + unless @sections[section_name].lines.empty? + @sections[section_name].lines.each do |line| + fh.puts(line) + end + end + end + end + end + + private + # This is mostly here because it makes testing easier + # --we don't have to try to stub any methods on File. + def self.readlines(path) + # If this type is ever used with very large files, we should + # write this in a different way, using a temp + # file; for now assuming that this type is only used on + # small-ish config files that can fit into memory without + # too much trouble. + File.file?(path) ? File.readlines(path) : [] + end + + def parse_file + # We always create a "global" section at the beginning of the file, + # for anything that appears before the first named section. 
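+      # Illustrative sketch (added comment, not from the original change):
+      #   foo = bar        -> stored under the unnamed '' (global) section
+      #   [section1]
+      #   baz = qux        -> stored under 'section1'
+      # This matches the "file with a 'global' section" examples in the spec.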
+ lines = [] + current_section = '' + OpenStackConfig.readlines(@path).each do |line| + if match = @@SECTION_REGEX.match(line) + add_section(current_section, lines) + # start new section parsing + lines = [] + current_section = match[1] + else + lines.push(line) + end + end + add_section(current_section, lines) + end + + def add_section(section_name, lines=nil) + @order.push(section_name) + @sections[section_name] = Section.new(section_name, lines) + end + + end +end +end diff --git a/openstacklib/lib/puppet/util/openstackconfig/section.rb b/openstacklib/lib/puppet/util/openstackconfig/section.rb new file mode 100644 index 000000000..e75b71a10 --- /dev/null +++ b/openstacklib/lib/puppet/util/openstackconfig/section.rb @@ -0,0 +1,172 @@ +# +# Author: Martin Magr +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Forked from https://github.com/puppetlabs/puppetlabs-inifile . + + +module Puppet +module Util +class OpenStackConfig + class Section + + @@SETTING_REGEX = /^(\s*)([^#;\s]|[^#;\s].*?[^\s=])(\s*=[ \t]*)(.*)\s*$/ + @@COMMENTED_SETTING_REGEX = /^(\s*)[#;]+(\s*)(.*?[^\s=])(\s*=[ \t]*)(.*)\s*$/ + + def initialize(name, lines=nil) + @name = name + @lines = lines.nil? ? [] : lines + # parse lines + @indentation = nil + @settings = {} + @lines.each do |line| + if match = @@SETTING_REGEX.match(line) + indent = match[1].length + @indentation = [indent, @indentation || indent].min + if @settings.include?(match[2]) + if not @settings[match[2]].kind_of?(Array) + @settings[match[2]] = [@settings[match[2]]] + end + @settings[match[2]].push(match[4]) + else + @settings[match[2]] = match[4] + end + end + end + end + + attr_reader :name, :indentation + + def settings + Marshal.load(Marshal.dump(@settings)) + end + + def lines + @lines.clone + end + + def is_global? + @name == '' + end + + def is_new_section? + @lines.empty? + end + + def setting_names + @settings.keys + end + + def add_setting(setting_name, value) + @settings[setting_name] = value + add_lines(setting_name, value) + end + + def update_setting(setting_name, value) + old_value = @settings[setting_name] + @settings[setting_name] = value + if value.kind_of?(Array) or old_value.kind_of?(Array) + # ---- update lines for multi setting ---- + old_value = old_value.kind_of?(Array) ? old_value : [old_value] + new_value = value.kind_of?(Array) ? value : [value] + if useless = old_value - new_value + remove_lines(setting_name, useless) + end + if missing = new_value - old_value + add_lines(setting_name, missing) + end + else + # ---- update lines for single setting ---- + @lines.each_with_index do |line, index| + if match = @@SETTING_REGEX.match(line) + if (match[2] == setting_name) + @lines[index] = "#{match[1]}#{match[2]}#{match[3]}#{value}\n" + end + end + end + end + end + + def remove_setting(setting_name, value=nil) + if value.nil? 
or @settings[setting_name] == value + @settings.delete(setting_name) + else + value.each do |val| + @settings[setting_name].delete(val) + end + end + remove_lines(setting_name, value) + end + + private + def find_commented_setting(setting_name) + @lines.each_with_index do |line, index| + if match = @@COMMENTED_SETTING_REGEX.match(line) + if match[3] == setting_name + return index + end + end + end + nil + end + + def find_last_setting(setting_name) + result = nil + @lines.each_with_index do |line, index| + if match = @@SETTING_REGEX.match(line) + if match[2] == setting_name + result = index + end + end + end + result + end + + def remove_lines(setting_name, value=nil) + @lines.each_with_index do |line, index| + if (match = @@SETTING_REGEX.match(line)) + if match[2] == setting_name + if value.nil? or ( + value.kind_of?(Array) and value.include?(match[4]) + ) + lines.delete_at(index) + end + end + end + end + end + + def add_lines(setting_name, value) + indent_str = ' ' * (indentation || 0) + if current = find_last_setting(setting_name) + offset = current + elsif comment = find_commented_setting(setting_name) + offset = comment + 1 + else + offset = @lines.length + end + if value.kind_of?(Array) + value.each do |val| + @lines.insert(offset, "#{indent_str}#{setting_name}=#{val}\n") + offset += 1 + end + else + @lines.insert(offset, "#{indent_str}#{setting_name}=#{value}\n") + end + end + + end +end +end +end diff --git a/openstacklib/manifests/openstackclient.pp b/openstacklib/manifests/openstackclient.pp index 096741c22..25e23adcd 100644 --- a/openstacklib/manifests/openstackclient.pp +++ b/openstacklib/manifests/openstackclient.pp @@ -11,8 +11,5 @@ class openstacklib::openstackclient( $package_ensure = 'present', ){ - package { 'python-openstackclient': - ensure => $package_ensure, - tag => 'openstack', - } + ensure_packages('python-openstackclient', {'ensure' => $package_ensure, tag => 'openstack'}) } diff --git a/openstacklib/spec/defines/openstacklib_db_postgresql_spec.rb b/openstacklib/spec/defines/openstacklib_db_postgresql_spec.rb index 4e2dfc7c7..6a9f3ce26 100644 --- a/openstacklib/spec/defines/openstacklib_db_postgresql_spec.rb +++ b/openstacklib/spec/defines/openstacklib_db_postgresql_spec.rb @@ -9,11 +9,18 @@ { :password_hash => password_hash } end + let (:pre_condition) do + "include ::postgresql::server" + end + context 'on a RedHat osfamily' do let :facts do { - :postgres_default_version => '8.4', - :osfamily => 'RedHat' + :osfamily => 'RedHat', + :operatingsystem => 'RedHat', + :operatingsystemrelease => '7.1', + :operatingsystemmajrelease => '7', + :concat_basedir => '/tmp', } end @@ -45,7 +52,8 @@ context 'when notifying other resources' do let :pre_condition do - 'exec { "nova-db-sync": }' + "include ::postgresql::server + exec { 'nova-db-sync': }" end let :params do { :notify => 'Exec[nova-db-sync]'}.merge(required_params) @@ -56,7 +64,8 @@ context 'when required for other openstack services' do let :pre_condition do - 'service {"keystone":}' + "include ::postgresql::server + service {'keystone':}" end let :title do 'keystone' @@ -73,7 +82,11 @@ context 'on a Debian osfamily' do let :facts do { - :osfamily => 'Debian' + :osfamily => 'Debian', + :operatingsystem => 'Debian', + :operatingsystemrelease => 'jessie', + :operatingsystemmajrelease => '8.2', + :concat_basedir => '/tmp', } end @@ -105,7 +118,8 @@ context 'when notifying other resources' do let :pre_condition do - 'exec { "nova-db-sync": }' + "include ::postgresql::server + exec { 'nova-db-sync': }" end let
:params do { :notify => 'Exec[nova-db-sync]'}.merge(required_params) @@ -116,7 +130,8 @@ context 'when required for other openstack services' do let :pre_condition do - 'service {"keystone":}' + "include ::postgresql::server + service {'keystone':}" end let :title do 'keystone' diff --git a/openstacklib/spec/spec_helper_acceptance.rb b/openstacklib/spec/spec_helper_acceptance.rb index 15e8cc977..429e807c4 100644 --- a/openstacklib/spec/spec_helper_acceptance.rb +++ b/openstacklib/spec/spec_helper_acceptance.rb @@ -38,7 +38,7 @@ zuul_clone_cmd += "git://git.openstack.org #{repo}" on host, zuul_clone_cmd else - on host, "git clone https://git.openstack.org/#{repo} -b stable/liberty #{repo}" + on host, "git clone https://git.openstack.org/#{repo} #{repo}" end on host, "ZUUL_REF=#{zuul_ref} ZUUL_BRANCH=#{zuul_branch} ZUUL_URL=#{zuul_url} bash #{repo}/install_modules.sh" diff --git a/openstacklib/spec/unit/provider/openstack_spec.rb b/openstacklib/spec/unit/provider/openstack_spec.rb index c9c22eed5..35a1d4bb3 100644 --- a/openstacklib/spec/unit/provider/openstack_spec.rb +++ b/openstacklib/spec/unit/provider/openstack_spec.rb @@ -53,6 +53,30 @@ Puppet::Provider::Openstack.request('project', 'list', ['--long']) end end + + context 'catch unauthorized errors' do + it 'should raise an error with non-existent user' do + ENV['OS_USERNAME'] = 'test' + ENV['OS_PASSWORD'] = 'abc123' + ENV['OS_PROJECT_NAME'] = 'test' + ENV['OS_AUTH_URL'] = 'http://127.0.0.1:5000' + provider.class.stubs(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', ['--long']) + .raises(Puppet::ExecutionFailure, 'Could not find user: test (HTTP 401)') + expect do + Puppet::Provider::Openstack.request('project', 'list', ['--long']) + end.to raise_error(Puppet::Error::OpenstackUnauthorizedError, /Could not authenticate/) + end + + it 'should raise an error with not authorized to perform' do + provider.class.stubs(:openstack) + .with('role', 'list', '--quiet', '--format', 'csv', ['--long']) + .raises(Puppet::ExecutionFailure, 'You are not authorized to perform the requested action: identity:list_grants (HTTP 403)') + expect do + Puppet::Provider::Openstack.request('role', 'list', ['--long']) + end.to raise_error(Puppet::Error::OpenstackUnauthorizedError, /Could not authenticate/) + end + end end describe 'parse_csv' do diff --git a/openstacklib/spec/unit/puppet/util/openstackconfig_spec.rb b/openstacklib/spec/unit/puppet/util/openstackconfig_spec.rb new file mode 100644 index 000000000..757948339 --- /dev/null +++ b/openstacklib/spec/unit/puppet/util/openstackconfig_spec.rb @@ -0,0 +1,322 @@ + +# +# Forked from https://github.com/puppetlabs/puppetlabs-inifile . 
+# + +require 'spec_helper' +require 'puppet/util/openstackconfig' + + +describe Puppet::Util::OpenStackConfig do + include PuppetlabsSpec::Files + + let(:subject) do + Puppet::Util::OpenStackConfig.new("/my/config/path") + end + + before :each do + Puppet::Util::OpenStackConfig.stubs(:readlines).returns(sample_content) + end + + context "when parsing a file" do + let(:sample_content) { + template = <<-EOS +# This is a comment +[section1] +; This is also a comment +foo=foovalue + +bar = barvalue +baz = +[section2] + +foo= foovalue2 +baz=bazvalue + ; commented = out setting + #another comment + ; yet another comment + zot = multi word value + xyzzy['thing1']['thing2']=xyzzyvalue + l=git log + + [section3] + multi_setting = value1 + multi_setting = value2 + EOS + template.split("\n") + } + + it "should parse the correct number of sections" do + # there is always a "global" section, so our count should be 4. + subject.section_names.length.should == 4 + end + + it "should parse the correct section_names" do + # there should always be a "global" section named "" at the beginning of the list + subject.section_names.should == ["", "section1", "section2", "section3"] + end + + it "should expose settings for sections" do + subject.get_settings("section1").should == { + "bar" => "barvalue", + "baz" => "", + "foo" => "foovalue" + } + + subject.get_settings("section2").should == { + "baz" => "bazvalue", + "foo" => "foovalue2", + "l" => "git log", + "xyzzy['thing1']['thing2']" => "xyzzyvalue", + "zot" => "multi word value" + } + + subject.get_settings("section3").should == { + "multi_setting" => ["value1", "value2"] + } + end + + end + + context "when parsing a file whose first line is a section" do + let(:sample_content) { + template = <<-EOS +[section1] +; This is a comment +foo=foovalue + EOS + template.split("\n") + } + + it "should parse the correct number of sections" do + # there is always a "global" section, so our count should be 2. + subject.section_names.length.should == 2 + end + + it "should parse the correct section_names" do + # there should always be a "global" section named "" at the beginning of the list + subject.section_names.should == ["", "section1"] + end + + it "should expose settings for sections" do + subject.get_value("section1", "foo").should == "foovalue" + end + + end + + context "when parsing a file with a 'global' section" do + let(:sample_content) { + template = <<-EOS +foo = bar +[section1] +; This is a comment +foo=foovalue + EOS + template.split("\n") + } + + it "should parse the correct number of sections" do + # there is always a "global" section, so our count should be 2.
+ subject.section_names.length.should == 2 + end + + it "should parse the correct section_names" do + # there should always be a "global" section named "" at the beginning of the list + subject.section_names.should == ["", "section1"] + end + + it "should expose settings for sections" do + subject.get_value("", "foo").should == "bar" + subject.get_value("section1", "foo").should == "foovalue" + end + end + + context "when updating a file with existing empty values" do + let(:sample_content) { + template = <<-EOS +[section1] +foo= +#bar= +#xyzzy['thing1']['thing2']='xyzzyvalue' + EOS + template.split("\n") + } + + it "should properly update uncommented values" do + subject.get_value("section1", "far").should == nil + subject.set_value("section1", "foo", "foovalue") + subject.get_value("section1", "foo").should == "foovalue" + end + + it "should properly update commented values" do + subject.get_value("section1", "bar").should == nil + subject.set_value("section1", "bar", "barvalue") + subject.get_value("section1", "bar").should == "barvalue" + subject.get_value("section1", "xyzzy['thing1']['thing2']").should == nil + subject.set_value("section1", "xyzzy['thing1']['thing2']", "xyzzyvalue") + subject.get_value("section1", "xyzzy['thing1']['thing2']").should == "xyzzyvalue" + end + + it "should properly add new empty values" do + subject.get_value("section1", "baz").should == nil + end + end + + context 'the file has quotation marks in its section names' do + let(:sample_content) do + template = <<-EOS +[branch "master"] + remote = origin + merge = refs/heads/master + +[alias] +to-deploy = log --merges --grep='pull request' --format='%s (%cN)' origin/production..origin/master +[branch "production"] + remote = origin + merge = refs/heads/production + EOS + template.split("\n") + end + + it 'should parse the sections' do + subject.section_names.should match_array ['', + 'branch "master"', + 'alias', + 'branch "production"' + ] + end + end + + context 'Samba INI file with dollars in section names' do + let(:sample_content) do + template = <<-EOS + [global] + workgroup = FELLOWSHIP + ; ... + idmap config * : backend = tdb + + [printers] + comment = All Printers + ; ... 
+ browseable = No + + [print$] + comment = Printer Drivers + path = /var/lib/samba/printers + + [Shares] + path = /home/shares + read only = No + guest ok = Yes + EOS + template.split("\n") + end + + it "should parse the correct section_names" do + subject.section_names.should match_array [ + '', + 'global', + 'printers', + 'print$', + 'Shares' + ] + end + end + + context 'section names with forward slashes in them' do + let(:sample_content) do + template = <<-EOS +[monitor:///var/log/*.log] +disabled = test_value + EOS + template.split("\n") + end + + it "should parse the correct section_names" do + subject.section_names.should match_array [ + '', + 'monitor:///var/log/*.log' + ] + end + end + + context 'KDE Configuration with braces in setting names' do + let(:sample_content) do + template = <<-EOS + [khotkeys] +_k_friendly_name=khotkeys +{5465e8c7-d608-4493-a48f-b99d99fdb508}=Print,none,PrintScreen +{d03619b6-9b3c-48cc-9d9c-a2aadb485550}=Search,none,Search +EOS + template.split("\n") + end + + it "should expose settings for sections" do + subject.get_value("khotkeys", "{5465e8c7-d608-4493-a48f-b99d99fdb508}").should == "Print,none,PrintScreen" + subject.get_value("khotkeys", "{d03619b6-9b3c-48cc-9d9c-a2aadb485550}").should == "Search,none,Search" + end + end + + context 'Configuration with colons in setting names' do + let(:sample_content) do + template = <<-EOS + [Drive names] +A:=5.25" Floppy +B:=3.5" Floppy +C:=Winchester +EOS + template.split("\n") + end + + it "should expose settings for sections" do + subject.get_value("Drive names", "A:").should eq '5.25" Floppy' + subject.get_value("Drive names", "B:").should eq '3.5" Floppy' + subject.get_value("Drive names", "C:").should eq 'Winchester' + end + end + + context 'Configuration with spaces in setting names' do + let(:sample_content) do + template = <<-EOS + [global] + # log files split per-machine: + log file = /var/log/samba/log.%m + + kerberos method = system keytab + passdb backend = tdbsam + security = ads +EOS + template.split("\n") + end + + it "should expose settings for sections" do + subject.get_value("global", "log file").should eq '/var/log/samba/log.%m' + subject.get_value("global", "kerberos method").should eq 'system keytab' + subject.get_value("global", "passdb backend").should eq 'tdbsam' + subject.get_value("global", "security").should eq 'ads' + end + end + + + context 'Multi settings' do + let(:sample_content) do + template = <<-EOS + [test] + # multi values + test = value1 + test = value2 + test = value3 +EOS + template.split("\n") + end + + it "should expose setting with array value" do + subject.get_value("test", "test").should eq ['value1', 'value2', 'value3'] + end + + it "should create setting with array value" do + subject.set_value("test", "test2", ['valueA', 'valueB', 'valueC']) + subject.get_value("test", "test2").should eq ['valueA', 'valueB', 'valueC'] + end + end +end diff --git a/rabbitmq/lib/puppet/provider/rabbitmq_erlang_cookie/ruby.rb b/rabbitmq/lib/puppet/provider/rabbitmq_erlang_cookie/ruby.rb index 74a96168f..2784c3888 100644 --- a/rabbitmq/lib/puppet/provider/rabbitmq_erlang_cookie/ruby.rb +++ b/rabbitmq/lib/puppet/provider/rabbitmq_erlang_cookie/ruby.rb @@ -3,10 +3,15 @@ Puppet::Type.type(:rabbitmq_erlang_cookie).provide(:ruby) do defaultfor :feature => :posix - has_command(:puppet, 'puppet') do - environment :PATH => '/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin' + + env_path = '/opt/puppetlabs/bin:/usr/local/bin:/usr/bin:/bin' + puppet_path = Puppet::Util.withenv(:PATH 
=> env_path) do + Puppet::Util.which('puppet') end + confine :false => puppet_path.nil? + has_command(:puppet, puppet_path) unless puppet_path.nil? + def exists? # Hack to prevent the create method from being called. # We never need to create or destroy this resource, only change its value diff --git a/rabbitmq/manifests/init.pp b/rabbitmq/manifests/init.pp index f1346bbc8..2bde1ec98 100644 --- a/rabbitmq/manifests/init.pp +++ b/rabbitmq/manifests/init.pp @@ -103,7 +103,9 @@ } validate_bool($wipe_db_on_cookie_change) validate_bool($tcp_keepalive) - validate_re($file_limit, '^(\d+|-1|unlimited|infinity)$', '$file_limit must be a positive integer, \'-1\', \'unlimited\', or \'infinity\'.') + # using sprintf for conversion to string, because "${file_limit}" doesn't + # pass lint, despite being nicer + validate_re(sprintf('%s', $file_limit), '^(\d+|-1|unlimited|infinity)$', '$file_limit must be a positive integer, \'-1\', \'unlimited\', or \'infinity\'.') # Validate service parameters. validate_re($service_ensure, '^(running|stopped)$') validate_bool($service_manage) diff --git a/rabbitmq/manifests/params.pp b/rabbitmq/manifests/params.pp index 74b2a7d41..1101ebb36 100644 --- a/rabbitmq/manifests/params.pp +++ b/rabbitmq/manifests/params.pp @@ -40,7 +40,7 @@ $package_ensure = 'installed' $package_name = 'rabbitmq-server' $service_name = 'rabbitmq-server' - $package_provider = 'rpm' + $package_provider = 'yum' $version = '3.1.5-1' $rabbitmq_user = 'rabbitmq' $rabbitmq_group = 'rabbitmq' diff --git a/rabbitmq/spec/classes/rabbitmq_spec.rb b/rabbitmq/spec/classes/rabbitmq_spec.rb index 45b4b7cd2..c65b44e8f 100644 --- a/rabbitmq/spec/classes/rabbitmq_spec.rb +++ b/rabbitmq/spec/classes/rabbitmq_spec.rb @@ -160,6 +160,11 @@ it { should contain_file('/etc/default/rabbitmq-server').with_content(/ulimit -n 1234/) } end + context 'with file_limit => 1234' do + let(:params) {{ :file_limit => 1234 }} + it { should contain_file('/etc/default/rabbitmq-server').with_content(/ulimit -n 1234/) } + end + context 'with file_limit => \'-42\'' do let(:params) {{ :file_limit => '-42' }} it 'does not compile' do @@ -176,7 +181,7 @@ end context 'on Redhat' do - let(:facts) {{ :osfamily => 'RedHat' }} + let(:facts) {{ :osfamily => 'RedHat', :operatingsystemmajrelease => '7' }} it 'includes rabbitmq::repo::rhel' do should contain_class('rabbitmq::repo::rhel') should contain_exec('rpm --import http://www.rabbitmq.com/rabbitmq-signing-key-public.asc') @@ -251,7 +256,7 @@ context 'on Redhat' do let(:params) {{ :repos_ensure => false }} - let(:facts) {{ :osfamily => 'RedHat' }} + let(:facts) {{ :osfamily => 'RedHat', :operatingsystemmajrelease => '7' }} it 'does not import repo public key when repos_ensure is false' do should contain_class('rabbitmq::repo::rhel') should_not contain_exec('rpm --import http://www.rabbitmq.com/rabbitmq-signing-key-public.asc') @@ -260,7 +265,7 @@ context 'on Redhat' do let(:params) {{ :repos_ensure => true }} - let(:facts) {{ :osfamily => 'RedHat' }} + let(:facts) {{ :osfamily => 'RedHat', :operatingsystemmajrelease => '7' }} it 'does import repo public key when repos_ensure is true' do should contain_class('rabbitmq::repo::rhel') should contain_exec('rpm --import http://www.rabbitmq.com/rabbitmq-signing-key-public.asc') @@ -269,7 +274,7 @@ context 'on Redhat' do let(:params) {{ :manage_repos => false }} - let(:facts) {{ :osfamily => 'RedHat' }} + let(:facts) {{ :osfamily => 'RedHat', :operatingsystemmajrelease => '7' }} it 'does not import repo public key when manage_repos is false' do 
should_not contain_class('rabbitmq::repo::rhel') should_not contain_exec('rpm --import http://www.rabbitmq.com/rabbitmq-signing-key-public.asc') @@ -278,7 +283,7 @@ context 'on Redhat' do let(:params) {{ :manage_repos => true }} - let(:facts) {{ :osfamily => 'RedHat' }} + let(:facts) {{ :osfamily => 'RedHat', :operatingsystemmajrelease => '7' }} it 'does import repo public key when manage_repos is true' do should contain_class('rabbitmq::repo::rhel') should contain_exec('rpm --import http://www.rabbitmq.com/rabbitmq-signing-key-public.asc') @@ -287,7 +292,7 @@ context 'on Redhat' do let(:params) {{ :manage_repos => false, :repos_ensure => true }} - let(:facts) {{ :osfamily => 'RedHat' }} + let(:facts) {{ :osfamily => 'RedHat', :operatingsystemmajrelease => '7' }} it 'does not import repo public key when manage_repos is false and repos_ensure is true' do should_not contain_class('rabbitmq::repo::rhel') should_not contain_exec('rpm --import http://www.rabbitmq.com/rabbitmq-signing-key-public.asc') @@ -296,7 +301,7 @@ context 'on Redhat' do let(:params) {{ :manage_repos => true, :repos_ensure => true }} - let(:facts) {{ :osfamily => 'RedHat' }} + let(:facts) {{ :osfamily => 'RedHat', :operatingsystemmajrelease => '7' }} it 'does import repo public key when manage_repos is true and repos_ensure is true' do should contain_class('rabbitmq::repo::rhel') should contain_exec('rpm --import http://www.rabbitmq.com/rabbitmq-signing-key-public.asc') @@ -305,7 +310,7 @@ context 'on Redhat' do let(:params) {{ :manage_repos => false, :repos_ensure => false }} - let(:facts) {{ :osfamily => 'RedHat' }} + let(:facts) {{ :osfamily => 'RedHat', :operatingsystemmajrelease => '7' }} it 'does not import repo public key when manage_repos is false and repos_ensure is false' do should_not contain_class('rabbitmq::repo::rhel') should_not contain_exec('rpm --import http://www.rabbitmq.com/rabbitmq-signing-key-public.asc') @@ -314,7 +319,7 @@ context 'on Redhat' do let(:params) {{ :manage_repos => true, :repos_ensure => false }} - let(:facts) {{ :osfamily => 'RedHat' }} + let(:facts) {{ :osfamily => 'RedHat', :operatingsystemmajrelease => '7' }} it 'does not import repo public key when manage_repos is true and repos_ensure is false' do should contain_class('rabbitmq::repo::rhel') should_not contain_exec('rpm --import http://www.rabbitmq.com/rabbitmq-signing-key-public.asc') @@ -391,12 +396,24 @@ end ['Debian', 'RedHat', 'SUSE', 'Archlinux'].each do |distro| - context "on #{distro}" do - let(:facts) {{ - :osfamily => distro, + osfacts = { + :osfamily => distro, + } + + case distro + when 'Debian' + osfacts.merge!({ :lsbdistcodename => 'squeeze', :lsbdistid => 'Debian' - }} + }) + when 'RedHat' + osfacts.merge!({ + :operatingsystemmajrelease => '7', + }) + end + + context "on #{distro}" do + let(:facts) { osfacts } it { should contain_class('rabbitmq::install') } it { should contain_class('rabbitmq::config') } @@ -447,7 +464,6 @@ end context 'configures config_cluster' do - let(:facts) {{ :osfamily => distro, :lsbdistid => 'Debian' }} let(:params) {{ :config_cluster => true, :cluster_nodes => ['hare-1', 'hare-2'], @@ -1111,13 +1127,13 @@ ## rabbitmq::install ## context "on RHEL" do - let(:facts) {{ :osfamily => 'RedHat' }} + let(:facts) {{ :osfamily => 'RedHat', :operatingsystemmajrelease => '7' }} let(:params) {{ :package_source => 'http://www.rabbitmq.com/releases/rabbitmq-server/v3.2.3/rabbitmq-server-3.2.3-1.noarch.rpm' }} it 'installs the rabbitmq package' do should contain_package('rabbitmq-server').with( 'ensure' 
=> 'installed', 'name' => 'rabbitmq-server', - 'provider' => 'rpm', + 'provider' => 'yum', 'source' => 'http://www.rabbitmq.com/releases/rabbitmq-server/v3.2.3/rabbitmq-server-3.2.3-1.noarch.rpm' ) end @@ -1182,9 +1198,25 @@ end ['RedHat', 'SuSE'].each do |distro| + osfacts = { + :osfamily => distro, + } + + case distro + when 'Debian' + osfacts.merge!({ + :lsbdistcodename => 'squeeze', + :lsbdistid => 'Debian' + }) + when 'RedHat' + osfacts.merge!({ + :operatingsystemmajrelease => '7', + }) + end + describe "repo management on #{distro}" do describe 'imports the key' do - let(:facts) {{ :osfamily => distro }} + let(:facts) { osfacts } let(:params) {{ :package_gpg_key => 'http://www.rabbitmq.com/rabbitmq-signing-key-public.asc' }} it { should contain_exec("rpm --import #{params[:package_gpg_key]}").with( diff --git a/redis/.gitignore b/redis/.gitignore index c6c26c3b8..7a0324898 100644 --- a/redis/.gitignore +++ b/redis/.gitignore @@ -1,7 +1,9 @@ *.swp .DS_Store +.vagrant/ +.yardoc Gemfile.lock +doc +log/ pkg spec/fixtures -.yardoc -doc diff --git a/redis/Gemfile b/redis/Gemfile index a8f3419df..38f9229d7 100644 --- a/redis/Gemfile +++ b/redis/Gemfile @@ -1,5 +1,12 @@ source 'https://rubygems.org' +# special dependencies for Ruby 1.8 +# since there are still several OSes with it +if RUBY_VERSION =~ /^1\.8\./ + gem 'rspec-core', '~> 3.1.7' + gem 'nokogiri', '~> 1.5.0' +end + puppetversion = ENV.key?('PUPPET_VERSION') ? "~> #{ENV['PUPPET_VERSION']}" : ['>= 3.2.1'] gem 'puppet', puppetversion @@ -18,3 +25,6 @@ gem 'puppet-lint', '>=0.3.2' gem 'rspec-puppet', '>=0.1.6' gem 'puppetlabs_spec_helper', '>=0.4.1' +gem 'beaker-rspec' +gem 'bundler', '<= 1.10.5' +gem 'vagrant-wrapper' diff --git a/redis/README.md b/redis/README.md index 6adfef6ec..9c4e12e3f 100644 --- a/redis/README.md +++ b/redis/README.md @@ -26,6 +26,16 @@ #masterauth => 'secret'; } +### Redis 3.0 Clustering + + class { 'redis': + bind => '10.0.1.2', + appendonly => true, + cluster_enabled => true, + cluster_config_file => 'nodes.conf', + cluster_node_timeout => 5000, + } + ### Manage repositories Disabled by default but if you really want the module to manage the required diff --git a/redis/manifests/config.pp b/redis/manifests/config.pp index c7f5324ad..b0ff449b9 100644 --- a/redis/manifests/config.pp +++ b/redis/manifests/config.pp @@ -3,47 +3,52 @@ # This class provides configuration for Redis. 
# class redis::config { - $daemonize = $::redis::daemonize - $pid_file = $::redis::pid_file - $port = $::redis::port + $activerehashing = $::redis::activerehashing + $appendfsync = $::redis::appendfsync + $appendonly = $::redis::appendonly + $auto_aof_rewrite_min_size = $::redis::auto_aof_rewrite_min_size + $auto_aof_rewrite_percentage = $::redis::auto_aof_rewrite_percentage $bind = $::redis::bind - $timeout = $::redis::timeout - $log_level = $::redis::log_level - $log_file = $::redis::log_file - $syslog_enabled = $::redis::syslog_enabled - $syslog_facility = $::redis::syslog_facility + $cluster_config_file = $::redis::cluster_config_file + $cluster_enabled = $::redis::cluster_enabled + $cluster_node_timeout = $::redis::cluster_node_timeout + $daemonize = $::redis::daemonize $databases = $::redis::databases - $rdbcompression = $::redis::rdbcompression $dbfilename = $::redis::dbfilename - $workdir = $::redis::workdir - $slaveof = $::redis::slaveof + $extra_config_file = $::redis::extra_config_file + $hash_max_ziplist_entries = $::redis::hash_max_ziplist_entries + $hash_max_ziplist_value = $::redis::hash_max_ziplist_value + $hz = $::redis::hz + $list_max_ziplist_entries = $::redis::list_max_ziplist_entries + $list_max_ziplist_value = $::redis::list_max_ziplist_value + $log_file = $::redis::log_file + $log_level = $::redis::log_level $masterauth = $::redis::masterauth - $slave_serve_stale_data = $::redis::slave_serve_stale_data - $slave_read_only = $::redis::slave_read_only - $repl_timeout = $::redis::repl_timeout - $requirepass = $::redis::requirepass - $save_db_to_disk = $::redis::save_db_to_disk $maxclients = $::redis::maxclients $maxmemory = $::redis::maxmemory $maxmemory_policy = $::redis::maxmemory_policy $maxmemory_samples = $::redis::maxmemory_samples - $appendonly = $::redis::appendonly - $appendfsync = $::redis::appendfsync $no_appendfsync_on_rewrite = $::redis::no_appendfsync_on_rewrite - $auto_aof_rewrite_percentage = $::redis::auto_aof_rewrite_percentage - $auto_aof_rewrite_min_size = $::redis::auto_aof_rewrite_min_size + $pid_file = $::redis::pid_file + $port = $::redis::port + $rdbcompression = $::redis::rdbcompression + $repl_timeout = $::redis::repl_timeout + $requirepass = $::redis::requirepass + $save_db_to_disk = $::redis::save_db_to_disk + $set_max_intset_entries = $::redis::set_max_intset_entries + $slave_read_only = $::redis::slave_read_only + $slave_serve_stale_data = $::redis::slave_serve_stale_data + $slaveof = $::redis::slaveof $slowlog_log_slower_than = $::redis::slowlog_log_slower_than $slowlog_max_len = $::redis::slowlog_max_len - $hash_max_ziplist_entries = $::redis::hash_max_ziplist_entries - $hash_max_ziplist_value = $::redis::hash_max_ziplist_value - $hz = $::redis::hz - $list_max_ziplist_entries = $::redis::list_max_ziplist_entries - $list_max_ziplist_value = $::redis::list_max_ziplist_value - $set_max_intset_entries = $::redis::set_max_intset_entries + $stop_writes_on_bgsave_error = $::redis::stop_writes_on_bgsave_error + $syslog_enabled = $::redis::syslog_enabled + $syslog_facility = $::redis::syslog_facility + $tcp_keepalive = $::redis::tcp_keepalive + $timeout = $::redis::timeout + $workdir = $::redis::workdir $zset_max_ziplist_entries = $::redis::zset_max_ziplist_entries $zset_max_ziplist_value = $::redis::zset_max_ziplist_value - $activerehashing = $::redis::activerehashing - $extra_config_file = $::redis::extra_config_file if $::redis::notify_service { File { diff --git a/redis/manifests/init.pp b/redis/manifests/init.pp index a420bddd3..66dd6f58e 
100644 --- a/redis/manifests/init.pp +++ b/redis/manifests/init.pp @@ -23,7 +23,7 @@ # [*auto_aof_rewrite_min_size*] # Adjust minimum size for auto-aof-rewrite. # -# Default: 64min +# Default: 64mb # # [*auto_aof_rewrite_percentage*] # Adjust percentage for auto-aof-rewrite. @@ -331,6 +331,12 @@ # # Default: 1024 # +# [*stop_writes_on_bgsave_error*] +# If false then Redis will continue to work as usual even if there +# are problems with disk, permissions, and so forth. +# +# Default: true +# # [*syslog_enabled*] # Enable/disable logging to the system logger. # @@ -342,6 +348,22 @@ # # Default: undef # +# [*tcp_keepalive*] +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 60 seconds. +# # [*timeout*] # Close the connection after a client is idle for N seconds (0 to disable). # @@ -368,6 +390,23 @@ # # Default: 64 # +# [*cluster_enabled*] +# Enables redis 3.0 cluster functionality +# +# Default: false +# +# [*cluster_config_file*] +# Config file for saving cluster nodes configuration. This file is never touched by humans. +# Only set if cluster_enabled is true +# +# Default: nodes.conf +# +# [*cluster_node_timeout*] +# Node timeout +# Only set if cluster_enabled is true +# +# Default: 5000 +# # == Actions: # - Install and configure Redis # @@ -439,13 +478,18 @@ $slaveof = $::redis::params::slaveof, $slowlog_log_slower_than = $::redis::params::slowlog_log_slower_than, $slowlog_max_len = $::redis::params::slowlog_max_len, + $stop_writes_on_bgsave_error = $::redis::params::stop_writes_on_bgsave_error, $syslog_enabled = $::redis::params::syslog_enabled, $syslog_facility = $::redis::params::syslog_facility, + $tcp_keepalive = $::redis::params::tcp_keepalive, $timeout = $::redis::params::timeout, $ulimit = $::redis::params::ulimit, $workdir = $::redis::params::workdir, $zset_max_ziplist_entries = $::redis::params::zset_max_ziplist_entries, $zset_max_ziplist_value = $::redis::params::zset_max_ziplist_value, + $cluster_enabled = $::redis::params::cluster_enabled, + $cluster_config_file = $::redis::params::cluster_config_file, + $cluster_node_timeout = $::redis::params::cluster_node_timeout, ) inherits redis::params { anchor { 'redis::begin': } anchor { 'redis::end': } diff --git a/redis/manifests/params.pp b/redis/manifests/params.pp index e4d7fc31e..d0697fc8e 100644 --- a/redis/manifests/params.pp +++ b/redis/manifests/params.pp @@ -10,7 +10,7 @@ $activerehashing = true $appendfsync = 'everysec' $appendonly = false - $auto_aof_rewrite_min_size = '64min' + $auto_aof_rewrite_min_size = '64mb' $auto_aof_rewrite_percentage = 100 $bind = '127.0.0.1' $conf_template = 'redis/redis.conf.erb' @@ -56,8 +56,10 @@ $set_max_intset_entries = 512 $slowlog_log_slower_than = 10000 $slowlog_max_len = 1024 + $stop_writes_on_bgsave_error = true $syslog_enabled = undef $syslog_facility = undef + $tcp_keepalive = 0 $timeout = 0 $ulimit = 65536 $workdir = '/var/lib/redis/' @@ -72,6 +74,11 @@ $slave_serve_stale_data = true $slaveof = undef + # redis.conf.erb - redis 3.0 clustering + $cluster_enabled = false + $cluster_config_file = 'nodes.conf' +
$cluster_node_timeout = 5000 + case $::osfamily { 'Debian': { $config_dir = '/etc/redis' @@ -86,6 +93,7 @@ $package_name = 'redis-server' $sentinel_config_file = '/etc/redis/redis-sentinel.conf' $sentinel_config_file_orig = '/etc/redis/redis-sentinel.conf.puppet' + $sentinel_daemonize = true $sentinel_init_script = '/etc/init.d/redis-sentinel' $sentinel_package_name = 'redis-server' $sentinel_package_ensure = 'present' @@ -107,12 +115,13 @@ $config_file_mode = '0644' $config_group = 'root' $config_owner = 'redis' - $daemonize = false + $daemonize = true $log_dir_mode = '0755' $package_ensure = 'present' $package_name = 'redis' $sentinel_config_file = '/etc/redis-sentinel.conf' $sentinel_config_file_orig = '/etc/redis-sentinel.conf.puppet' + $sentinel_daemonize = false $sentinel_init_script = undef $sentinel_package_name = 'redis' $sentinel_package_ensure = 'present' @@ -140,6 +149,7 @@ $package_name = 'redis' $sentinel_config_file = '/usr/local/etc/redis-sentinel.conf' $sentinel_config_file_orig = '/usr/local/etc/redis-sentinel.conf.puppet' + $sentinel_daemonize = true $sentinel_init_script = undef $sentinel_package_name = 'redis' $sentinel_package_ensure = 'present' @@ -167,6 +177,7 @@ $package_name = 'redis' $sentinel_config_file = '/etc/redis/redis-sentinel.conf' $sentinel_config_file_orig = '/etc/redis/redis-sentinel.conf.puppet' + $sentinel_daemonize = true $sentinel_init_script = undef $sentinel_package_name = 'redis' $sentinel_package_ensure = 'present' diff --git a/redis/manifests/preinstall.pp b/redis/manifests/preinstall.pp index e7b5bd056..5f3ce6ae7 100644 --- a/redis/manifests/preinstall.pp +++ b/redis/manifests/preinstall.pp @@ -7,10 +7,10 @@ if $::redis::manage_repo { case $::operatingsystem { 'RedHat', 'CentOS', 'Scientific', 'OEL': { - if $::operatingsystemmajrelease < '7' { - $rpm_url = $::operatingsystemmajrelease ? { - '5' => "http://download.powerstack.org/5/${::architecture}/", - '6' => "http://download.powerstack.org/6/${::architecture}/", + if (versioncmp($::operatingsystemrelease, '7.0') == -1) { + $rpm_url = $::operatingsystemrelease ? { + /^5/ => "http://download.powerstack.org/5/${::architecture}/", + /^6/ => "http://download.powerstack.org/6/${::architecture}/", default => Fail['Operating system or release not supported.'], } @@ -29,7 +29,7 @@ } } - if $::operatingsystemmajrelease == '7' { + if (versioncmp($::operatingsystemmajrelease, '7') >= 0) { require ::epel } } diff --git a/redis/manifests/sentinel.pp b/redis/manifests/sentinel.pp index 552df9496..4f3663865 100644 --- a/redis/manifests/sentinel.pp +++ b/redis/manifests/sentinel.pp @@ -36,6 +36,11 @@ # # Default: redis/redis-sentinel.conf.erb # +# [*daemonize*] +# Have Redis sentinel run as a daemon. 
+# +# Default: true +# # [*down_after*] # Number of milliseconds the master (or any attached slave or sentinel) # should be unreachable (as in, not acceptable reply to PING, continuously, @@ -149,6 +154,7 @@ $config_file_orig = $::redis::params::sentinel_config_file_orig, $config_file_mode = $::redis::params::sentinel_config_file_mode, $conf_template = $::redis::params::sentinel_conf_template, + $daemonize = $::redis::params::sentinel_daemonize, $down_after = $::redis::params::sentinel_down_after, $failover_timeout = $::redis::params::sentinel_failover_timeout, $init_script = $::redis::params::sentinel_init_script, @@ -169,9 +175,8 @@ $working_dir = $::redis::params::sentinel_working_dir, $notification_script = $::redis::params::sentinel_notification_script, ) inherits redis::params { - $daemonize = $::redis::daemonize - unless defined(Package['$package_name']) { + unless defined(Package[$package_name]) { ensure_resource('package', $package_name, { 'ensure' => $package_ensure }) @@ -196,6 +201,7 @@ } if $init_script { + file { $init_script: ensure => present, @@ -205,10 +211,13 @@ content => template($init_template), require => Package[$package_name]; } + exec { '/usr/sbin/update-rc.d redis-sentinel defaults': - require => File[$init_script]; + subscribe => File[$init_script], + refreshonly => true; } + } service { $service_name: diff --git a/redis/metadata.json b/redis/metadata.json index 76da52ee6..f93c305e7 100644 --- a/redis/metadata.json +++ b/redis/metadata.json @@ -1,6 +1,6 @@ { "name": "arioch-redis", - "version": "1.1.3", + "version": "1.2.1", "author": "Tom De Vylder", "summary": "Redis module", "license": "Apache-2.0", diff --git a/redis/spec/acceptance/nodesets/default.yml b/redis/spec/acceptance/nodesets/default.yml new file mode 100644 index 000000000..780db9ef0 --- /dev/null +++ b/redis/spec/acceptance/nodesets/default.yml @@ -0,0 +1,11 @@ +HOSTS: + centos-65-x64: + roles: + - master + platform: el-6-x86_64 + box : centos-65-x64-vbox436-nocm + box_url : http://puppet-vagrant-boxes.puppetlabs.com/centos-65-x64-virtualbox-nocm.box + hypervisor : vagrant + +CONFIG: + type: foss diff --git a/redis/spec/acceptance/redis_spec.rb b/redis/spec/acceptance/redis_spec.rb new file mode 100644 index 000000000..1e1998a7a --- /dev/null +++ b/redis/spec/acceptance/redis_spec.rb @@ -0,0 +1,23 @@ +require 'spec_helper_acceptance' + +describe 'redis' do + it 'should run successfully' do + pp = <<-EOS + Exec { + path => [ '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin', ] + } + + class { '::redis': + manage_repo => true, + } + EOS + + # Apply twice to ensure no errors the second time. 
+ apply_manifest(pp, :catch_failures => true) + apply_manifest(pp, :catch_changes => true) + end + + describe package('redis') do + it { should be_installed } + end +end diff --git a/redis/spec/classes/redis_spec.rb b/redis/spec/classes/redis_spec.rb index 97a159933..193d99936 100644 --- a/redis/spec/classes/redis_spec.rb +++ b/redis/spec/classes/redis_spec.rb @@ -699,6 +699,19 @@ } end + describe 'with parameter stop_writes_on_bgsave_error' do + let (:params) { + { + :stop_writes_on_bgsave_error => true + } + } + + it { should contain_file('/etc/redis/redis.conf').with( + 'content' => /stop-writes-on-bgsave-error.*yes/ + ) + } + end + describe 'with parameter syslog_enabled' do let (:params) { { @@ -726,6 +739,19 @@ } end + describe 'with parameter tcp_keepalive' do + let (:params) { + { + :tcp_keepalive => '_VALUE_' + } + } + + it { should contain_file('/etc/redis/redis.conf').with( + 'content' => /tcp-keepalive.*_VALUE_/ + ) + } + end + describe 'with parameter timeout' do let (:params) { { @@ -777,5 +803,60 @@ ) } end + + describe 'with parameter cluster_enabled-false' do + let (:params) { + { + :cluster_enabled => false + } + } + + it { should_not contain_file('/etc/redis/redis.conf').with( + 'content' => /cluster-enabled/ + ) + } + end + + describe 'with parameter cluster_enabled-true' do + let (:params) { + { + :cluster_enabled => true + } + } + + it { should contain_file('/etc/redis/redis.conf').with( + 'content' => /cluster-enabled.*yes/ + ) + } + end + + describe 'with parameter cluster_config_file' do + let (:params) { + { + :cluster_enabled => true, + :cluster_config_file => '_VALUE_' + } + } + + it { should contain_file('/etc/redis/redis.conf').with( + 'content' => /cluster-config-file.*_VALUE_/ + ) + } + end + + describe 'with parameter cluster_node_timeout' do + let (:params) { + { + :cluster_enabled => true, + :cluster_node_timeout => '_VALUE_' + } + } + + it { should contain_file('/etc/redis/redis.conf').with( + 'content' => /cluster-node-timeout.*_VALUE_/ + ) + } + end + end diff --git a/redis/spec/spec_helper_acceptance.rb b/redis/spec/spec_helper_acceptance.rb new file mode 100644 index 000000000..ac4f001d7 --- /dev/null +++ b/redis/spec/spec_helper_acceptance.rb @@ -0,0 +1,31 @@ +require 'beaker-rspec' + +unless ENV['RS_PROVISION'] == 'no' + hosts.each do |host| + # Install Puppet + if host.is_pe? + install_pe + else + install_puppet + end + end +end + +RSpec.configure do |c| + # Project root + proj_root = File.expand_path(File.join(File.dirname(__FILE__), '..')) + + # Readable test descriptions + c.formatter = :documentation + + c.before :suite do + # Install module and dependencies + puppet_module_install(:source => proj_root, :module_name => 'redis') + + hosts.each do |host| + shell("/bin/touch #{default['puppetpath']}/hiera.yaml") + + shell('puppet module install puppetlabs-stdlib', { :acceptable_exit_codes => [0,1] }) + end + end +end diff --git a/redis/templates/redis.conf.erb b/redis/templates/redis.conf.erb index 952207fce..e44542708 100644 --- a/redis/templates/redis.conf.erb +++ b/redis/templates/redis.conf.erb @@ -39,6 +39,22 @@ bind <%= @bind %> # Close the connection after a client is idle for N seconds (0 to disable) timeout <%= @timeout %> +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle.
+# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 60 seconds. +tcp-keepalive <%= @tcp_keepalive %> + # Set server verbosity to 'debug' # it can be one of: # debug (a lot of information, useful for development/testing) @@ -108,7 +124,7 @@ save 60 10000 # and persistence, you may want to disable this feature so that Redis will # continue to work as usually even if there are problems with disk, # permissions, and so forth. -stop-writes-on-bgsave-error yes +<% if @stop_writes_on_bgsave_error -%>stop-writes-on-bgsave-error yes<% else -%>stop-writes-on-bgsave-error no<% end -%> # Compress string objects using LZF when dump .rdb databases? # For default that's set to 'yes' as it's almost always a win. @@ -551,6 +567,14 @@ client-output-buffer-limit pubsub 32mb 8mb 60 # 100 only in environments where very low latency is required. hz <%= @hz %> +# Redis Cluster Settings +<% if @cluster_enabled -%> +cluster-enabled yes +cluster-config-file <%= @cluster_config_file %> +cluster-node-timeout <%= @cluster_node_timeout %> +<% end -%> + + ################################## INCLUDES ################################### # Include one or more other config files here. This is useful if you @@ -563,4 +587,3 @@ hz <%= @hz %> <% if @extra_config_file -%> include <%= @extra_config_file %> <% end -%> - diff --git a/sahara/CHANGELOG.md b/sahara/CHANGELOG.md index 9a769f084..79fe88816 100644 --- a/sahara/CHANGELOG.md +++ b/sahara/CHANGELOG.md @@ -1,3 +1,36 @@ +##2015-11-25 - 7.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Liberty. + +####Backwards-incompatible changes +- remove kilo deprecated parameters + +####Features +- add distribute mode support +- introduce Sahara extended logging class +- remove POSIX file modes +- add tag to package and service resources +- add sahara::config class +- add sahara::db::sync +- add support of SSL +- add an ability to manage use_stderr parameter +- reflect provider change in puppet-openstacklib +- introduce sahara::db class +- db: use postgresql lib class for psycopg package +- add new parameter 'plugins' for Sahara +- configure rpc options separately from ceilometer notifications + +####Bugfixes +- rely on autorequire for config resource ordering +- remove Sqlite validation for database_connection + +####Maintenance +- initial msync run for all Puppet OpenStack modules +- try to use zuul-cloner to prepare fixtures +- remove class_parameter_defaults puppet-lint check +- acceptance: use common bits from puppet-openstack-integration + ##2015-10-10 - 6.1.0 ###Summary diff --git a/sahara/README.md b/sahara/README.md index 667762deb..a462c39f1 100644 --- a/sahara/README.md +++ b/sahara/README.md @@ -1,7 +1,7 @@ sahara ====== -6.1.0 - 2015.1 - Kilo +7.0.0 - 2015.2 - Liberty #### Table of Contents diff --git a/sahara/examples/basic.pp b/sahara/examples/basic.pp index ded521d7c..9fa262300 100644 --- a/sahara/examples/basic.pp +++ b/sahara/examples/basic.pp @@ -22,7 +22,7 @@ # Then the common class class { '::sahara': - database_connection => 'mysql://sahara:a_big_secret@127.0.0.1:3306/sahara', + database_connection => 'mysql+pymysql://sahara:a_big_secret@127.0.0.1:3306/sahara', verbose => true, debug => true, admin_user => 'admin', diff --git a/sahara/manifests/db.pp b/sahara/manifests/db.pp index 32925b8e0..830026a4a 100644 --- 
a/sahara/manifests/db.pp +++ b/sahara/manifests/db.pp @@ -6,43 +6,45 @@ # # [*database_connection*] # (Optional) The connection string to use to connect to the database. -# Defaults to 'mysql://sahara:secrete@localhost:3306/sahara' +# Defaults to 'mysql+pymysql://sahara:secrete@localhost:3306/sahara' # # [*database_max_retries*] # (Optional) Maximum number of database connection retries during startup. # Set to -1 to specify an infinite retry count. -# Defaults to 10. +# Defaults to $::os_service_default. # # [*database_idle_timeout*] # (Optional) Timeout before idle SQL connections are reaped. -# Defaults to 3600. +# Defaults to $::os_service_default. # # [*database_retry_interval*] # (optional) Interval between retries of opening a database connection. -# Defaults to 10. +# Defaults to $::os_service_default. # # [*database_min_pool_size*] # (optional) Minimum number of SQL connections to keep open in a pool. -# Defaults to 1. +# Defaults to $::os_service_default. # # [*database_max_pool_size*] # (optional) Maximum number of SQL connections to keep open in a pool. -# Defaults to 10. +# Defaults to $::os_service_default. # # [*database_max_overflow*] # (optional) If set, use this value for max_overflow with sqlalchemy. -# Defaults to 20. +# Defaults to $::os_service_default. # class sahara::db ( - $database_connection = 'mysql://sahara:secrete@localhost:3306/sahara', - $database_idle_timeout = 3600, - $database_min_pool_size = 1, - $database_max_pool_size = 10, - $database_max_retries = 10, - $database_retry_interval = 10, - $database_max_overflow = 20, + $database_connection = 'mysql+pymysql://sahara:secrete@localhost:3306/sahara', + $database_idle_timeout = $::os_service_default, + $database_min_pool_size = $::os_service_default, + $database_max_pool_size = $::os_service_default, + $database_max_retries = $::os_service_default, + $database_retry_interval = $::os_service_default, + $database_max_overflow = $::os_service_default, ) { + include ::sahara::params + # NOTE(degorenko): In order to keep backward compatibility we rely on the pick function # to use sahara:: if sahara::db:: isn't specified. $database_connection_real = pick($::sahara::database_connection, $database_connection) @@ -53,13 +55,18 @@ $database_retry_interval_real = pick($::sahara::database_retry_interval, $database_retry_interval) $database_max_overflow_real = pick($::sahara::database_max_overflow, $database_max_overflow) - validate_re($database_connection_real, '(mysql|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') + validate_re($database_connection_real, + '^(mysql(\+pymysql)?|postgresql):\/\/(\S+:\S+@\S+\/\S+)?') case $database_connection_real { - /^mysql:\/\//: { - $backend_package = false + /^mysql(\+pymysql)?:\/\//: { require mysql::bindings require mysql::bindings::python + if $database_connection_real =~ /^mysql\+pymysql/ { + $backend_package = $::sahara::params::pymysql_package_name + } else { + $backend_package = false + } } /^postgresql:\/\//: { $backend_package = false diff --git a/sahara/manifests/db/sync.pp b/sahara/manifests/db/sync.pp index c500649d7..a97735be3 100644 --- a/sahara/manifests/db/sync.pp +++ b/sahara/manifests/db/sync.pp @@ -1,7 +1,16 @@ # # Class to execute sahara dbsync # -class sahara::db::sync { +# == Parameters +# +# [*extra_params*] +# (optional) String of extra command line parameters to append +# to the sahara-db-manage command. 
+# Defaults to '--config-file /etc/sahara/sahara.conf' +# +class sahara::db::sync( + $extra_params = '--config-file /etc/sahara/sahara.conf', +) { include ::sahara::params @@ -12,7 +21,7 @@ Sahara_config <| title == 'database/connection' |> ~> Exec['sahara-dbmanage'] exec { 'sahara-dbmanage': - command => $::sahara::params::dbmanage_command, + command => "sahara-db-manage ${extra_params} upgrade head", path => '/usr/bin', user => 'sahara', refreshonly => true, diff --git a/sahara/manifests/init.pp b/sahara/manifests/init.pp index 176bdd381..f1119ee61 100644 --- a/sahara/manifests/init.pp +++ b/sahara/manifests/init.pp @@ -35,40 +35,40 @@ # # [*host*] # (Optional) Hostname for sahara to listen on -# Defaults to '0.0.0.0'. +# Defaults to $::os_service_default. # # [*port*] # (Optional) Port for sahara to listen on -# Defaults to 8386. +# Defaults to $::os_service_default. # # [*plugins*] # (Optional) List of plugins to be loaded. # Sahara preserves the order of the list when returning it. -# Defaults to undef +# Defaults to $::os_service_default. # # [*use_neutron*] # (Optional) Whether to use neutron -# Defaults to 'false'. +# Defaults to $::os_service_default. # # [*use_floating_ips*] # (Optional) Whether to use floating IPs to communicate with instances. -# Defaults to 'true'. +# Defaults to $::os_service_default. # # [*use_ssl*] # (optional) Enable SSL on the API server -# Defaults to false, not set +# Defaults to $::os_service_default, not set. # # [*cert_file*] # (optinal) Certificate file to use when starting API server securely -# Defaults to undef +# Defaults to $::os_service_default. # # [*key_file*] # (optional) Private key file to use when starting API server securely -# Defaults to undef +# Defaults to $::os_service_default. # # [*ca_file*] # (optional) CA certificate file to use to verify connecting clients -# Defaults to undef +# Defaults to $::os_service_default. # # == database configuration options # @@ -135,125 +135,76 @@ # rabbit (for rabbitmq) # qpid (for qpid) # zmq (for zeromq) -# Defaults to undef +# Defaults to $::os_service_default. # # [*amqp_durable_queues*] # (optional) Use durable queues in AMQP -# Defaults to false. +# Defaults to $::os_service_default. # # [*rabbit_ha_queues*] # (Optional) Use durable queues in RabbitMQ. -# Defaults to false. +# Defaults to $::os_service_default. # # [*rabbit_host*] # (Optional) IP or hostname of the rabbit server. -# Defaults to '127.0.0.1'. +# Defaults to $::os_service_default. # # [*rabbit_port*] # (Optional) Port of the rabbit server. -# Defaults to 5672. +# Defaults to $::os_service_default. # # [*rabbit_hosts*] # (Optional) IP or hostname of the rabbits servers. # comma separated array (ex: ['1.0.0.10:5672','1.0.0.11:5672']) -# Defaults to false. +# Defaults to $::os_service_default. # # [*rabbit_use_ssl*] # (Optional) Connect over SSL for RabbitMQ. -# Defaults to false. +# Defaults to $::os_service_default. # # [*rabbit_userid*] # (Optional) User to connect to the rabbit server. -# Defaults to 'guest'. +# Defaults to $::os_service_default. # # [*rabbit_password*] # (Optional) Password to connect to the rabbit server. -# Defaults to 'guest'. +# Defaults to $::os_service_default. # # [*rabbit_login_method*] # (Optional) Method to auth with the rabbit server. -# Defaults to 'AMQPLAIN'. +# Defaults to $::os_service_default. # # [*rabbit_virtual_host*] # (Optional) Virtual host to use. -# Defaults to '/'. +# Defaults to $::os_service_default. 
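
For reference, a minimal sketch of how the reworked sahara::db::sync class shown earlier in this patch could be declared on its own; the alternate config-file path is a placeholder, not something taken from the module:

    class { '::sahara::db::sync':
      extra_params => '--config-file /etc/sahara/sahara-api.conf',  # placeholder path
    }
    # With the exec command shown above, this runs:
    #   sahara-db-manage --config-file /etc/sahara/sahara-api.conf upgrade head
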
# # [*rabbit_retry_interval*] # (Optional) Reconnection attempt frequency for rabbit. -# Defaults to 1. +# Defaults to $::os_service_default. # # [*rabbit_retry_backoff*] # (Optional) Backoff between reconnection attempts for rabbit. -# Defaults to 2. +# Defaults to $::os_service_default. # # [*rabbit_max_retries*] # (Optional) Number of times to retry (0 == no limit). -# Defaults to 0. -# -# [*qpid_hostname*] -# (Optional) IP or hostname of the qpid server. -# Defaults to '127.0.0.1'. -# -# [*qpid_port*] -# (Optional) Port of the qpid server. -# Defaults to 5672. -# -# [*qpid_hosts*] -# (Optional) Qpid HA cluster host:port pairs.. -# comma separated array (ex: ['1.0.0.10:5672','1.0.0.11:5672']) -# Defaults to false. -# -# [*qpid_username*] -# (Optional) User to connect to the qpid server. -# Defaults to 'guest'. -# -# [*qpid_password*] -# (Optional) Password to connect to the qpid server. -# Defaults to 'guest'. -# -# [*qpid_sasl_mechanisms*] -# (Optional) String of SASL mechanisms to use. -# Defaults to ''. -# -# [*qpid_heartbeat*] -# (Optional) Seconds between connection keepalive heartbeats. -# Defaults to 60. -# -# [*qpid_protocol*] -# (Optional) Protocol to use for qpid (tcp/ssl). -# Defaults to tcp. -# -# [*qpid_tcp_nodelay*] -# (Optional) Whether to disable the Nagle algorithm. -# Defaults to true. -# -# [*qpid_receiver_capacity*] -# (Optional) Number of prefetched messages to hold. -# Defaults to 1. -# -# [*qpid_topology_version*] -# (Optional) Version of qpid toplogy to use. -# Defaults to 2. +# Defaults to $::os_service_default. # # [*zeromq_bind_address*] # (Optional) Bind address; wildcard, ethernet, or ip address. -# Defaults to '*'. -# -# [*zeromq_port*] -# (Optional) Receiver listening port. -# Defaults to 9501. +# Defaults to $::os_service_default. # # [*zeromq_contexts*] # (Optional) Number of contexsts for zeromq. -# Defaults to 1. +# Defaults to $::os_service_default. # # [*zeromq_topic_backlog*] # (Optional) Number of incoming messages to buffer. -# Defaults to 'None'. +# Defaults to $::os_service_default. # # [*zeromq_ipc_dir*] # (Optional) Directory for zeromq IPC. -# Defaults to '/var/run/openstack'. +# Defaults to $::os_service_default. # # [*zeromq_host*] # (Optional) Name of the current node: hostname, FQDN, or IP. @@ -261,25 +212,25 @@ # # [*cast_timeout*] # (Optional) TTL for zeromq messages. -# Defaults to 30. +# Defaults to $::os_service_default. # # [*kombu_ssl_version*] # (optional) SSL version to use (valid only if SSL enabled). # Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may be # available on some distributions. -# Defaults to 'TLSv1' +# Defaults to $::os_service_default. # # [*kombu_ssl_keyfile*] # (Optional) SSL key file (valid only if SSL enabled). -# Defaults to undef. +# Defaults to $::os_service_default. # # [*kombu_ssl_certfile*] # (Optional) SSL cert file (valid only if SSL enabled). -# Defaults to undef. +# Defaults to $::os_service_default. # # [*kombu_ssl_ca_certs*] # (Optional) SSL certification authority file (valid only if SSL enabled). -# Defaults to undef +# Defaults to $::os_service_default. # # [*kombu_reconnect_delay*] # (Optional) Backoff on cancel notification (valid only if SSL enabled). @@ -287,12 +238,53 @@ # # == DEPRECATED PARAMETERS # -# [*manage_service*] -# (optional) Whether the service should be managed by Puppet. +# [*zeromq_port*] +# (Optional) Receiver listening port. # Defaults to undef. # -# [*enabled*] -# (optional) Should the service be enabled. 
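
A sketch of driving the rabbit/kombu SSL options documented above through the main class; the credential and certificate values are placeholders reused from the module's own spec examples further down in this patch:

    class { '::sahara':
      admin_password     => 'a_big_secret',   # placeholder credential
      rabbit_password    => 'pass',           # placeholder credential
      rabbit_use_ssl     => true,
      kombu_ssl_version  => 'TLSv1',
      kombu_ssl_ca_certs => '/etc/ca.cert',   # placeholder paths
      kombu_ssl_certfile => '/etc/certfile',
      kombu_ssl_keyfile  => '/etc/key',
    }

Any of these left unset now falls back to $::os_service_default, leaving the option at the service's own default.
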
+# [*qpid_hostname*] +# (Optional) IP or hostname of the qpid server. +# Defaults to undef. +# +# [*qpid_port*] +# (Optional) Port of the qpid server. +# Defaults to undef. +# +# [*qpid_hosts*] +# (Optional) Qpid HA cluster host:port pairs.. +# comma separated array (ex: ['1.0.0.10:5672','1.0.0.11:5672']) +# Defaults to undef. +# +# [*qpid_username*] +# (Optional) User to connect to the qpid server. +# Defaults to undef. +# +# [*qpid_password*] +# (Optional) Password to connect to the qpid server. +# Defaults to undef. +# +# [*qpid_sasl_mechanisms*] +# (Optional) String of SASL mechanisms to use. +# Defaults to undef. +# +# [*qpid_heartbeat*] +# (Optional) Seconds between connection keepalive heartbeats. +# Defaults to undef. +# +# [*qpid_protocol*] +# (Optional) Protocol to use for qpid (tcp/ssl). +# Defaults to undef. +# +# [*qpid_tcp_nodelay*] +# (Optional) Whether to disable the Nagle algorithm. +# Defaults to undef. +# +# [*qpid_receiver_capacity*] +# (Optional) Number of prefetched messages to hold. +# Defaults to undef. +# +# [*qpid_topology_version*] +# (Optional) Version of qpid toplogy to use. # Defaults to undef. # class sahara( @@ -303,15 +295,15 @@ $use_stderr = undef, $log_facility = undef, $log_dir = undef, - $host = '0.0.0.0', - $port = '8386', - $plugins = undef, - $use_neutron = false, - $use_floating_ips = true, - $use_ssl = false, - $ca_file = undef, - $cert_file = undef, - $key_file = undef, + $host = $::os_service_default, + $port = $::os_service_default, + $plugins = $::os_service_default, + $use_neutron = $::os_service_default, + $use_floating_ips = $::os_service_default, + $use_ssl = $::os_service_default, + $ca_file = $::os_service_default, + $cert_file = $::os_service_default, + $key_file = $::os_service_default, $database_connection = undef, $database_max_retries = undef, $database_idle_timeout = undef, @@ -326,46 +318,44 @@ $admin_tenant_name = 'services', $auth_uri = 'http://127.0.0.1:5000/v2.0/', $identity_uri = 'http://127.0.0.1:35357/', - $rpc_backend = undef, - $amqp_durable_queues = false, - $rabbit_ha_queues = false, - $rabbit_host = 'localhost', - $rabbit_hosts = false, - $rabbit_port = 5672, - $rabbit_use_ssl = false, - $rabbit_userid = 'guest', - $rabbit_password = 'guest', - $rabbit_login_method = 'AMQPLAIN', - $rabbit_virtual_host = '/', - $rabbit_retry_interval = 1, - $rabbit_retry_backoff = 2, - $rabbit_max_retries = 0, - $qpid_hostname = 'localhost', - $qpid_port = 5672, - $qpid_hosts = false, - $qpid_username = 'guest', - $qpid_password = 'guest', - $qpid_sasl_mechanisms = '', - $qpid_heartbeat = 60, - $qpid_protocol = 'tcp', - $qpid_tcp_nodelay = true, - $qpid_receiver_capacity = 1, - $qpid_topology_version = 2, - $zeromq_bind_address = '*', - $zeromq_port = 9501, - $zeromq_contexts = 1, - $zeromq_topic_backlog = 'None', - $zeromq_ipc_dir = '/var/run/openstack', + $rpc_backend = $::os_service_default, + $amqp_durable_queues = $::os_service_default, + $rabbit_ha_queues = $::os_service_default, + $rabbit_host = $::os_service_default, + $rabbit_hosts = $::os_service_default, + $rabbit_port = $::os_service_default, + $rabbit_use_ssl = $::os_service_default, + $rabbit_userid = $::os_service_default, + $rabbit_password = $::os_service_default, + $rabbit_login_method = $::os_service_default, + $rabbit_virtual_host = $::os_service_default, + $rabbit_retry_interval = $::os_service_default, + $rabbit_retry_backoff = $::os_service_default, + $rabbit_max_retries = $::os_service_default, + $zeromq_bind_address = $::os_service_default, + $zeromq_contexts = 
$::os_service_default, + $zeromq_topic_backlog = $::os_service_default, + $zeromq_ipc_dir = $::os_service_default, $zeromq_host = 'sahara', - $cast_timeout = 30, - $kombu_ssl_version = 'TLSv1', - $kombu_ssl_keyfile = undef, - $kombu_ssl_certfile = undef, - $kombu_ssl_ca_certs = undef, - $kombu_reconnect_delay = '1.0', + $cast_timeout = $::os_service_default, + $kombu_ssl_version = $::os_service_default, + $kombu_ssl_keyfile = $::os_service_default, + $kombu_ssl_certfile = $::os_service_default, + $kombu_ssl_ca_certs = $::os_service_default, + $kombu_reconnect_delay = $::os_service_default, # DEPRECATED PARAMETERS - $manage_service = undef, - $enabled = undef, + $zeromq_port = undef, + $qpid_hostname = undef, + $qpid_port = undef, + $qpid_hosts = undef, + $qpid_username = undef, + $qpid_password = undef, + $qpid_sasl_mechanisms = undef, + $qpid_heartbeat = undef, + $qpid_protocol = undef, + $qpid_tcp_nodelay = undef, + $qpid_receiver_capacity = undef, + $qpid_topology_version = undef, ) { include ::sahara::params include ::sahara::logging @@ -380,17 +370,8 @@ Package['sahara-common'] -> Class['sahara::policy'] - if $plugins { - sahara_config { - 'DEFAULT/plugins': value => join(any2array($plugins),','); - } - } else { - sahara_config { - 'DEFAULT/plugins': ensure => absent; - } - } - sahara_config { + 'DEFAULT/plugins': value => join(any2array($plugins),','); 'DEFAULT/use_neutron': value => $use_neutron; 'DEFAULT/use_floating_ips': value => $use_floating_ips; 'DEFAULT/host': value => $host; @@ -409,40 +390,10 @@ } } - if $rpc_backend == 'rabbit' { - if $rabbit_use_ssl { - if $kombu_ssl_ca_certs { - sahara_config { 'oslo_messaging_rabbit/kombu_ssl_ca_certs': value => $kombu_ssl_ca_certs; } - } else { - sahara_config { 'oslo_messaging_rabbit/kombu_ssl_ca_certs': ensure => absent; } - } - if $kombu_ssl_certfile or $kombu_ssl_keyfile { - sahara_config { - 'oslo_messaging_rabbit/kombu_ssl_certfile': value => $kombu_ssl_certfile; - 'oslo_messaging_rabbit/kombu_ssl_keyfile': value => $kombu_ssl_keyfile; - } - } else { - sahara_config { - 'oslo_messaging_rabbit/kombu_ssl_certfile': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_keyfile': ensure => absent; - } - } - if $kombu_ssl_version { - sahara_config { 'oslo_messaging_rabbit/kombu_ssl_version': value => $kombu_ssl_version; } - } else { - sahara_config { 'oslo_messaging_rabbit/kombu_ssl_version': ensure => absent; } - } - } else { + if $rpc_backend == 'rabbit' or is_service_default($rpc_backend) { + if ! 
is_service_default($rabbit_hosts) and $rabbit_hosts { sahara_config { - 'oslo_messaging_rabbit/kombu_ssl_ca_certs': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_certfile': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_keyfile': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_version': ensure => absent; - } - } - if $rabbit_hosts { - sahara_config { - 'oslo_messaging_rabbit/rabbit_hosts': value => join($rabbit_hosts, ','); + 'oslo_messaging_rabbit/rabbit_hosts': value => join(any2array($rabbit_hosts), ','); 'oslo_messaging_rabbit/rabbit_ha_queues': value => true; } } else { @@ -450,7 +401,7 @@ 'oslo_messaging_rabbit/rabbit_host': value => $rabbit_host; 'oslo_messaging_rabbit/rabbit_port': value => $rabbit_port; 'oslo_messaging_rabbit/rabbit_ha_queues': value => $rabbit_ha_queues; - 'oslo_messaging_rabbit/rabbit_hosts': value => "${rabbit_host}:${rabbit_port}"; + 'oslo_messaging_rabbit/rabbit_hosts': ensure => absent; } } sahara_config { @@ -466,45 +417,27 @@ 'oslo_messaging_rabbit/rabbit_retry_interval': value => $rabbit_retry_interval; 'oslo_messaging_rabbit/rabbit_retry_backoff': value => $rabbit_retry_backoff; 'oslo_messaging_rabbit/rabbit_max_retries': value => $rabbit_max_retries; + 'oslo_messaging_rabbit/kombu_ssl_ca_certs': value => $kombu_ssl_ca_certs; + 'oslo_messaging_rabbit/kombu_ssl_certfile': value => $kombu_ssl_certfile; + 'oslo_messaging_rabbit/kombu_ssl_keyfile': value => $kombu_ssl_keyfile; + 'oslo_messaging_rabbit/kombu_ssl_version': value => $kombu_ssl_version; 'oslo_messaging_rabbit/kombu_reconnect_delay': value => $kombu_reconnect_delay; } } if $rpc_backend == 'qpid' { + warning('Qpid driver is removed from Oslo.messaging in the Mitaka release') + } - if $qpid_hosts { - sahara_config { - 'oslo_messaging_qpid/qpid_hosts': value => join($qpid_hosts, ','); - } - } else { - sahara_config { - 'oslo_messaging_qpid/qpid_hostname': value => $qpid_hostname; - 'oslo_messaging_qpid/qpid_port': value => $qpid_port; - 'oslo_messaging_qpid/qpid_hosts': value => "${qpid_hostname}:${qpid_port}"; - } - } + if $rpc_backend == 'zmq' { - sahara_config { - 'DEFAULT/rpc_backend': value => 'qpid'; - 'oslo_messaging_qpid/amqp_durable_queues': value => $amqp_durable_queues; - 'oslo_messaging_qpid/qpid_username': value => $qpid_username; - 'oslo_messaging_qpid/qpid_password': - value => $qpid_password, - secret => true; - 'oslo_messaging_qpid/qpid_sasl_mechanisms': value => $qpid_sasl_mechanisms; - 'oslo_messaging_qpid/qpid_heartbeat': value => $qpid_heartbeat; - 'oslo_messaging_qpid/qpid_protocol': value => $qpid_protocol; - 'oslo_messaging_qpid/qpid_tcp_nodelay': value => $qpid_tcp_nodelay; - 'oslo_messaging_qpid/qpid_receiver_capacity': value => $qpid_receiver_capacity; - 'oslo_messaging_qpid/qpid_topology_version': value => $qpid_topology_version; + if $zeromq_port { + warning('The zeromq_port parameter is deprecated and has no effect.') } - } - if $rpc_backend == 'zmq' { sahara_config { 'DEFAULT/rpc_backend': value => 'zmq'; 'DEFAULT/rpc_zmq_bind_address': value => $zeromq_bind_address; - 'DEFAULT/rpc_zmq_port': value => $zeromq_port; 'DEFAULT/rpc_zmq_contexts': value => $zeromq_contexts; 'DEFAULT/rpc_zmq_topic_backlog': value => $zeromq_topic_backlog; 'DEFAULT/rpc_zmq_ipc_dir': value => $zeromq_ipc_dir; @@ -513,14 +446,14 @@ } } - if $use_ssl { - if !$ca_file { + if ! 
is_service_default($use_ssl) and $use_ssl { + if is_service_default($ca_file) { fail('The ca_file parameter is required when use_ssl is set to true') } - if !$cert_file { + if is_service_default($cert_file) { fail('The cert_file parameter is required when use_ssl is set to true') } - if !$key_file { + if is_service_default($key_file) { fail('The key_file parameter is required when use_ssl is set to true') } sahara_config { @@ -528,25 +461,10 @@ 'ssl/key_file' : value => $key_file; 'ssl/ca_file' : value => $ca_file; } - } else { - sahara_config { - 'ssl/cert_file' : ensure => absent; - 'ssl/key_file' : ensure => absent; - 'ssl/ca_file' : ensure => absent; - } } if $sync_db { include ::sahara::db::sync } - if $manage_service or $enabled { - warning('Configuring daemon services from init class is deprecated.') - warning('Use ::sahara::service::{all|api|engine}.pp for configuring daemon services instead.') - class { '::sahara::service::all': - enabled => $enabled, - manage_service => $manage_service, - package_ensure => $package_ensure, - } - } } diff --git a/sahara/manifests/logging.pp b/sahara/manifests/logging.pp index 089509b30..eacad183e 100644 --- a/sahara/manifests/logging.pp +++ b/sahara/manifests/logging.pp @@ -6,23 +6,23 @@ # # [*verbose*] # (Optional) Should the daemons log verbose messages -# Defaults to 'false'. +# Defaults to $::os_service_default. # # [*debug*] # (Optional) Should the daemons log debug messages -# Defaults to 'false'. +# Defaults to $::os_service_default. # # [*use_syslog*] # Use syslog for logging. -# (Optional) Defaults to 'false'. +# (Optional) Defaults to $::os_service_default. # # [*use_stderr*] # (optional) Use stderr for logging -# Defaults to 'true' +# Defaults to $::os_service_default. # # [*log_facility*] # Syslog facility to receive log lines. -# (Optional) Defaults to 'LOG_USER'. +# (Optional) Defaults to $::os_service_default. # # [*log_dir*] # (optional) Directory where logs should be stored. @@ -31,34 +31,34 @@ # # [*logging_context_format_string*] # (optional) Format string to use for log messages with context. -# Defaults to undef. +# Defaults to $::os_service_default. # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [%(request_id)s %(user_identity)s] %(instance)s%(message)s' # # [*logging_default_format_string*] # (optional) Format string to use for log messages without context. -# Defaults to undef. +# Defaults to $::os_service_default. # Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ # [-] %(instance)s%(message)s' # # [*logging_debug_format_suffix*] # (optional) Formatted data to append to log format when level is DEBUG. -# Defaults to undef. +# Defaults to $::os_service_default. # Example: '%(funcName)s %(pathname)s:%(lineno)d' # # [*logging_exception_prefix*] # (optional) Prefix each line of exception output with this format. -# Defaults to undef. +# Defaults to $::os_service_default. # Example: '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s' # # [*log_config_append*] # The name of an additional logging configuration file. -# Defaults to undef. +# Defaults to $::os_service_default. # See https://docs.python.org/2/howto/logging.html # # [*default_log_levels*] # (optional) Hash of logger (keys) and level (values) pairs. -# Defaults to undef. +# Defaults to $::os_service_default. 
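
The use_ssl branch above now fails unless all three files are supplied; a sketch of a declaration that satisfies it, with placeholder paths and credential:

    class { '::sahara':
      admin_password => 'a_big_secret',            # placeholder credential
      use_ssl        => true,
      ca_file        => '/etc/sahara/ssl/ca.crt',  # placeholder paths
      cert_file      => '/etc/sahara/ssl/api.crt',
      key_file       => '/etc/sahara/ssl/api.key',
    }
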
# Example: # {'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', # 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', @@ -67,47 +67,47 @@ # # [*publish_errors*] # (optional) Publish error events (boolean value). -# Defaults to undef (false if unconfigured). +# Defaults to $::os_service_default (false if unconfigured). # # [*fatal_deprecations*] # (optional) Make deprecations fatal (boolean value) -# Defaults to undef (false if unconfigured). +# Defaults to $::os_service_default (false if unconfigured). # # [*instance_format*] # (optional) If an instance is passed with the log message, format it # like this (string value). -# Defaults to undef. +# Defaults to $::os_service_default. # Example: '[instance: %(uuid)s] ' # # [*instance_uuid_format*] # (optional) If an instance UUID is passed with the log message, format # It like this (string value). -# Defaults to undef. +# Defaults to $::os_service_default. # Example: instance_uuid_format='[instance: %(uuid)s] ' # [*log_date_format*] # (optional) Format string for %%(asctime)s in log records. -# Defaults to undef. +# Defaults to $::os_service_default. # Example: 'Y-%m-%d %H:%M:%S' # class sahara::logging( - $verbose = false, - $debug = false, - $use_syslog = false, - $use_stderr = true, - $log_facility = 'LOG_USER', + $verbose = $::os_service_default, + $debug = $::os_service_default, + $use_syslog = $::os_service_default, + $use_stderr = $::os_service_default, + $log_facility = $::os_service_default, $log_dir = '/var/log/sahara', - $logging_context_format_string = undef, - $logging_default_format_string = undef, - $logging_debug_format_suffix = undef, - $logging_exception_prefix = undef, - $log_config_append = undef, - $default_log_levels = undef, - $publish_errors = undef, - $fatal_deprecations = undef, - $instance_format = undef, - $instance_uuid_format = undef, - $log_date_format = undef, + $logging_context_format_string = $::os_service_default, + $logging_default_format_string = $::os_service_default, + $logging_debug_format_suffix = $::os_service_default, + $logging_exception_prefix = $::os_service_default, + $log_config_append = $::os_service_default, + $default_log_levels = $::os_service_default, + $publish_errors = $::os_service_default, + $fatal_deprecations = $::os_service_default, + $instance_format = $::os_service_default, + $instance_uuid_format = $::os_service_default, + $log_date_format = $::os_service_default, ) { # NOTE(degorenko): In order to keep backward compatibility we rely on the pick function @@ -119,133 +119,29 @@ $verbose_real = pick($::sahara::verbose, $verbose) $debug_real = pick($::sahara::debug, $debug) - sahara_config { - 'DEFAULT/debug': value => $debug_real; - 'DEFAULT/verbose': value => $verbose_real; - 'DEFAULT/use_stderr': value => $use_stderr_real; - 'DEFAULT/use_syslog': value => $use_syslog_real; - 'DEFAULT/log_dir': value => $log_dir_real; - } - - if $use_syslog_real { - sahara_config { - 'DEFAULT/syslog_log_facility': value => $log_facility_real; - } - } else { - sahara_config { - 'DEFAULT/syslog_log_facility': ensure => absent; - } - } - - if $logging_context_format_string { - sahara_config { - 'DEFAULT/logging_context_format_string': value => $logging_context_format_string; - } - } else { - sahara_config { - 'DEFAULT/logging_context_format_string': ensure => absent; - } - } - - if $logging_default_format_string { - sahara_config { - 'DEFAULT/logging_default_format_string': value => $logging_default_format_string; - } - } else { - sahara_config { - 
'DEFAULT/logging_default_format_string': ensure => absent; - } - } - - if $logging_debug_format_suffix { - sahara_config { - 'DEFAULT/logging_debug_format_suffix': value => $logging_debug_format_suffix; - } - } else { - sahara_config { - 'DEFAULT/logging_debug_format_suffix': ensure => absent; - } - } - - if $logging_exception_prefix { - sahara_config { - 'DEFAULT/logging_exception_prefix': value => $logging_exception_prefix; - } - } else { - sahara_config { - 'DEFAULT/logging_exception_prefix': ensure => absent; - } - } - - if $log_config_append { - sahara_config { - 'DEFAULT/log_config_append': value => $log_config_append; - } + if is_service_default($default_log_levels) { + $default_log_levels_real = $default_log_levels } else { - sahara_config { - 'DEFAULT/log_config_append': ensure => absent; - } + $default_log_levels_real = join(sort(join_keys_to_values($default_log_levels, '=')), ',') } - if $default_log_levels { - sahara_config { - 'DEFAULT/default_log_levels': - value => join(sort(join_keys_to_values($default_log_levels, '=')), ','); - } - } else { - sahara_config { - 'DEFAULT/default_log_levels': ensure => absent; - } - } - - if $publish_errors { - sahara_config { - 'DEFAULT/publish_errors': value => $publish_errors; - } - } else { - sahara_config { - 'DEFAULT/publish_errors': ensure => absent; - } - } - - if $fatal_deprecations { - sahara_config { - 'DEFAULT/fatal_deprecations': value => $fatal_deprecations; - } - } else { - sahara_config { - 'DEFAULT/fatal_deprecations': ensure => absent; - } - } - - if $instance_format { - sahara_config { - 'DEFAULT/instance_format': value => $instance_format; - } - } else { - sahara_config { - 'DEFAULT/instance_format': ensure => absent; - } - } - - if $instance_uuid_format { - sahara_config { - 'DEFAULT/instance_uuid_format': value => $instance_uuid_format; - } - } else { - sahara_config { - 'DEFAULT/instance_uuid_format': ensure => absent; - } - } - - if $log_date_format { - sahara_config { - 'DEFAULT/log_date_format': value => $log_date_format; - } - } else { - sahara_config { - 'DEFAULT/log_date_format': ensure => absent; - } + sahara_config { + 'DEFAULT/debug': value => $debug_real; + 'DEFAULT/verbose': value => $verbose_real; + 'DEFAULT/use_stderr': value => $use_stderr_real; + 'DEFAULT/use_syslog': value => $use_syslog_real; + 'DEFAULT/log_dir': value => $log_dir_real; + 'DEFAULT/syslog_log_facility': value => $log_facility_real; + 'DEFAULT/logging_context_format_string': value => $logging_context_format_string; + 'DEFAULT/logging_default_format_string': value => $logging_default_format_string; + 'DEFAULT/logging_debug_format_suffix': value => $logging_debug_format_suffix; + 'DEFAULT/logging_exception_prefix': value => $logging_exception_prefix; + 'DEFAULT/log_config_append': value => $log_config_append; + 'DEFAULT/default_log_levels': value => $default_log_levels_real; + 'DEFAULT/publish_errors': value => $publish_errors; + 'DEFAULT/fatal_deprecations': value => $fatal_deprecations; + 'DEFAULT/instance_format': value => $instance_format; + 'DEFAULT/instance_uuid_format': value => $instance_uuid_format; + 'DEFAULT/log_date_format': value => $log_date_format; } - } diff --git a/sahara/manifests/notify.pp b/sahara/manifests/notify.pp index c90d9c833..95f5b3388 100644 --- a/sahara/manifests/notify.pp +++ b/sahara/manifests/notify.pp @@ -6,7 +6,7 @@ # # [*control_exchange*] # (Optional) The default exchange to scope topics. -# Defaults to 'openstack'. +# Defaults to $::os_service_default. 
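
The consolidated sahara::logging class above joins the default_log_levels hash into a sorted, comma-separated key=value string; a small sketch, with levels borrowed from the example hash in the parameter documentation:

    class { '::sahara::logging':
      debug              => true,
      default_log_levels => {
        'amqp'       => 'WARN',
        'sqlalchemy' => 'WARN',
      },
    }
    # Renders default_log_levels as: amqp=WARN,sqlalchemy=WARN
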
# # [*enable_notifications*] # (Optional) Enables sending notifications to Ceilometer. @@ -18,22 +18,24 @@ # # [*notification_topics*] # (Optional) Topic to use for notifications. -# Defaults to 'notifications'. +# Defaults to $::os_service_default. # # [*notification_level*] # (Optional) Notification level for outgoing notifications. -# Defaults to 'INFO'. +# Defaults to $::os_service_default. # class sahara::notify ( - $control_exchange = 'openstack', + $control_exchange = $::os_service_default, $enable_notifications = false, $notification_driver = 'messaging', - $notification_topics = 'notifications', - $notification_level = 'INFO', + $notification_topics = $::os_service_default, + $notification_level = $::os_service_default, ) { if $enable_notifications { + warning('The puppet default for notification_driver parameter is different from OpenStack project default') + sahara_config { 'DEFAULT/control_exchange': value => $control_exchange; 'DEFAULT/enable_notifications': value => $enable_notifications; diff --git a/sahara/manifests/notify/qpid.pp b/sahara/manifests/notify/qpid.pp deleted file mode 100644 index e1c95d8d1..000000000 --- a/sahara/manifests/notify/qpid.pp +++ /dev/null @@ -1,149 +0,0 @@ -# == Class: sahara::notify::qpid -# -# Qpid broker configuration for Sahara -# Deprecated class -# -# === Parameters -# -# [*durable_queues*] -# (Optional) Use durable queues in broker. -# Defaults to false. -# -# [*qpid_hostname*] -# (Optional) IP or hostname of the qpid server. -# Defaults to '127.0.0.1'. -# -# [*qpid_port*] -# (Optional) Port of the qpid server. -# Defaults to 5672. -# -# [*qpid_username*] -# (Optional) User to connect to the qpid server. -# Defaults to 'guest'. -# -# [*qpid_password*] -# (Optional) Password to connect to the qpid server. -# Defaults to 'guest'. -# -# [*qpid_sasl_mechanisms*] -# (Optional) String of SASL mechanisms to use. -# Defaults to ''. -# -# [*qpid_heartbeat*] -# (Optional) Seconds between connection keepalive heartbeats. -# Defaults to 60. -# -# [*qpid_protocol*] -# (Optional) Protocol to use for qpid (tcp/ssl). -# Defaults to tcp. -# -# [*qpid_tcp_nodelay*] -# (Optional) Whether to disable the Nagle algorithm. -# Defaults to true. -# -# [*qpid_receiver_capacity*] -# (Optional) Number of prefetched messages to hold. -# Defaults to 1. -# -# [*qpid_topology_version*] -# (Optional) Version of qpid toplogy to use. -# Defaults to 2. -# -# [*notification_topics*] -# (Optional) Topic to use for notifications. -# Defaults to 'notifications'. -# -# [*control_exchange*] -# (Optional) The default exchange to scope topics. -# Defaults to 'openstack'. -# -# == DEPRECATED PARAMETERS -# -# [*kombu_ssl_version*] -# (optional) SSL version to use (valid only if SSL enabled). -# Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may be -# available on some distributions. -# Defaults to undef -# -# [*kombu_ssl_keyfile*] -# (Optional) SSL key file (valid only if SSL enabled). -# Defaults to undef. -# -# [*kombu_ssl_certfile*] -# (Optional) SSL cert file (valid only if SSL enabled). -# Defaults to undef. -# -# [*kombu_ssl_ca_certs*] -# (optional) SSL certification authority file (valid only if SSL enabled). -# Defaults to undef. -# -# [*kombu_reconnect_delay*] -# (Optional) Backoff on cancel notification (valid only if SSL enabled). 
-# Defaults to undef -# -class sahara::notify::qpid( - $durable_queues = false, - $qpid_hostname = 'localhost', - $qpid_port = 5672, - $qpid_username = 'guest', - $qpid_password = 'guest', - $qpid_sasl_mechanisms = '', - $qpid_heartbeat = 60, - $qpid_protocol = 'tcp', - $qpid_tcp_nodelay = true, - $qpid_receiver_capacity = 1, - $qpid_topology_version = 2, - $notification_topics = 'notifications', - $control_exchange = 'openstack', - # DEPRECATED PARAMETERS - $kombu_ssl_version = undef, - $kombu_ssl_keyfile = undef, - $kombu_ssl_certfile = undef, - $kombu_ssl_ca_certs = undef, - $kombu_reconnect_delay = undef, -) { - - warning('This class is deprecated. Use sahara::init for configuration rpc options instead') - warning('This class is deprecated. Use sahara::notify for configuration ceilometer notifications instead') - - if $kombu_ssl_version { - warning('The kombu_ssl_version parameter is deprecated and has no effect.') - } - - if $kombu_ssl_keyfile { - warning('The kombu_ssl_keyfile parameter is deprecated and has no effect.') - } - - if $kombu_ssl_certfile { - warning('The kombu_ssl_certfile is deprecated and has no effect.') - } - - if $kombu_ssl_ca_certs { - warning('The kombu_ssl_ca_certs is deprecated and has no effect.') - } - - if $kombu_reconnect_delay { - warning('The kombu_reconnect_delay is deprecated and has no effect.') - } - - sahara_config { - 'DEFAULT/rpc_backend': value => 'qpid'; - 'oslo_messaging_qpid/qpid_hosts': value => '$qpid_hostname:$qpid_port'; - - 'oslo_messaging_qpid/amqp_durable_queues': value => $durable_queues; - 'oslo_messaging_qpid/qpid_hostname': value => $qpid_hostname; - 'oslo_messaging_qpid/qpid_port': value => $qpid_port; - 'oslo_messaging_qpid/qpid_username': value => $qpid_username; - 'oslo_messaging_qpid/qpid_password': - value => $qpid_password, - secret => true; - 'oslo_messaging_qpid/qpid_sasl_mechanisms': value => $qpid_sasl_mechanisms; - 'oslo_messaging_qpid/qpid_heartbeat': value => $qpid_heartbeat; - 'oslo_messaging_qpid/qpid_protocol': value => $qpid_protocol; - 'oslo_messaging_qpid/qpid_tcp_nodelay': value => $qpid_tcp_nodelay; - 'oslo_messaging_qpid/qpid_receiver_capacity': value => $qpid_receiver_capacity; - 'oslo_messaging_qpid/qpid_topology_version': value => $qpid_topology_version; - 'DEFAULT/notification_topics': value => $notification_topics; - 'DEFAULT/control_exchange': value => $control_exchange; - } -} diff --git a/sahara/manifests/notify/rabbitmq.pp b/sahara/manifests/notify/rabbitmq.pp deleted file mode 100644 index dec4974ab..000000000 --- a/sahara/manifests/notify/rabbitmq.pp +++ /dev/null @@ -1,178 +0,0 @@ -# == Class: sahara::notify::rabbitmq -# -# RabbitMQ broker configuration for Sahara -# Deprecated class -# -# === Parameters -# -# [*durable_queues*] -# (Optional) Use durable queues in broker. -# Defaults to false. -# -# [*rabbit_host*] -# (Optional) IP or hostname of the rabbit server. -# Defaults to '127.0.0.1'. -# -# [*rabbit_port*] -# (Optional) Port of the rabbit server. -# Defaults to 5672. -# -# [*rabbit_hosts*] -# (Optional) IP or hostname of the rabbits servers. -# comma separated array (ex: ['1.0.0.10:5672','1.0.0.11:5672']) -# Defaults to false. -# -# [*rabbit_use_ssl*] -# (Optional) Connect over SSL for RabbitMQ. -# Defaults to false. -# -# [*rabbit_userid*] -# (Optional) User to connect to the rabbit server. -# Defaults to 'guest'. -# -# [*rabbit_password*] -# (Optional) Password to connect to the rabbit server. -# Defaults to 'guest'. 
-# -# [*rabbit_login_method*] -# (Optional) Method to auth with the rabbit server. -# Defaults to 'AMQPLAIN'. -# -# [*rabbit_virtual_host*] -# (Optional) Virtual host to use. -# Defaults to '/'. -# -# [*rabbit_retry_interval*] -# (Optional) Reconnection attempt frequency for rabbit. -# Defaults to 1. -# -# [*rabbit_retry_backoff*] -# (Optional) Backoff between reconnection attempts for rabbit. -# Defaults to 2. -# -# [*rabbit_max_retries*] -# (Optional) Number of times to retry (0 == no limit). -# Defaults to 0. -# -# [*notification_topics*] -# (Optional) Topic to use for notifications. -# Defaults to 'notifications'. -# -# [*control_exchange*] -# (Optional) The default exchange to scope topics. -# Defaults to 'openstack'. -# -# [*kombu_ssl_version*] -# (optional) SSL version to use (valid only if SSL enabled). -# Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may be -# available on some distributions. -# Defaults to 'TLSv1' -# -# [*kombu_ssl_keyfile*] -# (Optional) SSL key file (valid only if SSL enabled). -# Defaults to undef. -# -# [*kombu_ssl_certfile*] -# (Optional) SSL cert file (valid only if SSL enabled). -# Defaults to undef. -# -# [*kombu_ssl_ca_certs*] -# (Optional) SSL certification authority file (valid only if SSL enabled). -# Defaults to undef -# -# [*kombu_reconnect_delay*] -# (Optional) Backoff on cancel notification (valid only if SSL enabled). -# Defaults to '1.0'; floating-point value. -# -class sahara::notify::rabbitmq( - $durable_queues = false, - $rabbit_host = 'localhost', - $rabbit_hosts = false, - $rabbit_port = 5672, - $rabbit_use_ssl = false, - $rabbit_userid = 'guest', - $rabbit_password = 'guest', - $rabbit_login_method = 'AMQPLAIN', - $rabbit_virtual_host = '/', - $rabbit_retry_interval = 1, - $rabbit_retry_backoff = 2, - $rabbit_max_retries = 0, - $notification_topics = 'notifications', - $control_exchange = 'openstack', - $kombu_ssl_version = 'TLSv1', - $kombu_ssl_keyfile = undef, - $kombu_ssl_certfile = undef, - $kombu_ssl_ca_certs = undef, - $kombu_reconnect_delay = '1.0', -) { - - warning('This class is deprecated. Use sahara::init for configuration rpc options instead') - warning('This class is deprecated. 
Use sahara::notify for configuration ceilometer notifications instead') - - if $rabbit_use_ssl { - - if $kombu_ssl_ca_certs { - sahara_config { 'oslo_messaging_rabbit/kombu_ssl_ca_certs': value => $kombu_ssl_ca_certs; } - } else { - sahara_config { 'oslo_messaging_rabbit/kombu_ssl_ca_certs': ensure => absent; } - } - - if $kombu_ssl_certfile or $kombu_ssl_keyfile { - sahara_config { - 'oslo_messaging_rabbit/kombu_ssl_certfile': value => $kombu_ssl_certfile; - 'oslo_messaging_rabbit/kombu_ssl_keyfile': value => $kombu_ssl_keyfile; - } - } else { - sahara_config { - 'oslo_messaging_rabbit/kombu_ssl_certfile': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_keyfile': ensure => absent; - } - } - - if $kombu_ssl_version { - sahara_config { 'oslo_messaging_rabbit/kombu_ssl_version': value => $kombu_ssl_version; } - } else { - sahara_config { 'oslo_messaging_rabbit/kombu_ssl_version': ensure => absent; } - } - - } else { - sahara_config { - 'oslo_messaging_rabbit/kombu_ssl_ca_certs': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_certfile': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_keyfile': ensure => absent; - 'oslo_messaging_rabbit/kombu_ssl_version': ensure => absent; - } - } - - if $rabbit_hosts { - sahara_config { - 'oslo_messaging_rabbit/rabbit_hosts': value => join($rabbit_hosts, ','); - 'oslo_messaging_rabbit/rabbit_ha_queues': value => true; - } - } else { - sahara_config { - 'oslo_messaging_rabbit/rabbit_host': value => $rabbit_host; - 'oslo_messaging_rabbit/rabbit_port': value => $rabbit_port; - 'oslo_messaging_rabbit/rabbit_ha_queues': value => false; - 'oslo_messaging_rabbit/rabbit_hosts': value => "${rabbit_host}:${rabbit_port}"; - } - } - - sahara_config { - 'DEFAULT/rpc_backend': value => 'rabbit'; - 'oslo_messaging_rabbit/amqp_durable_queues': value => $durable_queues; - 'oslo_messaging_rabbit/rabbit_use_ssl': value => $rabbit_use_ssl; - 'oslo_messaging_rabbit/rabbit_userid': value => $rabbit_userid; - 'oslo_messaging_rabbit/rabbit_password': - value => $rabbit_password, - secret => true; - 'oslo_messaging_rabbit/rabbit_login_method': value => $rabbit_login_method; - 'oslo_messaging_rabbit/rabbit_virtual_host': value => $rabbit_virtual_host; - 'oslo_messaging_rabbit/rabbit_retry_interval': value => $rabbit_retry_interval; - 'oslo_messaging_rabbit/rabbit_retry_backoff': value => $rabbit_retry_backoff; - 'oslo_messaging_rabbit/rabbit_max_retries': value => $rabbit_max_retries; - 'DEFAULT/notification_topics': value => $notification_topics; - 'DEFAULT/control_exchange': value => $control_exchange; - 'oslo_messaging_rabbit/kombu_reconnect_delay': value => $kombu_reconnect_delay; - } -} diff --git a/sahara/manifests/notify/zeromq.pp b/sahara/manifests/notify/zeromq.pp deleted file mode 100644 index 3a17fbf26..000000000 --- a/sahara/manifests/notify/zeromq.pp +++ /dev/null @@ -1,108 +0,0 @@ -# == Class: sahara::notify::zeromq -# -# Zeromq broker configuration for Sahara -# Deprecated class -# -# === Parameters -# -# [*zeromq_bind_address*] -# (Optional) Bind address; wildcard, ethernet, or ip address. -# Defaults to '*'. -# -# [*zeromq_port*] -# (Optional) Receiver listening port. -# Defaults to 9501. -# -# [*zeromq_contexts*] -# (Optional) Number of contexsts for zeromq. -# Defaults to 1. -# -# [*zeromq_topic_backlog*] -# (Optional) Number of incoming messages to buffer. -# Defaults to 'None'. -# -# [*zeromq_ipc_dir*] -# (Optional) Directory for zeromq IPC. -# Defaults to '/var/run/openstack'. 
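
The deprecation warnings in the deleted classes point at ::sahara for RPC options and ::sahara::notify for Ceilometer notifications; a sketch of the equivalent replacement configuration, with placeholder host and credentials:

    class { '::sahara':
      admin_password  => 'a_big_secret',        # placeholder credential
      rpc_backend     => 'rabbit',
      rabbit_host     => 'rabbit.example.com',  # placeholder host
      rabbit_userid   => 'sahara',              # placeholder credentials
      rabbit_password => 'an_even_bigger_secret',
    }

    class { '::sahara::notify':
      enable_notifications => true,
      notification_driver  => 'messaging',
    }
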
-# -# [*zeromq_host*] -# (Optional) Name of the current node: hostname, FQDN, or IP. -# Defaults to 'sahara'. -# -# [*cast_timeout*] -# (Optional) TTL for zeromq messages. -# Defaults to 30. -# -# == DEPRECATED PARAMETERS -# -# [*kombu_ssl_version*] -# (optional) SSL version to use (valid only if SSL enabled). -# Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may be -# available on some distributions. -# Defaults to undef -# -# [*kombu_ssl_keyfile*] -# (Optional) SSL key file (valid only if SSL enabled). -# Defaults to undef. -# -# [*kombu_ssl_certfile*] -# (Optional) SSL cert file (valid only if SSL enabled). -# Defaults to undef. -# -# [*kombu_ssl_ca_certs*] -# (optional) SSL certification authority file (valid only if SSL enabled). -# Defaults to undef. -# -# [*kombu_reconnect_delay*] -# (Optional) Backoff on cancel notification (valid only if SSL enabled). -# Defaults to undef -# -class sahara::notify::zeromq( - $zeromq_bind_address = '*', - $zeromq_port = 9501, - $zeromq_contexts = 1, - $zeromq_topic_backlog = 'None', - $zeromq_ipc_dir = '/var/run/openstack', - $zeromq_host = 'sahara', - $cast_timeout = 30, - # DEPRECATED PARAMETERS - $kombu_ssl_version = undef, - $kombu_ssl_keyfile = undef, - $kombu_ssl_certfile = undef, - $kombu_ssl_ca_certs = undef, - $kombu_reconnect_delay = undef, -) { - - warning('This class is deprecated. Use sahara::init for configuration rpc options instead') - - if $kombu_ssl_version { - warning('The kombu_ssl_version parameter is deprecated and has no effect.') - } - - if $kombu_ssl_keyfile { - warning('The kombu_ssl_keyfile parameter is deprecated and has no effect.') - } - - if $kombu_ssl_certfile { - warning('The kombu_ssl_certfile is deprecated and has no effect.') - } - - if $kombu_ssl_ca_certs { - warning('The kombu_ssl_ca_certs is deprecated and has no effect.') - } - - if $kombu_reconnect_delay { - warning('The kombu_reconnect_delay is deprecated and has no effect.') - } - - sahara_config { - 'DEFAULT/rpc_backend': value => 'zmq'; - 'DEFAULT/rpc_zmq_bind_address': value => $zeromq_bind_address; - 'DEFAULT/rpc_zmq_port': value => $zeromq_port; - 'DEFAULT/rpc_zmq_contexts': value => $zeromq_contexts; - 'DEFAULT/rpc_zmq_topic_backlog': value => $zeromq_topic_backlog; - 'DEFAULT/rpc_zmq_ipc_dir': value => $zeromq_ipc_dir; - 'DEFAULT/rpc_zmq_host': value => $zeromq_host; - 'DEFAULT/rpc_cast_timeout': value => $cast_timeout; - } -} diff --git a/sahara/manifests/params.pp b/sahara/manifests/params.pp index e830b0db9..3c328754b 100644 --- a/sahara/manifests/params.pp +++ b/sahara/manifests/params.pp @@ -3,7 +3,6 @@ # Parameters for puppet-sahara # class sahara::params { - $dbmanage_command = 'sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head' $client_package_name = 'python-saharaclient' case $::osfamily { @@ -15,6 +14,7 @@ $all_service_name = 'openstack-sahara-all' $api_service_name = 'openstack-sahara-api' $engine_service_name = 'openstack-sahara-engine' + $pymysql_package_name = undef } 'Debian': { $common_package_name = 'sahara-common' @@ -24,6 +24,7 @@ $all_service_name = 'sahara' $api_service_name = 'sahara-api' $engine_service_name = 'sahara-engine' + $pymysql_package_name = 'python-pymysql' } default: { fail("Unsupported osfamily: ${::osfamily} operatingsystem: ${::operatingsystem}") diff --git a/sahara/manifests/service/all.pp b/sahara/manifests/service/all.pp index 2127344f3..b40706083 100644 --- a/sahara/manifests/service/all.pp +++ b/sahara/manifests/service/all.pp @@ -22,7 +22,8 @@ $package_ensure = 'present', ) { - require 
::sahara + include ::sahara::policy + include ::sahara::params Sahara_config<||> ~> Service['sahara-all'] Class['sahara::policy'] ~> Service['sahara-all'] diff --git a/sahara/manifests/service/api.pp b/sahara/manifests/service/api.pp index 1979c3c7c..155107912 100644 --- a/sahara/manifests/service/api.pp +++ b/sahara/manifests/service/api.pp @@ -7,7 +7,7 @@ # [*api_workers*] # (Optional) Number of workers for Sahara API service # 0 means all-in-one-thread configuration -# Defaults to 0 +# Defaults to $::os_service_default # # [*enabled*] # (Optional) Should the service be enabled. @@ -22,13 +22,14 @@ # Defaults to 'present' # class sahara::service::api ( - $api_workers = 0, + $api_workers = $::os_service_default, $enabled = true, $manage_service = true, $package_ensure = 'present', ) { - require ::sahara + include ::sahara::policy + include ::sahara::params Sahara_config<||> ~> Service['sahara-api'] Class['sahara::policy'] ~> Service['sahara-api'] diff --git a/sahara/manifests/service/engine.pp b/sahara/manifests/service/engine.pp index 89f3b9efe..832967288 100644 --- a/sahara/manifests/service/engine.pp +++ b/sahara/manifests/service/engine.pp @@ -22,10 +22,9 @@ $package_ensure = 'present', ) { - require ::sahara + include ::sahara::params Sahara_config<||> ~> Service['sahara-engine'] - Class['sahara::policy'] ~> Service['sahara-engine'] package { 'sahara-engine': ensure => $package_ensure, diff --git a/sahara/metadata.json b/sahara/metadata.json index ee52a0d43..bb878f28a 100644 --- a/sahara/metadata.json +++ b/sahara/metadata.json @@ -1,6 +1,6 @@ { "name": "openstack-sahara", - "version": "6.1.0", + "version": "7.0.0", "author": "Red Hat and OpenStack Contributors", "summary": "Puppet module for OpenStack Sahara", "license": "Apache-2.0", @@ -28,10 +28,10 @@ "description": "Installs and configures OpenStack Sahara (Data Processing).", "dependencies": [ { "name": "duritong/sysctl", "version_requirement": ">=0.0.1 <1.0.0" }, - { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=7.0.0 <8.0.0" }, { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, { "name": "puppetlabs/postgresql", "version_requirement": ">=3.0.0" }, - { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=7.0.0 <8.0.0" } ] } diff --git a/sahara/spec/acceptance/basic_sahara_spec.rb b/sahara/spec/acceptance/basic_sahara_spec.rb index f39e3cf97..000060239 100644 --- a/sahara/spec/acceptance/basic_sahara_spec.rb +++ b/sahara/spec/acceptance/basic_sahara_spec.rb @@ -36,7 +36,7 @@ class { '::sahara': rabbit_password => 'an_even_bigger_secret', rabbit_host => '127.0.0.1', rpc_backend => 'rabbit', - database_connection => 'mysql://sahara:a_big_secret@127.0.0.1/sahara?charset=utf8', + database_connection => 'mysql+pymysql://sahara:a_big_secret@127.0.0.1/sahara?charset=utf8', admin_password => 'a_big_secret', } class { '::sahara::service::api': } diff --git a/sahara/spec/classes/sahara_all_spec.rb b/sahara/spec/classes/sahara_all_spec.rb index f604f8c70..d1833f033 100644 --- a/sahara/spec/classes/sahara_all_spec.rb +++ b/sahara/spec/classes/sahara_all_spec.rb @@ -2,22 +2,14 @@ describe 'sahara::service::all' do - shared_examples_for 'sahara-all' do - context 'require main class' do - it { is_expected.to contain_class('sahara') } - end - end - context 'on Debian 
platforms' do let :facts do - { + @default_facts.merge({ :osfamily => 'Debian', :operatingsystem => 'Debian' - } + }) end - it_configures 'sahara-all' - it_behaves_like 'generic sahara service', { :name => 'sahara-all', :package_name => 'sahara', @@ -26,11 +18,9 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end - it_configures 'sahara-all' - it_behaves_like 'generic sahara service', { :name => 'sahara-all', :package_name => 'openstack-sahara', diff --git a/sahara/spec/classes/sahara_api_spec.rb b/sahara/spec/classes/sahara_api_spec.rb index eab6d11f9..d5df33cf1 100644 --- a/sahara/spec/classes/sahara_api_spec.rb +++ b/sahara/spec/classes/sahara_api_spec.rb @@ -4,12 +4,8 @@ shared_examples_for 'sahara-api' do - context 'require main class' do - it { is_expected.to contain_class('sahara') } - end - context 'default params' do - it { is_expected.to contain_sahara_config('DEFAULT/api_workers').with_value('0') } + it { is_expected.to contain_sahara_config('DEFAULT/api_workers').with_value('') } end context 'passing params' do @@ -26,10 +22,10 @@ context 'on Debian platforms' do let :facts do - { + @default_facts.merge({ :osfamily => 'Debian', :operatingsystem => 'Debian' - } + }) end it_configures 'sahara-api' @@ -42,7 +38,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'sahara-api' diff --git a/sahara/spec/classes/sahara_db_spec.rb b/sahara/spec/classes/sahara_db_spec.rb index c39f3b6c9..f8a0671dd 100644 --- a/sahara/spec/classes/sahara_db_spec.rb +++ b/sahara/spec/classes/sahara_db_spec.rb @@ -4,18 +4,18 @@ shared_examples 'sahara::db' do context 'with default parameters' do - it { is_expected.to contain_sahara_config('database/connection').with_value('mysql://sahara:secrete@localhost:3306/sahara').with_secret(true) } - it { is_expected.to contain_sahara_config('database/idle_timeout').with_value('3600') } - it { is_expected.to contain_sahara_config('database/min_pool_size').with_value('1') } - it { is_expected.to contain_sahara_config('database/max_retries').with_value('10') } - it { is_expected.to contain_sahara_config('database/retry_interval').with_value('10') } - it { is_expected.to contain_sahara_config('database/max_pool_size').with_value('10') } - it { is_expected.to contain_sahara_config('database/max_overflow').with_value('20') } + it { is_expected.to contain_sahara_config('database/connection').with_value('mysql+pymysql://sahara:secrete@localhost:3306/sahara').with_secret(true) } + it { is_expected.to contain_sahara_config('database/idle_timeout').with_value('') } + it { is_expected.to contain_sahara_config('database/min_pool_size').with_value('') } + it { is_expected.to contain_sahara_config('database/max_retries').with_value('') } + it { is_expected.to contain_sahara_config('database/retry_interval').with_value('') } + it { is_expected.to contain_sahara_config('database/max_pool_size').with_value('') } + it { is_expected.to contain_sahara_config('database/max_overflow').with_value('') } end context 'with specific parameters' do let :params do - { :database_connection => 'mysql://sahara:sahara@localhost/sahara', + { :database_connection => 'mysql+pymysql://sahara:sahara@localhost/sahara', :database_idle_timeout => '3601', :database_min_pool_size => '2', :database_max_retries => '11', @@ -25,7 +25,7 @@ } end - it { is_expected.to 
contain_sahara_config('database/connection').with_value('mysql://sahara:sahara@localhost/sahara').with_secret(true) } + it { is_expected.to contain_sahara_config('database/connection').with_value('mysql+pymysql://sahara:sahara@localhost/sahara').with_secret(true) } it { is_expected.to contain_sahara_config('database/idle_timeout').with_value('3601') } it { is_expected.to contain_sahara_config('database/min_pool_size').with_value('2') } it { is_expected.to contain_sahara_config('database/max_retries').with_value('11') } @@ -34,6 +34,14 @@ it { is_expected.to contain_sahara_config('database/max_overflow').with_value('21') } end + context 'with MySQL-python library as backend package' do + let :params do + { :database_connection => 'mysql://sahara:sahara@localhost/sahara' } + end + + it { is_expected.to contain_sahara_config('database/connection').with_value('mysql://sahara:sahara@localhost/sahara').with_secret(true) } + end + context 'with postgresql backend' do let :params do { :database_connection => 'postgresql://sahara:sahara@localhost/sahara', } @@ -52,27 +60,52 @@ it_raises 'a Puppet::Error', /validate_re/ end + + context 'with incorrect database_connection string' do + let :params do + { :database_connection => 'foo+pymysql://sahara:sahara@localhost/sahara', } + end + + it_raises 'a Puppet::Error', /validate_re/ + end end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian', + @default_facts.merge({ :osfamily => 'Debian', :operatingsystem => 'Debian', :operatingsystemrelease => 'jessie', - } + }) end it_configures 'sahara::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://sahara:sahara@localhost/sahara' } + end + + it { is_expected.to contain_package('sahara-backend-package').with({ :ensure => 'present', :name => 'python-pymysql' }) } + end + end context 'on Redhat platforms' do let :facts do - { :osfamily => 'RedHat', + @default_facts.merge({ :osfamily => 'RedHat', :operatingsystemrelease => '7.1', - } + }) end it_configures 'sahara::db' + + context 'using pymysql driver' do + let :params do + { :database_connection => 'mysql+pymysql://sahara:sahara@localhost/sahara' } + end + + it { is_expected.not_to contain_package('sahara-backend-package') } + end end end diff --git a/sahara/spec/classes/sahara_db_sync_spec.rb b/sahara/spec/classes/sahara_db_sync_spec.rb index 6faad5ec7..54bcf6a60 100644 --- a/sahara/spec/classes/sahara_db_sync_spec.rb +++ b/sahara/spec/classes/sahara_db_sync_spec.rb @@ -14,8 +14,27 @@ ) end + describe 'overriding extra_params' do + let :params do + { + :extra_params => '--config-file /etc/sahara/sahara01.conf', + } + end + + it { + is_expected.to contain_exec('sahara-dbmanage').with( + :command => 'sahara-db-manage --config-file /etc/sahara/sahara01.conf upgrade head', + :path => '/usr/bin', + :user => 'sahara', + :refreshonly => 'true', + :logoutput => 'on_failure' + ) + } + end + end + context 'on a RedHat osfamily' do let :facts do { diff --git a/sahara/spec/classes/sahara_engine_spec.rb b/sahara/spec/classes/sahara_engine_spec.rb index 79ce3360b..64cbac54e 100644 --- a/sahara/spec/classes/sahara_engine_spec.rb +++ b/sahara/spec/classes/sahara_engine_spec.rb @@ -2,19 +2,11 @@ describe 'sahara::service::engine' do - shared_examples_for 'sahara-engine' do - context 'require main class' do - it { is_expected.to contain_class('sahara') } - end - end - context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end - 
it_configures 'sahara-engine' - it_behaves_like 'generic sahara service', { :name => 'sahara-engine', :package_name => 'sahara-engine', @@ -23,11 +15,9 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end - it_configures 'sahara-engine' - it_behaves_like 'generic sahara service', { :name => 'sahara-engine', :package_name => 'openstack-sahara-engine', diff --git a/sahara/spec/classes/sahara_init_spec.rb b/sahara/spec/classes/sahara_init_spec.rb index c23107950..abf7ebb36 100644 --- a/sahara/spec/classes/sahara_init_spec.rb +++ b/sahara/spec/classes/sahara_init_spec.rb @@ -14,6 +14,7 @@ shared_examples_for 'sahara' do it { is_expected.to contain_class('sahara::params') } it { is_expected.to contain_class('sahara::db') } + it { is_expected.to contain_class('sahara::logging') } it { is_expected.to contain_class('sahara::policy') } it { is_expected.to contain_class('mysql::bindings::python') } it { is_expected.to contain_exec('sahara-dbmanage') } @@ -21,16 +22,16 @@ shared_examples_for 'sahara config' do context 'with default params' do - it { is_expected.to contain_sahara_config('DEFAULT/use_neutron').with_value('false') } - it { is_expected.to contain_sahara_config('DEFAULT/use_floating_ips').with_value('true') } - it { is_expected.to contain_sahara_config('DEFAULT/host').with_value('0.0.0.0') } - it { is_expected.to contain_sahara_config('DEFAULT/port').with_value('8386') } + it { is_expected.to contain_sahara_config('DEFAULT/use_neutron').with_value('') } + it { is_expected.to contain_sahara_config('DEFAULT/use_floating_ips').with_value('') } + it { is_expected.to contain_sahara_config('DEFAULT/host').with_value('') } + it { is_expected.to contain_sahara_config('DEFAULT/port').with_value('') } it { is_expected.to contain_sahara_config('keystone_authtoken/auth_uri').with_value('http://127.0.0.1:5000/v2.0/') } it { is_expected.to contain_sahara_config('keystone_authtoken/identity_uri').with_value('http://127.0.0.1:35357/') } it { is_expected.to contain_sahara_config('keystone_authtoken/admin_user').with_value('sahara') } it { is_expected.to contain_sahara_config('keystone_authtoken/admin_tenant_name').with_value('services') } it { is_expected.to contain_sahara_config('keystone_authtoken/admin_password').with_value('secrete').with_secret(true) } - it { is_expected.to contain_sahara_config('DEFAULT/plugins').with_ensure('absent') } + it { is_expected.to contain_sahara_config('DEFAULT/plugins').with_value('') } end context 'with passing params' do @@ -68,14 +69,14 @@ it { is_expected.to contain_sahara_config('DEFAULT/rpc_backend').with_value('rabbit') } context 'when defaults with rabbit pass specified' do - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_password').with_value('guest').with_secret(true) } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_userid').with_value('guest') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_host').with_value('localhost') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_port').with_value('5672') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_hosts').with_value('localhost:5672') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_ha_queues').with_value('false') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_virtual_host').with_value('/') } - it { is_expected.to 
contain_sahara_config('oslo_messaging_rabbit/amqp_durable_queues').with_value('false') } + it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_password').with_value('').with_secret(true) } + it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_userid').with_value('') } + it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_host').with_value('') } + it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_port').with_value('') } + it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_ha_queues').with_value('') } + it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_virtual_host').with_value('') } + it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/amqp_durable_queues').with_value('') } + it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_hosts').with_ensure('absent') } end context 'when passing params' do @@ -96,6 +97,7 @@ it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_port').with_value('5673') } it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_ha_queues').with_value('true') } it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/amqp_durable_queues').with_value('true') } + it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_hosts').with_ensure('absent') } end context 'with rabbit ssl cert parameters' do @@ -106,6 +108,7 @@ :kombu_ssl_ca_certs => '/etc/ca.cert', :kombu_ssl_certfile => '/etc/certfile', :kombu_ssl_keyfile => '/etc/key', + :kombu_ssl_version => 'TLSv1', }) end @@ -116,34 +119,19 @@ it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('TLSv1') } end - context 'with rabbit ssl cert parameters' do - before do - params.merge!({ - :rabbit_password => 'pass', - :rabbit_use_ssl => 'true', - }) - end - - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value('true') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('TLSv1') } - end - context 'with rabbit ssl disabled' do before do params.merge!({ :rabbit_password => 'pass', :rabbit_use_ssl => false, - :kombu_ssl_ca_certs => 'undef', - :kombu_ssl_certfile => 'undef', - :kombu_ssl_keyfile => 'undef' }) end it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value('false') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_ensure('absent') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_version').with_ensure('absent') } + it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_value('') } + it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_value('') } + it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_value('') } + it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('') } end context 'when passing params for single rabbit host' do @@ -160,8 +148,8 @@ it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_userid').with_value('guest2') } it { is_expected.to 
contain_sahara_config('oslo_messaging_rabbit/rabbit_host').with_value('localhost2') } it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_port').with_value('5673') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_hosts').with_value('localhost2:5673') } it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_ha_queues').with_value('true') } + it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_hosts').with_ensure('absent') } end context 'when passing params for multiple rabbit hosts' do @@ -181,56 +169,6 @@ end end - context 'with qpid rpc' do - before do - params.merge!({ :rpc_backend => 'qpid' }) - end - - it { is_expected.to contain_sahara_config('DEFAULT/rpc_backend').with_value('qpid') } - - context 'when default params' do - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_username').with_value('guest') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_password').with_value('guest').with_secret(true) } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_port').with_value('5672') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_hosts').with_value('localhost:5672') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_protocol').with_value('tcp') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/amqp_durable_queues').with_value('false') } - end - - context 'when passing params' do - before do - params.merge!({ - :qpid_password => 'pass', - :qpid_username => 'guest2', - :qpid_hostname => 'localhost2', - :qpid_port => '5673', - :rpc_backend => 'qpid', - :amqp_durable_queues => 'true', - }) - end - - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_username').with_value('guest2') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_password').with_value('pass').with_secret(true) } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost2') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_port').with_value('5673') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/amqp_durable_queues').with_value('true') } - end - - context 'when passing params for multiple qpid hosts' do - before do - params.merge!({ - :qpid_hosts => ['nonlocalhost3:5673', 'nonlocalhost4:5673'], - :rpc_backend => 'qpid', - }) - end - - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_hosts').with_value('nonlocalhost3:5673,nonlocalhost4:5673') } - it { is_expected.to_not contain_sahara_config('oslo_messaging_qpid/qpid_port') } - it { is_expected.to_not contain_sahara_config('oslo_messaging_qpid/qpid_hostname') } - end - end - context 'with zmq rpc' do before do params.merge!({ :rpc_backend => 'zmq' }) @@ -239,20 +177,18 @@ it { is_expected.to contain_sahara_config('DEFAULT/rpc_backend').with_value('zmq') } context 'with default params' do - it { is_expected.to contain_sahara_config('DEFAULT/rpc_zmq_bind_address').with_value('*') } - it { is_expected.to contain_sahara_config('DEFAULT/rpc_zmq_port').with_value('9501') } - it { is_expected.to contain_sahara_config('DEFAULT/rpc_zmq_contexts').with_value('1') } - it { is_expected.to contain_sahara_config('DEFAULT/rpc_zmq_topic_backlog').with_value('None') } - it { is_expected.to 
contain_sahara_config('DEFAULT/rpc_zmq_ipc_dir').with_value('/var/run/openstack') } + it { is_expected.to contain_sahara_config('DEFAULT/rpc_zmq_bind_address').with_value('') } + it { is_expected.to contain_sahara_config('DEFAULT/rpc_zmq_contexts').with_value('') } + it { is_expected.to contain_sahara_config('DEFAULT/rpc_zmq_topic_backlog').with_value('') } + it { is_expected.to contain_sahara_config('DEFAULT/rpc_zmq_ipc_dir').with_value('') } it { is_expected.to contain_sahara_config('DEFAULT/rpc_zmq_host').with_value('sahara') } - it { is_expected.to contain_sahara_config('DEFAULT/rpc_cast_timeout').with_value('30') } + it { is_expected.to contain_sahara_config('DEFAULT/rpc_cast_timeout').with_value('') } end context 'when passing params' do before do params.merge!({ :zeromq_bind_address => '*', - :zeromq_port => '9501', :zeromq_host => 'localhost', :cast_timeout => '30', :rpc_backend => 'zmq', @@ -260,7 +196,6 @@ end it { is_expected.to contain_sahara_config('DEFAULT/rpc_zmq_bind_address').with_value('*') } - it { is_expected.to contain_sahara_config('DEFAULT/rpc_zmq_port').with_value('9501') } it { is_expected.to contain_sahara_config('DEFAULT/rpc_zmq_host').with_value('localhost') } it { is_expected.to contain_sahara_config('DEFAULT/rpc_cast_timeout').with_value('30') } end @@ -269,9 +204,9 @@ shared_examples_for 'sahara ssl' do context 'without ssl' do - it { is_expected.to contain_sahara_config('ssl/ca_file').with_ensure('absent') } - it { is_expected.to contain_sahara_config('ssl/cert_file').with_ensure('absent') } - it { is_expected.to contain_sahara_config('ssl/key_file').with_ensure('absent') } + it { is_expected.to_not contain_sahara_config('ssl/ca_file') } + it { is_expected.to_not contain_sahara_config('ssl/cert_file') } + it { is_expected.to_not contain_sahara_config('ssl/key_file') } end context 'with ssl' do @@ -319,42 +254,12 @@ end end - shared_examples_for 'with deprecated service' do |service| - context 'with overridden parameters' do - let :params do - { :enabled => true, - :manage_service => true } - end - - it 'installs package and service' do - is_expected.to contain_class('sahara::service::all') - is_expected.to contain_package('sahara-all').with({ - :name => "#{service[:package_name]}", - :notify => ['Service[sahara-all]', 'Exec[sahara-dbmanage]'] - }) - is_expected.to contain_service('sahara-all').with({ - :name => "#{service[:service_name]}", - :ensure => 'running', - :hasstatus => true, - :enable => true - }) - end - end - - context 'with default parameters' do - it 'does not control service state' do - is_expected.to_not contain_service('sahara-all') - is_expected.to_not contain_package('sahara-all') - end - end - end - context 'on Debian platforms' do let :facts do - { + @default_facts.merge({ :osfamily => 'Debian', :operatingsystem => 'Debian' - } + }) end it_configures 'sahara' @@ -362,14 +267,11 @@ it_configures 'sahara ssl' it_configures 'sahara rpc_backend' - it_behaves_like 'with deprecated service', { - :package_name => 'sahara', - :service_name => 'sahara' } end context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'sahara' @@ -377,8 +279,5 @@ it_configures 'sahara ssl' it_configures 'sahara rpc_backend' - it_behaves_like 'with deprecated service', { - :package_name => 'openstack-sahara', - :service_name => 'openstack-sahara-all' } end end diff --git a/sahara/spec/classes/sahara_logging_spec.rb b/sahara/spec/classes/sahara_logging_spec.rb index 
dc456ebc2..1a02851e2 100644 --- a/sahara/spec/classes/sahara_logging_spec.rb +++ b/sahara/spec/classes/sahara_logging_spec.rb @@ -61,10 +61,10 @@ shared_examples_for 'basic logging options defaults' do context 'with defaults' do - it { is_expected.to contain_sahara_config('DEFAULT/use_stderr').with_value(true) } - it { is_expected.to contain_sahara_config('DEFAULT/use_syslog').with_value(false) } - it { is_expected.to contain_sahara_config('DEFAULT/debug').with_value(false) } - it { is_expected.to contain_sahara_config('DEFAULT/verbose').with_value(false) } + it { is_expected.to contain_sahara_config('DEFAULT/use_stderr').with_value('') } + it { is_expected.to contain_sahara_config('DEFAULT/use_syslog').with_value('') } + it { is_expected.to contain_sahara_config('DEFAULT/debug').with_value('') } + it { is_expected.to contain_sahara_config('DEFAULT/verbose').with_value('') } it { is_expected.to contain_sahara_config('DEFAULT/log_dir').with_value('/var/log/sahara') } end @@ -74,7 +74,7 @@ end it { is_expected.to contain_sahara_config('DEFAULT/use_syslog').with_value(true) } - it { is_expected.to contain_sahara_config('DEFAULT/syslog_log_facility').with_value('LOG_USER') } + it { is_expected.to contain_sahara_config('DEFAULT/syslog_log_facility').with_value('') } end end @@ -120,13 +120,13 @@ :logging_exception_prefix, :log_config_append, :publish_errors, :default_log_levels, :fatal_deprecations, :instance_format, :instance_uuid_format, :log_date_format, ].each { |param| - it { is_expected.to contain_sahara_config("DEFAULT/#{param}").with_ensure('absent') } + it { is_expected.to contain_sahara_config("DEFAULT/#{param}").with_value('') } } end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + @default_facts.merge({ :osfamily => 'Debian' }) end it_configures 'sahara-logging' @@ -134,7 +134,7 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + @default_facts.merge({ :osfamily => 'RedHat' }) end it_configures 'sahara-logging' diff --git a/sahara/spec/classes/sahara_notify_qpid_spec.rb b/sahara/spec/classes/sahara_notify_qpid_spec.rb deleted file mode 100644 index 3cb51fcb0..000000000 --- a/sahara/spec/classes/sahara_notify_qpid_spec.rb +++ /dev/null @@ -1,52 +0,0 @@ -require 'spec_helper' -describe 'sahara::notify::qpid' do - let :facts do - { - :osfamily => 'Debian' - } - end - - describe 'when default params and qpid_password' do - let :params do - {:qpid_password => 'pass'} - end - - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_username').with_value('guest') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_password').with_value('pass') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_password').with_value(params[:qpid_password]).with_secret(true) } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_port').with_value('5672') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_protocol').with_value('tcp') } - end - - describe 'when passing params' do - let :params do - { - :qpid_password => 'pass2', - :qpid_username => 'guest2', - :qpid_hostname => 'localhost2', - :qpid_port => '5673' - } - end - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_username').with_value('guest2') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost2') } - it { is_expected.to 
contain_sahara_config('oslo_messaging_qpid/qpid_port').with_value('5673') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_protocol').with_value('tcp') } - end - - describe 'when configuring with ssl' do - let :params do - { - :qpid_password => 'pass3', - :qpid_username => 'guest3', - :qpid_hostname => 'localhost3', - :qpid_port => '5671', - :qpid_protocol => 'ssl', - } - end - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_username').with_value('guest3') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_hostname').with_value('localhost3') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_port').with_value('5671') } - it { is_expected.to contain_sahara_config('oslo_messaging_qpid/qpid_protocol').with_value('ssl') } - end -end diff --git a/sahara/spec/classes/sahara_notify_rabbitmq_spec.rb b/sahara/spec/classes/sahara_notify_rabbitmq_spec.rb deleted file mode 100644 index acf1c31ee..000000000 --- a/sahara/spec/classes/sahara_notify_rabbitmq_spec.rb +++ /dev/null @@ -1,123 +0,0 @@ -require 'spec_helper' -describe 'sahara::notify::rabbitmq' do - let :facts do - { - :osfamily => 'Debian' - } - end - - describe 'when defaults with rabbit pass specified' do - let :params do - {:rabbit_password => 'pass'} - end - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_password').with_value('pass') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_password').with_value(params[:rabbit_password]).with_secret(true) } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_userid').with_value('guest') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_host').with_value('localhost') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_port').with_value('5672') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_hosts').with_value('localhost:5672') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_ha_queues').with_value('false') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/amqp_durable_queues').with_value('false') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_virtual_host').with_value('/') } - it { is_expected.to contain_sahara_config('DEFAULT/control_exchange').with_value('openstack') } - it { is_expected.to contain_sahara_config('DEFAULT/notification_topics').with_value('notifications') } - end - - describe 'when passing params' do - let :params do - { - :rabbit_password => 'pass', - :rabbit_userid => 'guest2', - :rabbit_host => 'localhost2', - :rabbit_port => '5673', - :durable_queues => true, - } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_userid').with_value('guest2') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_host').with_value('localhost2') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_port').with_value('5673') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_durable_queues').with_value('true') } - end - end - - describe 'with rabbit ssl cert parameters' do - let :params do - { - :rabbit_password => 'pass', - :rabbit_use_ssl => 'true', - :kombu_ssl_ca_certs => '/etc/ca.cert', - :kombu_ssl_certfile => '/etc/certfile', - :kombu_ssl_keyfile => '/etc/key', - } - end - - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value('true') } - it { 
is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_value('/etc/ca.cert') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_value('/etc/certfile') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_value('/etc/key') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('TLSv1') } - end - - describe 'with rabbit ssl cert parameters' do - let :params do - { - :rabbit_password => 'pass', - :rabbit_use_ssl => 'true', - } - end - - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value('true') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('TLSv1') } - end - - describe 'with rabbit ssl disabled' do - let :params do - { - :rabbit_password => 'pass', - :rabbit_use_ssl => false, - :kombu_ssl_ca_certs => 'undef', - :kombu_ssl_certfile => 'undef', - :kombu_ssl_keyfile => 'undef' - } - end - - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value('false') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_ensure('absent') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/kombu_ssl_version').with_ensure('absent') } - end - - describe 'when passing params for single rabbit host' do - let :params do - { - :rabbit_password => 'pass', - :rabbit_userid => 'guest2', - :rabbit_host => 'localhost2', - :rabbit_port => '5673', - :durable_queues => true, - } - end - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_userid').with_value('guest2') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_host').with_value('localhost2') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_port').with_value('5673') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_hosts').with_value('localhost2:5673') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/amqp_durable_queues').with_value('true') } - end - - describe 'when passing params for multiple rabbit hosts' do - let :params do - { - :rabbit_password => 'pass', - :rabbit_userid => 'guest3', - :rabbit_hosts => ['nonlocalhost3:5673', 'nonlocalhost4:5673'] - } - end - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_userid').with_value('guest3') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_hosts').with_value( - 'nonlocalhost3:5673,nonlocalhost4:5673') } - it { is_expected.to contain_sahara_config('oslo_messaging_rabbit/rabbit_ha_queues').with_value('true') } - it { is_expected.to_not contain_sahara_config('oslo_messaging_rabbit/rabbit_port') } - it { is_expected.to_not contain_sahara_config('oslo_messaging_rabbit/rabbit_host') } - end - -end diff --git a/sahara/spec/classes/sahara_notify_spec.rb b/sahara/spec/classes/sahara_notify_spec.rb index ba0b9c1e5..381486933 100644 --- a/sahara/spec/classes/sahara_notify_spec.rb +++ b/sahara/spec/classes/sahara_notify_spec.rb @@ -1,19 +1,19 @@ require 'spec_helper' describe 'sahara::notify' do let :facts do - { + @default_facts.merge({ :osfamily => 'Debian' - } + }) end describe 'when defaults with 
notify enabled' do let :params do {:enable_notifications => 'true'} end - it { is_expected.to contain_sahara_config('DEFAULT/control_exchange').with_value('openstack') } + it { is_expected.to contain_sahara_config('DEFAULT/control_exchange').with_value('') } it { is_expected.to contain_sahara_config('DEFAULT/notification_driver').with_value('messaging') } - it { is_expected.to contain_sahara_config('DEFAULT/notification_topics').with_value('notifications') } - it { is_expected.to contain_sahara_config('DEFAULT/notification_level').with_value('INFO') } + it { is_expected.to contain_sahara_config('DEFAULT/notification_topics').with_value('') } + it { is_expected.to contain_sahara_config('DEFAULT/notification_level').with_value('') } end describe 'when passing params' do diff --git a/sahara/spec/shared_examples.rb b/sahara/spec/shared_examples.rb index 70dc4c3dd..88010d7e2 100644 --- a/sahara/spec/shared_examples.rb +++ b/sahara/spec/shared_examples.rb @@ -11,7 +11,7 @@ is_expected.to contain_package(service[:name]).with({ :name => service[:package_name], :ensure => 'present', - :notify => ["Service[#{service[:name]}]", 'Exec[sahara-dbmanage]'] + :notify => "Service[#{service[:name]}]" }) is_expected.to contain_service(service[:name]).with({ :name => service[:service_name], @@ -32,7 +32,7 @@ is_expected.to contain_package(service[:name]).with({ :name => service[:package_name], :ensure => '2014.2-1', - :notify => ["Service[#{service[:name]}]", 'Exec[sahara-dbmanage]'] + :notify => "Service[#{service[:name]}]" }) is_expected.to contain_service(service[:name]).with({ :name => service[:service_name], diff --git a/sahara/spec/spec_helper.rb b/sahara/spec/spec_helper.rb index 3df4cede1..9bc7bcf96 100644 --- a/sahara/spec/spec_helper.rb +++ b/sahara/spec/spec_helper.rb @@ -5,6 +5,9 @@ RSpec.configure do |c| c.alias_it_should_behave_like_to :it_configures, 'configures' c.alias_it_should_behave_like_to :it_raises, 'raises' + c.before :each do + @default_facts = { :os_service_default => '' } + end end at_exit { RSpec::Puppet::Coverage.report! } diff --git a/sensu/README.md b/sensu/README.md index e6837108d..24720373d 100644 --- a/sensu/README.md +++ b/sensu/README.md @@ -69,7 +69,7 @@ To quickly try out Sensu, spin up a test virtual machine with Vagrant that alrea You can then access the API. - $ curl http://admin:secret@localhost:4567/info + $ curl http://admin:secret@192.168.56.10:4567/info Navigate to `192.168.56.10:3000` to use the uchiwa dashboard diff --git a/sensu/lib/puppet/provider/sensu_client_config/json.rb b/sensu/lib/puppet/provider/sensu_client_config/json.rb index 467be4017..8456bd96b 100644 --- a/sensu/lib/puppet/provider/sensu_client_config/json.rb +++ b/sensu/lib/puppet/provider/sensu_client_config/json.rb @@ -90,7 +90,7 @@ def keepalive end def keepalive=(value) - conf['client']['keepalive'] = value + conf['client']['keepalive'] = to_type(value) end def safe_mode diff --git a/sensu/lib/puppet/provider/sensu_enterprise_dashboard_config/json.rb b/sensu/lib/puppet/provider/sensu_enterprise_dashboard_config/json.rb index 78e41d3cc..8f0a5edf5 100644 --- a/sensu/lib/puppet/provider/sensu_enterprise_dashboard_config/json.rb +++ b/sensu/lib/puppet/provider/sensu_enterprise_dashboard_config/json.rb @@ -109,15 +109,15 @@ def user=(value) # Public: Retrieve the password for the Dashboard # # Returns the String password. - def password - conf['dashboard']['password'] + def pass + conf['dashboard']['pass'] end # Public: Set the Dashboard password # # Returns nothing. 
- def password=(value) - conf['dashboard']['password'] = value + def pass=(value) + conf['dashboard']['pass'] = value end # Public: Retrieve the Github config diff --git a/sensu/lib/puppet/type/sensu_client_config.rb b/sensu/lib/puppet/type/sensu_client_config.rb index 1c8300289..aaf6c697d 100644 --- a/sensu/lib/puppet/type/sensu_client_config.rb +++ b/sensu/lib/puppet/type/sensu_client_config.rb @@ -49,12 +49,8 @@ def insync?(is) desc "A set of attributes that configure the Sensu client socket." include PuppetX::Sensu::ToType - def is_to_s(hash = @is) - hash.keys.sort.map {|key| "#{key} => #{hash[key]}"}.join(", ") - end - - def should_to_s(hash = @should) - hash.keys.sort.map {|key| "#{key} => #{hash[key]}"}.join(", ") + munge do |value| + value.each { |k, v| value[k] = to_type(v) } end def insync?(is) @@ -88,6 +84,10 @@ def insync?(is) include PuppetX::Sensu::ToType + munge do |value| + value.each { |k, v| value[k] = to_type(v) } + end + def is_to_s(hash = @is) hash.keys.sort.map {|key| "#{key} => #{hash[key]}"}.join(", ") end @@ -116,6 +116,10 @@ def insync?(is) include PuppetX::Sensu::ToType + munge do |value| + value.each { |k, v| value[k] = to_type(v) } + end + def is_to_s(hash = @is) hash.keys.sort.map {|key| "#{key} => #{hash[key]}"}.join(", ") end diff --git a/sensu/manifests/repo/apt.pp b/sensu/manifests/repo/apt.pp index 80c95163d..d565ef7eb 100644 --- a/sensu/manifests/repo/apt.pp +++ b/sensu/manifests/repo/apt.pp @@ -36,6 +36,13 @@ 'source' => $sensu::repo_key_source, }, before => Package['sensu'], + notify => Exec['apt-update'], + } + + exec { + 'apt-update': + refreshonly => true, + command => '/usr/bin/apt-get update'; } if $sensu::enterprise { diff --git a/sensu/tests/provision_client.sh b/sensu/tests/provision_client.sh index 9ed969199..74011db02 100644 --- a/sensu/tests/provision_client.sh +++ b/sensu/tests/provision_client.sh @@ -5,7 +5,7 @@ # apt-get install -y python-software-properties wget --quiet http://apt.puppetlabs.com/puppetlabs-release-precise.deb -O /tmp/puppetlabs-release-precise.deb dpkg -i /tmp/puppetlabs-release-precise.deb -apt-get update -apt-get install -y ruby-json puppet-common #masterless puppet +apt-get update +apt-get install -y ruby-json puppet-common ruby-dev #masterless puppet sed -i '/templatedir/d' /etc/puppet/puppet.conf puppet module install sensu/sensu diff --git a/sensu/tests/provision_server.sh b/sensu/tests/provision_server.sh index e8e92fb1f..d9f15a305 100644 --- a/sensu/tests/provision_server.sh +++ b/sensu/tests/provision_server.sh @@ -5,8 +5,8 @@ # apt-get install -y python-software-properties wget --quiet http://apt.puppetlabs.com/puppetlabs-release-precise.deb -O /tmp/puppetlabs-release-precise.deb dpkg -i /tmp/puppetlabs-release-precise.deb -apt-get update -apt-get install -y ruby-json redis-server puppet-common #masterless puppet +apt-get update +apt-get install -y ruby-json redis-server puppet-common ruby-dev #masterless puppet sed -i '/templatedir/d' /etc/puppet/puppet.conf puppet module install sensu/sensu puppet module install puppetlabs/rabbitmq diff --git a/swift/CHANGELOG.md b/swift/CHANGELOG.md index 101173bda..b3bad2a0e 100644 --- a/swift/CHANGELOG.md +++ b/swift/CHANGELOG.md @@ -1,3 +1,43 @@ +##2015-11-25 - 7.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Liberty. 
+ +####Backwards-incompatible changes +- remove tenant parameter from keystone_user + +####Features +- add tag to package and service resources +- add swift::config class +- reflect provider change in puppet-openstacklib +- keystone/auth: make service description configurable +- add support for swift-object-expirer service +- drop useless comment in authtoken.conf.erb +- improve File resources idempotency +- proxy: ceilometer httpd support +- stop managing file modes +- add support for DLO configuration +- warn that object storage parameter mount_check changes next release +- provide means to disable log_requests in config templates +- add incoming/outgoing chmod params to storage/all +- rely on autorequire for config resource ordering +- add tempauth middleware options +- add tempurl middleware options +- config resources applied after config template + +####Bugfixes +- fix swift.conf / Swift_config ordering +- make sure Facter is only executed on agent +- add a blank line to the beginning of each filter + +####Maintenance +- initial msync run for all Puppet OpenStack modules +- spec: Enable webmock connect to IPv4 link-local +- try to use zuul-cloner to prepare fixtures +- remove class_parameter_defaults puppet-lint check +- acceptance: use common bits from puppet-openstack-integration +- fix rspec 3.x syntax + ##2015-10-10 - 6.1.0 ###Summary diff --git a/swift/README.md b/swift/README.md index 79b539e62..ba7491a40 100644 --- a/swift/README.md +++ b/swift/README.md @@ -1,7 +1,7 @@ swift ======= -6.1.0 - 2015.1 - Kilo +7.0.0 - 2015.2 - Liberty #### Table of Contents @@ -22,9 +22,9 @@ The swift module is a part of [OpenStack](https://github.com/openstack), an effo Module Description ------------------ -The swift module is a thorough attempt to make Puppet capable of managing the entirety of swift. This includes manifests to provision such things as keystone, storage backends, proxies, and the ring. Types are shipped as part of the swift module to assist in manipulation of configuration files. The classes in this module will deploy Swift using best practices for a typical deployment. +The swift module is a thorough attempt to make Puppet capable of managing the entirety of swift. This includes manifests to provision such things as keystone, storage backends, proxies, and the ring. Types are shipped as part of the swift module to assist in manipulation of configuration files. A custom service provider built around the swift-init tool is also provided as an option for enhanced swift service management. The classes in this module will deploy Swift using best practices for a typical deployment. -This module is tested in combination with other modules needed to build and leverage an entire Openstack software stack. These modules can be found, all pulled together in the [openstack module](https://github.com/stackforge/puppet-openstack). In addition, this module requires Puppet's [exported resources](http://docs.puppetlabs.com/puppet/3/reference/lang_exported.html). +This module is tested in combination with other modules needed to build and leverage an entire Openstack software stack. In addition, this module requires Puppet's [exported resources](http://docs.puppetlabs.com/puppet/3/reference/lang_exported.html). Setup ----- @@ -41,7 +41,7 @@ Setup You much first setup [exported resources](http://docs.puppetlabs.com/puppet/3/reference/lang_exported.html). -To utilize the swift module's functionality you will need to declare multiple resources. 
The following is a modified excerpt from the [openstack module](https://github.com/stackforge/puppet-openstack). This is not an exhaustive list of all the components needed, we recommend you consult and understand the [openstack module](https://github.com/stackforge/puppet-openstack) and the [core openstack](http://docs.openstack.org) documentation. +To utilize the swift module's functionality you will need to declare multiple resources. This is not an exhaustive list of all the components needed, we recommend you consult and understand the [core openstack](http://docs.openstack.org) documentation. **Defining a swift storage node** @@ -88,8 +88,7 @@ class { 'swift::storage::all': weight => 1, } -Swift::Ringsync<<||>> -``` +Swift::Ringsync<<||>>``` Usage ----- @@ -195,6 +194,34 @@ swift::storage::server { '6010': } ``` +### Define: swift::storage::filter::recon +Configure the swift recon middleware on a swift::storage::server +Can be configured on: account, container, object servers. + +### Define: swift::storage::filter::healthcheck +Configure the swift health check middleware on a swift::storage::server +Can be configured on: account, container, object servers. + +Declaring either the recon or health check middleware in a node manifest is required when specifying the recon or healthcheck middleware in an (account|container|object)_pipeline. + +example manifest: + +``` + +class { 'swift::storage::all': + storage_local_net_ip => $swift_local_net_ip, + account_pipeline => ['healthcheck', 'recon', 'account-server'], + container_pipeline => ['healthcheck', 'recon', 'container-server'], + object_pipeline => ['healthcheck', 'recon', 'object-server'], +} +$rings = [ + 'account', + 'object', + 'container'] +swift::storage::filter::recon { $rings: } +swift::storage::filter::healthcheck { $rings: } +``` + ####`namevar` The namevar/title for this type will map to the port where the server is hosted. @@ -245,6 +272,85 @@ class { 'swift::objectexpirer': } It is assumed that the object expirer service will usually be installed in a proxy node. On Red Hat-based distributions, if the class is included in a non-proxy node, the openstack-swift-proxy package will need to be installed. + +##Swiftinit service provider + +The 'swiftinit' provider is a custom provider of the service type. + +"Swift services are generally managed with swift-init. the general usage is swift-init , where service is the swift service to manage (for example object, container, account, proxy)" +From http://docs.openstack.org/developer/swift/admin_guide.html#managing-services + +This new provider is intended to improve puppet-swift deployments in the following ways: + +* The default service provider for puppet-swift is to use distribution specific service providers such as systemd and upstart. If distribution provided init scripts do not specify the full range of service commands, puppet will fall back to methods such as process name matching which is not very reliable. For example, if you were to tail a log file with the same name as a swift process, puppet will interpret that process table match as the swift-proxy service running and fail to start the swift service. +* Minimize customer impact: Using the swiftinit service provider enables more specific and targeted control of swift services. Swift-init provides graceful stop/start reload/restart of swift services which will allow swift processes to finish any current requests before completely stopping the old processes. 
+* Specific control of services starting at boot is implemented by adding or removing +a templated init or services file. This is managed by this provider. For EL and non Ubuntu Debian OS types, this provider will also make calls out to systemctl reload and systemctl enable/disable. +* Future use of the swiftinit provider is planned to allow for starting multiple servers using swift-init and multiple configuration files, to support a dedicated replication network. + + +### Using the swiftinit service provider +* To use the swiftinit service provider set "service_provider" on the supported components you have defined in your config manifest. + +``` + class { '::swift::storage::account': + service_provider => 'swiftinit', + } + class { '::swift::storage::container': + service_provider => 'swiftinit', + } + class { '::swift::storage::object': + service_provider => 'swiftinit', + } + class {'::swift::objectexpirer': + service_provider => 'swiftinit', + } + class { '::swift::proxy': + service_provider => 'swiftinit', + } +``` + +Moving from the default service providers to the swiftinit service provider is supported. On the next puppet run after setting the swiftinit service provider swift services are stopped on the old provider and immediately started using swift-init. This provides a supported upgrade path with no down time. + +The swiftinit service provider uses the following service type parameters to +manage swift services in a non-standard way. + +* `manifest` is used to pass in the config file the service should be +configured with. Ex `object-server.conf` +* `pattern` is used to pass in the debian/redhat osfamily specific service names as found in params.pp. Used to match names on services files as provided by distro packages. Debian/Ubuntu service names already match names used by swift-init. + +To aid with input validation to the swiftinit provider there is a defined type swift::service + +### Class: swift::service + +This is a wrapper defined type for the swift service providers. +It provides a centralized location to manage and validate input for use with the default +as well as the swiftinit service providers. + +####`namevar` +The namevar/title of swift::service must be one of the swift_init_service_names listed in swift::params.pp. +These names are parsed by the swiftinit provider to provide service management as well as template boot files. + +####`os_family_service_name` +The distribution specific service name from swift::params. This name is passed to the default service provider. +This name is used by the swiftinit provider to match on default provider service names when moving from a default +provider to the swiftinit provider. The swiftinit provider also uses the service_name to manage service and init files. + +####`config_file_name` +The swift service configuration file name. It must be one of the following: +object-server.conf, account-server.conf, container-server.conf, proxy-server.conf, object-expirer.conf. + +####`service_ensure` +The state of the service to ensure, running or stopped. + +####`enabled` +Should the service be enabled to start at boot. + +####`service_provider` +To use the swiftinit service provider to manage swift services, set service_provider to "swiftinit". When enable is true the provider +will populate boot files that start swift using swift-init at boot. Defaults to $::swift::params::service_provider. + + ### Verifying installation This modules ships with a simple Ruby script that validates whether or not your swift cluster is functional. 
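As a rough sketch of how the swift::service parameters documented above fit together, the declaration below uses the documented object-server defaults; it is illustrative only, since in practice the swift::proxy and swift::storage::* classes declare swift::service for you and you would normally only set service_provider on those classes as shown earlier.

```
# Illustrative sketch only: swift::storage::object normally declares this
# resource itself. The title must be one of the swift_init_service_names
# listed in swift::params.pp; values below are the documented object-server
# defaults, with service_provider switched to swiftinit.
swift::service { 'swift-object-server':
  os_family_service_name => $::swift::params::object_server_service_name,
  config_file_name       => 'object-server.conf',
  service_ensure         => 'running',
  enabled                => true,
  service_provider       => 'swiftinit',
}
```

The define simply forwards these values either to the distribution service provider or to the swiftinit provider, so the same declaration works unchanged whichever provider is selected.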
@@ -258,7 +364,7 @@ Implementation ### swift -swift is a combination of Puppet manifest and ruby code to delivery configuration and extra functionality through types and providers. +puppet-swift is a combination of Puppet manifest and ruby code to deliver configuration and extra functionality through types and providers. ### Types diff --git a/swift/files/swift-account.conf.upstart b/swift/files/swift-account.conf.upstart deleted file mode 100644 index cc85cd73c..000000000 --- a/swift/files/swift-account.conf.upstart +++ /dev/null @@ -1,23 +0,0 @@ -# Temporarily managed by Puppet until -# 931893 is resolved -# swift-account-server - SWIFT Object Server -# -# The swift account server. - -description "SWIFT Account Server" -author "Marc Cluet " - -start on runlevel [2345] -stop on runlevel [016] - -pre-start script - if [ $(find /etc/swift/account-server/ -type f 2>/dev/null | wc -l) -gt 0 ]; then - exec /usr/bin/swift-init account-server start - elif [ -f /etc/swift/account-server.conf ]; then - exec /usr/bin/swift-init account-server start - else - exit 1 - fi -end script - -post-stop exec /usr/bin/swift-init account-server stop diff --git a/swift/files/swift-container-sync.conf.upstart b/swift/files/swift-container-sync.conf.upstart deleted file mode 100644 index 54b010cf7..000000000 --- a/swift/files/swift-container-sync.conf.upstart +++ /dev/null @@ -1,20 +0,0 @@ -# swift-container-sync - SWIFT Container Sync -# -# The swift container sync. - -description "SWIFT Container Sync" -author "Sergio Rubio " - -start on runlevel [2345] -stop on runlevel [016] - -pre-start script - if [ -f "/etc/swift/container-server.conf" ]; then - exec /usr/bin/swift-init container-sync start - else - exit 1 - fi -end script - -post-stop exec /usr/bin/swift-init container-sync stop - diff --git a/swift/files/swift-container.conf.upstart b/swift/files/swift-container.conf.upstart deleted file mode 100644 index aeaab6511..000000000 --- a/swift/files/swift-container.conf.upstart +++ /dev/null @@ -1,23 +0,0 @@ -# Temporarily managed by Puppet until -# 931893 is resolved -# swift-container-server - SWIFT Object Server -# -# The swift container server. - -description "SWIFT Container Server" -author "Marc Cluet " - -start on runlevel [2345] -stop on runlevel [016] - -pre-start script - if [ $(find /etc/swift/container-server/ -type f 2>/dev/null | wc -l) -gt 0 ]; then - exec /usr/bin/swift-init container-server start - elif [ -f /etc/swift/container-server.conf ]; then - exec /usr/bin/swift-init container-server start - else - exit 1 - fi -end script - -post-stop exec /usr/bin/swift-init container-server stop diff --git a/swift/files/swift-object.conf.upstart b/swift/files/swift-object.conf.upstart deleted file mode 100644 index 45eb2f773..000000000 --- a/swift/files/swift-object.conf.upstart +++ /dev/null @@ -1,23 +0,0 @@ -# Temporarily managed by Puppet until -# 931893 is resolved -# swift-object-server - SWIFT Object Server -# -# The swift object server. 
- -description "SWIFT Object Server" -author "Marc Cluet " - -start on runlevel [2345] -stop on runlevel [016] - -pre-start script - if [ $(find /etc/swift/object-server/ -type f 2>/dev/null | wc -l) -gt 0 ]; then - exec /usr/bin/swift-init object-server start - elif [ -f /etc/swift/object-server.conf ]; then - exec /usr/bin/swift-init object-server start - else - exit 1 - fi -end script - -post-stop exec /usr/bin/swift-init object-server stop diff --git a/swift/lib/puppet/provider/service/swiftinit.rb b/swift/lib/puppet/provider/service/swiftinit.rb new file mode 100644 index 000000000..6bbab6a8d --- /dev/null +++ b/swift/lib/puppet/provider/service/swiftinit.rb @@ -0,0 +1,202 @@ +# swift-init service management +# +# author Adam Vinsh +Puppet::Type.type(:service).provide :swiftinit, :parent => :service do + desc 'Manage swift services using swift-init' + + has_feature :enableable + has_feature :refreshable + + confine :any => [ + Facter.value(:osfamily) == 'Debian', + Facter.value(:osfamily) == 'RedHat' + ] + + # Check if swift service is running using swift-init + def status + if swiftinit_run('status', false).exitstatus == 0 + return :running + else + # Transition block for systemd systems. If swift-init reports service is + # not running then send stop to systemctl so that service can be started + # with swift-init and fully managed by this provider. + if Facter.value(:operatingsystem) != 'Ubuntu' + systemctl_run('stop', [resource[:pattern]], false) + systemctl_run('disable', [resource[:pattern]], false) + end + return :stopped + end + end + + # Start this swift service using swift-init + def start + swiftinit_run('start', true) + end + + # Stop this swift service using swift-init allowing + # current requests to finish on supporting servers + def stop + swiftinit_run('shutdown', true) + end + + # Restart this swift service using swift-init reload, + # graceful shutdown then restart on supporting servers + def restart + swiftinit_run('reload', true) + end + + def refresh + if @resource[:ensure] == :running + restart + else + debug 'Skipping restart, service is not running' + end + end + + # Returns service enabled status using systemctl on Redhat/Debian + # and using presence of init file on Ubuntu. + def enabled? + if Facter.value(:operatingsystem) != 'Ubuntu' + if Puppet::FileSystem.exist?("/etc/systemd/system/#{resource[:pattern]}.service") + if systemctl_run('is-enabled', [resource[:pattern]], false).exitstatus == 0 + return :true + end + else + return :false + end + elsif Facter.value(:operatingsystem) == 'Ubuntu' + if Puppet::FileSystem.exist?("/etc/init/#{resource[:pattern]}.conf") + return :true + else + return :false + end + end + end + + # Enable the service at boot. For Redhat and Debian create services + file and notify systemctl. For Ubuntu create init file. + def enable + if Facter.value(:operatingsystem) != 'Ubuntu' + file = Puppet::Type.type(:file).new( + :name => "/etc/systemd/system/#{resource[:pattern]}.service", + :ensure => :present, + :content => systemd_template, + :mode => '0644' + ) + file.write(file) + systemctl_run('daemon-reload', nil, true) + systemctl_run('enable', [resource[:pattern]], false) + elsif Facter.value(:operatingsystem) == 'Ubuntu' + file = Puppet::Type.type(:file).new( + :name => "/etc/init/#{resource[:pattern]}.conf", + :ensure => :present, + :content => upstart_template, + :mode => '0644' + ) + file.write(file) + end + end + + # Disable the service at boot. For Redhat and Debian, + delete services file and notify systemctl. 
For Ubuntu + # remove init file. + def disable + if Facter.value(:operatingsystem) != 'Ubuntu' + systemctl_run('disable', [resource[:pattern]], false) + File.delete("/etc/systemd/system/#{resource[:pattern]}.service") + systemctl_run('daemon-reload', nil, true) + elsif Facter.value(:operatingsystem) == 'Ubuntu' + File.delete("/etc/init/#{resource[:pattern]}.conf") + end + end + + # Wrapper to handle swift-init calls on supported osfamily + def swiftinit_run(command, failonfail) + execute([['swift-init'], ["#{type}-#{subtype}#{manifest}"], [command]], + :failonfail => failonfail) + rescue Puppet::ExecutionFailure => detail + @resource.fail Puppet::Error, + "swift-init #{type}-#{subtype}#{manifest} #{command} + failed with: #{@resource.ref}: #{detail}", detail + end + + # Wrapper to handle systemctl calls on supported osfamily + def systemctl_run(command, unit_file, failonfail) + if unit_file + execute([['systemctl'], [command], [unit_file]], :failonfail => failonfail) + else + execute([['systemctl'], [command]], :failonfail => failonfail) + end + rescue Puppet::ExecutionFailure => detail + @resource.fail Puppet::Error, + "systemctl #{command} #{unit_file} + failed with: #{@resource.ref}: #{detail}", detail + end + + # Split the service type off of name + # type can be object, account, container. + def type + resource[:name].split(/-/)[1] + end + + # Split the service subtype off of name + # subtype can be: + # For type account: auditor, reaper, replicator, server. + # For type container: auditor, replicator, server, sync, updater. + # For type object: auditor, replicator, server, updater, expirer. + # For type proxy: server. + def subtype + resource[:name].split(/-/)[2] + end + + # In this provider 'manifest' is the name of the config file that the service + # uses to run. If the config file is a default name ex: object-server.conf. + # then swift-init can be called without specifying the config file. + # TODO add logic to start servers using multiple config files, used to run + # swift with a dedicated replication network. + def manifest + if "#{resource[:manifest]}" == "#{type}-server.conf" + return nil + elsif "#{resource[:manifest]}" == 'object-expirer.conf' + return nil + else return ".#{resource[:manifest].split('.conf')[1]}" + end + end + + # Begin service template boot section. + def upstart_template + %(# swift-#{type}-#{subtype} +# +# Starts the swift-#{type}-#{subtype}. 
+ +description "SWIFT #{type} #{subtype}" +author "Puppet" + +start on runlevel [2345] +stop on runlevel [016] + +pre-start script +if [ -f /etc/swift/#{resource[:manifest]} ]; then + exec /usr/bin/swift-init #{type}-#{subtype} start +else + exit 1 +fi +end script + +post-stop exec /usr/bin/swift-init #{type}-#{subtype} stop) + end + + def systemd_template + %([Unit] +Description=OpenStack "SWIFT #{type} #{subtype}" +After=syslog.target network.target + +[Service] +Type=forking +User=root +ExecStart=/usr/bin/swift-init #{type}-#{subtype} start + +[Install] +WantedBy=multi-user.target) + end +end diff --git a/swift/lib/puppet/provider/swift_ring_builder.rb b/swift/lib/puppet/provider/swift_ring_builder.rb index 2b30cc8bd..a34f2e69f 100644 --- a/swift/lib/puppet/provider/swift_ring_builder.rb +++ b/swift/lib/puppet/provider/swift_ring_builder.rb @@ -20,19 +20,41 @@ def self.address_string(address) def self.lookup_ring object_hash = {} if File.exists?(builder_file_path) + # Swift < 2.2.2 Skip first 4 info lines from swift-ring-builder output if rows = swift_ring_builder(builder_file_path).split("\n")[4..-1] + # Swift 2.2.2+ Skip additional line to account for Overload info + if !rows[0].nil? and rows[0].start_with?('Devices:') + rows.shift + end rows.each do |row| # Swift 1.7+ output example: + # /etc/swift/object.builder, build version 1 + # 262144 partitions, 1.000000 replicas, 1 regions, 1 zones, 1 devices, 0.00 balance, 0.00 dispersion + # The minimum number of hours before a partition can be reassigned is 1 # Devices: id region zone ip address port name weight partitions balance meta # 0 1 2 127.0.0.1 6022 2 1.00 262144 0.00 # 0 1 3 192.168.101.15 6002 1 1.00 262144 -100.00 # # Swift 1.8.0 output example: + # /etc/swift/object.builder, build version 1 + # 262144 partitions, 1.000000 replicas, 1 regions, 1 zones, 1 devices, 0.00 balance, 0.00 dispersion + # The minimum number of hours before a partition can be reassigned is 1 # Devices: id region zone ip address port name weight partitions balance meta # 2 1 2 192.168.101.14 6002 1 1.00 262144 200.00 m2 # 0 1 3 192.168.101.15 6002 1 1.00 262144-100.00 m2 # # Swift 1.8+ output example: + # /etc/swift/object.builder, build version 1 + # 262144 partitions, 1.000000 replicas, 1 regions, 1 zones, 1 devices, 0.00 balance, 0.00 dispersion + # The minimum number of hours before a partition can be reassigned is 1 + # Devices: id region zone ip address port replication ip replication port name weight partitions balance meta + # 0 1 2 127.0.0.1 6021 127.0.0.1 6021 2 1.00 262144 0.00 + # + # Swift 2.2.2+ output example: + # /etc/swift/object.builder, build version 1 + # 262144 partitions, 1.000000 replicas, 1 regions, 1 zones, 1 devices, 0.00 balance, 0.00 dispersion + # The minimum number of hours before a partition can be reassigned is 1 + # The overload factor is 0.00% (0.000000) # Devices: id region zone ip address port replication ip replication port name weight partitions balance meta # 0 1 2 127.0.0.1 6021 127.0.0.1 6021 2 1.00 262144 0.00 # Swift 1.8+ output example: diff --git a/swift/manifests/objectexpirer.pp b/swift/manifests/objectexpirer.pp index ecf0dcd52..0cd3d75d7 100644 --- a/swift/manifests/objectexpirer.pp +++ b/swift/manifests/objectexpirer.pp @@ -56,7 +56,14 @@ # (optional) Report interval, in seconds. # Defaults to 300. # - +# [*service_provider*] +# (optional) +# To use the swiftinit service provider to manage swift services, set +# service_provider to "swiftinit". 
When enable is true the provider +# will populate boot files that start swift using swift-init at boot. +# See README for more details. +# Defaults to $::swift::params::service_provider. +# class swift::objectexpirer( $manage_service = true, $enabled = true, @@ -71,9 +78,8 @@ $reclaim_age = 604800, $recon_cache_path = '/var/cache/swift', $report_interval = 300, -) { - - include ::swift::params + $service_provider = $::swift::params::service_provider +) inherits ::swift::params { Swift_config<| |> ~> Service['swift-object-expirer'] Swift_object_expirer_config<||> ~> Service['swift-object-expirer'] @@ -109,12 +115,11 @@ } } - service { 'swift-object-expirer': - ensure => $service_ensure, - name => $::swift::params::object_expirer_service_name, - enable => $enabled, - provider => $::swift::params::service_provider, - tag => 'swift-service', + swift::service { 'swift-object-expirer': + os_family_service_name => $::swift::params::object_expirer_service_name, + service_ensure => $service_ensure, + enabled => $enabled, + config_file_name => 'object-expirer.conf', + service_provider => $service_provider, } } - diff --git a/swift/manifests/params.pp b/swift/manifests/params.pp index a4aa65d89..dc82cf283 100644 --- a/swift/manifests/params.pp +++ b/swift/manifests/params.pp @@ -6,21 +6,22 @@ $package_name = 'swift' $client_package = 'python-swiftclient' $proxy_package_name = 'swift-proxy' - $proxy_service_name = 'swift-proxy' + $proxy_server_service_name = 'swift-proxy' $object_package_name = 'swift-object' - $object_service_name = 'swift-object' + $object_server_service_name = 'swift-object' $object_auditor_service_name = 'swift-object-auditor' $object_replicator_service_name = 'swift-object-replicator' $object_updater_service_name = 'swift-object-updater' $object_expirer_package_name = 'swift-object-expirer' $object_expirer_service_name = 'swift-object-expirer' $container_package_name = 'swift-container' - $container_service_name = 'swift-container' + $container_server_service_name = 'swift-container' $container_auditor_service_name = 'swift-container-auditor' $container_replicator_service_name = 'swift-container-replicator' $container_updater_service_name = 'swift-container-updater' + $container_sync_service_name = 'swift-container-sync' $account_package_name = 'swift-account' - $account_service_name = 'swift-account' + $account_server_service_name = 'swift-account' $account_auditor_service_name = 'swift-account-auditor' $account_reaper_service_name = 'swift-account-reaper' $account_replicator_service_name = 'swift-account-replicator' @@ -35,21 +36,22 @@ $package_name = 'openstack-swift' $client_package = 'python-swiftclient' $proxy_package_name = 'openstack-swift-proxy' - $proxy_service_name = 'openstack-swift-proxy' + $proxy_server_service_name = 'openstack-swift-proxy' $object_package_name = 'openstack-swift-object' - $object_service_name = 'openstack-swift-object' + $object_server_service_name = 'openstack-swift-object' $object_auditor_service_name = 'openstack-swift-object-auditor' $object_replicator_service_name = 'openstack-swift-object-replicator' $object_updater_service_name = 'openstack-swift-object-updater' $object_expirer_package_name = 'openstack-swift-proxy' $object_expirer_service_name = 'openstack-swift-object-expirer' $container_package_name = 'openstack-swift-container' - $container_service_name = 'openstack-swift-container' + $container_server_service_name = 'openstack-swift-container' $container_auditor_service_name = 'openstack-swift-container-auditor' 
$container_replicator_service_name = 'openstack-swift-container-replicator' $container_updater_service_name = 'openstack-swift-container-updater' + $container_sync_service_name = 'openstack-swift-container-sync' $account_package_name = 'openstack-swift-account' - $account_service_name = 'openstack-swift-account' + $account_server_service_name = 'openstack-swift-account' $account_auditor_service_name = 'openstack-swift-account-auditor' $account_reaper_service_name = 'openstack-swift-account-reaper' $account_replicator_service_name = 'openstack-swift-account-replicator' @@ -60,4 +62,21 @@ fail("Unsupported osfamily: ${::osfamily} for os ${::operatingsystem}") } } + $swift_init_service_names = [ + 'swift-proxy-server', + 'swift-object-auditor', + 'swift-object-expirer', + 'swift-object-replicator', + 'swift-object-server', + 'swift-object-updater', + 'swift-account-auditor', + 'swift-account-reaper', + 'swift-account-replicator', + 'swift-account-server', + 'swift-container-auditor', + 'swift-container-replicator', + 'swift-container-server', + 'swift-container-sync', + 'swift-container-updater', + ] } diff --git a/swift/manifests/proxy.pp b/swift/manifests/proxy.pp index bec925e2a..7a9f898b0 100644 --- a/swift/manifests/proxy.pp +++ b/swift/manifests/proxy.pp @@ -98,6 +98,14 @@ # Configures log_name for swift proxy-server. # Optional. Defaults to proxy-server # +# [*service_provider*] +# (optional) +# To use the swiftinit service provider to manage swift services, set +# service_provider to "swiftinit". When enable is true the provider +# will populate boot files that start swift using swift-init at boot. +# See README for more details. +# Defaults to $::swift::params::service_provider. +# # == Examples # # == Authors @@ -129,13 +137,13 @@ $node_timeout = undef, $manage_service = true, $enabled = true, - $package_ensure = 'present' -) { + $package_ensure = 'present', + $service_provider = $::swift::params::service_provider +) inherits ::swift::params { - include ::swift::params include ::concat::setup - Swift_config<| |> ~> Service['swift-proxy'] + Swift_config<| |> ~> Service['swift-proxy-server'] validate_bool($account_autocreate) validate_bool($allow_account_management) @@ -209,13 +217,12 @@ } } - service { 'swift-proxy': - ensure => $service_ensure, - name => $::swift::params::proxy_service_name, - enable => $enabled, - provider => $::swift::params::service_provider, - hasstatus => true, - subscribe => Concat['/etc/swift/proxy-server.conf'], - tag => 'swift-service', + swift::service { 'swift-proxy-server': + os_family_service_name => $::swift::params::proxy_server_service_name, + service_ensure => $service_ensure, + enabled => $enabled, + config_file_name => 'proxy-server.conf', + service_provider => $service_provider, + subscribe => Concat['/etc/swift/proxy-server.conf'], } } diff --git a/swift/manifests/proxy/ceilometer.pp b/swift/manifests/proxy/ceilometer.pp index a829e8b28..bdcba47d7 100644 --- a/swift/manifests/proxy/ceilometer.pp +++ b/swift/manifests/proxy/ceilometer.pp @@ -7,6 +7,43 @@ # # == Parameters # +# [*rabbit_host*] +# (Optional) IP or hostname of the rabbit server. +# Defaults to '127.0.0.1'. +# +# [*rabbit_port*] +# (Optional) Port of the rabbit server. +# Defaults to 5672. +# +# [*rabbit_hosts*] +# (Optional) IP or hostname of the rabbits servers. +# comma separated array (ex: ['1.0.0.10:5672','1.0.0.11:5672']) +# Defaults to undef. +# +# [*rabbit_user*] +# (Optional) Username for rabbit. +# Defaults to 'guest'. 
+# +# [*rabbit_password*] +# (Optional) Password for rabbit user. +# Defaults to 'guest'. +# +# [*rabbit_virtual_host*] +# (Optional) Virtual host to use. +# Defaults to '/'. +# +# [*driver*] +# (Optional) The Drivers(s) to handle sending notifications. +# Defaults to undef. +# +# [*topic*] +# (Optional) AMQP topic used for OpenStack notifications. +# Defaults to undef. +# +# [*control_exchange*] +# (Optional) The default exchange under which topics are scoped. +# Defaults to undef. +# # [*ensure*] # Enable or not ceilometer fragment # Defaults to 'present' @@ -28,8 +65,17 @@ # Copyright 2013 eNovance licensing@enovance.com # class swift::proxy::ceilometer( - $ensure = 'present', - $group = 'ceilometer', + $rabbit_user = 'guest', + $rabbit_password = 'guest', + $rabbit_host = '127.0.0.1', + $rabbit_port = '5672', + $rabbit_hosts = undef, + $rabbit_virtual_host = '/', + $driver = undef, + $topic = undef, + $control_exchange = undef, + $ensure = 'present', + $group = 'ceilometer', ) inherits swift { User['swift'] { @@ -54,4 +100,9 @@ require => Class['::ceilometer'], } + package { 'python-ceilometermiddleware': + ensure => $ensure, + tag => 'openstack', + } + } diff --git a/swift/manifests/service.pp b/swift/manifests/service.pp new file mode 100644 index 000000000..4682c4b91 --- /dev/null +++ b/swift/manifests/service.pp @@ -0,0 +1,71 @@ +# == Define: swift::service +# +# Wrapper class to configure swift service providers +# +# === Parameters: +# +# [*title*] The name of the swift service to manage +# Mandatory. ex: 'swift-object-server' valid names +# are listed in swift::params.pp at $swift_init_service_names +# +# [*os_family_service_name*] +# (required) The distribution specific service name. +# +# [*config_file_name*] +# (required) The service configuration file name. +# Starting at the path "/etc/swift/" ex:"object-server.conf" +# +# [*service_ensure*] +# (optional) State of service to ensure, running or stopped. +# Default: undef +# +# [*enabled*] +# (optional) Should the service be enabled to start +# at boot. Default: true +# +# [*service_provider*] +# (optional) +# To use the swiftinit service provider to manage swift services, set +# service_provider to "swiftinit". When enable is true the provider +# will populate boot files that start swift using swift-init at boot. +# See README for more details. +# Defaults to $::swift::params::service_provider. +# +define swift::service( + $os_family_service_name, + $config_file_name, + $service_ensure = undef, + $enabled = true, + $service_provider = $::swift::params::service_provider, +) { + + include ::swift::params + + if(! 
member($::swift::params::swift_init_service_names, $name)) { + fail("swift::service name: ${name} is not a valid swift_init_service_name") + } + + if $service_provider != 'swiftinit' { + service { $name: + ensure => $service_ensure, + name => $os_family_service_name, + hasstatus => true, + enable => $enabled, + provider => $service_provider, + tag => 'swift-service', + subscribe => $subscribe, + } + } elsif $service_provider == 'swiftinit' { + service { $name: + ensure => $service_ensure, + enable => $enabled, + hasstatus => true, + hasrestart => true, + provider => 'swiftinit', + pattern => $os_family_service_name, + manifest => $config_file_name, + tag => 'swift-service', + subscribe => $subscribe, + } + } +} diff --git a/swift/manifests/storage/account.pp b/swift/manifests/storage/account.pp index 6ea122465..e4c41aebb 100644 --- a/swift/manifests/storage/account.pp +++ b/swift/manifests/storage/account.pp @@ -2,8 +2,8 @@ # # == Parameters # [*enabled*] -# (optional) Should the service be enabled. -# Defaults to true +# (optional) Should the service be enabled to start +# at boot. Defaults to true # # [*manage_service*] # (optional) Whether the service should be managed by Puppet. @@ -13,22 +13,37 @@ # (optional) Value of package resource parameter 'ensure'. # Defaults to 'present'. # +# [*config_file_name*] +# (optional) The configuration file name. +# Starting at the path "/etc/swift/" +# Defaults to "account-server.conf" +# +# [*service_provider*] +# (optional) +# To use the swiftinit service provider to manage swift services, set +# service_provider to "swiftinit". When enable is true the provider +# will populate boot files that start swift using swift-init at boot. +# See README for more details. +# Defaults to $::swift::params::service_provider. 
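+#
+# == Examples
+#
+# A minimal sketch of opting into the swiftinit provider for this class
+# (class and parameter names are the ones defined in this module; the
+# matching ring, storage and proxy setup is assumed to exist already):
+#
+#   class { '::swift::storage::account':
+#     service_provider => 'swiftinit',
+#   }
+#
+# Leaving service_provider at its default keeps the distribution-specific
+# service names resolved from swift::params.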
+# class swift::storage::account( - $manage_service = true, - $enabled = true, - $package_ensure = 'present' -) { + $manage_service = true, + $enabled = true, + $package_ensure = 'present', + $config_file_name = 'account-server.conf', + $service_provider = $::swift::params::service_provider +) inherits ::swift::params { Swift_config<| |> ~> Service['swift-account-reaper'] Swift_config<| |> ~> Service['swift-account-auditor'] swift::storage::generic { 'account': - manage_service => $manage_service, - enabled => $enabled, - package_ensure => $package_ensure, - } - - include ::swift::params + manage_service => $manage_service, + enabled => $enabled, + package_ensure => $package_ensure, + config_file_name => $config_file_name, + service_provider => $service_provider +} if $manage_service { if $enabled { @@ -38,21 +53,21 @@ } } - service { 'swift-account-reaper': - ensure => $service_ensure, - name => $::swift::params::account_reaper_service_name, - enable => $enabled, - provider => $::swift::params::service_provider, - require => Package['swift-account'], - tag => 'swift-service', + swift::service { 'swift-account-reaper': + os_family_service_name => $::swift::params::account_reaper_service_name, + service_ensure => $service_ensure, + enabled => $enabled, + config_file_name => $config_file_name, + service_provider => $service_provider, + require => Package['swift-account'], } - service { 'swift-account-auditor': - ensure => $service_ensure, - name => $::swift::params::account_auditor_service_name, - enable => $enabled, - provider => $::swift::params::service_provider, - require => Package['swift-account'], - tag => 'swift-service', + swift::service { 'swift-account-auditor': + os_family_service_name => $::swift::params::account_auditor_service_name, + service_ensure => $service_ensure, + enabled => $enabled, + config_file_name => $config_file_name, + service_provider => $service_provider, + require => Package['swift-account'], } } diff --git a/swift/manifests/storage/container.pp b/swift/manifests/storage/container.pp index f6dc76ab3..82eaee67c 100644 --- a/swift/manifests/storage/container.pp +++ b/swift/manifests/storage/container.pp @@ -1,9 +1,10 @@ +# Class swift::storage::container # # === Parameters # # [*enabled*] -# (optional) Should the service be enabled. -# Defaults to true +# (optional) Should the service be enabled to start +# at boot. Defaults to true # # [*manage_service*] # (optional) Whether the service should be managed by Puppet. @@ -17,24 +18,39 @@ # (optional) A list of hosts allowed in the X-Container-Sync-To # field for containers. Defaults to one entry list '127.0.0.1'. # +# [*config_file_name*] +# (optional) The configuration file name. +# Starting at the path "/etc/swift/" +# Defaults to "object-server.conf" +# +# [*service_provider*] +# (optional) +# To use the swiftinit service provider to manage swift services, set +# service_provider to "swiftinit". When enable is true the provider +# will populate boot files that start swift using swift-init at boot. +# See README for more details. +# Defaults to $::swift::params::service_provider. 
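+#
+# == Examples
+#
+# A minimal sketch for this class (names taken from the class definition
+# below; 'container-server.conf' is simply the class default repeated here
+# for clarity):
+#
+#   class { '::swift::storage::container':
+#     service_provider => 'swiftinit',
+#     config_file_name => 'container-server.conf',
+#   }
+#
+# On Debian-family hosts the same service_provider and config_file_name
+# are also passed to the swift-container-sync service declared below.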
+# class swift::storage::container( $manage_service = true, $enabled = true, $package_ensure = 'present', $allowed_sync_hosts = ['127.0.0.1'], -) { + $config_file_name = 'container-server.conf', + $service_provider = $::swift::params::service_provider +) inherits ::swift::params { Swift_config<| |> ~> Service['swift-container-updater'] Swift_config<| |> ~> Service['swift-container-auditor'] swift::storage::generic { 'container': - manage_service => $manage_service, - enabled => $enabled, - package_ensure => $package_ensure, + manage_service => $manage_service, + enabled => $enabled, + package_ensure => $package_ensure, + config_file_name => $config_file_name, + service_provider => $service_provider } - include ::swift::params - if $manage_service { if $enabled { $service_ensure = 'running' @@ -43,35 +59,32 @@ } } - service { 'swift-container-updater': - ensure => $service_ensure, - name => $::swift::params::container_updater_service_name, - enable => $enabled, - provider => $::swift::params::service_provider, - require => Package['swift-container'], - tag => 'swift-service', + swift::service { 'swift-container-updater': + os_family_service_name => $::swift::params::container_updater_service_name, + service_ensure => $service_ensure, + enabled => $enabled, + config_file_name => $config_file_name, + service_provider => $service_provider, + require => Package['swift-container'], } - service { 'swift-container-auditor': - ensure => $service_ensure, - name => $::swift::params::container_auditor_service_name, - enable => $enabled, - provider => $::swift::params::service_provider, - require => Package['swift-container'], - tag => 'swift-service', + swift::service { 'swift-container-auditor': + os_family_service_name => $::swift::params::container_auditor_service_name, + service_ensure => $service_ensure, + enabled => $enabled, + config_file_name => $config_file_name, + service_provider => $service_provider, + require => Package['swift-container'], } - if $::operatingsystem == 'Ubuntu' { - # The following service conf is missing in Ubunty 12.04 - file { '/etc/init/swift-container-sync.conf': - source => 'puppet:///modules/swift/swift-container-sync.conf.upstart', - require => Package['swift-container'], - } - service { 'swift-container-sync': - ensure => $service_ensure, - enable => $enabled, - provider => $::swift::params::service_provider, - require => File['/etc/init/swift-container-sync.conf'], + if $::osfamily == 'Debian' { + swift::service { 'swift-container-sync': + os_family_service_name => $::swift::params::container_sync_service_name, + service_ensure => $service_ensure, + enabled => $enabled, + config_file_name => $config_file_name, + service_provider => $service_provider, + require => Package['swift-container'], } Swift_config<| |> ~> Service['swift-container-sync'] } diff --git a/swift/manifests/storage/generic.pp b/swift/manifests/storage/generic.pp index 763dcc672..3d5be94c3 100644 --- a/swift/manifests/storage/generic.pp +++ b/swift/manifests/storage/generic.pp @@ -3,8 +3,8 @@ # # == Parameters # [*enabled*] -# (optional) Should the service be enabled. -# Defaults to true +# (optional) Should the service be enabled to start +# at boot. Defaults to true # # [*manage_service*] # (optional) Whether the service should be managed by Puppet. @@ -14,45 +14,49 @@ # (optional) The desired ensure state of the swift storage packages. # Defaults to present. # +# [*config_file_name*] +# (optional) The configuration file name. 
+# Starting at the path "/etc/swift/" +# Defaults to "${name}-server.conf" +# # [*service_provider*] -# (optional) The provider to use for the service +# (optional) +# To use the swiftinit service provider to manage swift services, set +# service_provider to "swiftinit". When enable is true the provider +# will populate boot files that start swift using swift-init at boot. +# See README for more details. +# Defaults to $::swift::params::service_provider. # # == Dependencies # Requires Class[swift::storage] -# == Examples -# -# == Authors -# -# Dan Bode dan@puppetlabs.com # -# == Copyright -# -# Copyright 2011 Puppetlabs Inc, unless otherwise noted. define swift::storage::generic( $manage_service = true, $enabled = true, $package_ensure = 'present', + $config_file_name = "${name}-server.conf", $service_provider = $::swift::params::service_provider ) { include ::swift::params Class['swift::storage'] -> Swift::Storage::Generic[$name] - Swift_config<| |> ~> Service["swift-${name}"] + Swift_config<| |> ~> Service["swift-${name}-server"] validate_re($name, '^object|container|account$') package { "swift-${name}": ensure => $package_ensure, - # this is a way to dynamically build the variables to lookup - # sorry its so ugly :( - name => inline_template("<%= scope.lookupvar('::swift::params::${name}_package_name') %>"), + name => getvar("::swift::params::${name}_package_name"), tag => ['openstack', 'swift-package'], - before => Service["swift-${name}", "swift-${name}-replicator"], + before => Service["swift-${name}-server", "swift-${name}-replicator"], } file { "/etc/swift/${name}-server/": - ensure => directory, + ensure => directory, + owner => 'swift', + group => 'swift', + require => Package["swift-${name}"], } if $manage_service { @@ -63,24 +67,21 @@ } } - service { "swift-${name}": - ensure => $service_ensure, - name => inline_template("<%= scope.lookupvar('::swift::params::${name}_service_name') %>"), - enable => $enabled, - hasstatus => true, - provider => $service_provider, - subscribe => Package["swift-${name}"], - tag => 'swift-service', + swift::service { "swift-${name}-server": + os_family_service_name => getvar("::swift::params::${name}_server_service_name"), + service_ensure => $service_ensure, + enabled => $enabled, + config_file_name => $config_file_name, + service_provider => $service_provider, + subscribe => Package["swift-${name}"], } - service { "swift-${name}-replicator": - ensure => $service_ensure, - name => inline_template("<%= scope.lookupvar('::swift::params::${name}_replicator_service_name') %>"), - enable => $enabled, - hasstatus => true, - provider => $service_provider, - subscribe => Package["swift-${name}"], - tag => 'swift-service', + swift::service { "swift-${name}-replicator": + os_family_service_name => getvar("::swift::params::${name}_replicator_service_name"), + service_ensure => $service_ensure, + enabled => $enabled, + config_file_name => $config_file_name, + service_provider => $service_provider, + subscribe => Package["swift-${name}"], } - } diff --git a/swift/manifests/storage/object.pp b/swift/manifests/storage/object.pp index 12d044d88..9408236d0 100644 --- a/swift/manifests/storage/object.pp +++ b/swift/manifests/storage/object.pp @@ -2,8 +2,8 @@ # # == Parameters # [*enabled*] -# (optional) Should the service be enabled. -# Defaults to true +# (optional) Should the service be enabled to start +# at boot. Defaults to true # # [*manage_service*] # (optional) Whether the service should be managed by Puppet. 
@@ -13,23 +13,38 @@ # (optional) Value of package resource parameter 'ensure'. # Defaults to 'present'. # +# [*config_file_name*] +# (optional) The configuration file name. +# Starting at the path "/etc/swift/" +# Defaults to "object-server.conf" +# +# [*service_provider*] +# (optional) +# To use the swiftinit service provider to manage swift services, set +# service_provider to "swiftinit". When enable is true the provider +# will populate boot files that start swift using swift-init at boot. +# See README for more details. +# Defaults to $::swift::params::service_provider. +# class swift::storage::object( - $manage_service = true, - $enabled = true, - $package_ensure = 'present' -) { + $manage_service = true, + $enabled = true, + $package_ensure = 'present', + $config_file_name = 'object-server.conf', + $service_provider = $::swift::params::service_provider +) inherits ::swift::params { Swift_config<| |> ~> Service['swift-object-updater'] Swift_config<| |> ~> Service['swift-object-auditor'] swift::storage::generic { 'object': - manage_service => $manage_service, - enabled => $enabled, - package_ensure => $package_ensure, + manage_service => $manage_service, + enabled => $enabled, + package_ensure => $package_ensure, + config_file_name => $config_file_name, + service_provider => $service_provider } - include ::swift::params - if $manage_service { if $enabled { $service_ensure = 'running' @@ -38,21 +53,21 @@ } } - service { 'swift-object-updater': - ensure => $service_ensure, - name => $::swift::params::object_updater_service_name, - enable => $enabled, - provider => $::swift::params::service_provider, - require => Package['swift-object'], - tag => 'swift-service', + swift::service { 'swift-object-updater': + os_family_service_name => $::swift::params::object_updater_service_name, + service_ensure => $service_ensure, + enabled => $enabled, + config_file_name => $config_file_name, + service_provider => $service_provider, + require => Package['swift-object'], } - service { 'swift-object-auditor': - ensure => $service_ensure, - name => $::swift::params::object_auditor_service_name, - enable => $enabled, - provider => $::swift::params::service_provider, - require => Package['swift-object'], - tag => 'swift-service', + swift::service { 'swift-object-auditor': + os_family_service_name => $::swift::params::object_auditor_service_name, + service_ensure => $service_ensure, + enabled => $enabled, + config_file_name => $config_file_name, + service_provider => $service_provider, + require => Package['swift-object'], } } diff --git a/swift/manifests/storage/server.pp b/swift/manifests/storage/server.pp index d4d8fc87f..fa5f0cfd0 100644 --- a/swift/manifests/storage/server.pp +++ b/swift/manifests/storage/server.pp @@ -40,7 +40,6 @@ # *NOTE*: Recommended parameter: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r' # This mask translates to 0755 for directories and 0644 for files. # - # [*pipeline*] # (optional) Pipeline of applications. # Defaults to ["${type}-server"]. @@ -93,7 +92,7 @@ # [*log_name*] # (optional) Label used when logging. # Defaults to "${type}-server". - +# # [*log_udp_host*] # (optional) If not set, the UDP receiver for syslog is disabled. # Defaults to undef. @@ -107,9 +106,10 @@ # good for seeing errors if true # Defaults to true. # -# [*config_file_path*] -# (optional) The configuration file name. -# Defaults to "${type}-server/${name}.conf". +# [*config_file_path*] +# (optional) The configuration file name. 
+# Starting at the path "/etc/swift/" +# Defaults to "${type}-server.conf" # define swift::storage::server( $type, @@ -136,7 +136,7 @@ $log_udp_port = undef, $log_requests = true, # this parameters needs to be specified after type and name - $config_file_path = "${type}-server/${name}.conf" + $config_file_path = "${type}-server.conf", ) { if ($incoming_chmod == '0644') { @@ -169,6 +169,7 @@ } include "::swift::storage::${type}" + include ::concat::setup validate_re($name, '^\d+$') @@ -193,7 +194,7 @@ concat { "/etc/swift/${config_file_path}": owner => $owner, group => $group, - notify => Service["swift-${type}", "swift-${type}-replicator"], + notify => Service["swift-${type}-server", "swift-${type}-replicator"], require => Package['swift'], } diff --git a/swift/metadata.json b/swift/metadata.json index df0a36317..1cb32028c 100644 --- a/swift/metadata.json +++ b/swift/metadata.json @@ -1,6 +1,6 @@ { "name": "openstack-swift", - "version": "6.1.0", + "version": "7.0.0", "author": "Puppet Labs and OpenStack Contributors", "summary": "Puppet module for OpenStack Swift", "license": "Apache-2.0", @@ -32,7 +32,7 @@ "description": "Installs and configures OpenStack Swift (Object Storage).", "dependencies": [ { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=7.0.0 <8.0.0" }, { "name": "puppetlabs/rsync", "version_requirement": ">=0.2.0 <1.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, { "name": "puppetlabs/xinetd", "version_requirement": ">=1.0.1 <2.0.0" }, diff --git a/swift/spec/acceptance/basic_swift_spec.rb b/swift/spec/acceptance/basic_swift_spec.rb index acfee473b..50212b762 100644 --- a/swift/spec/acceptance/basic_swift_spec.rb +++ b/swift/spec/acceptance/basic_swift_spec.rb @@ -82,4 +82,91 @@ class { end end + + context 'Using swiftinit service provider' do + + it 'should work with no errors' do + swiftinit_pp= <<-EOS + include ::openstack_integration + include ::openstack_integration::repos + include ::openstack_integration::rabbitmq + include ::openstack_integration::mysql + include ::openstack_integration::keystone + + package { 'curl': ensure => present } + + class { '::memcached': + listen_ip => '127.0.0.1', + } + + # Swift resources + class { '::swift': + # not sure how I want to deal with this shared secret + swift_hash_suffix => 'secrete', + package_ensure => latest, + } + class { '::swift::keystone::auth': + password => 'a_big_secret', + } + # === Configure Storage + class { '::swift::storage': + storage_local_net_ip => '127.0.0.1', + } + # create xfs partitions on a loopback device and mounts them + swift::storage::loopback { '2': + require => Class['swift'], + } + # sets up storage nodes which is composed of a single + # device that contains an endpoint for an object, account, and container + swift::storage::node { '2': + mnt_base_dir => '/srv/node', + weight => 1, + manage_ring => true, + zone => '2', + storage_local_net_ip => '127.0.0.1', + require => Swift::Storage::Loopback[2] , + } + class { '::swift::storage::account': + service_provider => 'swiftinit', + } + class { '::swift::storage::container': + service_provider => 'swiftinit', + } + class { '::swift::storage::object': + service_provider => 'swiftinit', + } + class { '::swift::ringbuilder': + part_power => '18', + replicas => '1', + min_part_hours => 1, + require => Class['swift'], + } + class { '::swift::proxy': + 
proxy_local_net_ip => '127.0.0.1', + pipeline => ['healthcheck', 'cache', 'tempauth', 'proxy-server'], + account_autocreate => true, + require => Class['swift::ringbuilder'], + service_provider => 'swiftinit', + } + class { '::swift::proxy::authtoken': + admin_password => 'a_big_secret', + } + class {'::swift::objectexpirer': + interval => 600, + service_provider => 'swiftinit', + } + class { ['::swift::proxy::healthcheck', '::swift::proxy::cache', '::swift::proxy::tempauth']: } + EOS + + # Run one time to catch any errors upgrading to swiftinit service provider + apply_manifest(swiftinit_pp, :catch_failures => true) + # The second run tests idempotency + apply_manifest(swiftinit_pp, :catch_changes => true) + + end + + describe port(8080) do + it { is_expected.to be_listening.with('tcp') } + end + end end diff --git a/swift/spec/classes/swift_objectexpirer_spec.rb b/swift/spec/classes/swift_objectexpirer_spec.rb index 43d7ea34f..5c23a8ca0 100644 --- a/swift/spec/classes/swift_objectexpirer_spec.rb +++ b/swift/spec/classes/swift_objectexpirer_spec.rb @@ -85,6 +85,18 @@ it_configures 'swift-object-expirer' end + + context 'on debian using swiftinit service provider' do + before do + params.merge!({ :service_provider => 'swiftinit' }) + end + + before do + platform_params.merge!({ :service_provider => 'swiftinit' }) + end + + it_configures 'swift-object-expirer' + end end context 'on RedHat platforms' do @@ -111,5 +123,19 @@ it_configures 'swift-object-expirer' end + + context 'on redhat using swiftinit service provider' do + before do + params.merge!({ :service_provider => 'swiftinit' }) + end + + let :platform_params do + { :object_expirer_package_name => 'openstack-swift-proxy', + :service_name => 'swift-object-expirer', + :service_provider => 'swiftinit' } + end + + it_configures 'swift-object-expirer' + end end end diff --git a/swift/spec/classes/swift_proxy_ceilometer_spec.rb b/swift/spec/classes/swift_proxy_ceilometer_spec.rb index abfa628cb..a9015de69 100644 --- a/swift/spec/classes/swift_proxy_ceilometer_spec.rb +++ b/swift/spec/classes/swift_proxy_ceilometer_spec.rb @@ -22,7 +22,8 @@ class { "swift": describe "when using default parameters" do it { is_expected.to contain_file(fragment_file).with_content(/[filter:ceilometer]/) } - it { is_expected.to contain_file(fragment_file).with_content(/use = egg:ceilometer#swift/) } + it { is_expected.to contain_file(fragment_file).with_content(/paste.filter_factory = ceilometermiddleware.swift:filter_factory/) } + it { is_expected.to contain_file(fragment_file).with_content(/url = rabbit:\/\/guest:guest@127.0.0.1:5672\//) } if Puppet.version.to_f < 4.0 it { is_expected.to contain_concat__fragment('swift_ceilometer').with_require('Class[Ceilometer]')} else @@ -34,9 +35,42 @@ class { "swift": describe "when overriding default parameters" do let :params do - { :group => 'www-data' } + { :group => 'www-data', + :rabbit_user => 'user_1', + :rabbit_password => 'user_1_passw', + :rabbit_host => '1.1.1.1', + :rabbit_port => '5673', + :rabbit_virtual_host => 'rabbit', + :driver => 'messagingv2', + :topic => 'notifications', + :control_exchange => 'swift', + } end - it { is_expected.to contain_user('swift').with_groups('www-data') } + + context 'with single rabbit host' do + it { is_expected.to contain_user('swift').with_groups('www-data') } + it { is_expected.to contain_file(fragment_file).with_content(/[filter:ceilometer]/) } + it { is_expected.to contain_file(fragment_file).with_content(/paste.filter_factory = 
ceilometermiddleware.swift:filter_factory/) } + it { is_expected.to contain_file(fragment_file).with_content(/url = rabbit:\/\/user_1:user_1_passw@1.1.1.1:5673\/rabbit/) } + it { is_expected.to contain_file(fragment_file).with_content(/driver = messagingv2/) } + it { is_expected.to contain_file(fragment_file).with_content(/topic = notifications/) } + it { is_expected.to contain_file(fragment_file).with_content(/control_exchange = swift/) } + end + + context 'with multiple rabbit hosts' do + before do + params.merge!({ :rabbit_hosts => ['127.0.0.1:5672', '127.0.0.2:5672'] }) + end + + it { is_expected.to contain_user('swift').with_groups('www-data') } + it { is_expected.to contain_file(fragment_file).with_content(/[filter:ceilometer]/) } + it { is_expected.to contain_file(fragment_file).with_content(/paste.filter_factory = ceilometermiddleware.swift:filter_factory/) } + it { is_expected.to contain_file(fragment_file).with_content(/url = rabbit:\/\/user_1:user_1_passw@127.0.0.1:5672,127.0.0.2:5672\/rabbit/) } + it { is_expected.to contain_file(fragment_file).with_content(/driver = messagingv2/) } + it { is_expected.to contain_file(fragment_file).with_content(/topic = notifications/) } + it { is_expected.to contain_file(fragment_file).with_content(/control_exchange = swift/) } + end + end end diff --git a/swift/spec/classes/swift_proxy_spec.rb b/swift/spec/classes/swift_proxy_spec.rb index 23f5f5a66..c285d1879 100644 --- a/swift/spec/classes/swift_proxy_spec.rb +++ b/swift/spec/classes/swift_proxy_spec.rb @@ -37,13 +37,13 @@ class { swift: swift_hash_suffix => string }" {:proxy_local_net_ip => '127.0.0.1'} end - it { is_expected.to contain_service('swift-proxy').with( - {:ensure => 'running', - :provider => 'upstart', - :enable => true, - :hasstatus => true, - :subscribe => 'Concat[/etc/swift/proxy-server.conf]', - :tag => 'swift-service', + it { is_expected.to contain_service('swift-proxy-server').with( + {:ensure => 'running', + :provider => 'upstart', + :enable => true, + :hasstatus => true, + :subscribe => 'Concat[/etc/swift/proxy-server.conf]', + :tag => 'swift-service', } )} it { is_expected.to contain_file('/etc/swift/proxy-server.conf').with( @@ -232,7 +232,7 @@ class { swift: swift_hash_suffix => string } end end - shared_examples_for 'swift-proxy' do + shared_examples_for 'swift-proxy-server' do let :params do { :proxy_local_net_ip => '127.0.0.1' } end @@ -244,14 +244,13 @@ class { swift: swift_hash_suffix => string } params.merge!(param_hash) end - it 'configures swift-proxy service' do - is_expected.to contain_service('swift-proxy').with( - :ensure => (param_hash[:manage_service] && param_hash[:enabled]) ? 'running' : 'stopped', - :name => platform_params[:service_name], - :provider => platform_params[:service_provider], - :enable => param_hash[:enabled], - :hasstatus => true, - :subscribe => 'Concat[/etc/swift/proxy-server.conf]' + it 'configures swift-proxy-server service' do + is_expected.to contain_service('swift-proxy-server').with( + :name => platform_params['swift-proxy-server'], + :ensure => (param_hash[:manage_service] && param_hash[:enabled]) ? 
'running' : 'stopped', + :enable => param_hash[:enabled], + :provider => platform_params['service_provider'], + :tag => 'swift-service', ) end end @@ -264,14 +263,15 @@ class { swift: swift_hash_suffix => string } :enabled => false }) end - it 'configures swift-proxy service' do - is_expected.to contain_service('swift-proxy').with( - :ensure => nil, - :name => platform_params[:service_name], - :provider => platform_params[:service_provider], - :enable => false, - :hasstatus => true, - :subscribe => 'Concat[/etc/swift/proxy-server.conf]' + it 'configures swift-proxy-server service' do + + is_expected.to contain_service('swift-proxy-server').with( + :ensure => nil, + :name => platform_params['swift-proxy-server'], + :provider => platform_params['service_provider'], + :enable => false, + :hasstatus => true, + :subscribe => 'Concat[/etc/swift/proxy-server.conf]' ) end end @@ -285,11 +285,24 @@ class { swift: swift_hash_suffix => string } end let :platform_params do - { :service_name => 'swift-proxy', - :service_provider => 'upstart' } + { 'swift-proxy-server' => 'swift-proxy', + 'service_provider' => 'upstart' + } end + it_configures 'swift-proxy-server' + + context 'on Debian platforms using swiftinit service provider' do + before do + params.merge!({ :service_provider => 'swiftinit' }) + end - it_configures 'swift-proxy' + let :platform_params do + { 'swift-proxy-server' => 'swift-proxy-server', + 'service_provider' => 'swiftinit' + } + end + it_configures 'swift-proxy-server' + end end context 'on RedHat platforms' do @@ -300,10 +313,23 @@ class { swift: swift_hash_suffix => string } end let :platform_params do - { :service_name => 'openstack-swift-proxy', - :service_provider => nil } + { + 'swift-proxy-server' => 'openstack-swift-proxy', + } end + it_configures 'swift-proxy-server' - it_configures 'swift-proxy' + context 'on Redhat platforms using swiftinit service provider' do + before do + params.merge!({ :service_provider => 'swiftinit' }) + end + + let :platform_params do + { 'swift-proxy-server' => 'swift-proxy-server', + 'service_provider' => 'swiftinit' + } + end + it_configures 'swift-proxy-server' + end end end diff --git a/swift/spec/classes/swift_storage_account_spec.rb b/swift/spec/classes/swift_storage_account_spec.rb index c1473ce92..25647eb97 100644 --- a/swift/spec/classes/swift_storage_account_spec.rb +++ b/swift/spec/classes/swift_storage_account_spec.rb @@ -39,6 +39,7 @@ class { 'swift::storage': storage_local_net_ip => '10.0.0.1' }" :name => service_name, :ensure => (param_hash[:manage_service] && param_hash[:enabled]) ? 
'running' : 'stopped', :enable => param_hash[:enabled], + :provider => platform_params[:service_provider], :tag => 'swift-service', ) end @@ -75,15 +76,35 @@ class { 'swift::storage': storage_local_net_ip => '10.0.0.1' }" let :platform_params do { :service_names => { - 'swift-account' => 'swift-account', + 'swift-account-server' => 'swift-account', 'swift-account-replicator' => 'swift-account-replicator', 'swift-account-reaper' => 'swift-account-reaper', 'swift-account-auditor' => 'swift-account-auditor' - } + }, + :service_provider => 'upstart' } end it_configures 'swift-storage-account' + context 'on Debian platforms using swiftinit service provider' do + + before do + params.merge!({ :service_provider => 'swiftinit' }) + end + + let :platform_params do + { :service_names => { + 'swift-account-server' => 'swift-account-server', + 'swift-account-replicator' => 'swift-account-replicator', + 'swift-account-reaper' => 'swift-account-reaper', + 'swift-account-auditor' => 'swift-account-auditor', + }, + :service_provider => 'swiftinit' + } + end + + it_configures 'swift-storage-account' + end end context 'on RedHat platforms' do @@ -94,14 +115,34 @@ class { 'swift::storage': storage_local_net_ip => '10.0.0.1' }" let :platform_params do { :service_names => { - 'swift-account' => 'openstack-swift-account', + 'swift-account-server' => 'openstack-swift-account', 'swift-account-replicator' => 'openstack-swift-account-replicator', 'swift-account-reaper' => 'openstack-swift-account-reaper', 'swift-account-auditor' => 'openstack-swift-account-auditor' - } + }, + } end it_configures 'swift-storage-account' + context 'on redhat using swiftinit service provider' do + + before do + params.merge!({ :service_provider => 'swiftinit' }) + end + + let :platform_params do + { :service_names => { + 'swift-account-server' => 'swift-account-server', + 'swift-account-replicator' => 'swift-account-replicator', + 'swift-account-reaper' => 'swift-account-reaper', + 'swift-account-auditor' => 'swift-account-auditor', + }, + :service_provider => 'swiftinit' + } + end + + it_configures 'swift-storage-account' + end end end diff --git a/swift/spec/classes/swift_storage_all_spec.rb b/swift/spec/classes/swift_storage_all_spec.rb index 4139ee2b5..fdd6cd9d3 100644 --- a/swift/spec/classes/swift_storage_all_spec.rb +++ b/swift/spec/classes/swift_storage_all_spec.rb @@ -60,7 +60,7 @@ ['object', 'container', 'account'].each do |type| it { is_expected.to contain_package("swift-#{type}").with_ensure('present') } - it { is_expected.to contain_service("swift-#{type}").with( + it { is_expected.to contain_service("swift-#{type}-server").with( {:provider => 'upstart', :ensure => 'running', :enable => true, @@ -147,7 +147,7 @@ end ['object', 'container', 'account'].each do |type| it { is_expected.to contain_package("swift-#{type}").with_ensure('present') } - it { is_expected.to contain_service("swift-#{type}").with( + it { is_expected.to contain_service("swift-#{type}-server").with( {:provider => nil, :ensure => 'running', :enable => true, diff --git a/swift/spec/classes/swift_storage_container_spec.rb b/swift/spec/classes/swift_storage_container_spec.rb index 429e1db6e..5434b019b 100644 --- a/swift/spec/classes/swift_storage_container_spec.rb +++ b/swift/spec/classes/swift_storage_container_spec.rb @@ -36,10 +36,11 @@ class { 'swift::storage': storage_local_net_ip => '10.0.0.1' }" it 'configures services' do platform_params[:service_names].each do |service_alias, service_name| is_expected.to contain_service(service_alias).with( - :name 
=> service_name, - :ensure => (param_hash[:manage_service] && param_hash[:enabled]) ? 'running' : 'stopped', - :enable => param_hash[:enabled], - :tag => 'swift-service', + :name => service_name, + :ensure => (param_hash[:manage_service] && param_hash[:enabled]) ? 'running' : 'stopped', + :enable => param_hash[:enabled], + :provider => platform_params[:service_provider], + :tag => 'swift-service', ) end end @@ -75,29 +76,36 @@ class { 'swift::storage': storage_local_net_ip => '10.0.0.1' }" let :platform_params do { :service_names => { - 'swift-container' => 'swift-container', + 'swift-container-server' => 'swift-container', 'swift-container-replicator' => 'swift-container-replicator', 'swift-container-updater' => 'swift-container-updater', 'swift-container-auditor' => 'swift-container-auditor' - } + }, + :service_provider => 'upstart' } end it_configures 'swift-storage-container' - context 'Ubuntu specific resources' do - it 'configures sync' do - is_expected.to contain_service('swift-container-sync').with( - :ensure => 'running', - :enable => true, - :provider => 'upstart', - :require => 'File[/etc/init/swift-container-sync.conf]', - ) - is_expected.to contain_file('/etc/init/swift-container-sync.conf').with( - :source => 'puppet:///modules/swift/swift-container-sync.conf.upstart', - :require => 'Package[swift-container]' - ) + context 'on debian using swiftinit service provider' do + + before do + params.merge!({ :service_provider => 'swiftinit' }) + end + + let :platform_params do + { :service_names => { + 'swift-container-server' => 'swift-container-server', + 'swift-container-replicator' => 'swift-container-replicator', + 'swift-container-updater' => 'swift-container-updater', + 'swift-container-auditor' => 'swift-container-auditor', + 'swift-container-sync' => 'swift-container-sync' + }, + :service_provider => 'swiftinit' + } end + + it_configures 'swift-storage-container' end end @@ -109,7 +117,7 @@ class { 'swift::storage': storage_local_net_ip => '10.0.0.1' }" let :platform_params do { :service_names => { - 'swift-container' => 'openstack-swift-container', + 'swift-container-server' => 'openstack-swift-container', 'swift-container-replicator' => 'openstack-swift-container-replicator', 'swift-container-updater' => 'openstack-swift-container-updater', 'swift-container-auditor' => 'openstack-swift-container-auditor' @@ -119,23 +127,24 @@ class { 'swift::storage': storage_local_net_ip => '10.0.0.1' }" it_configures 'swift-storage-container' - context 'RedHat specific resources' do - before do - params.merge!({ :allowed_sync_hosts => ['127.0.0.1', '10.1.0.1', '10.1.0.2'] }) - end + context 'on redhat using swiftinit service provider' do - let :pre_condition do - "class { 'swift': swift_hash_suffix => 'foo' } - class { 'swift::storage::all': storage_local_net_ip => '10.0.0.1' }" + before do + params.merge!({ :service_provider => 'swiftinit' }) end - let :fragment_file do - "/var/lib/puppet/concat/_etc_swift_container-server.conf/fragments/00_swift-container-6001" + let :platform_params do + { :service_names => { + 'swift-container-server' => 'swift-container-server', + 'swift-container-replicator' => 'swift-container-replicator', + 'swift-container-updater' => 'swift-container-updater', + 'swift-container-auditor' => 'swift-container-auditor', + }, + :service_provider => 'swiftinit' + } end - it { - is_expected.to contain_file(fragment_file).with_content(/^allowed_sync_hosts = 127.0.0.1,10.1.0.1,10.1.0.2$/) - } + it_configures 'swift-storage-container' end end end diff --git 
a/swift/spec/classes/swift_storage_object_spec.rb b/swift/spec/classes/swift_storage_object_spec.rb index 7cbd1c2ae..c770b415a 100644 --- a/swift/spec/classes/swift_storage_object_spec.rb +++ b/swift/spec/classes/swift_storage_object_spec.rb @@ -37,10 +37,11 @@ class { 'swift::storage': storage_local_net_ip => '10.0.0.1' }" it 'configures services' do platform_params[:service_names].each do |service_alias, service_name| is_expected.to contain_service(service_alias).with( - :name => service_name, - :ensure => (param_hash[:manage_service] && param_hash[:enabled]) ? 'running' : 'stopped', - :enable => param_hash[:enabled], - :tag => 'swift-service', + :name => service_name, + :ensure => (param_hash[:manage_service] && param_hash[:enabled]) ? 'running' : 'stopped', + :enable => param_hash[:enabled], + :provider => platform_params[:service_provider], + :tag => 'swift-service', ) end end @@ -76,15 +77,37 @@ class { 'swift::storage': storage_local_net_ip => '10.0.0.1' }" let :platform_params do { :service_names => { - 'swift-object' => 'swift-object', + 'swift-object-server' => 'swift-object', 'swift-object-replicator' => 'swift-object-replicator', 'swift-object-updater' => 'swift-object-updater', 'swift-object-auditor' => 'swift-object-auditor' - } + }, + :service_provider => 'upstart' } end it_configures 'swift-storage-object' + context 'on debian using swiftinit service provider' do + + before do + params.merge!({ :service_provider => 'swiftinit' }) + end + + let :platform_params do + { :service_names => { + 'swift-object-server' => 'swift-object-server', + 'swift-object-replicator' => 'swift-object-replicator', + 'swift-object-updater' => 'swift-object-updater', + 'swift-object-auditor' => 'swift-object-auditor', + }, + :service_provider => 'swiftinit' + } + end + + it_configures 'swift-storage-object' + end + + end context 'on RedHat platforms' do @@ -95,7 +118,7 @@ class { 'swift::storage': storage_local_net_ip => '10.0.0.1' }" let :platform_params do { :service_names => { - 'swift-object' => 'openstack-swift-object', + 'swift-object-server' => 'openstack-swift-object', 'swift-object-replicator' => 'openstack-swift-object-replicator', 'swift-object-updater' => 'openstack-swift-object-updater', 'swift-object-auditor' => 'openstack-swift-object-auditor' @@ -104,5 +127,24 @@ class { 'swift::storage': storage_local_net_ip => '10.0.0.1' }" end it_configures 'swift-storage-object' + context 'on redhat using swiftinit service provider' do + + before do + params.merge!({ :service_provider => 'swiftinit' }) + end + + let :platform_params do + { :service_names => { + 'swift-object-server' => 'swift-object-server', + 'swift-object-replicator' => 'swift-object-replicator', + 'swift-object-updater' => 'swift-object-updater', + 'swift-object-auditor' => 'swift-object-auditor', + }, + :service_provider => 'swiftinit' + } + end + + it_configures 'swift-storage-object' + end end end diff --git a/swift/spec/defines/swift_storage_generic_spec.rb b/swift/spec/defines/swift_storage_generic_spec.rb index 3b1ef1678..59f51f50e 100644 --- a/swift/spec/defines/swift_storage_generic_spec.rb +++ b/swift/spec/defines/swift_storage_generic_spec.rb @@ -18,9 +18,10 @@ class { 'swift::storage': storage_local_net_ip => '10.0.0.1' }" end - let :default_params do - {:package_ensure => 'present', - :service_provider => 'upstart'} + let :params do + { :package_ensure => 'present', + :enabled => true, + :manage_service => true } end describe 'with an invalid title' do @@ -30,45 +31,137 @@ class { 'swift::storage': 
storage_local_net_ip => '10.0.0.1' }" it_raises 'a Puppet::Error', /does not match/ end - ['account', 'object', 'container'].each do |t| - [{}, - {:package_ensure => 'latest', - :service_provider => 'init'} - ].each do |param_set| - describe "when #{param_set == {} ? "using default" : "specifying"} class parameters" do - let :title do - t - end - let :param_hash do - default_params.merge(param_set) - end - let :params do - param_set + shared_examples_for 'swift-storage-generic' do + %w(account object container).each do |t| + [{}, + { :package_ensure => 'latest' } + ].each do |param_set| + describe "when #{param_set == {} ? 'using default' : 'specifying'} class parameters" do + before do + params.merge!(param_set) + end + + let :title do + t + end + + [{ :enabled => true, :manage_service => true }, + { :enabled => false, :manage_service => true }].each do |param_hash_manage| + context "when service is_expected.to be #{param_hash_manage[:enabled] ? 'enabled' : 'disabled'}" do + before do + params.merge!(param_hash_manage) + end + + it do + is_expected.to contain_package("swift-#{t}").with( + :ensure => params[:package_ensure], + :tag => ['openstack', 'swift-package'] + ) + end + it do + is_expected.to contain_service("swift-#{t}-server").with( + :name => platform_params["swift-#{t}-server"], + :ensure => (param_hash_manage[:manage_service] && param_hash_manage[:enabled]) ? 'running' : 'stopped', + :enable => param_hash_manage[:enabled], + :provider => platform_params['service_provider'], + :tag => 'swift-service' + ) + end + it do + is_expected.to contain_service("swift-#{t}-replicator").with( + :name => platform_params["swift-#{t}-replicator"], + :ensure => (param_hash_manage[:manage_service] && param_hash_manage[:enabled]) ? 'running' : 'stopped', + :enable => param_hash_manage[:enabled], + :provider => platform_params['service_provider'], + :tag => 'swift-service' + ) + end + it do + is_expected.to contain_file("/etc/swift/#{t}-server/").with( + :ensure => 'directory', + ) + end + end + end end - it { is_expected.to contain_package("swift-#{t}").with( - :ensure => param_hash[:package_ensure], - :tag => ['openstack', 'swift-package'], - )} - it { is_expected.to contain_service("swift-#{t}").with( - :ensure => 'running', - :enable => true, - :hasstatus => true, - :provider => param_hash[:service_provider], - :tag => 'swift-service', - )} - it { is_expected.to contain_service("swift-#{t}-replicator").with( - :ensure => 'running', - :enable => true, - :hasstatus => true, - :provider => param_hash[:service_provider], - :tag => 'swift-service', - )} - it { is_expected.to contain_file("/etc/swift/#{t}-server/").with( - :ensure => 'directory', - )} end - # TODO - I do not want to add tests for the upstart stuff - # I need to check the tickets and see if this stuff is fixed + end + end + + context 'on Debian platforms' do + let :facts do + { :operatingsystem => 'Ubuntu', + :osfamily => 'Debian' } + end + + let :platform_params do + { 'swift-account-server' => 'swift-account', + 'swift-account-replicator' => 'swift-account-replicator', + 'swift-container-server' => 'swift-container', + 'swift-container-replicator' => 'swift-container-replicator', + 'swift-object-server' => 'swift-object', + 'swift-object-replicator' => 'swift-object-replicator', + 'service_provider' => 'upstart' + } + end + + it_configures 'swift-storage-generic' + + context 'on Debian platforms using swiftinit service provider' do + before do + params.merge!(:service_provider => 'swiftinit') + end + + let :platform_params do + { 
'swift-account-server' => 'swift-account-server', + 'swift-account-replicator' => 'swift-account-replicator', + 'swift-container-server' => 'swift-container-server', + 'swift-container-replicator' => 'swift-container-replicator', + 'swift-object-server' => 'swift-object-server', + 'swift-object-replicator' => 'swift-object-replicator', + 'service_provider' => 'swiftinit' + } + end + + it_configures 'swift-storage-generic' + end + end + + context 'on Redhat platforms' do + let :facts do + { :operatingsystem => 'Redhat', + :osfamily => 'Redhat' } + end + + let :platform_params do + { 'swift-account-server' => 'openstack-swift-account', + 'swift-account-replicator' => 'openstack-swift-account-replicator', + 'swift-container-server' => 'openstack-swift-container', + 'swift-container-replicator' => 'openstack-swift-container-replicator', + 'swift-object-server' => 'openstack-swift-object', + 'swift-object-replicator' => 'openstack-swift-object-replicator' + } + end + + it_configures 'swift-storage-generic' + + context 'on Redhat platforms using swiftinit service provider' do + before do + params.merge!(:service_provider => 'swiftinit') + end + + let :platform_params do + { 'swift-account-server' => 'swift-account-server', + 'swift-account-replicator' => 'swift-account-replicator', + 'swift-container-server' => 'swift-container-server', + 'swift-container-replicator' => 'swift-container-replicator', + 'swift-object-server' => 'swift-object-server', + 'swift-object-replicator' => 'swift-object-replicator', + 'service_provider' => 'swiftinit' + } + end + + it_configures 'swift-storage-generic' end end end diff --git a/swift/spec/defines/swift_storage_server_spec.rb b/swift/spec/defines/swift_storage_server_spec.rb index c89a9b580..d3be56cf4 100644 --- a/swift/spec/defines/swift_storage_server_spec.rb +++ b/swift/spec/defines/swift_storage_server_spec.rb @@ -53,13 +53,13 @@ class { 'swift::storage': storage_local_net_ip => '10.0.0.1' }" end it { is_expected.to contain_package("swift-#{t}").with_ensure('present') } - it { is_expected.to contain_service("swift-#{t}").with( - :ensure => 'running', - :enable => true, - :hasstatus => true + it { is_expected.to contain_service("swift-#{t}-server").with( + :ensure => 'running', + :enable => true, + :hasstatus => true, )} let :fragment_file do - "/var/lib/puppet/concat/_etc_swift_#{t}-server_#{title}.conf/fragments/00_swift-#{t}-#{title}" + "/var/lib/puppet/concat/_etc_swift_#{t}-server.conf/fragments/00_swift-#{t}-#{title}" end describe 'when parameters are overridden' do @@ -141,7 +141,7 @@ class { 'swift::storage': storage_local_net_ip => '10.0.0.1' } swift_#{t}_config { 'foo/bar': value => 'foo' } " end - it { is_expected.to contain_concat("/etc/swift/#{t}-server/#{title}.conf").that_comes_before("Swift_#{t}_config[foo/bar]") } + it { is_expected.to contain_concat("/etc/swift/#{t}-server.conf").that_comes_before("Swift_#{t}_config[foo/bar]") } end describe "when log_requests is turned off" do let :params do req_params.merge({:log_requests => false}) end diff --git a/swift/spec/unit/puppet/provider/service/swiftinit_spec.rb b/swift/spec/unit/puppet/provider/service/swiftinit_spec.rb new file mode 100644 index 000000000..535652460 --- /dev/null +++ b/swift/spec/unit/puppet/provider/service/swiftinit_spec.rb @@ -0,0 +1,71 @@ +#! 
/usr/bin/env ruby +## +## Unit testing for the swiftinit service provider +## + +require 'spec_helper' + +provider_class = Puppet::Type.type(:service).provider(:swiftinit) + +describe provider_class do + + + before(:each) do + # Create a mock resource + @resource = stub 'resource' + + @provider = provider_class.new + + # A catch all; no parameters set + @resource.stubs(:[]).returns(nil) + + # But set name, source and path + @resource.stubs(:[]).with(:name).returns "swift-object-server" + @resource.stubs(:[]).with(:ensure).returns :enable + @resource.stubs(:[]).with(:pattern).returns "swift-object" + @resource.stubs(:[]).with(:manifest).returns "object-server" + @resource.stubs(:ref).returns "Service[myservice]" + + @provider.resource = @resource + + @provider.stubs(:command).with(:systemctl_run).returns "systemctl_run" + + @provider.stubs(:systemctl_run) + + end + + it "should have an status method" do + expect(@provider).to respond_to(:status) + end + + it "should have an start method" do + expect(@provider).to respond_to(:start) + end + + it "should have an stop method" do + expect(@provider).to respond_to(:stop) + end + + it "should have an restart method" do + expect(@provider).to respond_to(:restart) + end + + it "should have an refresh method" do + expect(@provider).to respond_to(:refresh) + end + + it "should have an enabled? method" do + expect(@provider).to respond_to(:enabled?) + end + + it "should have an enable method" do + expect(@provider).to respond_to(:enable) + end + + it "should have a disable method" do + expect(@provider).to respond_to(:disable) + end + +end + +##### TODO figure out how to stub out files and test each method more. diff --git a/swift/spec/unit/puppet/provider/swift_ring_builder_spec.rb b/swift/spec/unit/puppet/provider/swift_ring_builder_spec.rb index 077201841..cad198e9c 100644 --- a/swift/spec/unit/puppet/provider/swift_ring_builder_spec.rb +++ b/swift/spec/unit/puppet/provider/swift_ring_builder_spec.rb @@ -11,6 +11,45 @@ '/etc/swift/account.builder' end + it 'should be able to lookup the local ring and build an object 2.2.2+' do + File.expects(:exists?).with(builder_file_path).returns(true) + provider_class.expects(:builder_file_path).twice.returns(builder_file_path) + # Swift 1.8 output + provider_class.expects(:swift_ring_builder).returns( +'/etc/swift/account.builder, build version 3 +262144 partitions, 3 replicas, 3 zones, 3 devices, 0.00 balance +The minimum number of hours before a partition can be reassigned is 1 +The overload factor is 0.00% (0.000000) +Devices: id region zone ip address port replication ip replication port name weight partitions balance meta + 1 1 1 192.168.101.13 6002 192.168.101.13 6002 1 1.00 262144 0.00 + 2 1 2 192.168.101.14 6002 192.168.101.14 6002 1 1.00 262144 200.00 m2 + 0 1 3 192.168.101.15 6002 192.168.101.15 6002 1 1.00 262144-100.00 m2 + 3 1 1 192.168.101.16 6002 192.168.101.16 6002 1 1.00 262144-100.00 +' + ) + resources = provider_class.lookup_ring + expect(resources['192.168.101.13:6002/1']).to_not be_nil + expect(resources['192.168.101.14:6002/1']).to_not be_nil + expect(resources['192.168.101.15:6002/1']).to_not be_nil + expect(resources['192.168.101.16:6002/1']).to_not be_nil + + expect(resources['192.168.101.13:6002/1'][:id]).to eql '1' + expect(resources['192.168.101.13:6002/1'][:region]).to eql '1' + expect(resources['192.168.101.13:6002/1'][:zone]).to eql '1' + expect(resources['192.168.101.13:6002/1'][:weight]).to eql '1.00' + expect(resources['192.168.101.13:6002/1'][:partitions]).to eql '262144' + 
expect(resources['192.168.101.13:6002/1'][:balance]).to eql '0.00' + expect(resources['192.168.101.13:6002/1'][:meta]).to eql '' + + expect(resources['192.168.101.14:6002/1'][:id]).to eql '2' + expect(resources['192.168.101.14:6002/1'][:region]).to eql '1' + expect(resources['192.168.101.14:6002/1'][:zone]).to eql '2' + expect(resources['192.168.101.14:6002/1'][:weight]).to eql '1.00' + expect(resources['192.168.101.14:6002/1'][:partitions]).to eql '262144' + expect(resources['192.168.101.14:6002/1'][:balance]).to eql '200.00' + expect(resources['192.168.101.14:6002/1'][:meta]).to eql 'm2' + end + it 'should be able to lookup the local ring and build an object 1.8+' do File.expects(:exists?).with(builder_file_path).returns(true) provider_class.expects(:builder_file_path).twice.returns(builder_file_path) diff --git a/swift/templates/proxy/ceilometer.conf.erb b/swift/templates/proxy/ceilometer.conf.erb index 73a7d03d8..28fca3e5e 100644 --- a/swift/templates/proxy/ceilometer.conf.erb +++ b/swift/templates/proxy/ceilometer.conf.erb @@ -1,3 +1,18 @@ [filter:ceilometer] -use = egg:ceilometer#swift +paste.filter_factory = ceilometermiddleware.swift:filter_factory +<% if @rabbit_hosts.nil? %> +url = rabbit://<%= @rabbit_user %>:<%= @rabbit_password %>@<%= @rabbit_host %>:<%= @rabbit_port %>/<%= @rabbit_virtual_host %> +<% else %> +<% hosts=Array(@rabbit_hosts).join(",") -%> +url = rabbit://<%= @rabbit_user %>:<%= @rabbit_password %>@<%= hosts %>/<%= @rabbit_virtual_host %> +<% end %> +<% if @driver %> +driver = <%= @driver %> +<% end %> +<% if @topic %> +topic = <%= @topic %> +<% end %> +<% if @control_exchange %> +control_exchange = <%= @control_exchange %> +<% end %> diff --git a/swift/tests/site.pp b/swift/tests/site.pp index fdc498688..38fcce633 100644 --- a/swift/tests/site.pp +++ b/swift/tests/site.pp @@ -33,6 +33,24 @@ #$swift_local_net_ip = $ipaddress_eth0 $swift_local_net_ip = hiera('swift_local_net_ip', $ipaddress_eth0) +# Swift storage configurations +$rings = [ + 'account', + 'object', + 'container'] +$account_pipeline = [ + 'healthcheck', + 'recon', + 'account-server'] +$container_pipeline = [ + 'healthcheck', + 'recon', + 'container-server'] +$object_pipeline = [ + 'healthcheck', + 'recon', + 'object-server'] + #$swift_keystone_node = '172.16.0.21' $swift_keystone_node = hiera('swift_keystone_node', '172.16.0.25') #$swift_proxy_node = '172.168.0.25' @@ -121,9 +139,16 @@ require => Class['swift'], } + # configure account/container/object server middlewares + swift::storage::filter::recon { $rings: } + swift::storage::filter::healthcheck { $rings: } + # install all swift storage servers together class { '::swift::storage::all': storage_local_net_ip => $swift_local_net_ip, + object_pipeline => $object_pipeline, + container_pipeline => $container_pipeline, + account_pipeline => $account_pipeline, } # specify endpoints per device to be added to the ring specification diff --git a/tempest/CHANGELOG.md b/tempest/CHANGELOG.md index 84391c6bc..2cfb78a69 100644 --- a/tempest/CHANGELOG.md +++ b/tempest/CHANGELOG.md @@ -1,3 +1,27 @@ +##2015-11-25 - 7.0.0 +###Summary + +This is a backwards-compatible major release for OpenStack Liberty. 
+ +####Features +- allow to enable/disable Aodh service +- allow to enable/disable Trove service +- allow to enable/disable Sahara service +- add dashboard parameters +- add basic scenarios options +- allow to optionally git clone tempest +- reflect provider change in puppet-openstacklib + +####Bugfixes +- make sure neutron network is created before Tempest_neutron_net_id_setter +- glance_id_setter: execute after creating Glance image +- fix configuration for glance/neutron setters + +####Maintenance +- initial msync run for all Puppet OpenStack modules +- try to use zuul-cloner to prepare fixtures +- remove class_parameter_defaults puppet-lint check + ##2015-10-10 - 6.1.0 ###Summary diff --git a/tempest/README.markdown b/tempest/README.markdown index 7d2ad4727..42f968f00 100644 --- a/tempest/README.markdown +++ b/tempest/README.markdown @@ -1,7 +1,7 @@ Tempest ======= -6.1.0 - 2015.1 - Kilo +7.0.0 - 2015.2 - Liberty Module for installing and configuring tempest. diff --git a/tempest/lib/puppet/provider/tempest.rb b/tempest/lib/puppet/provider/tempest.rb new file mode 100644 index 000000000..97f7cb619 --- /dev/null +++ b/tempest/lib/puppet/provider/tempest.rb @@ -0,0 +1,44 @@ +require 'puppet/util/inifile' +require 'puppet/provider/openstack' +require 'puppet/provider/openstack/auth' +require 'puppet/provider/openstack/credentials' +class Puppet::Provider::Tempest < Puppet::Provider::Openstack + + extend Puppet::Provider::Openstack::Auth + + def self.tempest_file + return @tempest_file if @tempest_file + @tempest_file = Puppet::Util::IniConfig::File.new + @tempest_file.read(@file_path) + @tempest_file + end + + def self.request(service, action, properties=[], file_path) + @file_path = file_path + begin + super(service, action, properties) + rescue Puppet::Error::OpenstackAuthInputError => error + tempest_request(service, action, error, properties) + end + end + + def self.tempest_request(service, action, error, properties=nil) + @credentials.username = tempest_credentials['admin_user'] + @credentials.password = tempest_credentials['admin_password'] + @credentials.project_name = tempest_credentials['admin_tenant_name'] + @credentials.auth_url = tempest_credentials['auth_endpoint'] + raise error unless @credentials.set?
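+    # The credential fields above are read from tempest.conf's [identity]
+    # section via tempest_credentials below; once they are all present the
+    # lookup is retried through the regular openstack provider request using
+    # these explicit credentials instead of the CLI environment.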
+ Puppet::Provider::Openstack.request(service, action, properties, @credentials) + end + + def self.tempest_credentials + t = {} + t['admin_user'] = tempest_file['identity']['admin_username'] + t['admin_password'] = tempest_file['identity']['admin_password'] + t['admin_tenant_name'] = tempest_file['identity']['admin_tenant_name'] + t['auth_endpoint'] = tempest_file['identity']['uri'] + return t + end + + +end diff --git a/tempest/lib/puppet/provider/tempest_glance_id_setter/ruby.rb b/tempest/lib/puppet/provider/tempest_glance_id_setter/openstack.rb similarity index 53% rename from tempest/lib/puppet/provider/tempest_glance_id_setter/ruby.rb rename to tempest/lib/puppet/provider/tempest_glance_id_setter/openstack.rb index dc15915d8..bc43360e8 100644 --- a/tempest/lib/puppet/provider/tempest_glance_id_setter/ruby.rb +++ b/tempest/lib/puppet/provider/tempest_glance_id_setter/openstack.rb @@ -1,6 +1,11 @@ -Puppet::Type.type(:tempest_glance_id_setter).provide(:ruby) do +require File.join(File.dirname(__FILE__), '..','..','..', 'puppet/provider/tempest') - # almost entirely lifted from stdlib's file_line +Puppet::Type.type(:tempest_glance_id_setter).provide( + :openstack, + :parent => Puppet::Provider::Tempest +) do + + @credentials = Puppet::Provider::Openstack::CredentialsV2_0.new def exists? lines.find do |line| @@ -8,13 +13,32 @@ def exists? end end + def file_path + resource[:tempest_conf_path] + end + def create handle_create_with_match end + def destroy + handle_create_with_match + end + def get_image_id - @image_id ||= Puppet::Resource.indirection.find("Glance_image/#{resource[:image_name]}")[:id] - @image_id if @image_id != :absent + if resource[:ensure] == :present or resource[:ensure].nil? + if @image_id.nil? + images = self.class.request('image', 'list', file_path) + img = images.detect {|img| img[:name] == resource[:image_name]} + if img.nil? + raise(Puppet::Error, "Image #{resource[:image_name]} not found!") + end + @image_id = img[:id] + end + elsif resource[:ensure] != :absent + raise(Puppet::Error, "Cannot ensure to #{resource[:ensure]}") + end + @image_id end def should_line @@ -32,7 +56,7 @@ def handle_create_with_match() file = lines case match_count when 1 - File.open(resource[:tempest_conf_path], 'w') do |fh| + File.open(file_path, 'w') do |fh| lines.each do |l| fh.puts(regex.match(l) ? "#{should_line}" : l) end @@ -44,10 +68,10 @@ def handle_create_with_match() else file.insert(block_pos+1, "#{should_line}\n") end - File.write(resource[:tempest_conf_path], file.join) + File.write(file_path, file.join) else # cannot be negative. raise Puppet::Error, "More than one line in file \ -'#{resource[:tempest_conf_path]}' matches pattern '#{regex}'" +'#{file_path}' matches pattern '#{regex}'" end end @@ -58,7 +82,7 @@ def lines # file; for now assuming that this type is only used on # small-ish config files that can fit into memory without # too much trouble. 
- @lines ||= File.readlines(resource[:tempest_conf_path]) + @lines ||= File.readlines(file_path) end end diff --git a/tempest/lib/puppet/provider/tempest_neutron_net_id_setter/ruby.rb b/tempest/lib/puppet/provider/tempest_neutron_net_id_setter/openstack.rb similarity index 53% rename from tempest/lib/puppet/provider/tempest_neutron_net_id_setter/ruby.rb rename to tempest/lib/puppet/provider/tempest_neutron_net_id_setter/openstack.rb index 2c605d346..1a60133d5 100644 --- a/tempest/lib/puppet/provider/tempest_neutron_net_id_setter/ruby.rb +++ b/tempest/lib/puppet/provider/tempest_neutron_net_id_setter/openstack.rb @@ -1,6 +1,11 @@ -Puppet::Type.type(:tempest_neutron_net_id_setter).provide(:ruby) do +require File.join(File.dirname(__FILE__), '..','..','..', 'puppet/provider/tempest') - # almost entirely lifted from stdlib's file_line +Puppet::Type.type(:tempest_neutron_net_id_setter).provide( + :openstack, + :parent => Puppet::Provider::Tempest +) do + + @credentials = Puppet::Provider::Openstack::CredentialsV2_0.new def exists? lines.find do |line| @@ -8,13 +13,32 @@ def exists? end end + def file_path + resource[:tempest_conf_path] + end + def create handle_create_with_match end + def destroy + handle_create_with_match + end + def get_network_id - @network_id ||= Puppet::Resource.indirection.find("Neutron_network/#{@resource[:network_name]}")[:id] - @network_id if @network_id != :absent + if resource[:ensure] == :present or resource[:ensure].nil? + if @network_id.nil? + nets = self.class.request('network', 'list', file_path) + net = nets.detect {|img| img[:name] == resource[:network_name]} + if net.nil? + raise(Puppet::Error, "Network #{resource[:network_name]} not found!") + end + @network_id = net[:id] + end + elsif resource[:ensure] != :absent + raise(Puppet::Error, "Cannot ensure to #{resource[:ensure]}") + end + @network_id end def should_line @@ -31,7 +55,7 @@ def handle_create_with_match() file = lines case match_count when 1 - File.open(resource[:tempest_conf_path], 'w') do |fh| + File.open(file_path, 'w') do |fh| lines.each do |l| fh.puts(regex.match(l) ? should_line : l) end @@ -43,10 +67,10 @@ def handle_create_with_match() else file.insert(block_pos+1, "#{should_line}\n") end - File.write(resource[:tempest_conf_path], file.join) + File.write(file_path, file.join) else # cannot be negative. raise Puppet::Error, "More than one line in file \ -'#{resource[:tempest_conf_path]}' matches pattern '#{regex}'" +'#{file_path}' matches pattern '#{regex}'" end end @@ -57,7 +81,7 @@ def lines # file; for now assuming that this type is only used on # small-ish config files that can fit into memory without # too much trouble. 
- @lines ||= File.readlines(resource[:tempest_conf_path]) + @lines ||= File.readlines(file_path) end end diff --git a/tempest/manifests/init.pp b/tempest/manifests/init.pp index bbeeded02..7b8341319 100644 --- a/tempest/manifests/init.pp +++ b/tempest/manifests/init.pp @@ -111,6 +111,8 @@ # Defaults to false # [*nova_available*] # Defaults to true +# [*murano_available*] +# Defaults to false # [*sahara_available*] # Defaults to false # [*swift_available*] @@ -212,6 +214,7 @@ $horizon_available = true, $neutron_available = false, $nova_available = true, + $murano_available = false, $sahara_available = false, $swift_available = false, $trove_available = false, @@ -322,6 +325,7 @@ 'service_available/horizon': value => $horizon_available; 'service_available/neutron': value => $neutron_available; 'service_available/nova': value => $nova_available; + 'service_available/murano': value => $murano_available; 'service_available/sahara': value => $sahara_available; 'service_available/swift': value => $swift_available; 'service_available/trove': value => $trove_available; @@ -348,6 +352,7 @@ } Glance_image<||> -> Tempest_glance_id_setter['image_ref'] Tempest_config<||> -> Tempest_glance_id_setter['image_ref'] + Keystone_user_role<||> -> Tempest_glance_id_setter['image_ref'] } elsif ($image_name and $image_ref) or (! $image_name and ! $image_ref) { fail('A value for either image_name or image_ref must be provided.') } @@ -359,6 +364,7 @@ } Glance_image<||> -> Tempest_glance_id_setter['image_ref_alt'] Tempest_config<||> -> Tempest_glance_id_setter['image_ref_alt'] + Keystone_user_role<||> -> Tempest_glance_id_setter['image_ref_alt'] } elsif ($image_name_alt and $image_ref_alt) or (! $image_name_alt and ! $image_ref_alt) { fail('A value for either image_name_alt or image_ref_alt must \ be provided.') @@ -374,6 +380,7 @@ } Neutron_network<||> -> Tempest_neutron_net_id_setter['public_network_id'] Tempest_config<||> -> Tempest_neutron_net_id_setter['public_network_id'] + Keystone_user_role<||> -> Tempest_neutron_net_id_setter['public_network_id'] } elsif ($public_network_name and $public_network_id) or (! $public_network_name and ! 
$public_network_id) { fail('A value for either public_network_id or public_network_name \ must be provided.') diff --git a/tempest/metadata.json b/tempest/metadata.json index 3a306790c..54f518936 100644 --- a/tempest/metadata.json +++ b/tempest/metadata.json @@ -1,6 +1,6 @@ { "name": "openstack-tempest", - "version": "6.1.0", + "version": "7.0.0", "author": "OpenStack Contributors", "summary": "Puppet module for OpenStack Tempest", "license": "Apache-2.0", @@ -33,6 +33,7 @@ "dependencies": [ { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { "name": "puppetlabs/vcsrepo", "version_requirement": ">=0.1.2 <2.0.0"} + { "name": "puppetlabs/vcsrepo", "version_requirement": ">=0.1.2 <2.0.0"}, + { "name": "openstack/openstacklib", "version_requirement": ">= 7.0.0 <8.0.0" } ] } diff --git a/tempest/spec/classes/tempest_spec.rb b/tempest/spec/classes/tempest_spec.rb index b8c6ecc3f..e4853859d 100644 --- a/tempest/spec/classes/tempest_spec.rb +++ b/tempest/spec/classes/tempest_spec.rb @@ -190,6 +190,7 @@ class { 'neutron': rabbit_password => 'passw0rd' }" is_expected.to contain_tempest_config('service_available/neutron').with(:value => true) is_expected.to contain_tempest_config('service_available/nova').with(:value => true) is_expected.to contain_tempest_config('service_available/sahara').with(:value => false) + is_expected.to contain_tempest_config('service_available/murano').with(:value => false) is_expected.to contain_tempest_config('service_available/swift').with(:value => false) is_expected.to contain_tempest_config('service_available/trove').with(:value => false) is_expected.to contain_tempest_config('whitebox/db_uri').with(:value => nil) diff --git a/tempest/spec/unit/puppet/provider/ruby_spec.rb b/tempest/spec/unit/puppet/provider/ruby_spec.rb index dbf8ae979..35444beca 100644 --- a/tempest/spec/unit/puppet/provider/ruby_spec.rb +++ b/tempest/spec/unit/puppet/provider/ruby_spec.rb @@ -3,9 +3,9 @@ describe 'Providers' do glance_provider_class = - Puppet::Type.type(:tempest_glance_id_setter).provider(:ruby) + Puppet::Type.type(:tempest_glance_id_setter).provider(:openstack) network_provider_class = - Puppet::Type.type(:tempest_neutron_net_id_setter).provider(:ruby) + Puppet::Type.type(:tempest_neutron_net_id_setter).provider(:openstack) include PuppetlabsSpec::Files let(:tmpfile) { tmpfilename('ini_setting_test') } diff --git a/tripleo/.fixtures.yml b/tripleo/.fixtures.yml index e3ab8f9cd..69512da84 100644 --- a/tripleo/.fixtures.yml +++ b/tripleo/.fixtures.yml @@ -2,5 +2,26 @@ fixtures: repositories: 'firewall': 'git://github.com/puppetlabs/puppetlabs-firewall.git' 'stdlib': 'git://github.com/puppetlabs/puppetlabs-stdlib.git' + 'midonet': + repo: 'git://github.com/midonet/puppet-midonet.git' + ref: 'v2015.06.7' + 'tomcat': + repo: 'git://github.com/puppetlabs/puppetlabs-tomcat.git' + ref: '1.3.2' + 'inifile': + repo: 'git://github.com/puppetlabs/puppetlabs-inifile.git' + ref: '1.4.2' + 'cassandra': + repo: 'git://github.com/locp/cassandra.git' + ref: '1.9.2' + 'zookeeper': + repo: 'git://github.com/deric/puppet-zookeeper.git' + ref: 'v0.3.9' + 'datacat': + repo: 'git://github.com/richardc/puppet-datacat' + ref: '0.6.2' + 'java': + repo: 'git://github.com/puppetlabs/puppetlabs-java' + ref: '1.4.2' symlinks: "tripleo": "#{source_dir}" diff --git a/tripleo/Gemfile b/tripleo/Gemfile index 6d4ce9a07..91c5f0a3e 100644 --- a/tripleo/Gemfile +++ b/tripleo/Gemfile @@ -14,6 +14,10 @@ group 
:development, :test do gem 'puppet-lint-numericvariable', :require => 'false' gem 'json', :require => 'false' gem 'webmock', :require => 'false' + # adding 'psych' explicitly + # https://github.com/bundler/bundler/issues/2068 + # TODO: drop it in a future release of 'bundle'. + gem 'psych', :require => 'false' end group :system_tests do diff --git a/tripleo/lib/puppet/parser/functions/extract_id.rb b/tripleo/lib/puppet/parser/functions/extract_id.rb new file mode 100644 index 000000000..61734abfa --- /dev/null +++ b/tripleo/lib/puppet/parser/functions/extract_id.rb @@ -0,0 +1,14 @@ +# Custom function to extract the index from a list. +# The list is a list of hostnames, and the index is the n'th +# position of the host in the list +module Puppet::Parser::Functions + newfunction(:extract_id, :type => :rvalue) do |argv| + hosts = argv[0] + if hosts.class != Array + hosts = [hosts] + end + hostname = argv[1] + hash = Hash[hosts.map.with_index.to_a] + return hash[hostname].to_i + 1 + end +end diff --git a/tripleo/lib/puppet/parser/functions/list_to_zookeeper_hash.rb b/tripleo/lib/puppet/parser/functions/list_to_zookeeper_hash.rb new file mode 100644 index 000000000..814326e84 --- /dev/null +++ b/tripleo/lib/puppet/parser/functions/list_to_zookeeper_hash.rb @@ -0,0 +1,24 @@ +# Custom function to convert a list of ips to a map +# like {'ip' => xxx.xxx.xxx.xxx }. This function is needed +# because of a not-so-good design of the puppet-midonet module, +# and we hope to deprecate it soon. + +module Puppet::Parser::Functions + newfunction(:list_to_zookeeper_hash, :type => :rvalue, :doc => <<-EOS + This function returns a Zookeeper configuration list of hashes + EOS + ) do |argv| + zk_list = argv[0] + if zk_list.class != Array + zk_list = [zk_list] + end + result = Array.new + zk_list.each do |zk_ip| + zk_map = Hash.new + zk_map['ip'] = zk_ip + zk_map['port'] = 2181 + result.push(zk_map) + end + return result + end +end diff --git a/tripleo/manifests/cluster/cassandra.pp b/tripleo/manifests/cluster/cassandra.pp new file mode 100644 index 000000000..b20926bed --- /dev/null +++ b/tripleo/manifests/cluster/cassandra.pp @@ -0,0 +1,73 @@ +# +# Copyright (C) 2015 Midokura SARL +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# == Class: tripleo::cluster::cassandra +# +# Deploys a cassandra service that belongs to a cluster. Uses puppet-cassandra +# +# == Parameters: +# +# [*cassandra_servers*] +# (required) All the IP addresses of the cassandra cluster. +# Array of strings value. +# +# [*cassandra_ip*] +# (required) IP address of the current host. +# String value +# +# [*storage_port*] +# (optional) Inter-node cluster communication port. +# Defaults to 7000. +# +# [*ssl_storage_port*] +# (optional) SSL Inter-node cluster communication port. +# Defaults to 7001. +# +# [*client_port*] +# (optional) Cassandra client port. +# Defaults to 9042. +# +# [*client_port_thrift*] +# (optional) Cassandra client port thrift. +# Defaults to 9160.
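Stepping back to the two parser functions added above: extract_id computes a 1-based server id from a node's position in a host list, and list_to_zookeeper_hash expands a flat IP list into the structure puppet-midonet expects. A minimal, hypothetical manifest sketch of how they might be called (host names and IPs reuse the placeholder values from the unit tests further below and are not configuration shipped by this change):
```
# Illustrative only: placeholder host names and IPs.
$zk_hostnames = ['host1.midonet', 'host2.midonet', 'host3.midonet']
$zk_ips       = ['23.43.2.34', '23.43.2.35', '24.43.2.36']

# 1-based position of this node in the host list, usable as the zookeeper id.
$zk_id = extract_id($zk_hostnames, $::hostname)

# Expands the flat IP list into [{'ip' => ..., 'port' => 2181}, ...],
# the shape the puppet-midonet classes expect for zk_servers.
$zk_servers = list_to_zookeeper_hash($zk_ips)

notify { "node ${::hostname} gets zookeeper id ${zk_id}": }
```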
+# +class tripleo::cluster::cassandra( + $cassandra_servers, + $cassandra_ip, + $storage_port = '7000', + $ssl_storage_port = '7001', + $client_port = '9042', + $client_port_thrift = '9160' +) +{ + + # TODO: Remove this comment once we can guarantee that all the distros + # deploying TripleO use Puppet > 3.7 because of this bug: + # https://tickets.puppetlabs.com/browse/PUP-1299 + # + # validate_array($cassandra_servers) + validate_ipv4_address($cassandra_ip) + + class {'::cassandra': + cluster_name => 'TripleO', + seeds => $cassandra_servers, + listen_address => $cassandra_ip, + storage_port => $storage_port, + ssl_storage_port => $ssl_storage_port, + native_transport_port => $client_port, + rpc_port => $client_port_thrift + } + +} diff --git a/tripleo/manifests/cluster/zookeeper.pp b/tripleo/manifests/cluster/zookeeper.pp new file mode 100644 index 000000000..82d21eefe --- /dev/null +++ b/tripleo/manifests/cluster/zookeeper.pp @@ -0,0 +1,69 @@ +# +# Copyright (C) 2015 Midokura SARL +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# == Class: tripleo::cluster::zookeeper +# +# Deploys a zookeeper service that belongs to a cluster. Uses deric-zookeeper +# +# == Parameters: +# +# [*zookeeper_server_ips*] +# (required) List of IP addresses of the zookeeper cluster. +# Array of strings value. +# +# [*zookeeper_client_ip*] +# (required) IP address of the host on which zookeeper will listen. +# String (IPv4) value. +# +# [*zookeeper_hostnames*] +# (required) List of hostnames of the zookeeper cluster. The hostname of the +# node will be used to define the ID of the zookeeper configuration. +# Array of strings value. +# + +class tripleo::cluster::zookeeper( + $zookeeper_server_ips, + $zookeeper_client_ip, + $zookeeper_hostnames +) +{ + # TODO: Remove comments below once we can guarantee that all the distros + # deploying TripleO use Puppet > 3.7 because of this bug: + # https://tickets.puppetlabs.com/browse/PUP-1299 + + # validate_array($zookeeper_server_ips) + validate_ipv4_address($zookeeper_client_ip) + # validate_array($zookeeper_hostnames) + + # TODO(devvesa) Zookeeper package should provide these paths, + # remove these lines as soon as it does. + file {['/usr/lib', '/usr/lib/zookeeper', '/usr/lib/zookeeper/bin/']: + ensure => directory + } + + file {'/usr/lib/zookeeper/bin/zkEnv.sh': + ensure => link, + target => '/usr/libexec/zkEnv.sh' + } + + class {'::zookeeper': + servers => $zookeeper_server_ips, + client_ip => $zookeeper_client_ip, + id => extract_id($zookeeper_hostnames, $::hostname), + cfg_dir => '/etc/zookeeper/conf', + } + + File['/usr/lib/zookeeper/bin/zkEnv.sh'] -> Class['::zookeeper'] +} diff --git a/tripleo/manifests/loadbalancer.pp b/tripleo/manifests/loadbalancer.pp index 16a4f80a9..6306f6114 100644 --- a/tripleo/manifests/loadbalancer.pp +++ b/tripleo/manifests/loadbalancer.pp @@ -130,6 +130,11 @@ # When set, enables SSL on the Ceilometer public API endpoint using the specified file.
# Defaults to undef # +# [*aodh_certificate*] +# Filename of an HAProxy-compatible certificate and key file +# When set, enables SSL on the Aodh public API endpoint using the specified file. +# Defaults to undef +# # [*swift_certificate*] # Filename of an HAProxy-compatible certificate and key file # When set, enables SSL on the Swift public API endpoint using the specified file. @@ -198,6 +203,10 @@ # (optional) Enable or not Ceilometer API binding # Defaults to false # +# [*aodh*] +# (optional) Enable or not Aodh API binding +# Defaults to false +# # [*swift_proxy_server*] # (optional) Enable or not Swift API binding # Defaults to false @@ -238,6 +247,10 @@ # (optional) Enable or not Redis binding # Defaults to false # +# [*midonet_api*] +# (optional) Enable or not MidoNet API binding +# Defaults to false +# class tripleo::loadbalancer ( $controller_virtual_ip, $control_virtual_interface, @@ -262,6 +275,7 @@ $glance_certificate = undef, $nova_certificate = undef, $ceilometer_certificate = undef, + $aodh_certificate = undef, $swift_certificate = undef, $heat_certificate = undef, $horizon_certificate = undef, @@ -278,6 +292,7 @@ $nova_metadata = false, $nova_novncproxy = false, $ceilometer = false, + $aodh = false, $swift_proxy_server = false, $heat_api = false, $heat_cloudwatch = false, @@ -288,6 +303,7 @@ $mysql_clustercheck = false, $rabbitmq = false, $redis = false, + $midonet_api = false, ) { if !$controller_host and !$controller_hosts { @@ -421,6 +437,11 @@ } else { $ceilometer_bind_certificate = $service_certificate } + if $aodh_certificate { + $aodh_bind_certificate = $aodh_certificate + } else { + $aodh_bind_certificate = $service_certificate + } if $swift_certificate { $swift_bind_certificate = $swift_certificate } else { @@ -558,6 +579,19 @@ } } + $aodh_api_vip = hiera('aodh_api_vip', $controller_virtual_ip) + if $aodh_bind_certificate { + $aodh_bind_opts = { + "${aodh_api_vip}:8042" => [], + "${public_virtual_ip}:13042" => ['ssl', 'crt', $aodh_bind_certificate], + } + } else { + $aodh_bind_opts = { + "${aodh_api_vip}:8042" => [], + "${public_virtual_ip}:8042" => [], + } + } + $swift_proxy_vip = hiera('swift_proxy_vip', $controller_virtual_ip) if $swift_bind_certificate { $swift_bind_opts = { @@ -814,6 +848,7 @@ bind => $nova_novnc_bind_opts, options => { 'balance' => 'source', + 'timeout' => [ 'tunnel 1h' ], }, collect_exported => false, } @@ -840,6 +875,20 @@ } } + if $aodh { + haproxy::listen { 'aodh': + bind => $aodh_bind_opts, + collect_exported => false, + } + haproxy::balancermember { 'aodh': + listening_service => 'aodh', + ports => '8042', + ipaddresses => hiera('aodh_api_node_ips', $controller_hosts_real), + server_names => $controller_hosts_names_real, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } + if $swift_proxy_server { haproxy::listen { 'swift_proxy_server': bind => $swift_bind_opts, @@ -1001,4 +1050,23 @@ } } + $midonet_api_vip = hiera('midonet_api_vip', $controller_virtual_ip) + $midonet_bind_opts = { + "${midonet_api_vip}:8081" => [], + "${public_virtual_ip}:8081" => [], + } + + if $midonet_api { + haproxy::listen { 'midonet_api': + bind => $midonet_bind_opts, + collect_exported => false, + } + haproxy::balancermember { 'midonet_api': + listening_service => 'midonet_api', + ports => '8081', + ipaddresses => hiera('midonet_api_node_ips', $controller_hosts_real), + server_names => $controller_hosts_names_real, + options => ['check', 'inter 2000', 'rise 2', 'fall 5'], + } + } } diff --git a/tripleo/manifests/network/midonet/agent.pp 
b/tripleo/manifests/network/midonet/agent.pp new file mode 100644 index 000000000..0e6528255 --- /dev/null +++ b/tripleo/manifests/network/midonet/agent.pp @@ -0,0 +1,66 @@ +# +# Copyright (C) 2015 Midokura SARL +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# == Class: tripleo::network::midonet::agent +# +# Configure the midonet agent +# +# == Parameters: +# +# [*zookeeper_servers*] +# (required) List of IPs of the zookeeper server cluster. It will configure +# the connection using the 2181 port. +# Array of strings value. +# +# [*cassandra_seeds*] +# (required) List of IPs of the cassandra cluster. +# Array of strings value. +# +class tripleo::network::midonet::agent ( + $zookeeper_servers, + $cassandra_seeds +) { + + # TODO: Remove comments below once we can guarantee that all the distros + # deploying TripleO use Puppet > 3.7 because of this bug: + # https://tickets.puppetlabs.com/browse/PUP-1299 + + # validate_array($zookeeper_servers) + # validate_array($cassandra_seeds) + + + # FIXME: This statement should be controlled by hiera on heat templates + # project + # Make sure openvswitch service is not running + service {'openvswitch': + ensure => stopped, + enable => false + } + + exec {'delete datapaths': + command => '/usr/bin/mm-dpctl --delete-dp ovs-system', + path => '/usr/bin:/usr/sbin:/bin', + onlyif => '/usr/bin/mm-dpctl --show-dp ovs-system' + } + + # Configure and run the agent + class {'::midonet::midonet_agent': + zk_servers => list_to_zookeeper_hash($zookeeper_servers), + cassandra_seeds => $cassandra_seeds + } + + Service['openvswitch'] -> Class['::midonet::midonet_agent::run'] + Exec['delete datapaths'] -> Class['::midonet::midonet_agent::run'] +} diff --git a/tripleo/manifests/network/midonet/api.pp b/tripleo/manifests/network/midonet/api.pp new file mode 100644 index 000000000..83efd2c74 --- /dev/null +++ b/tripleo/manifests/network/midonet/api.pp @@ -0,0 +1,122 @@ +# +# Copyright (C) 2015 Midokura SARL +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# == Class: tripleo::network::midonet::api +# +# Configure the MidoNet API +# +# == Parameters: +# +# [*zookeeper_servers*] +# (required) List IPs of the zookeeper server cluster. Zookeeper is the +# backend database where MidoNet stores the virtual network topology. +# Array of strings value. +# +# [*vip*] +# (required) Public Virtual IP where the API will be exposed. +# String (IPv4) value. +# +# [*keystone_ip*] +# (required) MidoNet API is registered as an OpenStack service. Provide the +# keystone ip address. 
+# String (IPv4) value. +# +# [*keystone_admin_token*] +# (required) MidoNet API is registered as an OpenStack service. It needs the +# keystone admin token to perform some admin calls. +# String value. +# +# [*bind_address*] +# (required) MidoNet API uses a Tomcat instance to offer the REST service. The +# ip address where to bind the tomcat service. +# String (IPv4) value. +# +# [*admin_password*] +# (required) OpenStack admin user password. +# String value. +# +# [*keystone_port*] +# (optional) MidoNet API is registered as an OpenStack service. Provide +# the keystone port. +# Defaults to 35357 +# +# [*keystone_tenant_name*] +# (optional) Tenant of the keystone service. +# Defaults to 'admin' +# +# [*admin_user_name*] +# (optional) OpenStack admin user name. +# Defaults to 'admin' +# +# [*admin_tenant_name*] +# (optional). OpenStack admin tenant name. +# Defaults to 'admin' +# + +class tripleo::network::midonet::api( + $zookeeper_servers, + $vip, + $keystone_ip, + $keystone_admin_token, + $bind_address, + $admin_password, + $keystone_port = 35357, + $keystone_tenant_name = 'admin', + $admin_user_name = 'admin', + $admin_tenant_name = 'admin' +) +{ + + # TODO: Remove this comment once we can guarantee that all the distros + # deploying TripleO use Puppet > 3.7 because of this bug: + # https://tickets.puppetlabs.com/browse/PUP-1299 + + # validate_array($zookeeper_servers) + validate_ip_address($vip) + validate_ip_address($keystone_ip) + validate_ip_address($bind_address) + + # Run Tomcat and MidoNet API + class {'::tomcat': + install_from_source => false + } -> + + package {'midonet-api': + ensure => present + } -> + + class {'::midonet::midonet_api::run': + zk_servers => list_to_zookeeper_hash($zookeeper_servers), + keystone_auth => true, + tomcat_package => 'tomcat', + vtep => false, + api_ip => $vip, + api_port => '8081', + keystone_host => $keystone_ip, + keystone_port => $keystone_port, + keystone_admin_token => $keystone_admin_token, + keystone_tenant_name => $keystone_tenant_name, + catalina_base => '/usr/share/tomcat', + bind_address => $bind_address + } + + # Configure the CLI + class {'::midonet::midonet_cli': + api_endpoint => "http://${vip}:8081/midonet-api", + username => $admin_user_name, + password => $admin_password, + tenant_name => $admin_tenant_name + } +} diff --git a/tripleo/spec/classes/tripleo_cluster_cassandra_spec.rb b/tripleo/spec/classes/tripleo_cluster_cassandra_spec.rb new file mode 100644 index 000000000..13be98ecb --- /dev/null +++ b/tripleo/spec/classes/tripleo_cluster_cassandra_spec.rb @@ -0,0 +1,54 @@ +# +# Copyright (C) 2015 Midokura SARL +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
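For orientation, the two new MidoNet classes defined above might be composed on a controller node roughly as follows. This is an illustrative sketch only; every address, token and password is a placeholder taken from the unit tests below, not configuration shipped by this change.
```
# Illustrative wiring of the new MidoNet manifests; all values are placeholders.
class { '::tripleo::network::midonet::api':
  zookeeper_servers    => ['192.168.2.1', '192.168.2.2'],
  vip                  => '192.23.0.2',
  keystone_ip          => '192.23.0.2',
  keystone_admin_token => 'admin_token',
  admin_password       => 'admin_password',
  bind_address         => '192.23.0.65',
}

class { '::tripleo::network::midonet::agent':
  zookeeper_servers => ['192.168.2.2', '192.168.2.3'],
  cassandra_seeds   => ['192.168.2.2', '192.168.2.3'],
}
```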
+# +# Unit tests for the cassandra service + +require 'spec_helper' + +describe 'tripleo::cluster::cassandra' do + + shared_examples_for 'cassandra cluster service' do + + let :facts do + { + :hostname => 'host1.midonet', + :osfamily => 'RedHat', + :operatingsystemmajrelease => 7, + } + end + + let :params do + { + :cassandra_servers => ['192.168.2.2', '192.168.2.3'], + :cassandra_ip => '192.168.2.2' + } + end + + it 'should configure cassandra' do + is_expected.to contain_class('cassandra').with( + :seeds => ['192.168.2.2', '192.168.2.3'], + :listen_address => '192.168.2.2', + :storage_port => 7000, + :ssl_storage_port => 7001, + :native_transport_port => 9042, + :rpc_port => 9160 + ) + + end + end + + it_configures 'cassandra cluster service' + +end diff --git a/tripleo/spec/classes/tripleo_cluster_zookeeper_spec.rb b/tripleo/spec/classes/tripleo_cluster_zookeeper_spec.rb new file mode 100644 index 000000000..ed46164a4 --- /dev/null +++ b/tripleo/spec/classes/tripleo_cluster_zookeeper_spec.rb @@ -0,0 +1,115 @@ +# +# Copyright (C) 2015 Midokura SARL +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Unit tests for the zookeeper service + +require 'spec_helper' + +describe 'tripleo::cluster::zookeeper' do + + let :default_params do + { + :zookeeper_server_ips => ['23.43.2.34', '23.43.2.35', '24.43.2.36'], + :zookeeper_hostnames => ['host1.midonet', 'host2.midonet', 'host3.midonet'] + } + end + + context 'on host1' do + let :facts do + { + :hostname => 'host1.midonet', + :osfamily => 'RedHat', + :operatingsystemmajrelease => 7, + } + end + + let :params do + { + :zookeeper_client_ip => '23.43.2.34' + } + end + + before do + params.merge!(default_params) + end + + it 'should call zookeeper using id==1' do + is_expected.to contain_class('zookeeper').with( + :servers => ['23.43.2.34', '23.43.2.35', '24.43.2.36'], + :client_ip => '23.43.2.34', + :id => 1 + ) + end + + end + + context 'on host2' do + let :facts do + { + :hostname => 'host2.midonet', + :osfamily => 'RedHat', + :operatingsystemmajrelease => 7, + } + end + + let :params do + { + :zookeeper_client_ip => '23.43.2.35' + } + end + + before do + params.merge!(default_params) + end + + it 'should call zookeeper using id==1' do + is_expected.to contain_class('zookeeper').with( + :servers => ['23.43.2.34', '23.43.2.35', '24.43.2.36'], + :client_ip => '23.43.2.35', + :id => 2 + ) + end + end + + context 'on host3' do + let :facts do + { + :hostname => 'host3.midonet', + :osfamily => 'RedHat', + :operatingsystemmajrelease => 7, + } + end + + let :params do + { + :zookeeper_client_ip => '23.43.2.36' + } + end + + before do + params.merge!(default_params) + end + + it 'should call zookeeper using id==1' do + is_expected.to contain_class('zookeeper').with( + :servers => ['23.43.2.34', '23.43.2.35', '24.43.2.36'], + :client_ip => '23.43.2.36', + :id => 3 + ) + end + + end + +end diff --git a/tripleo/spec/classes/tripleo_firewall_spec.rb b/tripleo/spec/classes/tripleo_firewall_spec.rb index c1249b9fa..7d1d1ecb8 100644 --- 
a/tripleo/spec/classes/tripleo_firewall_spec.rb +++ b/tripleo/spec/classes/tripleo_firewall_spec.rb @@ -105,7 +105,10 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + { + :osfamily => 'RedHat', + :operatingsystemrelease => '7.1', + } end it_configures 'tripleo node' diff --git a/tripleo/spec/classes/tripleo_midonet_agent_spec.rb b/tripleo/spec/classes/tripleo_midonet_agent_spec.rb new file mode 100644 index 000000000..eb3abfe85 --- /dev/null +++ b/tripleo/spec/classes/tripleo_midonet_agent_spec.rb @@ -0,0 +1,62 @@ +# +# Copyright (C) 2015 Midokura SARL +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Unit tests for the midonet agent + +require 'spec_helper' + +describe 'tripleo::network::midonet::agent' do + + let :facts do + { + :hostname => 'host2.midonet', + :osfamily => 'RedHat', + :operatingsystem => 'CentOS', + :operatingsystemrelease => '7.1', + :operatingsystemmajrelease => 7, + } + end + + shared_examples_for 'midonet agent test' do + + let :params do + { + :zookeeper_servers => ['192.168.2.2', '192.168.2.3'], + :cassandra_seeds => ['192.168.2.2', '192.168.2.3'] + } + end + + it 'should stop openvswitch' do + is_expected.to contain_service('openvswitch').with( + :ensure => 'stopped', + :enable => false + ) + end + + it 'should run the agent with a list of maps' do + is_expected.to contain_class('midonet::midonet_agent').with( + :zk_servers => [{'ip' => '192.168.2.2', + 'port' => 2181}, + {'ip' => '192.168.2.3', + 'port' => 2181}], + :cassandra_seeds => ['192.168.2.2','192.168.2.3'] + ) + end + end + + it_configures 'midonet agent test' + + +end diff --git a/tripleo/spec/classes/tripleo_midonet_api_spec.rb b/tripleo/spec/classes/tripleo_midonet_api_spec.rb new file mode 100644 index 000000000..4b4729494 --- /dev/null +++ b/tripleo/spec/classes/tripleo_midonet_api_spec.rb @@ -0,0 +1,72 @@ +# +# Copyright (C) 2015 Midokura SARL +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# Unit tests for the midonet api + +require 'spec_helper' + +describe 'tripleo::network::midonet::api' do + + let :facts do + { + :augeasversion => '1.0.0' + } + end + + shared_examples_for 'midonet api test' do + + let :params do + { + :zookeeper_servers => ['192.168.2.1', '192.168.2.2'], + :vip => '192.23.0.2', + :keystone_ip => '192.23.0.2', + :keystone_admin_token => 'admin_token', + :admin_password => 'admin_password', + :bind_address => '192.23.0.65' + } + end + + it 'should call api configuration' do + is_expected.to contain_class('midonet::midonet_api::run').with( + :zk_servers => [{'ip' => '192.168.2.1', 'port' => 2181}, + {'ip' => '192.168.2.2', 'port' => 2181}], + :keystone_auth => true, + :tomcat_package => 'tomcat', + :vtep => false, + :api_ip => '192.23.0.2', + :api_port => '8081', + :keystone_host => '192.23.0.2', + :keystone_port => 35357, + :keystone_admin_token => 'admin_token', + :keystone_tenant_name => 'admin', + :catalina_base => '/usr/share/tomcat', + :bind_address => '192.23.0.65' + ) + end + + it 'should install the cli' do + is_expected.to contain_class('midonet::midonet_cli').with( + :api_endpoint => 'http://192.23.0.2:8081/midonet-api', + :username => 'admin', + :password => 'admin_password', + :tenant_name => 'admin' + ) + end + + end + + it_configures 'midonet api test' + +end diff --git a/tripleo/spec/spec_helper.rb b/tripleo/spec/spec_helper.rb index 700be6a27..5cf9642eb 100644 --- a/tripleo/spec/spec_helper.rb +++ b/tripleo/spec/spec_helper.rb @@ -2,9 +2,13 @@ require 'shared_examples' require 'webmock/rspec' +fixture_path = File.expand_path(File.join(__FILE__, '..', 'fixtures')) + RSpec.configure do |c| c.alias_it_should_behave_like_to :it_configures, 'configures' c.alias_it_should_behave_like_to :it_raises, 'raises' + c.module_path = File.join(fixture_path, 'modules') + c.manifest_dir = File.join(fixture_path, 'manifests') c.default_facts = { :kernel => 'Linux', diff --git a/uchiwa/.gitignore b/uchiwa/.gitignore index 5104b2209..ef7293f7c 100755 --- a/uchiwa/.gitignore +++ b/uchiwa/.gitignore @@ -4,3 +4,4 @@ pkg spec/fixtures .rspec_system .vagrant +.bundle diff --git a/uchiwa/CHANGELOG.md b/uchiwa/CHANGELOG.md new file mode 100644 index 000000000..13ae84a72 --- /dev/null +++ b/uchiwa/CHANGELOG.md @@ -0,0 +1,117 @@ +# Change Log + +## [v1.0.1](https://github.com/Yelp/puppet-uchiwa/tree/v1.0.1) (2015-12-07) +[Full Changelog](https://github.com/Yelp/puppet-uchiwa/compare/v1.0.0...v1.0.1) + +**Closed issues:** + +- Changelog [\#30](https://github.com/Yelp/puppet-uchiwa/issues/30) + +**Merged pull requests:** + +- Update apt key id [\#62](https://github.com/Yelp/puppet-uchiwa/pull/62) ([queeno](https://github.com/queeno)) + +## [v1.0.0](https://github.com/Yelp/puppet-uchiwa/tree/v1.0.0) (2015-12-04) +[Full Changelog](https://github.com/Yelp/puppet-uchiwa/compare/0.3.0...v1.0.0) + +**Closed issues:** + +- Support the users attribute [\#46](https://github.com/Yelp/puppet-uchiwa/issues/46) +- Duplicate Apt Resource [\#36](https://github.com/Yelp/puppet-uchiwa/issues/36) +- JSON acceptance tests are failing [\#33](https://github.com/Yelp/puppet-uchiwa/issues/33) +- New puppet-uchiwa release? [\#25](https://github.com/Yelp/puppet-uchiwa/issues/25) + +**Merged pull requests:** + +- Introducing users param in puppet-uchiwa [\#61](https://github.com/Yelp/puppet-uchiwa/pull/61) ([queeno](https://github.com/queeno)) +- Update sensu apt repo. 
[\#59](https://github.com/Yelp/puppet-uchiwa/pull/59) ([liamjbennett](https://github.com/liamjbennett)) +- Update apt::source to use puppetlabs-apt 2.0.0+ [\#56](https://github.com/Yelp/puppet-uchiwa/pull/56) ([zxjinn](https://github.com/zxjinn)) +- add a test for sensu\_api\_endpoints param [\#54](https://github.com/Yelp/puppet-uchiwa/pull/54) ([somic](https://github.com/somic)) +- Changed example code from manage\_repo to install\_repo [\#52](https://github.com/Yelp/puppet-uchiwa/pull/52) ([standaloneSA](https://github.com/standaloneSA)) +- Try to cleanup doc and un-nest api definitions [\#51](https://github.com/Yelp/puppet-uchiwa/pull/51) ([solarkennedy](https://github.com/solarkennedy)) +- Properly type port number [\#50](https://github.com/Yelp/puppet-uchiwa/pull/50) ([DoriftoShoes](https://github.com/DoriftoShoes)) +- fix lint [\#44](https://github.com/Yelp/puppet-uchiwa/pull/44) ([bleuchtang](https://github.com/bleuchtang)) +- Fix warning when installing sensu apt repository [\#41](https://github.com/Yelp/puppet-uchiwa/pull/41) ([KoeSystems](https://github.com/KoeSystems)) +- apt module 2.0.0 breaks apt::source extensively [\#39](https://github.com/Yelp/puppet-uchiwa/pull/39) ([bobtfish](https://github.com/bobtfish)) +- Fixing Erroneous validations [\#37](https://github.com/Yelp/puppet-uchiwa/pull/37) ([magmax](https://github.com/magmax)) +- Fix tests [\#35](https://github.com/Yelp/puppet-uchiwa/pull/35) ([pauloconnor](https://github.com/pauloconnor)) +- Notify uchiwa service when package changes [\#34](https://github.com/Yelp/puppet-uchiwa/pull/34) ([timmow](https://github.com/timmow)) +- Ensure uchiwa.json is not world-readable [\#32](https://github.com/Yelp/puppet-uchiwa/pull/32) ([nhinds](https://github.com/nhinds)) +- Fix some issues with 3.7.\* Puppet [\#31](https://github.com/Yelp/puppet-uchiwa/pull/31) ([tayzlor](https://github.com/tayzlor)) +- missing default host for sensu API [\#29](https://github.com/Yelp/puppet-uchiwa/pull/29) ([n1tr0g](https://github.com/n1tr0g)) + +## [0.3.0](https://github.com/Yelp/puppet-uchiwa/tree/0.3.0) (2014-12-11) +[Full Changelog](https://github.com/Yelp/puppet-uchiwa/compare/0.2.7...0.3.0) + +**Closed issues:** + +- Obscure Datacat errors from Uchiwa module [\#24](https://github.com/Yelp/puppet-uchiwa/issues/24) + +**Merged pull requests:** + +- Remove artifact of PR \#21 [\#28](https://github.com/Yelp/puppet-uchiwa/pull/28) ([jbussdieker](https://github.com/jbussdieker)) +- Add the insecure parameter to the default endpoint [\#27](https://github.com/Yelp/puppet-uchiwa/pull/27) ([jbussdieker](https://github.com/jbussdieker)) +- Fix JSON syntax [\#26](https://github.com/Yelp/puppet-uchiwa/pull/26) ([jbussdieker](https://github.com/jbussdieker)) +- The refresh param now takes seconds rather than ms [\#23](https://github.com/Yelp/puppet-uchiwa/pull/23) ([queeno](https://github.com/queeno)) +- Remove the stats param from the puppet module [\#22](https://github.com/Yelp/puppet-uchiwa/pull/22) ([queeno](https://github.com/queeno)) +- Remove uchiwa::api and pass API data via class params [\#21](https://github.com/Yelp/puppet-uchiwa/pull/21) ([queeno](https://github.com/queeno)) +- Fix puppet-uchiwa beaker tests [\#20](https://github.com/Yelp/puppet-uchiwa/pull/20) ([queeno](https://github.com/queeno)) + +## [0.2.7](https://github.com/Yelp/puppet-uchiwa/tree/0.2.7) (2014-11-10) +[Full Changelog](https://github.com/Yelp/puppet-uchiwa/compare/0.2.6...0.2.7) + +**Closed issues:** + +- Am I being inappropriately redirected? 
[\#18](https://github.com/Yelp/puppet-uchiwa/issues/18) +- uchiwa is not under the yelp namespace on the forge. [\#16](https://github.com/Yelp/puppet-uchiwa/issues/16) + +**Merged pull requests:** + +- Add beaker tests [\#17](https://github.com/Yelp/puppet-uchiwa/pull/17) ([petems](https://github.com/petems)) + +## [0.2.6](https://github.com/Yelp/puppet-uchiwa/tree/0.2.6) (2014-09-25) +[Full Changelog](https://github.com/Yelp/puppet-uchiwa/compare/0.2.5...0.2.6) + +**Closed issues:** + +- Explanation of "incompatible with previous versions of the Sensu-Puppet module" [\#14](https://github.com/Yelp/puppet-uchiwa/issues/14) +- Rename to puppet-uchiwa? [\#10](https://github.com/Yelp/puppet-uchiwa/issues/10) + +**Merged pull requests:** + +- remove unnecessary str2bool in install.pp since input is already validated [\#13](https://github.com/Yelp/puppet-uchiwa/pull/13) ([lreed](https://github.com/lreed)) + +## [0.2.5](https://github.com/Yelp/puppet-uchiwa/tree/0.2.5) (2014-09-11) +[Full Changelog](https://github.com/Yelp/puppet-uchiwa/compare/0.2.4...0.2.5) + +**Closed issues:** + +- Duplicate declaration apt [\#11](https://github.com/Yelp/puppet-uchiwa/issues/11) +- setting install\_repo to false breaks module [\#8](https://github.com/Yelp/puppet-uchiwa/issues/8) + +**Merged pull requests:** + +- use anchor pattern to fix containment of uchiwa class [\#12](https://github.com/Yelp/puppet-uchiwa/pull/12) ([LarsFronius](https://github.com/LarsFronius)) +- Fix for issue \#8 with package requiring repo [\#9](https://github.com/Yelp/puppet-uchiwa/pull/9) ([bjwschaap](https://github.com/bjwschaap)) + +## [0.2.4](https://github.com/Yelp/puppet-uchiwa/tree/0.2.4) (2014-08-11) +[Full Changelog](https://github.com/Yelp/puppet-uchiwa/compare/0.2.3...0.2.4) + +## [0.2.3](https://github.com/Yelp/puppet-uchiwa/tree/0.2.3) (2014-08-08) +[Full Changelog](https://github.com/Yelp/puppet-uchiwa/compare/0.2.2...0.2.3) + +## [0.2.2](https://github.com/Yelp/puppet-uchiwa/tree/0.2.2) (2014-08-08) +[Full Changelog](https://github.com/Yelp/puppet-uchiwa/compare/0.2.0...0.2.2) + +## [0.2.0](https://github.com/Yelp/puppet-uchiwa/tree/0.2.0) (2014-08-07) +[Full Changelog](https://github.com/Yelp/puppet-uchiwa/compare/0.1.1...0.2.0) + +**Merged pull requests:** + +- Fix template so it generates valid JSON [\#2](https://github.com/Yelp/puppet-uchiwa/pull/2) ([bodgit](https://github.com/bodgit)) +- Clean up puppet-lint errors [\#1](https://github.com/Yelp/puppet-uchiwa/pull/1) ([bodgit](https://github.com/bodgit)) + +## [0.1.1](https://github.com/Yelp/puppet-uchiwa/tree/0.1.1) (2014-08-05) + + +\* *This Change Log was automatically generated by [github_changelog_generator](https://github.com/skywinder/Github-Changelog-Generator)* \ No newline at end of file diff --git a/uchiwa/Gemfile b/uchiwa/Gemfile index 4609bd9b9..dc74f6855 100755 --- a/uchiwa/Gemfile +++ b/uchiwa/Gemfile @@ -13,6 +13,10 @@ group :development, :test do gem 'pry', :require => false gem 'simplecov', :require => false gem 'vagrant-wrapper', :require => false + gem "travis" + gem "travis-lint" + gem "puppet-blacksmith" + gem "guard-rake" end if facterversion = ENV['FACTER_GEM_VERSION'] @@ -27,4 +31,4 @@ else gem 'puppet', :require => false end -# vim:ft=ruby \ No newline at end of file +# vim:ft=ruby diff --git a/uchiwa/Gemfile.lock b/uchiwa/Gemfile.lock index 52c660624..d90674984 100644 --- a/uchiwa/Gemfile.lock +++ b/uchiwa/Gemfile.lock @@ -11,6 +11,7 @@ GEM aws-sdk (1.42.0) json (~> 1.4) nokogiri (>= 1.4.4) + backports (3.6.7) beaker (1.17.6) aws-sdk (= 
1.42.0) blimpy (~> 0.6) @@ -43,12 +44,19 @@ GEM archive-tar-minitar excon (>= 0.38.0) json + domain_name (0.5.25) + unf (>= 0.0.5, < 1.0.0) + ethon (0.8.0) + ffi (>= 1.3.0) excon (0.39.5) extlib (0.9.16) facter (2.1.0) CFPropertyList (~> 2.2.6) faraday (0.9.0) multipart-post (>= 1.2, < 3) + faraday_middleware (0.10.0) + faraday (>= 0.7.4, < 0.10) + ffi (1.9.10) fission (0.5.0) CFPropertyList (~> 2.2) fog (1.23.0) @@ -75,6 +83,13 @@ GEM fog-core fog-json formatador (0.2.5) + gh (0.14.0) + addressable + backports + faraday (~> 0.8) + multi_json (~> 1.0) + net-http-persistent (>= 2.7) + net-http-pipeline google-api-client (0.7.1) addressable (>= 2.3.2) autoparse (>= 0.3.3) @@ -86,10 +101,24 @@ GEM retriable (>= 1.4) signet (>= 0.5.0) uuidtools (>= 2.1.0) + guard (2.13.0) + formatador (>= 0.2.4) + listen (>= 2.7, <= 4.0) + lumberjack (~> 1.0) + nenv (~> 0.1) + notiffany (~> 0.0) + pry (>= 0.9.12) + shellany (~> 0.0) + thor (>= 0.18.1) + guard-rake (1.0.0) + guard + rake hiera (1.3.4) json_pure highline (1.6.21) hocon (0.0.4) + http-cookie (1.0.2) + domain_name (~> 0.5) inflecto (0.0.2) inifile (2.0.2) ipaddress (0.8.0) @@ -98,6 +127,10 @@ GEM jwt (1.0.0) launchy (2.4.2) addressable (~> 2.3) + listen (3.0.5) + rb-fsevent (>= 0.9.3) + rb-inotify (>= 0.9) + lumberjack (1.0.9) metaclass (0.0.4) method_source (0.8.2) mime-types (1.25.1) @@ -106,10 +139,17 @@ GEM metaclass (~> 0.0.1) multi_json (1.10.1) multipart-post (2.0.0) + nenv (0.2.0) + net-http-persistent (2.9.4) + net-http-pipeline (1.0.1) net-scp (1.2.1) net-ssh (>= 2.6.5) net-ssh (2.9.1) + netrc (0.11.0) nokogiri (1.5.11) + notiffany (0.0.8) + nenv (~> 0.1) + shellany (~> 0.0) pry (0.10.1) coderay (~> 1.1.0) method_source (~> 0.8.1) @@ -119,6 +159,9 @@ GEM hiera (~> 1.0) json_pure rgen (~> 0.6.5) + puppet-blacksmith (3.3.1) + puppet (>= 2.7.16) + rest-client puppet-lint (0.3.2) puppet-syntax (1.3.0) rake @@ -129,11 +172,21 @@ GEM rake rspec rspec-puppet + pusher-client (0.6.2) + json + websocket (~> 1.0) rake (10.3.2) + rb-fsevent (0.9.6) + rb-inotify (0.9.5) + ffi (>= 0.5.0) rbvmomi (1.8.1) builder nokogiri (>= 1.4.1) trollop + rest-client (1.8.0) + http-cookie (>= 1.0.2, < 2.0) + mime-types (>= 1.16, < 3.0) + netrc (~> 0.7) retriable (1.4.1) rgen (0.6.6) rspec (2.99.0) @@ -155,6 +208,7 @@ GEM rspec (~> 2.99) rspec-its specinfra (~> 1.25) + shellany (0.0.1) signet (0.5.1) addressable (>= 2.2.3) faraday (>= 0.9.0.rc5) @@ -168,12 +222,28 @@ GEM slop (3.6.0) specinfra (1.25.8) thor (0.19.1) + travis (1.6.11) + addressable (~> 2.3) + backports + faraday (~> 0.9) + faraday_middleware (~> 0.9) + gh (~> 0.13) + highline (~> 1.6) + launchy (~> 2.1) + pry (~> 0.9) + pusher-client (~> 0.4) + typhoeus (~> 0.6, >= 0.6.8) + travis-lint (2.0.0) + json trollop (2.0) + typhoeus (0.8.0) + ethon (>= 0.8.0) unf (0.1.4) unf_ext unf_ext (0.0.6) uuidtools (2.1.5) vagrant-wrapper (1.2.1.1) + websocket (1.2.2) PLATFORMS ruby @@ -182,12 +252,19 @@ DEPENDENCIES beaker beaker-rspec (~> 2.2.4) facter + guard-rake pry puppet + puppet-blacksmith puppet-lint puppetlabs_spec_helper rake rspec-puppet serverspec simplecov + travis + travis-lint vagrant-wrapper + +BUNDLED WITH + 1.10.6 diff --git a/uchiwa/Rakefile b/uchiwa/Rakefile index 2ad6ca615..6b7d849f7 100755 --- a/uchiwa/Rakefile +++ b/uchiwa/Rakefile @@ -15,4 +15,11 @@ PuppetLint.configuration.send("disable_autoloader_layout") PuppetLint.configuration.send("disable_quoted_booleans") PuppetLint.configuration.send('disable_class_inherits_from_params_class') PuppetLint.configuration.ignore_paths = exclude_paths 
-PuppetSyntax.exclude_paths = exclude_paths \ No newline at end of file +PuppetSyntax.exclude_paths = exclude_paths + +# These gems aren't always present, for instance +# on Travis with --without development +begin + require 'puppet_blacksmith/rake_tasks' +rescue LoadError +end diff --git a/uchiwa/manifests/init.pp b/uchiwa/manifests/init.pp index f5bc008dd..7157b9e8a 100755 --- a/uchiwa/manifests/init.pp +++ b/uchiwa/manifests/init.pp @@ -44,7 +44,7 @@ # # [*repo_key_source*] # String -# Default: http://repos.sensuapp.org/apt/pubkey.gpg +# Default: http://repositories.sensuapp.org/apt/pubkey.gpg # GPG key for the repo we're installing # # [*manage_services*] @@ -95,6 +95,36 @@ # }] # An array of API endpoints to connect uchiwa to one or multiple sensu servers. # +# [*users*] +# Array of hashes +# An array of user credentials to access the uchiwa dashboard. If set, it takes +# precedence over 'user' and 'pass'. +# Example: +# ``` +# [{ +# 'username' => 'user1', +# 'password' => 'pass1', +# 'readonly' => false +# }, +# { +# 'username' => 'user2', +# 'password' => 'pass2', +# 'readonly' => true +# }] +# ``` +# +# [*auth*] +# Hash +# A hash containing the static public and private key paths for generating and +# validating JSON Web Token (JWT) signatures. +# Example: +# ``` +# { +# 'publickey' => '/path/to/uchiwa.rsa.pub', +# 'privatekey' => '/path/to/uchiwa.rsa' +# } +# ``` +# class uchiwa ( $package_name = $uchiwa::params::package_name, $service_name = $uchiwa::params::service_name, @@ -112,6 +142,8 @@ $pass = $uchiwa::params::pass, $refresh = $uchiwa::params::refresh, $sensu_api_endpoints = $uchiwa::params::sensu_api_endpoints, + $users = $uchiwa::params::users, + $auth = $uchiwa::params::auth ) inherits uchiwa::params { # validate parameters here @@ -131,6 +163,8 @@ validate_string($pass) validate_integer($refresh) validate_array($sensu_api_endpoints) + validate_array($users) + validate_hash($auth) anchor { 'uchiwa::begin': } -> class { 'uchiwa::install': } -> diff --git a/uchiwa/manifests/params.pp b/uchiwa/manifests/params.pp index 1683e081d..197dac79f 100755 --- a/uchiwa/manifests/params.pp +++ b/uchiwa/manifests/params.pp @@ -22,8 +22,8 @@ $install_repo = true $repo = 'main' $repo_source = undef - $repo_key_id = '8911D8FF37778F24B4E726A218609E3D7580C77F' - $repo_key_source = 'http://repos.sensuapp.org/apt/pubkey.gpg' + $repo_key_id = 'EE15CFF6AB6E4E290FDAB681A20F259AEB9C94BB' + $repo_key_source = 'http://repositories.sensuapp.org/apt/pubkey.gpg' $manage_services = true $manage_user = true @@ -45,4 +45,6 @@ $user = '' $pass = '' $refresh = '5' + $users = [] + $auth = {} } diff --git a/uchiwa/manifests/repo/apt.pp b/uchiwa/manifests/repo/apt.pp index eedd082df..a84cb8003 100755 --- a/uchiwa/manifests/repo/apt.pp +++ b/uchiwa/manifests/repo/apt.pp @@ -19,7 +19,7 @@ if $uchiwa::repo_source { $url = $uchiwa::repo_source } else { - $url = 'http://repos.sensuapp.org/apt' + $url = 'http://repositories.sensuapp.org/apt' } apt::source { 'sensu': diff --git a/uchiwa/metadata.json b/uchiwa/metadata.json index 5fe005e71..ae66d6fc9 100644 --- a/uchiwa/metadata.json +++ b/uchiwa/metadata.json @@ -56,7 +56,7 @@ } ], "name": "yelp-uchiwa", - "version": "0.3.0", + "version": "1.0.2", "author": "yelp", "summary": "Puppet module for installing Uchiwa", "license": "Apache 2.0", @@ -65,7 +65,12 @@ "issues_url": "https://github.com/yelp/puppet-uchiwa/issues", "description": "Puppet module for installing Uchiwa", "dependencies": [ - {"name":"puppetlabs/apt","version_requirement": ">=2.0.0 <3.0.0"}, -
{"name":"puppetlabs/stdlib"} + { + "name": "puppetlabs/apt", + "version_requirement": ">=2.0.0 <3.0.0" + }, + { + "name": "puppetlabs/stdlib" + } ] } diff --git a/uchiwa/spec/classes/uchiwa_spec.rb b/uchiwa/spec/classes/uchiwa_spec.rb index 41608e7f8..d9e4ff7d5 100644 --- a/uchiwa/spec/classes/uchiwa_spec.rb +++ b/uchiwa/spec/classes/uchiwa_spec.rb @@ -53,11 +53,11 @@ context 'default' do it { should contain_apt__source('sensu').with( :ensure => 'present', - :location => 'http://repos.sensuapp.org/apt', + :location => 'http://repositories.sensuapp.org/apt', :release => 'sensu', :repos => 'main', :include => { 'src' => false, 'deb' => true }, - :key => { 'id' => '8911D8FF37778F24B4E726A218609E3D7580C77F', 'source' => 'http://repos.sensuapp.org/apt/pubkey.gpg' }, + :key => { 'id' => 'EE15CFF6AB6E4E290FDAB681A20F259AEB9C94BB', 'source' => 'http://repositories.sensuapp.org/apt/pubkey.gpg' }, :before => 'Package[uchiwa]' ) } end @@ -93,7 +93,7 @@ it { should_not contain_apt__key('sensu').with( :key => '7580C77F', - :key_source => 'http://repos.sensuapp.org/apt/pubkey.gpg' + :key_source => 'http://repositories.sensuapp.org/apt/pubkey.gpg' ) } it { should contain_package('uchiwa').with( @@ -152,4 +152,20 @@ } end + context 'with multiple users' do + let(:params) {{ :users => [ { 'username' => 'user1', 'password' => 'pass1', 'readonly' => true } ] }} + it { + should contain_file('/etc/sensu/uchiwa.json') \ + .with_content(/"username": "user1",\n "password": "pass1",\n "role": {\n "readonly": true\n }\n }/) + } + end + + context 'with static JWT RSA keys' do + let(:params) {{ :auth => { 'publickey' => '/etc/sensu/uchiwa.rsa.pub', 'privatekey' => '/etc/sensu/uchiwa.rsa' } }} + it { + should contain_file('/etc/sensu/uchiwa.json') \ + .with_content(/"auth": {\n "publickey": "\/etc\/sensu\/uchiwa.rsa.pub",\n "privatekey": "\/etc\/sensu\/uchiwa.rsa"\n }/) + } + end + end diff --git a/uchiwa/templates/etc/sensu/uchiwa.json.erb b/uchiwa/templates/etc/sensu/uchiwa.json.erb index 771f4a5fd..d2720fd52 100644 --- a/uchiwa/templates/etc/sensu/uchiwa.json.erb +++ b/uchiwa/templates/etc/sensu/uchiwa.json.erb @@ -20,6 +20,25 @@ "port": <%= @port %>, "user": "<%= @user %>", "pass": "<%= @pass %>", - "refresh": <%= @refresh %> + "refresh": <%= @refresh %><%= ',' if @users.size > 0 or @auth.size == 2 %> + <%- if @users.size > 0 -%> + "users": [ + <%- @users.each_with_index do |user, i| -%> + { + "username": "<%= user['username'] %>", + "password": "<%= user['password'] %>", + "role": { + "readonly": <%= user['readonly'] %> + } + }<%= ',' if i < (@users.size - 1) %> + <%- end -%> + ]<%= ',' if @auth.size == 2 %> + <%- end -%> + <%- if @auth.size == 2 -%> + "auth": { + "publickey": "<%= @auth['publickey'] %>", + "privatekey": "<%= @auth['privatekey'] %>" + } + <%- end -%> } } diff --git a/vswitch/CHANGELOG.md b/vswitch/CHANGELOG.md index 9011ada23..be1eb0881 100644 --- a/vswitch/CHANGELOG.md +++ b/vswitch/CHANGELOG.md @@ -1,3 +1,23 @@ +##2015-11-25 - 3.0.0 +###Summary + +This is a major release for OpenStack Liberty but contains no API-breaking +changes. 
+ + +####Features +- support for FreeBSD + +####Bugfixes +- explicitly say that ovs_redhat parent is ovs +- add require ovs_redhat.rb to ovs_redhat_el6.rb + +####Maintenance +- acceptance: use common bits from puppet-openstack-integration +- remove class_parameter_defaults puppet-lint check +- fix RSpec 3.x syntax +- initial msync run for all Puppet OpenStack modules + ##2015-10-15 - 2.1.0 ###Summary diff --git a/vswitch/README.md b/vswitch/README.md index 0b2c4f371..24ac3be3c 100644 --- a/vswitch/README.md +++ b/vswitch/README.md @@ -1,7 +1,7 @@ VSwitch ======= -2.1.0 - 2015.1 - Kilo +3.0.0 - 2015.2 - Liberty A Puppet module providing things for vSwitches. At the moment OVS is the only one I've added but please feel free to contribute new providers through diff --git a/vswitch/metadata.json b/vswitch/metadata.json index 60bf7ce9d..a0e7ddb7f 100644 --- a/vswitch/metadata.json +++ b/vswitch/metadata.json @@ -1,6 +1,6 @@ { "name": "openstack-vswitch", - "version": "2.1.0", + "version": "3.0.0", "author": "Endre Karlson, Dan Bode and OpenStack Contributors", "license": "Apache-2.0", "source": "git://github.com/openstack/puppet-vswitch.git", @@ -27,6 +27,10 @@ { "operatingsystem": "Ubuntu", "operatingsystemrelease": [ "12.04", "14.04" ] + }, + { + "operatingsystem":"FreeBSD", + "operatingsystemrelease": [ "10.0", "11.0" ] } ], "requirements": [