diff --git a/Puppetfile b/Puppetfile index cb36fdc17..86730b495 100644 --- a/Puppetfile +++ b/Puppetfile @@ -3,15 +3,15 @@ mod 'apache', :git => 'https://github.com/puppetlabs/puppetlabs-apache.git' mod 'ceilometer', - :commit => '08fc9d9159cd9eb0830d550abb1058bc2b9b5759', + :commit => '065a353aae3fb869395908289be5d61840d5d38b', :git => 'https://github.com/stackforge/puppet-ceilometer.git' mod 'certmonger', - :commit => '5fbf10fbbff4aed4db30e839c63c99b195e8425a', + :commit => '3f86b9973fc30c14a066b0f215023d5f1398b874', :git => 'https://github.com/rcritten/puppet-certmonger.git' mod 'cinder', - :commit => '2da616a4a52d3086fe3a291b9199fc7313575504', + :commit => '987c51bc097fa049a8692741ffbeaffa2f3770e9', :git => 'https://github.com/stackforge/puppet-cinder.git' mod 'common', @@ -23,11 +23,11 @@ mod 'concat', :git => 'https://github.com/puppetlabs/puppetlabs-concat.git' mod 'firewall', - :commit => 'd5a10f5a52d84b9fcfb8fc65ef505685a07d5799', + :commit => 'f061452461c841e83f20df1f9dd0aea485fb9744', :git => 'https://github.com/puppetlabs/puppetlabs-firewall.git' mod 'galera', - :commit => 'e35922bbb31ef2e6a86c7973cbafea96a8b160af', + :commit => 'a63ab112aabdc9faa5e66fc095ef9dcc865d6999', :git => 'https://github.com/rohara/puppet-galera.git' mod 'glance', @@ -43,11 +43,11 @@ mod 'haproxy', :git => 'https://github.com/puppetlabs/puppetlabs-haproxy.git' mod 'heat', - :commit => 'e9e1ba05e13948b8e0c7a72b1b68cefbedd2b40d', + :commit => '27f39cd68e894eacc2a068cfee7aea3c49223892', :git => 'https://github.com/stackforge/puppet-heat.git' mod 'horizon', - :commit => '16b482ea21a70d8dd06ab4c98ac5a218399b0213', + :commit => 'f1e5acfe6fdd3709b4c34f12b90786cc7fd52a07', :git => 'https://github.com/stackforge/puppet-horizon.git' mod 'inifile', @@ -55,11 +55,11 @@ mod 'inifile', :git => 'https://github.com/puppetlabs/puppetlabs-inifile.git' mod 'ipa', - :commit => '2cbd870b0dba2b8f588d74fc5ff8aa9cd0dc9ccf', + :commit => '08e51e96ac2c9265499deec3485e396b792587d3', :git => 'https://github.com/xbezdick/puppet-ipa.git' mod 'keystone', - :commit => '605161f3d4b7bbcffc657c86b367159701dfdcbe', + :commit => '3f64ee48fa04ca6c5f8173a3dbadad946ca41239', :git => 'https://github.com/stackforge/puppet-keystone.git' mod 'memcached', @@ -75,7 +75,7 @@ mod 'mongodb', :git => 'https://github.com/puppetlabs/puppetlabs-mongodb.git' mod 'mysql', - :commit => 'c70fc13fc15740b61b8eccd3c79168d3e417a374', + :commit => '40dd1805886aee56dc02860565f161c6e3b4c7e5', :git => 'https://github.com/puppetlabs/puppetlabs-mysql.git' mod 'n1k-vsm', @@ -87,11 +87,11 @@ mod 'nagios', :git => 'https://github.com/gildub/puppet-nagios-openstack.git' mod 'neutron', - :commit => 'dcd122e477713421d9601d93d13725a4871b9c42', + :commit => '4b7360b16e37e0460ee20d48cba408b796b029c3', :git => 'https://github.com/stackforge/puppet-neutron.git' mod 'nova', - :commit => 'a79e5338df5f85cb299183e54b39e8a22a640f59', + :commit => '648c9e85830204995c24ae43d15efe278b9fa56b', :git => 'https://github.com/stackforge/puppet-nova.git' mod 'nssdb', @@ -107,7 +107,7 @@ mod 'openstack', :git => 'https://github.com/stackforge/puppet-openstack.git' mod 'openstacklib', - :commit => 'c374bed10f8af6000601fa407ebaef0833e1999c', + :commit => '28130971a816859ff69eef037b9f9b1036a7720e', :git => 'https://github.com/stackforge/puppet-openstacklib.git' mod 'pacemaker', @@ -119,11 +119,11 @@ mod 'puppet', :git => 'https://github.com/purpleidea/puppet-puppet.git' mod 'qpid', - :commit => '1f0c32b39ad17e7acbd440b50fb6f0875971f5e1', + :commit => '9ffb2788c536f1694980e07a43e8133ff85fa28c', :git 
=> 'https://github.com/dprince/puppet-qpid' mod 'rabbitmq', - :commit => 'cbda1ced336f9768ebd442415b4d9c7c4ddb48c7', + :commit => '4832bd61b5b1bfea7c9cc985508e65cd10081652', :git => 'https://github.com/puppetlabs/puppetlabs-rabbitmq.git' mod 'rsync', @@ -131,7 +131,7 @@ mod 'rsync', :git => 'https://github.com/puppetlabs/puppetlabs-rsync.git' mod 'sahara', - :commit => 'f4e5681cfb289113be1ba49c12709145ecbad938', + :commit => '6b696cffcba6692975dbcfee144e81b6e90e5ecf', :git => 'https://github.com/stackforge/puppet-sahara.git' mod 'ssh', @@ -147,7 +147,7 @@ mod 'stdlib', :git => 'https://github.com/puppetlabs/puppetlabs-stdlib.git' mod 'swift', - :commit => '3ea00440361ff2452561d2cce808d938e39cce56', + :commit => '7b30dbb3979ec1597173608e17b60144eefbeeec', :git => 'https://github.com/stackforge/puppet-swift.git' mod 'sysctl', @@ -167,7 +167,7 @@ mod 'vlan', :git => 'https://github.com/derekhiggins/puppet-vlan.git' mod 'vswitch', - :commit => '17b62e56e07eeed25fd2aaef278a16c97155a115', + :commit => '51fd30c22b79d927fb0329e6e2b58fe67217ecee', :git => 'https://github.com/stackforge/puppet-vswitch.git' mod 'xinetd', diff --git a/ceilometer/Modulefile b/ceilometer/Modulefile deleted file mode 100644 index e7b7fb049..000000000 --- a/ceilometer/Modulefile +++ /dev/null @@ -1,13 +0,0 @@ -name 'puppetlabs-ceilometer' -version '4.0.0' -author 'eNovance and StackForge Contributors' -license 'Apache License 2.0' -summary 'Puppet module for OpenStack Ceilometer' -description 'Installs and configures OpenStack Ceilometer (Telemetry).' -project_page 'https://launchpad.net/puppet-ceilometer' -source 'https://github.com/stackforge/puppet-ceilometer' - -dependency 'puppetlabs/inifile', '>=1.0.0 <2.0.0' -dependency 'puppetlabs/keystone', '>=4.0.0 <5.0.0' -dependency 'puppetlabs/stdlib', '>=4.0.0 < 5.0.0' -dependency 'stackforge/openstacklib', '>=5.0.0' diff --git a/ceilometer/Rakefile b/ceilometer/Rakefile index 4c2b2ed07..2e74217e1 100644 --- a/ceilometer/Rakefile +++ b/ceilometer/Rakefile @@ -4,3 +4,4 @@ require 'puppet-lint/tasks/puppet-lint' PuppetLint.configuration.fail_on_warnings = true PuppetLint.configuration.send('disable_80chars') PuppetLint.configuration.send('disable_class_parameter_defaults') +PuppetLint.configuration.send('disable_only_variable_string') diff --git a/ceilometer/manifests/alarm/evaluator.pp b/ceilometer/manifests/alarm/evaluator.pp index 2b9ca643b..c634f3ba6 100644 --- a/ceilometer/manifests/alarm/evaluator.pp +++ b/ceilometer/manifests/alarm/evaluator.pp @@ -36,7 +36,7 @@ include ceilometer::params - validate_re($evaluation_interval,'^(\d+)$') + validate_re("${evaluation_interval}",'^(\d+)$') Ceilometer_config<||> ~> Service['ceilometer-alarm-evaluator'] diff --git a/ceilometer/metadata.json b/ceilometer/metadata.json new file mode 100644 index 000000000..77a27e3e2 --- /dev/null +++ b/ceilometer/metadata.json @@ -0,0 +1,39 @@ +{ + "name": "stackforge-ceilometer", + "version": "5.0.0", + "author": "eNovance and StackForge Contributors", + "summary": "Puppet module for OpenStack Ceilometer", + "license": "Apache License 2.0", + "source": "git://github.com/stackforge/puppet-ceilometer.git", + "project_page": "https://launchpad.net/puppet-ceilometer", + "issues_url": "https://bugs.launchpad.net/puppet-ceilometer", + "requirements": [ + { "name": "pe","version_requirement": "3.x" }, + { "name": "puppet","version_requirement": "3.x" } + ], + "operatingsystem_support": [ + { + "operatingsystem": "Debian", + "operatingsystemrelease": ["7"] + }, + { + "operatingsystem": "Fedora", + 
"operatingsystemrelease": ["20"] + }, + { + "operatingsystem": "RedHat", + "operatingsystemrelease": ["6.5","7"] + }, + { + "operatingsystem": "Ubuntu", + "operatingsystemrelease": ["12.04","14.04"] + } + ], + "description": "Installs and configures OpenStack Ceilometer (Telemetry).", + "dependencies": [ + { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, + { "name": "stackforge/keystone", "version_requirement": ">=5.0.0 <6.0.0" }, + { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, + { "name": "stackforge/openstacklib", "version_requirement": ">=5.0.0" } + ] +} diff --git a/certmonger/Modulefile b/certmonger/Modulefile index 2bc1a0099..5ec7c4b56 100644 --- a/certmonger/Modulefile +++ b/certmonger/Modulefile @@ -1,5 +1,5 @@ name 'rcritten/certmonger' -version '1.0.2' +version '1.0.3' source 'git://github.com/rcritten/puppet-certmonger.git' author 'Rob Crittenden ' license 'Apache' diff --git a/certmonger/lib/facter/ipa_client_configured.rb b/certmonger/lib/facter/ipa_client_configured.rb deleted file mode 100644 index cc50290c2..000000000 --- a/certmonger/lib/facter/ipa_client_configured.rb +++ /dev/null @@ -1,9 +0,0 @@ -Facter.add("ipa_client_configured") do - setcode do - if File.exist? "/etc/ipa/default.conf" - "true" - else - "false" - end - end -end diff --git a/certmonger/manifests/request_ipa_cert.pp b/certmonger/manifests/request_ipa_cert.pp index d3747ce07..c3f63b716 100644 --- a/certmonger/manifests/request_ipa_cert.pp +++ b/certmonger/manifests/request_ipa_cert.pp @@ -57,108 +57,65 @@ ) { include certmonger::server - if "$ipa_client_configured" == 'true' { - - $principal_no_slash = regsubst($principal, '\/', '_') + $principal_no_slash = regsubst($principal, '\/', '_') - if $hostname == undef { - $subject = '' - } else { - $subject = "-N cn=${hostname}" - } + # Only execute certmonger if IPA client is configured + $onlyif = "/usr/bin/test -s /etc/ipa/default.conf" - if $seclib == 'nss' { - $options = "-d ${basedir}/${dbname} -n ${nickname} -p ${basedir}/${dbname}/password.conf" + if $hostname == undef { + $subject = '' + } else { + $subject = "-N cn=${hostname}" + } - file {"${basedir}/${dbname}/requested": - ensure => directory, - mode => 0600, - owner => 0, - group => 0, - } - - # Semaphore file to determine if we've already requested a certificate. - file {"${basedir}/${dbname}/requested/${principal_no_slash}": - ensure => file, - mode => 0600, - owner => $owner_id, - group => $group_id, - require => [ - Exec["get_cert_nss_${title}"] - ], - } - exec {"get_cert_nss_${title}": - command => "/usr/bin/ipa-getcert request ${options} -K ${principal} ${subject}", - creates => "${basedir}/${dbname}/requested/${principal_no_slash}", - require => [ - Package['certmonger'], - File["${basedir}/${dbname}/password.conf"], - ], - } - } - elsif $seclib == 'openssl' { + if $seclib == 'nss' { + $options = "-d ${basedir}/${dbname} -n ${nickname} -p ${basedir}/${dbname}/password.conf" + $unless = "/usr/bin/getcert list -d ${basedir}/${dbname} -n ${nickname}" - $options = "-k ${key} -f ${cert}" + exec {"get_cert_nss_${title}": + command => "/usr/bin/ipa-getcert request ${options} -K ${principal} ${subject}", + onlyif => "${onlyif}", + unless => "${unless}", + require => [ + Service['certmonger'], + File["${basedir}/${dbname}/password.conf"], + ], + } + } + elsif $seclib == 'openssl' { - # NOTE: Order is extremely important here. If the key file exists - # (content doesn't matter) then certmonger will attempt to use that - # as the key. 
You could end up in a NEWLY_ADDED_NEED_KEYINFO_READ_PIN - # state if the key file doesn't actually contain a key. + $options = "-k ${key} -f ${cert}" + $unless = "/usr/bin/getcert list -f ${cert}" - file {"${cert}": - ensure => file, - mode => 0444, - owner => $owner_id, - group => $group_id, - } - file {"${key}": - ensure => file, - mode => 0440, - owner => $owner_id, - group => $group_id, - } - exec {"get_cert_openssl_${title}": - command => "/usr/bin/ipa-getcert request ${options} -K ${principal} ${subject}", - creates => [ - "${key}", - "${cert}", - ], - require => [ - Package['certmonger'], - ], - before => [ - File["${key}"], - File["${cert}"], - ], - notify => Exec["wait_for_certmonger_${title}"], - } + exec {"get_cert_openssl_${title}": + command => "/usr/bin/ipa-getcert request ${options} -K ${principal} ${subject}", + onlyif => "${onlyif}", + unless => "${unless}", + require => [ + Service['certmonger'], + ], + notify => Exec["wait_for_certmonger_${title}"], + } - # We need certmonger to finish creating the key before we - # can proceed. Use onlyif as a way to execute multiple - # commands without restorting to shipping a shell script. - # This will call getcert to check the status of our cert - # 5 times. This doesn't short circuit though, so all 5 will - # always run, causing a 5-second delay. - exec {"wait_for_certmonger_${title}": - command => "true", - onlyif => [ - "sleep 1 && getcert list -f ${cert}", - "sleep 1 && getcert list -f ${cert}", - "sleep 1 && getcert list -f ${cert}", - "sleep 1 && getcert list -f ${cert}", - "sleep 1 && getcert list -f ${cert}", - ], - path => "/usr/bin:/bin", - before => [ - File["${key}"], - File["${cert}"], - ], - refreshonly => true, - } - } else { - fail("Unrecognized security library: ${seclib}") - } + # We need certmonger to finish creating the key before we + # can proceed. Use onlyif as a way to execute multiple + # commands without restorting to shipping a shell script. + # This will call getcert to check the status of our cert + # 5 times. This doesn't short circuit though, so all 5 will + # always run, causing a 5-second delay. + exec {"wait_for_certmonger_${title}": + command => "true", + onlyif => [ + "sleep 1 && getcert list -f ${cert}", + "sleep 1 && getcert list -f ${cert}", + "sleep 1 && getcert list -f ${cert}", + "sleep 1 && getcert list -f ${cert}", + "sleep 1 && getcert list -f ${cert}", + ], + path => "/usr/bin:/bin", + refreshonly => true, + } } else { - fail("ipa not configured") + fail("Unrecognized security library: ${seclib}") } } diff --git a/cinder/Modulefile b/cinder/Modulefile deleted file mode 100644 index d68b5159a..000000000 --- a/cinder/Modulefile +++ /dev/null @@ -1,15 +0,0 @@ -name 'puppetlabs-cinder' -version '4.0.0' -author 'Puppet Labs and StackForge Contributors' -license 'Apache License 2.0' -summary 'Puppet module for OpenStack Cinder' -description 'Installs and configures OpenStack Cinder (Block Storage).' 
-project_page 'https://launchpad.net/puppet-cinder' -source 'https://github.com/stackforge/puppet-cinder' - -dependency 'dprince/qpid', '>=1.0.0 <2.0.0' -dependency 'puppetlabs/inifile', '>=1.0.0 <2.0.0' -dependency 'puppetlabs/keystone', '>=4.0.0 <5.0.0' -dependency 'puppetlabs/rabbitmq', '>=2.0.2 <4.0.0' -dependency 'puppetlabs/stdlib', '>=4.0.0' -dependency 'stackforge/openstacklib', '>=5.0.0' diff --git a/cinder/manifests/backend/emc_vnx.pp b/cinder/manifests/backend/emc_vnx.pp new file mode 100644 index 000000000..5b060d39f --- /dev/null +++ b/cinder/manifests/backend/emc_vnx.pp @@ -0,0 +1,65 @@ +# +# == Define: cinder::backend::emc_vnx +# +# Setup Cinder to use the EMC VNX driver. +# Compatible for multiple backends +# +# == Parameters +# +# [*volume_backend_name*] +# (optional) Allows for the volume_backend_name to be separate of $name. +# Defaults to: $name +# +# [*san_ip*] +# (required) IP address of SAN controller. +# +# [*san_password*] +# (required) Password of SAN controller. +# +# [*san_login*] +# (optional) Login of SAN controller. +# Defaults to : 'admin' +# +# [*storage_vnx_pool_name*] +# (required) Storage pool name. +# +# [*default_timeout*] +# (optional) Default timeout for CLI operations in minutes. +# Defaults to: '10' +# +# [*max_luns_per_storage_group*] +# (optional) Default max number of LUNs in a storage group. +# Defaults to: '256' +# +# [*package_ensure*] +# (optional) The state of the package +# Defaults to: 'present' +# +define cinder::backend::emc_vnx ( + $iscsi_ip_address, + $san_ip, + $san_password, + $storage_vnx_pool_name, + $default_timeout = '10', + $max_luns_per_storage_group = '256', + $package_ensure = 'present', + $san_login = 'admin', + $volume_backend_name = $name, +) { + + include cinder::params + + cinder_config { + "${name}/default_timeout": value => $default_timeout; + "${name}/iscsi_ip_address": value => $iscsi_ip_address; + "${name}/max_luns_per_storage_group": value => $max_luns_per_storage_group; + "${name}/naviseccli_path": value => '/opt/Navisphere/bin/naviseccli'; + "${name}/san_ip": value => $san_ip; + "${name}/san_login": value => $san_login; + "${name}/san_password": value => $san_password; + "${name}/storage_vnx_pool_name": value => $storage_vnx_pool_name; + "${name}/volume_backend_name": value => $volume_backend_name; + "${name}/volume_driver": value => 'cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver'; + } + +} diff --git a/cinder/manifests/backend/iscsi.pp b/cinder/manifests/backend/iscsi.pp index 86da186cf..2d03fa5f3 100644 --- a/cinder/manifests/backend/iscsi.pp +++ b/cinder/manifests/backend/iscsi.pp @@ -10,6 +10,7 @@ define cinder::backend::iscsi ( $iscsi_ip_address, $volume_backend_name = $name, + $volume_driver = 'cinder.volume.drivers.lvm.LVMISCSIDriver', $volume_group = 'cinder-volumes', $iscsi_helper = $::cinder::params::iscsi_helper, ) { @@ -18,6 +19,7 @@ cinder_config { "${name}/volume_backend_name": value => $volume_backend_name; + "${name}/volume_driver": value => $volume_driver; "${name}/iscsi_ip_address": value => $iscsi_ip_address; "${name}/iscsi_helper": value => $iscsi_helper; "${name}/volume_group": value => $volume_group; diff --git a/cinder/manifests/backend/netapp.pp b/cinder/manifests/backend/netapp.pp index 4d88c7874..39d397b51 100644 --- a/cinder/manifests/backend/netapp.pp +++ b/cinder/manifests/backend/netapp.pp @@ -198,4 +198,10 @@ "${volume_backend_name}/netapp_storage_pools": value => $netapp_storage_pools; "${volume_backend_name}/netapp_webservice_path": value => $netapp_webservice_path; } 
+ + if $netapp_storage_family == 'eseries' { + cinder_config { + "${volume_backend_name}/use_multipath_for_image_xfer": value => true; + } + } } diff --git a/cinder/manifests/backup/ceph.pp b/cinder/manifests/backup/ceph.pp index 9ac208ab8..dac0adc8b 100644 --- a/cinder/manifests/backup/ceph.pp +++ b/cinder/manifests/backup/ceph.pp @@ -21,6 +21,10 @@ # # === Parameters # +# [*backup_driver*] +# (optional) Which cinder backup driver to use +# Defaults to 'cinder.backup.drivers.ceph' +# # [*backup_ceph_conf*] # (optional) Ceph config file to use. # Should be a valid ceph configuration file @@ -54,7 +58,7 @@ # class cinder::backup::ceph ( - $backup_driver = 'cinder.backup.driver.ceph', + $backup_driver = 'cinder.backup.drivers.ceph', $backup_ceph_conf = '/etc/ceph/ceph.conf', $backup_ceph_user = 'cinder', $backup_ceph_chunk_size = '134217728', diff --git a/cinder/manifests/init.pp b/cinder/manifests/init.pp index 717b1f41d..9c674c881 100644 --- a/cinder/manifests/init.pp +++ b/cinder/manifests/init.pp @@ -9,6 +9,27 @@ # Timeout when db connections should be reaped. # (Optional) Defaults to 3600. # +# [database_min_pool_size] +# Minimum number of SQL connections to keep open in a pool. +# (Optional) Defaults to 1. +# +# [database_max_pool_size] +# Maximum number of SQL connections to keep open in a pool. +# (Optional) Defaults to undef. +# +# [database_max_retries] +# Maximum db connection retries during startup. +# Setting -1 implies an infinite retry count. +# (Optional) Defaults to 10. +# +# [database_retry_interval] +# Interval between retries of opening a sql connection. +# (Optional) Defaults to 10. +# +# [database_max_overflow] +# If set, use this value for max_overflow with sqlalchemy. +# (Optional) Defaults to undef. +# # [*rabbit_use_ssl*] # (optional) Connect over SSL for RabbitMQ # Defaults to false @@ -85,6 +106,11 @@ class cinder ( $database_connection = 'sqlite:////var/lib/cinder/cinder.sqlite', $database_idle_timeout = '3600', + $database_min_pool_size = '1', + $database_max_pool_size = undef, + $database_max_retries = '10', + $database_retry_interval = '10', + $database_max_overflow = undef, $rpc_backend = 'cinder.openstack.common.rpc.impl_kombu', $control_exchange = 'openstack', $rabbit_host = '127.0.0.1', @@ -292,6 +318,9 @@ cinder_config { 'database/connection': value => $database_connection_real, secret => true; 'database/idle_timeout': value => $database_idle_timeout_real; + 'database/min_pool_size': value => $database_min_pool_size; + 'database/max_retries': value => $database_max_retries; + 'database/retry_interval': value => $database_retry_interval; 'DEFAULT/verbose': value => $verbose; 'DEFAULT/debug': value => $debug; 'DEFAULT/api_paste_config': value => $api_paste_config; @@ -300,6 +329,26 @@ 'DEFAULT/default_availability_zone': value => $default_availability_zone_real; } + if $database_max_pool_size { + cinder_config { + 'database/max_pool_size': value => $database_max_pool_size; + } + } else { + cinder_config { + 'database/max_pool_size': ensure => absent; + } + } + + if $database_max_overflow { + cinder_config { + 'database/max_overflow': value => $database_max_overflow; + } + } else { + cinder_config { + 'database/max_overflow': ensure => absent; + } + } + if($database_connection_real =~ /mysql:\/\/\S+:\S+@\S+\/\S+/) { require 'mysql::bindings' require 'mysql::bindings::python' diff --git a/cinder/manifests/type.pp b/cinder/manifests/type.pp index a0c78bce3..435f9b293 100644 --- a/cinder/manifests/type.pp +++ b/cinder/manifests/type.pp @@ -60,7 +60,7 @@ exec 
{"cinder type-create ${volume_name}": command => "cinder type-create ${volume_name}", - unless => "cinder type-list | grep ${volume_name}", + unless => "cinder type-list | grep -qP '\\b${volume_name}\\b'", environment => concat($cinder_env, $region_env), require => Package['python-cinderclient'], path => ['/usr/bin', '/bin'], diff --git a/cinder/manifests/volume/emc_vnx.pp b/cinder/manifests/volume/emc_vnx.pp new file mode 100644 index 000000000..5c50f176a --- /dev/null +++ b/cinder/manifests/volume/emc_vnx.pp @@ -0,0 +1,50 @@ +# == Class: cinder::volume::emc_enx +# +# Configures Cinder volume EMC VNX driver. +# Parameters are particular to each volume driver. +# +# === Parameters +# +# [*san_ip*] +# (required) IP address of SAN controller. +# +# [*san_password*] +# (required) Password of SAN controller. +# +# [*san_login*] +# (optional) Login of SAN controller. +# Defaults to : 'admin' +# +# [*storage_vnx_pool_name*] +# (required) Storage pool name. +# +# [*default_timeout*] +# (optonal) Default timeout for CLI operations in minutes. +# Defaults to: '10' +# +# [*max_luns_per_storage_group*] +# (optonal) Default max number of LUNs in a storage group. +# Defaults to: '256' +# +class cinder::volume::emc_vnx( + $iscsi_ip_address, + $san_ip, + $san_password, + $storage_vnx_pool_name, + $default_timeout = '10', + $max_luns_per_storage_group = '256', + $package_ensure = 'present', + $san_login = 'admin', +) { + + cinder::backend::emc_vnx { 'DEFAULT': + default_timeout => $default_timeout, + iscsi_ip_address => $iscsi_ip_address, + max_luns_per_storage_group => $max_luns_per_storage_group, + package_ensure => $package_ensure, + san_ip => $san_ip, + san_login => $san_login, + san_password => $san_password, + storage_vnx_pool_name => $storage_vnx_pool_name, + } +} diff --git a/cinder/manifests/volume/iscsi.pp b/cinder/manifests/volume/iscsi.pp index cb5186221..e9eb4dad4 100644 --- a/cinder/manifests/volume/iscsi.pp +++ b/cinder/manifests/volume/iscsi.pp @@ -1,6 +1,7 @@ # class cinder::volume::iscsi ( $iscsi_ip_address, + $volume_driver = 'cinder.volume.drivers.lvm.LVMISCSIDriver', $volume_group = 'cinder-volumes', $iscsi_helper = $::cinder::params::iscsi_helper, ) { @@ -9,6 +10,7 @@ cinder::backend::iscsi { 'DEFAULT': iscsi_ip_address => $iscsi_ip_address, + volume_driver => $volume_driver, volume_group => $volume_group, iscsi_helper => $iscsi_helper } diff --git a/cinder/metadata.json b/cinder/metadata.json new file mode 100644 index 000000000..e288f3ca9 --- /dev/null +++ b/cinder/metadata.json @@ -0,0 +1,41 @@ +{ + "name": "stackforge-cinder", + "version": "5.0.0", + "author": "Puppet Labs and StackForge Contributors", + "summary": "Puppet module for OpenStack Cinder", + "license": "Apache License 2.0", + "source": "git://github.com/stackforge/puppet-cinder.git", + "project_page": "https://launchpad.net/puppet-cinder", + "issues_url": "https://bugs.launchpad.net/puppet-cinder", + "requirements": [ + { "name": "pe","version_requirement": "3.x" }, + { "name": "puppet","version_requirement": "3.x" } + ], + "operatingsystem_support": [ + { + "operatingsystem": "Debian", + "operatingsystemrelease": ["7"] + }, + { + "operatingsystem": "Fedora", + "operatingsystemrelease": ["20"] + }, + { + "operatingsystem": "RedHat", + "operatingsystemrelease": ["6.5","7"] + }, + { + "operatingsystem": "Ubuntu", + "operatingsystemrelease": ["12.04","14.04"] + } + ], + "description": "Installs and configures OpenStack Cinder (Block Storage).", + "dependencies": [ + { "name": "dprince/qpid", "version_requirement": 
">=1.0.0 <2.0.0" }, + { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, + { "name": "stackforge/keystone", "version_requirement": ">=5.0.0 <6.0.0" }, + { "name": "puppetlabs/rabbitmq", "version_requirement": ">=2.0.2 <4.0.0" }, + { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, + { "name": "stackforge/openstacklib", "version_requirement": ">=5.0.0" } + ] +} diff --git a/cinder/spec/classes/cinder_backup_ceph_spec.rb b/cinder/spec/classes/cinder_backup_ceph_spec.rb index bedd7cdc5..8c3a55218 100644 --- a/cinder/spec/classes/cinder_backup_ceph_spec.rb +++ b/cinder/spec/classes/cinder_backup_ceph_spec.rb @@ -41,7 +41,7 @@ end it 'configures cinder.conf' do - should contain_cinder_config('DEFAULT/backup_driver').with_value('cinder.backup.driver.ceph') + should contain_cinder_config('DEFAULT/backup_driver').with_value('cinder.backup.drivers.ceph') should contain_cinder_config('DEFAULT/backup_ceph_conf').with_value(p[:backup_ceph_conf]) should contain_cinder_config('DEFAULT/backup_ceph_user').with_value(p[:backup_ceph_user]) should contain_cinder_config('DEFAULT/backup_ceph_chunk_size').with_value(p[:backup_ceph_chunk_size]) diff --git a/cinder/spec/classes/cinder_spec.rb b/cinder/spec/classes/cinder_spec.rb index 8d22a62f6..04707309d 100644 --- a/cinder/spec/classes/cinder_spec.rb +++ b/cinder/spec/classes/cinder_spec.rb @@ -52,6 +52,17 @@ should contain_cinder_config('database/idle_timeout').with( :value => '3600' ) + should contain_cinder_config('database/min_pool_size').with( + :value => '1' + ) + should contain_cinder_config('database/max_pool_size').with_ensure('absent') + should contain_cinder_config('database/max_retries').with( + :value => '10' + ) + should contain_cinder_config('database/retry_interval').with( + :value => '10' + ) + should contain_cinder_config('database/max_overflow').with_ensure('absent') should contain_cinder_config('DEFAULT/verbose').with( :value => false ) diff --git a/cinder/spec/classes/cinder_volume_emc_vnx_spec.rb b/cinder/spec/classes/cinder_volume_emc_vnx_spec.rb new file mode 100644 index 000000000..3372e1f57 --- /dev/null +++ b/cinder/spec/classes/cinder_volume_emc_vnx_spec.rb @@ -0,0 +1,32 @@ +require 'spec_helper' + +describe 'cinder::volume::emc_vnx' do + let :req_params do + { + :san_ip => '127.0.0.2', + :san_login => 'emc', + :san_password => 'password', + :iscsi_ip_address => '127.0.0.3', + :storage_vnx_pool_name => 'emc-storage-pool' + } + end + + let :facts do + {:osfamily => 'Redhat' } + end + + let :params do + req_params + end + + describe 'emc vnx volume driver' do + it 'configure emc vnx volume driver' do + should contain_cinder_config('DEFAULT/volume_driver').with_value('cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver') + should contain_cinder_config('DEFAULT/san_ip').with_value('127.0.0.2') + should contain_cinder_config('DEFAULT/san_login').with_value('emc') + should contain_cinder_config('DEFAULT/san_password').with_value('password') + should contain_cinder_config('DEFAULT/iscsi_ip_address').with_value('127.0.0.3') + should contain_cinder_config('DEFAULT/storage_vnx_pool_name').with_value('emc-storage-pool') + end + end +end diff --git a/cinder/spec/classes/cinder_volume_iscsi_spec.rb b/cinder/spec/classes/cinder_volume_iscsi_spec.rb index 27a3c5345..93e062245 100644 --- a/cinder/spec/classes/cinder_volume_iscsi_spec.rb +++ b/cinder/spec/classes/cinder_volume_iscsi_spec.rb @@ -16,12 +16,22 @@ req_params end + it { should contain_cinder_config('DEFAULT/volume_driver').with( + 
:value => 'cinder.volume.drivers.lvm.LVMISCSIDriver')} it { should contain_cinder_config('DEFAULT/iscsi_ip_address').with(:value => '127.0.0.2')} it { should contain_cinder_config('DEFAULT/iscsi_helper').with(:value => 'tgtadm')} it { should contain_cinder_config('DEFAULT/volume_group').with(:value => 'cinder-volumes')} end + describe 'with iSER driver' do + let(:params) { req_params.merge( + :volume_driver => 'cinder.volume.drivers.lvm.LVMISERDriver')} + + it { should contain_cinder_config('DEFAULT/volume_driver').with( + :value => 'cinder.volume.drivers.lvm.LVMISERDriver')} + end + describe 'with a unsupported iscsi helper' do let(:params) { req_params.merge(:iscsi_helper => 'fooboozoo')} diff --git a/cinder/spec/defines/cinder_backend_emc_vnx_spec.rb b/cinder/spec/defines/cinder_backend_emc_vnx_spec.rb new file mode 100644 index 000000000..e5031bb67 --- /dev/null +++ b/cinder/spec/defines/cinder_backend_emc_vnx_spec.rb @@ -0,0 +1,34 @@ +require 'spec_helper' + +describe 'cinder::backend::emc_vnx' do + let (:title) { 'emc' } + + let :req_params do + { + :san_ip => '127.0.0.2', + :san_login => 'emc', + :san_password => 'password', + :iscsi_ip_address => '127.0.0.3', + :storage_vnx_pool_name => 'emc-storage-pool' + } + end + + let :facts do + {:osfamily => 'Redhat' } + end + + let :params do + req_params + end + + describe 'emc vnx volume driver' do + it 'configure emc vnx volume driver' do + should contain_cinder_config('emc/volume_driver').with_value('cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver') + should contain_cinder_config('emc/san_ip').with_value('127.0.0.2') + should contain_cinder_config('emc/san_login').with_value('emc') + should contain_cinder_config('emc/san_password').with_value('password') + should contain_cinder_config('emc/iscsi_ip_address').with_value('127.0.0.3') + should contain_cinder_config('emc/storage_vnx_pool_name').with_value('emc-storage-pool') + end + end +end diff --git a/cinder/spec/defines/cinder_backend_iscsi_spec.rb b/cinder/spec/defines/cinder_backend_iscsi_spec.rb index 4e987648d..1006d7478 100644 --- a/cinder/spec/defines/cinder_backend_iscsi_spec.rb +++ b/cinder/spec/defines/cinder_backend_iscsi_spec.rb @@ -23,6 +23,8 @@ it 'should configure iscsi driver' do should contain_cinder_config('hippo/volume_backend_name').with( :value => 'hippo') + should contain_cinder_config('hippo/volume_driver').with( + :value => 'cinder.volume.drivers.lvm.LVMISCSIDriver') should contain_cinder_config('hippo/iscsi_ip_address').with( :value => '127.0.0.2') should contain_cinder_config('hippo/iscsi_helper').with( diff --git a/cinder/spec/defines/cinder_backend_netapp_spec.rb b/cinder/spec/defines/cinder_backend_netapp_spec.rb index e0e316fc1..d9e7b94ae 100644 --- a/cinder/spec/defines/cinder_backend_netapp_spec.rb +++ b/cinder/spec/defines/cinder_backend_netapp_spec.rb @@ -70,4 +70,11 @@ it_configures 'netapp volume driver' end + context 'with netapp_storage_family eseries' do + let (:req_params) { params.merge!({ + :netapp_storage_family => 'eseries', + }) } + + it { should contain_cinder_config("#{req_params[:volume_backend_name]}/use_multipath_for_image_xfer").with_value('true') } + end end diff --git a/cinder/spec/defines/cinder_type_spec.rb b/cinder/spec/defines/cinder_type_spec.rb index 8d763ff9d..4fa2ac981 100644 --- a/cinder/spec/defines/cinder_type_spec.rb +++ b/cinder/spec/defines/cinder_type_spec.rb @@ -24,7 +24,7 @@ 'OS_USERNAME=admin', 'OS_PASSWORD=asdf', 'OS_AUTH_URL=http://127.127.127.1:5000/v2.0/'], - :unless => 'cinder type-list | grep hippo', + 
:unless => "cinder type-list | grep -qP '\\bhippo\\b'", :require => 'Package[python-cinderclient]') should contain_exec('cinder type-key hippo set volume_backend_name=name1') should contain_exec('cinder type-key hippo set volume_backend_name=name2') diff --git a/firewall/README.markdown b/firewall/README.markdown index d76d7b98e..d34723217 100644 --- a/firewall/README.markdown +++ b/firewall/README.markdown @@ -67,7 +67,7 @@ Therefore, the run order is: * Your rules (defined in code) * The rules in `my_fw::post` -The rules in the `pre` and `post` classes are fairly general. These two classes ensure that you retain connectivity, and that you drop unmatched packets appropriately. The rules you define in your manifests are likely specific to the applications you run. +The rules in the `pre` and `post` classes are fairly general. These two classes ensure that you retain connectivity and that you drop unmatched packets appropriately. The rules you define in your manifests are likely specific to the applications you run. 1. Add the `pre` class to `my_fw/manifests/pre.pp`. `pre.pp` should contain any default rules to be applied first. The rules in this class should be added in the order you want them to run. @@ -88,12 +88,12 @@ The rules in the `pre` and `post` classes are fairly general. These two classes }-> firewall { '002 accept related established rules': proto => 'all', - ctstate => ['RELATED', 'ESTABLISHED'], + state => ['RELATED', 'ESTABLISHED'], action => 'accept', } } -The rules in `pre` should allow basic networking (such as ICMP and TCP), and ensure that existing connections are not closed. +The rules in `pre` should allow basic networking (such as ICMP and TCP) and ensure that existing connections are not closed. 2. Add the `post` class to `my_fw/manifests/post.pp` and include any default rules to be applied last. @@ -446,6 +446,8 @@ If Puppet is managing the iptables or iptables-persistent packages, and the prov * `ipsec_policy`: Sets the ipsec policy type. Valid values are 'none', 'ipsec'. Requires the `ipsec_policy` feature. +* `ipset`: Matches IP sets. Value must be `ipset_name (src|dst|src,dst)` and can be negated by putting ! in front. Requires ipset kernel module. + * `isfirstfrag`: If true, matches when the packet is the first fragment of a fragmented ipv6 packet. Cannot be negated. Supported by ipv6 only. Valid values are 'true', 'false'. Requires the `isfirstfrag` feature. * `isfragment`: If 'true', matches when the packet is a tcp fragment of a fragmented packet. Supported by iptables only. Valid values are 'true', 'false'. Requires features `isfragment`. 
diff --git a/firewall/lib/puppet/provider/firewall/ip6tables.rb b/firewall/lib/puppet/provider/firewall/ip6tables.rb index 2ed90a8fc..bc8004e69 100644 --- a/firewall/lib/puppet/provider/firewall/ip6tables.rb +++ b/firewall/lib/puppet/provider/firewall/ip6tables.rb @@ -1,4 +1,4 @@ -Puppet::Type.type(:firewall).provide :ip6tables, :parent => :iptables, :source => :iptables do +Puppet::Type.type(:firewall).provide :ip6tables, :parent => :iptables, :source => :ip6tables do @doc = "Ip6tables type provider" has_feature :iptables diff --git a/firewall/lib/puppet/provider/firewall/iptables.rb b/firewall/lib/puppet/provider/firewall/iptables.rb index 09816a3a2..300d5255d 100644 --- a/firewall/lib/puppet/provider/firewall/iptables.rb +++ b/firewall/lib/puppet/provider/firewall/iptables.rb @@ -29,6 +29,7 @@ has_feature :ipsec_dir has_feature :ipsec_policy has_feature :mask + has_feature :ipset optional_commands({ :iptables => 'iptables', @@ -62,6 +63,7 @@ :iniface => "-i", :ipsec_dir => "-m policy --dir", :ipsec_policy => "--pol", + :ipset => "-m set --match-set", :isfragment => "-f", :jump => "-j", :limit => "-m limit --limit", @@ -153,7 +155,7 @@ :src_range, :dst_range, :tcp_flags, :gid, :uid, :mac_source, :sport, :dport, :port, :dst_type, :src_type, :socket, :pkttype, :name, :ipsec_dir, :ipsec_policy, :state, :ctstate, :icmp, :limit, :burst, :recent, :rseconds, :reap, - :rhitcount, :rttl, :rname, :mask, :rsource, :rdest, :jump, :todest, + :rhitcount, :rttl, :rname, :mask, :rsource, :rdest, :ipset, :jump, :todest, :tosource, :toports, :random, :log_prefix, :log_level, :reject, :set_mark, :connlimit_above, :connlimit_mask, :connmark ] @@ -222,6 +224,8 @@ def self.rule_to_hash(line, table, counter) # --tcp-flags takes two values; we cheat by adding " around it # so it behaves like --comment values = values.gsub(/(!\s+)?--tcp-flags (\S*) (\S*)/, '--tcp-flags "\1\2 \3"') + # ditto for --match-set + values = values.sub(/(!\s+)?--match-set (\S*) (\S*)/, '--match-set "\1\2 \3"') # we do a similar thing for negated address masks (source and destination). values = values.gsub(/(-\S+) (!)\s?(\S*)/,'\1 "\2 \3"') # the actual rule will have the ! mark before the option. @@ -327,6 +331,7 @@ def self.rule_to_hash(line, table, counter) :dport, :dst_range, :dst_type, + :ipset, :port, :proto, :source, @@ -502,7 +507,7 @@ def general_args # our tcp_flags takes a single string with comma lists separated # by space # --tcp-flags expects two arguments - if res == :tcp_flags + if res == :tcp_flags or res == :ipset one, two = resource_value.split(' ') args << one args << two diff --git a/firewall/lib/puppet/type/firewall.rb b/firewall/lib/puppet/type/firewall.rb index 34a5d33a6..ce699da0e 100644 --- a/firewall/lib/puppet/type/firewall.rb +++ b/firewall/lib/puppet/type/firewall.rb @@ -54,6 +54,7 @@ feature :ipsec_policy, "Match IPsec policy" feature :ipsec_dir, "Match IPsec policy direction" feature :mask, "Ability to match recent rules based on the ipv4 mask" + feature :ipset, "Match against specified ipset list" # provider specific features feature :iptables, "The provider provides iptables features." @@ -879,7 +880,7 @@ def should_to_s(value) newproperty(:isfirstfrag, :required_features => :isfirstfrag) do desc <<-EOS - If true, matches if the packet is the first fragment. + If true, matches if the packet is the first fragment. Sadly cannot be negated. ipv6. 
EOS @@ -964,6 +965,16 @@ def should_to_s(value) EOS end + newproperty(:ipset, :required_features => :ipset) do + desc <<-EOS + Matches against the specified ipset list. + Requires ipset kernel module. + The value is the name of the blacklist, followed by a space, and then + 'src' and/or 'dst' separated by a comma. + For example: 'blacklist src,dst' + EOS + end + newparam(:line) do desc <<-EOS Read-only property for caching the rule line. @@ -1090,13 +1101,6 @@ def should_to_s(value) end end - if value(:jump).to_s == "REDIRECT" - unless value(:toports) - self.fail "Parameter jump => REDIRECT missing mandatory toports " \ - "parameter" - end - end - if value(:jump).to_s == "MASQUERADE" unless value(:table).to_s =~ /nat/ self.fail "Parameter jump => MASQUERADE only applies to table => nat" diff --git a/firewall/lib/puppet/util/firewall.rb b/firewall/lib/puppet/util/firewall.rb index 9982bed83..c5a78b859 100644 --- a/firewall/lib/puppet/util/firewall.rb +++ b/firewall/lib/puppet/util/firewall.rb @@ -169,7 +169,7 @@ def persist_iptables(proto) end # RHEL 7 and newer also use systemd to persist iptable rules - if os_key == 'RedHat' && Facter.value(:operatingsystem) == 'RedHat' && Facter.value(:operatingsystemrelease).to_i >= 7 + if os_key == 'RedHat' && ['RedHat','CentOS','Scientific','SL','SLC','Ascendos','CloudLinux','PSBM','OracleLinux','OVS','OEL','Amazon','XenServer'].include?(Facter.value(:operatingsystem)) && Facter.value(:operatingsystemrelease).to_i >= 7 os_key = 'Fedora' end diff --git a/firewall/metadata.json b/firewall/metadata.json index 561891bd5..eae9ac8e9 100644 --- a/firewall/metadata.json +++ b/firewall/metadata.json @@ -27,7 +27,6 @@ { "operatingsystem": "OracleLinux", "operatingsystemrelease": [ - "5", "6", "7" ] @@ -65,7 +64,7 @@ "requirements": [ { "name": "pe", - "version_requirement": ">= 3.2.0 < 3.4.0" + "version_requirement": "3.x" }, { "name": "puppet", diff --git a/firewall/spec/acceptance/firewall_spec.rb b/firewall/spec/acceptance/firewall_spec.rb index bb508d9ae..8ee95557f 100644 --- a/firewall/spec/acceptance/firewall_spec.rb +++ b/firewall/spec/acceptance/firewall_spec.rb @@ -116,7 +116,9 @@ class { '::firewall': } EOS apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end end it 'should contain the rule' do @@ -139,7 +141,9 @@ class { '::firewall': } EOS apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end end it 'should contain the rule' do @@ -189,7 +193,9 @@ class { '::firewall': } EOS apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end end it 'should contain the rule' do @@ -239,7 +245,9 @@ class { '::firewall': } EOS apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end end it 'should contain the rule' do @@ -262,7 +270,9 @@ class { '::firewall': } EOS apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end end it 'should contain the rule' do @@ -312,7 +322,9 @@ class { '::firewall': } EOS apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, 
:catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end end it 'should contain the rule' do @@ -839,7 +851,9 @@ class { '::firewall': } EOS apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end end it 'should contain the rule' do @@ -1626,7 +1640,11 @@ class { '::firewall': } it 'should contain the rule' do shell('iptables-save') do |r| - expect(r.stdout).to match(/-A INPUT -s 10.1.5.28\/(32|255\.255\.255\.255) -p tcp -m mac --mac-source 0A:1B:3C:4D:5E:6F -m comment --comment "610 - test"/) + if (fact('osfamily') == 'RedHat' and fact('operatingsystemmajrelease') == '5') + expect(r.stdout).to match(/-A INPUT -s 10.1.5.28 -p tcp -m mac --mac-source 0A:1B:3C:4D:5E:6F -m comment --comment "610 - test"/) + else + expect(r.stdout).to match(/-A INPUT -s 10.1.5.28\/(32|255\.255\.255\.255) -p tcp -m mac --mac-source 0A:1B:3C:4D:5E:6F -m comment --comment "610 - test"/) + end end end end diff --git a/firewall/spec/acceptance/firewallchain_spec.rb b/firewall/spec/acceptance/firewallchain_spec.rb index f70d9cefd..fab20b3ab 100644 --- a/firewall/spec/acceptance/firewallchain_spec.rb +++ b/firewall/spec/acceptance/firewallchain_spec.rb @@ -14,7 +14,9 @@ EOS # Run it twice and test for idempotency apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end end it 'finds the chain' do @@ -33,7 +35,9 @@ EOS # Run it twice and test for idempotency apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end end it 'fails to find the chain' do @@ -112,7 +116,9 @@ EOS # Run it twice and test for idempotency apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end end it 'finds the chain' do diff --git a/firewall/spec/acceptance/invert_spec.rb b/firewall/spec/acceptance/invert_spec.rb index aa04912c5..16af9b8ba 100644 --- a/firewall/spec/acceptance/invert_spec.rb +++ b/firewall/spec/acceptance/invert_spec.rb @@ -25,13 +25,20 @@ class { '::firewall': } EOS apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end end it 'should contain the rules' do shell('iptables-save') do |r| - expect(r.stdout).to match(/-A INPUT ! -p esp -m comment --comment "601 disallow esp protocol" -j ACCEPT/) - expect(r.stdout).to match(/-A INPUT ! -s 10\.0\.0\.0\/8 -p tcp -m tcp ! --tcp-flags FIN,SYN,RST,ACK SYN -m multiport ! --sports 80,443 -m comment --comment "602 drop NEW external website packets with FIN\/RST\/ACK set and SYN unset" -m state --state NEW -j DROP/) + if (fact('osfamily') == 'RedHat' and fact('operatingsystemmajrelease') == '5') + expect(r.stdout).to match(/-A INPUT -p ! esp -m comment --comment "601 disallow esp protocol" -j ACCEPT/) + expect(r.stdout).to match(/-A INPUT -s ! 10\.0\.0\.0\/255\.0\.0\.0 -p tcp -m tcp ! --tcp-flags FIN,SYN,RST,ACK SYN -m multiport --sports ! 80,443 -m comment --comment "602 drop NEW external website packets with FIN\/RST\/ACK set and SYN unset" -m state --state NEW -j DROP/) + else + expect(r.stdout).to match(/-A INPUT ! 
-p esp -m comment --comment "601 disallow esp protocol" -j ACCEPT/) + expect(r.stdout).to match(/-A INPUT ! -s 10\.0\.0\.0\/8 -p tcp -m tcp ! --tcp-flags FIN,SYN,RST,ACK SYN -m multiport ! --sports 80,443 -m comment --comment "602 drop NEW external website packets with FIN\/RST\/ACK set and SYN unset" -m state --state NEW -j DROP/) + end end end end diff --git a/firewall/spec/acceptance/ip6_fragment_spec.rb b/firewall/spec/acceptance/ip6_fragment_spec.rb index 3e44f8723..61e79cef3 100644 --- a/firewall/spec/acceptance/ip6_fragment_spec.rb +++ b/firewall/spec/acceptance/ip6_fragment_spec.rb @@ -37,7 +37,9 @@ class { '::firewall': } EOS apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end shell('ip6tables-save') do |r| expect(r.stdout).to match(/#{line_match}/) @@ -56,7 +58,11 @@ class { '::firewall': } } EOS - apply_manifest(pp, :catch_changes => true) + if fact('selinux') == 'true' + apply_manifest(pp, :catch_failures => true) + else + apply_manifest(pp, :catch_changes => true) + end shell('ip6tables-save') do |r| expect(r.stdout).to match(/#{line_match}/) diff --git a/firewall/spec/acceptance/isfragment_spec.rb b/firewall/spec/acceptance/isfragment_spec.rb index a4b65e76e..772f9493e 100644 --- a/firewall/spec/acceptance/isfragment_spec.rb +++ b/firewall/spec/acceptance/isfragment_spec.rb @@ -17,7 +17,9 @@ class { '::firewall': } EOS apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end shell('iptables-save') do |r| expect(r.stdout).to match(/#{line_match}/) @@ -35,7 +37,11 @@ class { '::firewall': } } EOS - apply_manifest(pp, :catch_changes => true) + if fact('selinux') == 'true' + apply_manifest(pp, :catch_failures => true) + else + apply_manifest(pp, :catch_changes => true) + end shell('iptables-save') do |r| expect(r.stdout).to match(/#{line_match}/) diff --git a/firewall/spec/acceptance/params_spec.rb b/firewall/spec/acceptance/params_spec.rb index 93b83ef14..37fe98061 100644 --- a/firewall/spec/acceptance/params_spec.rb +++ b/firewall/spec/acceptance/params_spec.rb @@ -1,35 +1,18 @@ require 'spec_helper_acceptance' describe "param based tests:", :unless => UNSUPPORTED_PLATFORMS.include?(fact('osfamily')) do - # Takes a hash and converts it into a firewall resource - def pp(params) - name = params.delete('name') || '100 test' - pm = <<-EOS -firewall { '#{name}': - EOS - - params.each do |k,v| - pm += <<-EOS - #{k} => #{v}, - EOS - end - - pm += <<-EOS -} - EOS - pm - end - it 'test various params', :unless => (default['platform'].match(/el-5/) || fact('operatingsystem') == 'SLES') do iptables_flush_all_tables - ppm = pp({ - 'table' => "'raw'", - 'socket' => 'true', - 'chain' => "'PREROUTING'", - 'jump' => 'LOG', - 'log_level' => 'debug', - }) + ppm = <<-EOS + firewall { '100 test': + table => 'raw', + socket => 'true', + chain => 'PREROUTING', + jump => 'LOG', + log_level => 'debug', + } + EOS expect(apply_manifest(ppm, :catch_failures => true).exit_code).to eq(2) expect(apply_manifest(ppm, :catch_failures => true).exit_code).to be_zero @@ -38,12 +21,13 @@ def pp(params) it 'test log rule' do iptables_flush_all_tables - ppm = pp({ - 'name' => '998 log all', - 'proto' => 'all', - 'jump' => 'LOG', - 'log_level' => 'debug', - }) + ppm = <<-EOS + firewall { '998 log all': + proto => 'all', + jump => 'LOG', + log_level => 'debug', + } + EOS 
expect(apply_manifest(ppm, :catch_failures => true).exit_code).to eq(2) expect(apply_manifest(ppm, :catch_failures => true).exit_code).to be_zero end @@ -51,25 +35,27 @@ def pp(params) it 'test log rule - changing names' do iptables_flush_all_tables - ppm1 = pp({ - 'name' => '004 log all INVALID packets', - 'chain' => 'INPUT', - 'proto' => 'all', - 'ctstate' => 'INVALID', - 'jump' => 'LOG', - 'log_level' => '3', - 'log_prefix' => '"IPTABLES dropped invalid: "', - }) - - ppm2 = pp({ - 'name' => '003 log all INVALID packets', - 'chain' => 'INPUT', - 'proto' => 'all', - 'ctstate' => 'INVALID', - 'jump' => 'LOG', - 'log_level' => '3', - 'log_prefix' => '"IPTABLES dropped invalid: "', - }) + ppm1 = <<-EOS + firewall { '004 log all INVALID packets': + chain => 'INPUT', + proto => 'all', + ctstate => 'INVALID', + jump => 'LOG', + log_level => '3', + log_prefix => 'IPTABLES dropped invalid: ', + } + EOS + + ppm2 = <<-EOS + firewall { '003 log all INVALID packets': + chain => 'INPUT', + proto => 'all', + ctstate => 'INVALID', + jump => 'LOG', + log_level => '3', + log_prefix => 'IPTABLES dropped invalid: ', + } + EOS expect(apply_manifest(ppm1, :catch_failures => true).exit_code).to eq(2) @@ -84,17 +70,19 @@ def pp(params) it 'test chain - changing names' do iptables_flush_all_tables - ppm1 = pp({ - 'name' => '004 with a chain', - 'chain' => 'INPUT', - 'proto' => 'all', - }) + ppm1 = <<-EOS + firewall { '004 with a chain': + chain => 'INPUT', + proto => 'all', + } + EOS - ppm2 = pp({ - 'name' => '004 with a chain', - 'chain' => 'OUTPUT', - 'proto' => 'all', - }) + ppm2 = <<-EOS + firewall { '004 with a chain': + chain => 'OUTPUT', + proto => 'all', + } + EOS apply_manifest(ppm1, :expect_changes => true) @@ -109,15 +97,16 @@ def pp(params) it 'test log rule - idempotent' do iptables_flush_all_tables - ppm1 = pp({ - 'name' => '004 log all INVALID packets', - 'chain' => 'INPUT', - 'proto' => 'all', - 'ctstate' => 'INVALID', - 'jump' => 'LOG', - 'log_level' => '3', - 'log_prefix' => '"IPTABLES dropped invalid: "', - }) + ppm1 = <<-EOS + firewall { '004 log all INVALID packets': + chain => 'INPUT', + proto => 'all', + ctstate => 'INVALID', + jump => 'LOG', + log_level => '3', + log_prefix => 'IPTABLES dropped invalid: ', + } + EOS expect(apply_manifest(ppm1, :catch_failures => true).exit_code).to eq(2) expect(apply_manifest(ppm1, :catch_failures => true).exit_code).to be_zero @@ -126,13 +115,15 @@ def pp(params) it 'test src_range rule' do iptables_flush_all_tables - ppm = pp({ - 'name' => '997 block src ip range', - 'chain' => 'INPUT', - 'proto' => 'all', - 'action' => 'drop', - 'src_range' => '"10.0.0.1-10.0.0.10"', - }) + ppm = <<-EOS + firewall { '997 block src ip range': + chain => 'INPUT', + proto => 'all', + action => 'drop', + src_range => '10.0.0.1-10.0.0.10', + } + EOS + expect(apply_manifest(ppm, :catch_failures => true).exit_code).to eq(2) expect(apply_manifest(ppm, :catch_failures => true).exit_code).to be_zero end @@ -140,13 +131,15 @@ def pp(params) it 'test dst_range rule' do iptables_flush_all_tables - ppm = pp({ - 'name' => '998 block dst ip range', - 'chain' => 'INPUT', - 'proto' => 'all', - 'action' => 'drop', - 'dst_range' => '"10.0.0.2-10.0.0.20"', - }) + ppm = <<-EOS + firewall { '998 block dst ip range': + chain => 'INPUT', + proto => 'all', + action => 'drop', + dst_range => '10.0.0.2-10.0.0.20', + } + EOS + expect(apply_manifest(ppm, :catch_failures => true).exit_code).to eq(2) expect(apply_manifest(ppm, :catch_failures => true).exit_code).to be_zero end diff --git 
a/firewall/spec/acceptance/purge_spec.rb b/firewall/spec/acceptance/purge_spec.rb index 4de968a32..6a9d9a8b6 100644 --- a/firewall/spec/acceptance/purge_spec.rb +++ b/firewall/spec/acceptance/purge_spec.rb @@ -29,7 +29,10 @@ class { 'firewall': } end end - context('chain purge') do + context('ipv4 chain purge') do + after(:all) do + iptables_flush_all_tables + end before(:each) do iptables_flush_all_tables @@ -68,7 +71,9 @@ class { 'firewall': } } EOS - apply_manifest(pp, :catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end end it 'ignores specified rules' do @@ -82,7 +87,11 @@ class { 'firewall': } } EOS - apply_manifest(pp, :catch_changes => true) + if fact('selinux') == 'true' + apply_manifest(pp, :catch_failures => true) + else + apply_manifest(pp, :catch_changes => true) + end end it 'adds managed rules with ignored rules' do @@ -121,4 +130,110 @@ class { 'firewall': } expect(shell('iptables-save').stdout).to match(/-A INPUT -s 1\.2\.1\.1(\/32)? -p tcp\s?\n-A INPUT -s 1\.2\.1\.1(\/32)? -p udp/) end end + context('ipv6 chain purge') do + after(:all) do + ip6tables_flush_all_tables + end + before(:each) do + ip6tables_flush_all_tables + + shell('ip6tables -A INPUT -p tcp -s 1::42') + shell('ip6tables -A INPUT -p udp -s 1::42') + shell('ip6tables -A OUTPUT -s 1::50 -m comment --comment "010 output-1::50"') + end + + it 'purges only the specified chain' do + pp = <<-EOS + class { 'firewall': } + firewallchain { 'INPUT:filter:IPv6': + purge => true, + } + EOS + + apply_manifest(pp, :expect_changes => true) + + shell('ip6tables-save') do |r| + expect(r.stdout).to match(/010 output-1::50/) + expect(r.stdout).to_not match(/1::42/) + expect(r.stderr).to eq("") + end + end + + it 'ignores managed rules' do + pp = <<-EOS + class { 'firewall': } + firewallchain { 'OUTPUT:filter:IPv6': + purge => true, + } + firewall { '010 output-1::50': + chain => 'OUTPUT', + proto => 'all', + source => '1::50', + provider => 'ip6tables', + } + EOS + + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end + end + + it 'ignores specified rules' do + pp = <<-EOS + class { 'firewall': } + firewallchain { 'INPUT:filter:IPv6': + purge => true, + ignore => [ + '-s 1::42', + ], + } + EOS + + if fact('selinux') == 'true' + apply_manifest(pp, :catch_failures => true) + else + apply_manifest(pp, :catch_changes => true) + end + end + + it 'adds managed rules with ignored rules' do + pp = <<-EOS + class { 'firewall': } + firewallchain { 'INPUT:filter:IPv6': + purge => true, + ignore => [ + '-s 1::42', + ], + } + firewall { '014 input-1::46': + chain => 'INPUT', + proto => 'all', + source => '1::46', + provider => 'ip6tables', + } + -> firewall { '013 input-1::45': + chain => 'INPUT', + proto => 'all', + source => '1::45', + provider => 'ip6tables', + } + -> firewall { '012 input-1::44': + chain => 'INPUT', + proto => 'all', + source => '1::44', + provider => 'ip6tables', + } + -> firewall { '011 input-1::43': + chain => 'INPUT', + proto => 'all', + source => '1::43', + provider => 'ip6tables', + } + EOS + + apply_manifest(pp, :catch_failures => true) + + expect(shell('ip6tables-save').stdout).to match(/-A INPUT -s 1::42(\/128)? -p tcp\s?\n-A INPUT -s 1::42(\/128)? 
-p udp/) + end + end end diff --git a/firewall/spec/acceptance/resource_cmd_spec.rb b/firewall/spec/acceptance/resource_cmd_spec.rb index b942c5580..ca4837a49 100644 --- a/firewall/spec/acceptance/resource_cmd_spec.rb +++ b/firewall/spec/acceptance/resource_cmd_spec.rb @@ -93,7 +93,8 @@ context 'accepts rules utilizing the statistic module' do before :all do iptables_flush_all_tables - shell('iptables -t nat -A POSTROUTING -d 1.2.3.4/32 -o eth0 -m statistic --mode nth --every 2 -j SNAT --to-source 2.3.4.5') + # This command doesn't work with all versions/oses, so let it fail + shell('iptables -t nat -A POSTROUTING -d 1.2.3.4/32 -o eth0 -m statistic --mode nth --every 2 -j SNAT --to-source 2.3.4.5', :acceptable_exit_codes => [0,1,2] ) shell('iptables -t nat -A POSTROUTING -d 1.2.3.4/32 -o eth0 -m statistic --mode nth --every 1 --packet 0 -j SNAT --to-source 2.3.4.6') shell('iptables -t nat -A POSTROUTING -d 1.2.3.4/32 -o eth0 -m statistic --mode random --probability 0.99 -j SNAT --to-source 2.3.4.7') end @@ -102,7 +103,7 @@ shell('puppet resource firewall') do |r| r.exit_code.should be_zero # don't check stdout, testing preexisting rules, output is normal - r.stderr.should be_empty + # don't check stderr, puppet throws deprecation warnings end end end @@ -121,7 +122,7 @@ shell('puppet resource firewall') do |r| r.exit_code.should be_zero # don't check stdout, testing preexisting rules, output is normal - r.stderr.should be_empty + # don't check stderr, puppet throws deprecation warnings end end end diff --git a/firewall/spec/acceptance/rules_spec.rb b/firewall/spec/acceptance/rules_spec.rb index b7eb2df16..c44b8535e 100644 --- a/firewall/spec/acceptance/rules_spec.rb +++ b/firewall/spec/acceptance/rules_spec.rb @@ -223,7 +223,9 @@ class { '::firewall': } # Run it twice and test for idempotency apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end end it 'contains appropriate rules' do diff --git a/firewall/spec/acceptance/socket_spec.rb b/firewall/spec/acceptance/socket_spec.rb index 5503a9a07..2a21066c0 100644 --- a/firewall/spec/acceptance/socket_spec.rb +++ b/firewall/spec/acceptance/socket_spec.rb @@ -20,7 +20,9 @@ class { '::firewall': } EOS apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end shell('iptables-save -t raw') do |r| expect(r.stdout).to match(/#{line_match}/) @@ -40,7 +42,11 @@ class { '::firewall': } } EOS - apply_manifest(pp, :catch_changes => true) + if fact('selinux') == 'true' + apply_manifest(pp, :catch_failures => true) + else + apply_manifest(pp, :catch_changes => true) + end shell('iptables-save -t raw') do |r| expect(r.stdout).to match(/#{line_match}/) diff --git a/firewall/spec/acceptance/standard_usage_spec.rb b/firewall/spec/acceptance/standard_usage_spec.rb index 8dcbceff1..7585bc15b 100644 --- a/firewall/spec/acceptance/standard_usage_spec.rb +++ b/firewall/spec/acceptance/standard_usage_spec.rb @@ -50,6 +50,8 @@ class { 'firewall': } # Run it twice and test for idempotency apply_manifest(pp, :catch_failures => true) - expect(apply_manifest(pp, :catch_failures => true).exit_code).to be_zero + unless fact('selinux') == 'true' + apply_manifest(pp, :catch_changes => true) + end end end diff --git a/firewall/spec/spec_helper_acceptance.rb b/firewall/spec/spec_helper_acceptance.rb index ca29ce1cb..b9af87641 100644 --- 
a/firewall/spec/spec_helper_acceptance.rb +++ b/firewall/spec/spec_helper_acceptance.rb @@ -36,10 +36,10 @@ def ip6tables_flush_all_tables # Configure all nodes in nodeset c.before :suite do # Install module and dependencies - puppet_module_install(:source => proj_root, :module_name => 'firewall') hosts.each do |host| - shell('/bin/touch /etc/puppet/hiera.yaml') - shell('puppet module install puppetlabs-stdlib --version 3.2.0', { :acceptable_exit_codes => [0,1] }) + copy_module_to(host, :source => proj_root, :module_name => 'firewall') + on(host, "/bin/touch #{host['hieraconf']}") + on host, puppet('module install puppetlabs-stdlib --version 3.2.0'), { :acceptable_exit_codes => [0,1] } end end end diff --git a/firewall/spec/unit/puppet/util/firewall_spec.rb b/firewall/spec/unit/puppet/util/firewall_spec.rb index e5864879c..4d6f92c66 100644 --- a/firewall/spec/unit/puppet/util/firewall_spec.rb +++ b/firewall/spec/unit/puppet/util/firewall_spec.rb @@ -143,13 +143,22 @@ subject.persist_iptables(proto) end - it 'should exec for CentOS identified from operatingsystem' do + it 'should exec for CentOS 6 identified from operatingsystem and operatingsystemrelease' do allow(Facter.fact(:osfamily)).to receive(:value).and_return(nil) allow(Facter.fact(:operatingsystem)).to receive(:value).and_return('CentOS') + allow(Facter.fact(:operatingsystemrelease)).to receive(:value).and_return('6.5') expect(subject).to receive(:execute).with(%w{/sbin/service iptables save}) subject.persist_iptables(proto) end + it 'should exec for CentOS 7 identified from operatingsystem and operatingsystemrelease' do + allow(Facter.fact(:osfamily)).to receive(:value).and_return(nil) + allow(Facter.fact(:operatingsystem)).to receive(:value).and_return('CentOS') + allow(Facter.fact(:operatingsystemrelease)).to receive(:value).and_return('7.0.1406') + expect(subject).to receive(:execute).with(%w{/usr/libexec/iptables/iptables.init save}) + subject.persist_iptables(proto) + end + it 'should exec for Archlinux identified from osfamily' do allow(Facter.fact(:osfamily)).to receive(:value).and_return('Archlinux') expect(subject).to receive(:execute).with(['/bin/sh', '-c', '/usr/sbin/iptables-save > /etc/iptables/iptables.rules']) diff --git a/galera/Modulefile b/galera/Modulefile index 57a50db3c..e03c12a44 100644 --- a/galera/Modulefile +++ b/galera/Modulefile @@ -1,7 +1,7 @@ name 'puppet-galera' version '0.0.2' source 'https://github.com/rohara/puppet-galera' -author 'Ryan O'Hara' +author 'Ryan O\'Hara' license 'Apache License 2.0' summary 'Install/configure MariaDB with Galera' description 'Install/configure MariaDB with Galera' diff --git a/galera/manifests/server.pp b/galera/manifests/server.pp index 4d328543e..57a1718e8 100644 --- a/galera/manifests/server.pp +++ b/galera/manifests/server.pp @@ -13,6 +13,7 @@ # [*service_ensure*] - Defaults to running, needed to set root password. # [*service_provider*] - What service provider to use. # [*wsrep_bind_address*] - Address to bind galera service. +# [*wsrep_node_address*] - Address of local galera node. # [*wsrep_provider*] - Full path to wsrep provider library or 'none'. # [*wsrep_cluster_name*] - Logical cluster name. Should be the same for all nodes. # [*wsrep_cluster_members*] - List of cluster members, IP addresses or hostnames. 
@@ -51,6 +52,7 @@ $service_ensure = 'running', $service_provider = $mysql::params::service_provider, $wsrep_bind_address = '0.0.0.0', + $wsrep_node_address = undef, $wsrep_provider = '/usr/lib64/galera/libgalera_smm.so', $wsrep_cluster_name = 'galera_cluster', $wsrep_cluster_members = [ $::ipaddress ], diff --git a/galera/templates/wsrep.cnf.erb b/galera/templates/wsrep.cnf.erb index 8057d8f37..287584f8a 100644 --- a/galera/templates/wsrep.cnf.erb +++ b/galera/templates/wsrep.cnf.erb @@ -49,7 +49,7 @@ wsrep_cluster_name="<%= @wsrep_cluster_name %>" # Group communication system handle <% if @bootstrap -%> wsrep_cluster_address="gcomm://" -<% else -%> +<% elsif !@wsrep_cluster_members.empty? -%> wsrep_cluster_address="gcomm://<%= @wsrep_cluster_members.join ',' %>" <% end -%> @@ -59,7 +59,11 @@ wsrep_cluster_address="gcomm://<%= @wsrep_cluster_members.join ',' %>" # Base replication [:port] of the node. # The values supplied will be used as defaults for state transfer receiving, # listening ports and so on. Default: address of the first network interface. +<% if @wsrep_node_address -%> +wsrep_node_address=<%= @wsrep_node_address %> +<% else -%> #wsrep_node_address= +<% end -%> # Address for incoming client connections. Autodetect by default. #wsrep_node_incoming_address= diff --git a/horizon/Modulefile b/horizon/Modulefile deleted file mode 100644 index 218752463..000000000 --- a/horizon/Modulefile +++ /dev/null @@ -1,12 +0,0 @@ -name 'puppetlabs-horizon' -version '4.0.0' -author 'Puppet Labs and StackForge Contributors' -license 'Apache License 2.0' -summary 'Puppet module for OpenStack Horizon' -description 'Installs and configures OpenStack Horizon (Dashboard).' -project_page 'https://launchpad.net/puppet-horizon' -source 'https://github.com/stackforge/puppet-horizon' - -dependency 'puppetlabs/apache', '>= 1.0.0 <2.0.0' -dependency 'puppetlabs/stdlib', '>= 3.2.0' -dependency 'saz/memcached', '>= 2.0.2 <3.0.0' diff --git a/horizon/manifests/init.pp b/horizon/manifests/init.pp index 985a564d9..2186628e3 100644 --- a/horizon/manifests/init.pp +++ b/horizon/manifests/init.pp @@ -173,6 +173,10 @@ # https on public sites. See: http://docs.openstack.org/developer/horizon/topics/deployment.html#secure-site-recommendations # Defaults to false # +# [*django_session_engine*] +# (optional) Selects the session engine for Django to use. +# Defaults to undefined - will not add entry to local settings. 
+# # === Deprecation notes # # If any value is provided for keystone_scheme, keystone_host, or @@ -222,6 +226,8 @@ $hypervisor_options = {}, $neutron_options = {}, $file_upload_temp_dir = '/tmp', + $policy_files_path = undef, + $policy_files = undef, # DEPRECATED PARAMETERS $can_set_mount_point = undef, $keystone_host = undef, @@ -229,6 +235,7 @@ $keystone_scheme = undef, $vhost_extra_params = undef, $secure_cookies = false, + $django_session_engine = undef, ) { include ::horizon::params diff --git a/horizon/manifests/wsgi/apache.pp b/horizon/manifests/wsgi/apache.pp index 3af6ca0c3..935530090 100644 --- a/horizon/manifests/wsgi/apache.pp +++ b/horizon/manifests/wsgi/apache.pp @@ -67,13 +67,7 @@ $final_server_aliases = $server_aliases } - if $::osfamily == 'RedHat' { - class { 'apache::mod::wsgi': - wsgi_socket_prefix => '/var/run/wsgi' - } - } else { - include ::apache::mod::wsgi - } + include ::apache::mod::wsgi # We already use apache::vhost to generate our own # configuration file, let's clean the configuration diff --git a/horizon/metadata.json b/horizon/metadata.json new file mode 100644 index 000000000..e7576515a --- /dev/null +++ b/horizon/metadata.json @@ -0,0 +1,38 @@ +{ + "name": "stackforge-horizon", + "version": "5.0.0", + "author": "Puppet Labs and StackForge Contributors", + "summary": "Puppet module for OpenStack Horizon", + "license": "Apache License 2.0", + "source": "git://github.com/stackforge/puppet-horizon.git", + "project_page": "https://launchpad.net/puppet-horizon", + "issues_url": "https://bugs.launchpad.net/puppet-horizon", + "requirements": [ + { "name": "pe","version_requirement": "3.x" }, + { "name": "puppet","version_requirement": "3.x" } + ], + "operatingsystem_support": [ + { + "operatingsystem": "Debian", + "operatingsystemrelease": ["7"] + }, + { + "operatingsystem": "Fedora", + "operatingsystemrelease": ["20"] + }, + { + "operatingsystem": "RedHat", + "operatingsystemrelease": ["6.5","7"] + }, + { + "operatingsystem": "Ubuntu", + "operatingsystemrelease": ["12.04","14.04"] + } + ], + "description": "Installs and configures OpenStack Horizon (Dashboard).", + "dependencies": [ + { "name": "puppetlabs/apache", "version_requirement": ">=1.1.2 <2.0.0" }, + { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, + { "name": "saz/memcached", "version_requirement": ">=2.0.2 <3.0.0" } + ] +} diff --git a/horizon/spec/classes/horizon_init_spec.rb b/horizon/spec/classes/horizon_init_spec.rb index 3bd7c9297..eadbafb02 100644 --- a/horizon/spec/classes/horizon_init_spec.rb +++ b/horizon/spec/classes/horizon_init_spec.rb @@ -63,6 +63,12 @@ 'COMPRESS_OFFLINE = True', "FILE_UPLOAD_TEMP_DIR = '/tmp'" ]) + + # From internals of verify_contents, get the contents to check for absence of a line + content = subject.resource('file', platforms_params[:config_file]).send(:parameters)[:content] + + # With default options, should _not_ have a line to configure SESSION_ENGINE + content.should_not match(/^SESSION_ENGINE/) end end @@ -70,6 +76,7 @@ before do params.merge!({ :cache_server_ip => '10.0.0.1', + :django_session_engine => 'django.contrib.sessions.backends.cache', :keystone_default_role => 'SwiftOperator', :keystone_url => 'https://keystone.example.com:4682', :openstack_endpoint_type => 'internalURL', @@ -92,6 +99,7 @@ 'SESSION_COOKIE_SECURE = True', "SECRET_KEY = 'elj1IWiLoWHgcyYxFVLj7cM5rGOOxWl0'", " 'LOCATION': '10.0.0.1:11211',", + 'SESSION_ENGINE = "django.contrib.sessions.backends.cache"', 'OPENSTACK_KEYSTONE_URL = 
"https://keystone.example.com:4682"', 'OPENSTACK_KEYSTONE_DEFAULT_ROLE = "SwiftOperator"', " 'can_set_mount_point': False,", @@ -106,7 +114,7 @@ 'SECONDARY_ENDPOINT_TYPE = "ANY-VALUE"', 'API_RESULT_LIMIT = 4682', 'COMPRESS_OFFLINE = False', - "FILE_UPLOAD_TEMP_DIR = '/var/spool/horizon'", + "FILE_UPLOAD_TEMP_DIR = '/var/spool/horizon'" ]) end @@ -214,6 +222,30 @@ end end + context 'with policy parameters' do + before do + params.merge!({ + :policy_files_path => '/opt/openstack-dashboard', + :policy_files => { + 'identity' => 'keystone_policy.json', + 'compute' => 'nova_policy.json', + 'network' => 'neutron_policy.json', + } + }) + end + + it 'POLICY_FILES_PATH and POLICY_FILES are configured' do + verify_contents(subject, platforms_params[:config_file], [ + "POLICY_FILES_PATH = '/opt/openstack-dashboard'", + "POLICY_FILES = {", + " 'identity': 'keystone_policy.json',", + " 'compute': 'nova_policy.json',", + " 'network': 'neutron_policy.json',", + "} # POLICY_FILES" + ]) + end + end + context 'with overriding local_settings_template' do before do params.merge!({ diff --git a/horizon/spec/classes/horizon_wsgi_apache_spec.rb b/horizon/spec/classes/horizon_wsgi_apache_spec.rb index 68633fbab..9e7a1632a 100644 --- a/horizon/spec/classes/horizon_wsgi_apache_spec.rb +++ b/horizon/spec/classes/horizon_wsgi_apache_spec.rb @@ -196,6 +196,9 @@ end it_behaves_like 'apache for horizon' + it { + should contain_class('apache::mod::wsgi').with(:wsgi_socket_prefix => '/var/run/wsgi') + } end context 'on Debian platforms' do diff --git a/horizon/templates/local_settings.py.erb b/horizon/templates/local_settings.py.erb index e2b10de30..d4880c875 100644 --- a/horizon/templates/local_settings.py.erb +++ b/horizon/templates/local_settings.py.erb @@ -129,6 +129,9 @@ CACHES = { } } +<% if @django_session_engine %> +SESSION_ENGINE = "<%= @django_session_engine %>" +<% end %> # Send email to the console by default EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' @@ -276,15 +279,27 @@ EXTERNAL_MONITORING = <%= @horizon_app_links %> # target installation. # Path to directory containing policy.json files -#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") -<% if @osfamily == 'RedHat' %> +<% if !(@policy_files_path.nil?) %> +POLICY_FILES_PATH = '<%= @policy_files_path %>' +<% elsif @osfamily == 'RedHat' %> POLICY_FILES_PATH = '/etc/openstack-dashboard' -<% end %> +<% else %> +#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") +<% end -%> + # Map of local copy of service policy files +<% if @policy_files.kind_of?(Hash) %> +POLICY_FILES = { +<% @policy_files.each do |service_name,filename| -%> + '<%= service_name -%>': '<%= filename -%>', +<% end -%> +} # POLICY_FILES +<% else -%> #POLICY_FILES = { # 'identity': 'keystone_policy.json', # 'compute': 'nova_policy.json' #} +<% end -%> # Trove user and database extension support. By default support for # creating users and databases on database instances is turned on. diff --git a/ipa/manifests/vardir.pp b/ipa/manifests/vardir.pp index e033549f4..d048c2ba2 100644 --- a/ipa/manifests/vardir.pp +++ b/ipa/manifests/vardir.pp @@ -16,6 +16,14 @@ # along with this program. If not, see . 
class ipa::vardir { # module vardir snippet + $module_vardir = sprintf("%s/ipa/", regsubst($tmp, '\/$', '')) + file { "${module_vardir}": # /var/lib/puppet/tmp/ipa/ + ensure => directory, # make sure this is a directory + recurse => true, # recursively manage directory + purge => true, # purge all unmanaged files + force => true, # also purge subdirs and links + owner => root, group => nobody, mode => 600, backup => false, + } if "${::puppet_vardirtmp}" == '' { if "${::puppet_vardir}" == '' { # here, we require that the puppetlabs fact exist! @@ -38,14 +46,6 @@ } else { $tmp = sprintf("%s/", regsubst($::puppet_vardirtmp, '\/$', '')) } - $module_vardir = sprintf("%s/ipa/", regsubst($tmp, '\/$', '')) - file { "${module_vardir}": # /var/lib/puppet/tmp/ipa/ - ensure => directory, # make sure this is a directory - recurse => true, # recursively manage directory - purge => true, # purge all unmanaged files - force => true, # also purge subdirs and links - owner => root, group => nobody, mode => 600, backup => false, - } } # vim: ts=8 diff --git a/keystone/Modulefile b/keystone/Modulefile deleted file mode 100644 index 27670233a..000000000 --- a/keystone/Modulefile +++ /dev/null @@ -1,13 +0,0 @@ -name 'puppetlabs-keystone' -version '4.0.0' -author 'Puppet Labs and StackForge Contributors' -license 'Apache License 2.0' -summary 'Puppet module for OpenStack Keystone' -description 'Installs and configures OpenStack Keystone (Identity).' -project_page 'https://launchpad.net/puppet-keystone' -source 'https://github.com/stackforge/puppet-keystone' - -dependency 'puppetlabs/apache', '>=1.0.0 <2.0.0' -dependency 'puppetlabs/inifile', '>=1.0.0 <2.0.0' -dependency 'puppetlabs/stdlib', '>= 3.2.0' -dependency 'stackforge/openstacklib', '>=5.0.0' diff --git a/keystone/manifests/init.pp b/keystone/manifests/init.pp index 55c88f2f0..05e95b081 100644 --- a/keystone/manifests/init.pp +++ b/keystone/manifests/init.pp @@ -30,7 +30,7 @@ # Defaults to 'keystone.token.providers.uuid.Provider' # Supports PKI and UUID. # [token_driver] Driver to use for managing tokens. -# Optional. Defaults to 'keystone.token.backends.sql.Token' +# Optional. Defaults to 'keystone.token.persistence.backends.sql.Token' # [token_expiration] Amount of time a token should remain valid (seconds). # Optional. Defaults to 3600 (1 hour). # [token_format] Deprecated: Use token_provider instead. @@ -71,6 +71,14 @@ # [signing_ca_key] Use this CA key file along with signing_certfile/signing_keyfile for signing # pki tokens and revocation lists. Optional. Default: /etc/keystone/ssl/private/cakey.pem # +# [*signing_cert_subject*] +# (optional) Certificate subject (auto generated certificate) for token signing. +# Defaults to '/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com' +# +# [*signing_key_size*] +# (optional) Key size (in bits) for token signing cert (auto generated certificate) +# Defaults to 2048 +# # [rabbit_host] Location of rabbitmq installation. Optional. Defaults to localhost. # [rabbit_port] Port for rabbitmq instance. Optional. Defaults to 5672. # [rabbit_hosts] Location of rabbitmq installation. Optional. Defaults to undef. 
@@ -261,7 +269,7 @@ $catalog_template_file = '/etc/keystone/default_catalog.templates', $token_format = false, $token_provider = 'keystone.token.providers.uuid.Provider', - $token_driver = 'keystone.token.backends.sql.Token', + $token_driver = 'keystone.token.persistence.backends.sql.Token', $token_expiration = 3600, $public_endpoint = false, $admin_endpoint = false, @@ -281,6 +289,8 @@ $signing_keyfile = '/etc/keystone/ssl/private/signing_key.pem', $signing_ca_certs = '/etc/keystone/ssl/certs/ca.pem', $signing_ca_key = '/etc/keystone/ssl/private/cakey.pem', + $signing_cert_subject = '/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com', + $signing_key_size = 2048, $rabbit_host = 'localhost', $rabbit_hosts = false, $rabbit_password = 'guest', @@ -514,10 +524,12 @@ # Set the signing key/cert configuration values. keystone_config { - 'signing/certfile': value => $signing_certfile; - 'signing/keyfile': value => $signing_keyfile; - 'signing/ca_certs': value => $signing_ca_certs; - 'signing/ca_key': value => $signing_ca_key; + 'signing/certfile': value => $signing_certfile; + 'signing/keyfile': value => $signing_keyfile; + 'signing/ca_certs': value => $signing_ca_certs; + 'signing/ca_key': value => $signing_ca_key; + 'signing/cert_subject': value => $signing_cert_subject; + 'signing/key_size': value => $signing_key_size; } # Create cache directory used for signing. diff --git a/keystone/manifests/logging.pp b/keystone/manifests/logging.pp index aa355c88d..dade7df73 100644 --- a/keystone/manifests/logging.pp +++ b/keystone/manifests/logging.pp @@ -35,10 +35,13 @@ # (optional) Hash of logger (keys) and level (values) pairs. # Defaults to undef. # Example: -# { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', -# 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', -# 'iso8601' => 'WARN', -# 'requests.packages.urllib3.connectionpool' => 'WARN' } +# { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', +# 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', +# 'oslo.messaging' => 'INFO', 'iso8601' => 'WARN', +# 'requests.packages.urllib3.connectionpool' => 'WARN', +# 'urllib3.connectionpool' => 'WARN', +# 'websocket' => 'WARN', 'keystonemiddleware' => 'WARN', +# 'routes.middleware' => 'WARN', stevedore => 'WARN' } # # [*publish_errors*] # (optional) Publish error events (boolean value). 
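For reference, a minimal usage sketch (not part of the patch itself) of the two token-signing parameters the keystone init.pp hunk above introduces, signing_cert_subject and signing_key_size. The class and parameter names come from that hunk; every value below is a placeholder chosen for illustration only.

class { '::keystone':
  admin_token          => 'example_admin_token',                                     # placeholder
  token_provider       => 'keystone.token.providers.pki.Provider',
  signing_cert_subject => '/C=US/ST=Unset/L=Unset/O=Unset/CN=keystone.example.com',  # placeholder subject
  signing_key_size     => 4096,                                                      # overrides the 2048-bit default added above
}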
diff --git a/keystone/metadata.json b/keystone/metadata.json new file mode 100644 index 000000000..368a28263 --- /dev/null +++ b/keystone/metadata.json @@ -0,0 +1,39 @@ +{ + "name": "stackforge-keystone", + "version": "5.0.0", + "author": "Puppet Labs and StackForge Contributors", + "summary": "Puppet module for OpenStack Keystone", + "license": "Apache License 2.0", + "source": "git://github.com/stackforge/puppet-keystone.git", + "project_page": "https://launchpad.net/puppet-keystone", + "issues_url": "https://bugs.launchpad.net/puppet-keystone", + "requirements": [ + { "name": "pe","version_requirement": "3.x" }, + { "name": "puppet","version_requirement": "3.x" } + ], + "operatingsystem_support": [ + { + "operatingsystem": "Debian", + "operatingsystemrelease": ["7"] + }, + { + "operatingsystem": "Fedora", + "operatingsystemrelease": ["20"] + }, + { + "operatingsystem": "RedHat", + "operatingsystemrelease": ["6.5","7"] + }, + { + "operatingsystem": "Ubuntu", + "operatingsystemrelease": ["12.04","14.04"] + } + ], + "description": "Installs and configures OpenStack Keystone (Identity).", + "dependencies": [ + { "name": "puppetlabs/apache", "version_requirement": ">=1.0.0 <2.0.0" }, + { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, + { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, + { "name": "stackforge/openstacklib", "version_requirement": ">=5.0.0" } + ] +} diff --git a/keystone/spec/classes/keystone_spec.rb b/keystone/spec/classes/keystone_spec.rb index c2e97b74e..675d3a716 100644 --- a/keystone/spec/classes/keystone_spec.rb +++ b/keystone/spec/classes/keystone_spec.rb @@ -32,7 +32,7 @@ 'catalog_type' => 'sql', 'catalog_driver' => false, 'token_provider' => 'keystone.token.providers.uuid.Provider', - 'token_driver' => 'keystone.token.backends.sql.Token', + 'token_driver' => 'keystone.token.persistence.backends.sql.Token', 'cache_dir' => '/var/cache/keystone', 'enable_ssl' => false, 'ssl_certfile' => '/etc/keystone/ssl/certs/keystone.pem', @@ -294,13 +294,15 @@ describe 'when configuring PKI signing cert paths with UUID and with pki_setup disabled' do let :params do { - 'admin_token' => 'service_token', - 'token_provider' => 'keystone.token.providers.uuid.Provider', - 'enable_pki_setup' => false, - 'signing_certfile' => 'signing_certfile', - 'signing_keyfile' => 'signing_keyfile', - 'signing_ca_certs' => 'signing_ca_certs', - 'signing_ca_key' => 'signing_ca_key' + 'admin_token' => 'service_token', + 'token_provider' => 'keystone.token.providers.uuid.Provider', + 'enable_pki_setup' => false, + 'signing_certfile' => 'signing_certfile', + 'signing_keyfile' => 'signing_keyfile', + 'signing_ca_certs' => 'signing_ca_certs', + 'signing_ca_key' => 'signing_ca_key', + 'signing_cert_subject' => 'signing_cert_subject', + 'signing_key_size' => 2048 } end @@ -321,18 +323,28 @@ it 'should contain correct PKI ca_key config' do should contain_keystone_config('signing/ca_key').with_value('signing_ca_key') end + + it 'should contain correct PKI cert_subject config' do + should contain_keystone_config('signing/cert_subject').with_value('signing_cert_subject') + end + + it 'should contain correct PKI key_size config' do + should contain_keystone_config('signing/key_size').with_value('2048') + end end describe 'when configuring PKI signing cert paths with pki_setup disabled' do let :params do { - 'admin_token' => 'service_token', - 'token_provider' => 'keystone.token.providers.pki.Provider', - 'enable_pki_setup' => false, - 'signing_certfile' => 
'signing_certfile', - 'signing_keyfile' => 'signing_keyfile', - 'signing_ca_certs' => 'signing_ca_certs', - 'signing_ca_key' => 'signing_ca_key' + 'admin_token' => 'service_token', + 'token_provider' => 'keystone.token.providers.pki.Provider', + 'enable_pki_setup' => false, + 'signing_certfile' => 'signing_certfile', + 'signing_keyfile' => 'signing_keyfile', + 'signing_ca_certs' => 'signing_ca_certs', + 'signing_ca_key' => 'signing_ca_key', + 'signing_cert_subject' => 'signing_cert_subject', + 'signing_key_size' => 2048 } end @@ -353,6 +365,14 @@ it 'should contain correct PKI ca_key config' do should contain_keystone_config('signing/ca_key').with_value('signing_ca_key') end + + it 'should contain correct PKI cert_subject config' do + should contain_keystone_config('signing/cert_subject').with_value('signing_cert_subject') + end + + it 'should contain correct PKI key_size config' do + should contain_keystone_config('signing/key_size').with_value('2048') + end end describe 'with invalid catalog_type' do diff --git a/mysql/README.md b/mysql/README.md index 379d88a05..d7fd5355b 100644 --- a/mysql/README.md +++ b/mysql/README.md @@ -128,6 +128,7 @@ To add custom MySQL configuration, drop additional files into * `mysql::server::providers`: Creates users, grants, and databases. * `mysql::bindings::java`: Installs Java bindings. * `mysql::bindings::perl`: Installs Perl bindings. +* `mysql::bindings::php`: Installs PHP bindings. * `mysql::bindings::python`: Installs Python bindings. * `mysql::bindings::ruby`: Installs Ruby bindings. * `mysql::client::install`: Installs MySQL client. @@ -461,6 +462,23 @@ Then collect it on the remote DB server. Mysql::Db <<| tag == $domain |>> ``` +If you set the sql param to a file when creating a database, +the file gets imported into the new database. + +For large sql files you should raise the $import_timeout parameter, +set by default to 300 seconds + +```puppet + mysql::db { 'mydb': + user => 'myuser', + password => 'mypass', + host => 'localhost', + grant => ['SELECT', 'UPDATE'], + sql => '/path/to/sqlfile', + import_timeout => 900, + } +``` + ###Providers ####mysql_database @@ -510,6 +528,16 @@ mysql_grant { 'root@localhost/*.*': } ``` +It is possible to specify privileges down to the column level: +```puppet +mysql_grant { 'root@localhost/mysql.user': + ensure => 'present', + privileges => ['SELECT (Host, User)'], + table => 'mysql.user', + user => 'root@localhost', +} +``` + ##Limitations This module has been tested on: diff --git a/mysql/lib/puppet/provider/database/mysql.rb b/mysql/lib/puppet/provider/database/mysql.rb deleted file mode 100644 index ace742967..000000000 --- a/mysql/lib/puppet/provider/database/mysql.rb +++ /dev/null @@ -1,41 +0,0 @@ -require File.expand_path(File.join(File.dirname(__FILE__), '..', 'mysql')) -Puppet::Type.type(:database).provide(:mysql, :parent => Puppet::Provider::Mysql) do - desc 'Manages MySQL database.' 
- - defaultfor :kernel => 'Linux' - - optional_commands :mysql => 'mysql' - optional_commands :mysqladmin => 'mysqladmin' - - def self.instances - mysql([defaults_file, '-NBe', 'show databases'].compact).split("\n").collect do |name| - new(:name => name) - end - end - - def create - mysql([defaults_file, '-NBe', "create database `#{@resource[:name]}` character set #{resource[:charset]}"].compact) - end - - def destroy - mysqladmin([defaults_file, '-f', 'drop', @resource[:name]].compact) - end - - def charset - mysql([defaults_file, '-NBe', "show create database `#{resource[:name]}`"].compact).match(/.*?(\S+)\s(?:COLLATE.*)?\*\//)[1] - end - - def charset=(value) - mysql([defaults_file, '-NBe', "alter database `#{resource[:name]}` CHARACTER SET #{value}"].compact) - end - - def exists? - begin - mysql([defaults_file, '-NBe', 'show databases'].compact).match(/^#{@resource[:name]}$/) - rescue => e - debug(e.message) - return nil - end - end - -end diff --git a/mysql/lib/puppet/provider/database_grant/mysql.rb b/mysql/lib/puppet/provider/database_grant/mysql.rb deleted file mode 100644 index eabc649c3..000000000 --- a/mysql/lib/puppet/provider/database_grant/mysql.rb +++ /dev/null @@ -1,199 +0,0 @@ -# A grant is either global or per-db. This can be distinguished by the syntax -# of the name: -# user@host => global -# user@host/db => per-db - -require File.expand_path(File.join(File.dirname(__FILE__), '..', 'mysql')) -Puppet::Type.type(:database_grant).provide(:mysql, :parent => Puppet::Provider::Mysql) do - - desc 'Uses mysql as database.' - - defaultfor :kernel => 'Linux' - - optional_commands :mysql => 'mysql' - optional_commands :mysqladmin => 'mysqladmin' - - def self.prefetch(resources) - @user_privs = query_user_privs - @db_privs = query_db_privs - end - - def self.user_privs - @user_privs || query_user_privs - end - - def self.db_privs - @db_privs || query_db_privs - end - - def user_privs - self.class.user_privs - end - - def db_privs - self.class.db_privs - end - - def self.query_user_privs - results = mysql([defaults_file, 'mysql', '-Be', 'describe user'].compact) - column_names = results.split(/\n/).map { |l| l.chomp.split(/\t/)[0] } - @user_privs = column_names.delete_if { |e| !( e =~/_priv$/) } - end - - def self.query_db_privs - results = mysql([defaults_file, 'mysql', '-Be', 'describe db'].compact) - column_names = results.split(/\n/).map { |l| l.chomp.split(/\t/)[0] } - @db_privs = column_names.delete_if { |e| !(e =~/_priv$/) } - end - - def mysql_flush - mysqladmin([defaults_file, 'flush-privileges'].compact) - end - - # this parses the - def split_name(string) - matches = /^([^@]*)@([^\/]*)(\/(.*))?$/.match(string).captures.compact - case matches.length - when 2 - { - :type => :user, - :user => matches[0], - :host => matches[1] - } - when 4 - { - :type => :db, - :user => matches[0], - :host => matches[1], - :db => matches[3] - } - end - end - - def create_row - unless @resource.should(:privileges).empty? 
- name = split_name(@resource[:name]) - case name[:type] - when :user - mysql([defaults_file, 'mysql', '-e', "INSERT INTO user (host, user) VALUES ('%s', '%s')" % [ - name[:host], name[:user], - ]].compact) - when :db - mysql([defaults_file, 'mysql', '-e', "INSERT INTO db (host, user, db) VALUES ('%s', '%s', '%s')" % [ - name[:host], name[:user], name[:db], - ]].compact) - end - mysql_flush - end - end - - def destroy - mysql([defaults_file, 'mysql', '-e', "REVOKE ALL ON '%s'.* FROM '%s@%s'" % [ @resource[:privileges], @resource[:database], @resource[:name], @resource[:host] ]].compact) - end - - def row_exists? - name = split_name(@resource[:name]) - fields = [:user, :host] - if name[:type] == :db - fields << :db - end - not mysql([defaults_file, 'mysql', '-NBe', "SELECT '1' FROM %s WHERE %s" % [ name[:type], fields.map do |f| "%s='%s'" % [f, name[f]] end.join(' AND ')]].compact).empty? - end - - def all_privs_set? - all_privs = case split_name(@resource[:name])[:type] - when :user - user_privs - when :db - db_privs - end - all_privs = all_privs.collect do |p| p.downcase end.sort.join('|') - privs = privileges.collect do |p| p.downcase end.sort.join('|') - - all_privs == privs - end - - def privileges - name = split_name(@resource[:name]) - privs = '' - - case name[:type] - when :user - privs = mysql([defaults_file, 'mysql', '-Be', "select * from mysql.user where user='%s' and host='%s'" % [ name[:user], name[:host] ]].compact) - when :db - privs = mysql([defaults_file, 'mysql', '-Be', "select * from mysql.db where user='%s' and host='%s' and db='%s'" % [ name[:user], name[:host], name[:db] ]].compact) - end - - if privs.match(/^$/) - privs = [] # no result, no privs - else - # returns a line with field names and a line with values, each tab-separated - privs = privs.split(/\n/).map! do |l| l.chomp.split(/\t/) end - # transpose the lines, so we have key/value pairs - privs = privs[0].zip(privs[1]) - privs = privs.select do |p| p[0].match(/_priv$/) and p[1] == 'Y' end - end - - privs.collect do |p| p[0] end - end - - def privileges=(privs) - unless row_exists? - create_row - end - - # puts "Setting privs: ", privs.join(", ") - name = split_name(@resource[:name]) - stmt = '' - where = '' - all_privs = [] - case name[:type] - when :user - stmt = 'update user set ' - where = " where user='%s' and host='%s'" % [ name[:user], name[:host] ] - all_privs = user_privs - when :db - stmt = 'update db set ' - where = " where user='%s' and host='%s' and db='%s'" % [ name[:user], name[:host], name[:db] ] - all_privs = db_privs - end - - if privs[0].downcase == 'all' - privs = all_privs - end - - # Downcase the requested priviliges for case-insensitive selection - # we don't map! here because the all_privs object has to remain in - # the same case the DB gave it to us in - privs = privs.map { |p| p.downcase } - - # puts "stmt:", stmt - set = all_privs.collect do |p| "%s = '%s'" % [p, privs.include?(p.downcase) ? 
'Y' : 'N'] end.join(', ') - # puts "set:", set - stmt = stmt << set << where - - validate_privs privs, all_privs - mysql([defaults_file, 'mysql', '-Be', stmt].compact) - mysql_flush - end - - def validate_privs(set_privs, all_privs) - all_privs = all_privs.collect { |p| p.downcase } - set_privs = set_privs.collect { |p| p.downcase } - invalid_privs = Array.new - hints = Array.new - # Test each of the user provided privs to see if they exist in all_privs - set_privs.each do |priv| - invalid_privs << priv unless all_privs.include?(priv) - hints << "#{priv}_priv" if all_privs.include?("#{priv}_priv") - end - unless invalid_privs.empty? - # Print a decently helpful and gramatically correct error message - hints = "Did you mean '#{hints.join(',')}'?" unless hints.empty? - p = invalid_privs.size > 1 ? ['s', 'are not valid'] : ['', 'is not valid'] - detail = ["The privilege#{p[0]} '#{invalid_privs.join(',')}' #{p[1]}."] - fail [detail, hints].join(' ') - end - end - -end diff --git a/mysql/lib/puppet/provider/database_user/mysql.rb b/mysql/lib/puppet/provider/database_user/mysql.rb deleted file mode 100644 index 71e76d5c3..000000000 --- a/mysql/lib/puppet/provider/database_user/mysql.rb +++ /dev/null @@ -1,65 +0,0 @@ -require File.expand_path(File.join(File.dirname(__FILE__), '..', 'mysql')) -Puppet::Type.type(:database_user).provide(:mysql, :parent => Puppet::Provider::Mysql) do - - desc 'manage users for a mysql database.' - - defaultfor :kernel => 'Linux' - - commands :mysql => 'mysql' - commands :mysqladmin => 'mysqladmin' - - def self.instances - users = mysql([defaults_file, 'mysql', '-BNe' "select concat(User, '@',Host) as User from mysql.user"].compact).split("\n") - users.select{ |user| user =~ /.+@/ }.collect do |name| - new(:name => name) - end - end - - def create - merged_name = self.class.cmd_user(@resource[:name]) - password_hash = @resource.value(:password_hash) - max_user_connections = @resource.value(:max_user_connections) || 0 - - mysql([defaults_file, 'mysql', '-e', "grant usage on *.* to #{merged_name} identified by PASSWORD - '#{password_hash}' with max_user_connections #{max_user_connections}"].compact) - - exists? ? (return true) : (return false) - end - - def destroy - merged_name = self.class.cmd_user(@resource[:name]) - mysql([defaults_file, 'mysql', '-e', "drop user #{merged_name}"].compact) - - exists? ? (return false) : (return true) - end - - def password_hash - mysql([defaults_file, 'mysql', '-NBe', "select password from mysql.user where CONCAT(user, '@', host) = '#{@resource[:name]}'"].compact).chomp - end - - def password_hash=(string) - mysql([defaults_file, 'mysql', '-e', "SET PASSWORD FOR #{self.class.cmd_user(@resource[:name])} = '#{string}'"].compact) - - password_hash == string ? (return true) : (return false) - end - - def max_user_connections - mysql([defaults_file, "mysql", "-NBe", "select max_user_connections from mysql.user where CONCAT(user, '@', host) = '#{@resource[:name]}'"].compact).chomp - end - - def max_user_connections=(int) - mysql([defaults_file, "mysql", "-e", "grant usage on *.* to %s with max_user_connections #{int}" % [ self.class.cmd_user(@resource[:name])] ].compact).chomp - - max_user_connections == int ? (return true) : (return false) - end - - def exists? - not mysql([defaults_file, 'mysql', '-NBe', "select '1' from mysql.user where CONCAT(user, '@', host) = '%s'" % @resource.value(:name)].compact).empty? 
- end - - def flush - @property_hash.clear - mysqladmin([defaults_file, 'flush-privileges'].compact) - end - -end diff --git a/mysql/lib/puppet/provider/mysql_database/mysql.rb b/mysql/lib/puppet/provider/mysql_database/mysql.rb index 213e0de7a..4587c1882 100644 --- a/mysql/lib/puppet/provider/mysql_database/mysql.rb +++ b/mysql/lib/puppet/provider/mysql_database/mysql.rb @@ -31,7 +31,7 @@ def self.prefetch(resources) end def create - mysql([defaults_file, '-NBe', "create database if not exists `#{@resource[:name]}` character set #{@resource[:charset]} collate #{@resource[:collate]}"].compact) + mysql([defaults_file, '-NBe', "create database if not exists `#{@resource[:name]}` character set `#{@resource[:charset]}` collate `#{@resource[:collate]}`"].compact) @property_hash[:ensure] = :present @property_hash[:charset] = @resource[:charset] diff --git a/mysql/lib/puppet/provider/mysql_grant/mysql.rb b/mysql/lib/puppet/provider/mysql_grant/mysql.rb index 3fe691d56..32211e36a 100644 --- a/mysql/lib/puppet/provider/mysql_grant/mysql.rb +++ b/mysql/lib/puppet/provider/mysql_grant/mysql.rb @@ -27,12 +27,21 @@ def self.instances # Match the munges we do in the type. munged_grant = grant.delete("'").delete("`") # Matching: GRANT (SELECT, UPDATE) PRIVILEGES ON (*.*) TO ('root')@('127.0.0.1') (WITH GRANT OPTION) - if match = munged_grant.match(/^GRANT\s(.+)\sON\s(.+)\sTO\s(.*)@(.*?)(\s.*)$/) + if match = munged_grant.match(/^GRANT\s(.+)\sON\s(.+)\sTO\s(.*)@(.*?)(\s.*)?$/) privileges, table, user, host, rest = match.captures - # Once we split privileges up on the , we need to make sure we - # shortern ALL PRIVILEGES to just all. - stripped_privileges = privileges.split(',').map do |priv| - priv == 'ALL PRIVILEGES' ? 'ALL' : priv.lstrip.rstrip + # split on ',' if it is not a non-'('-containing string followed by a + # closing parenthesis ')'-char - e.g. only split comma separated elements not in + # parentheses + stripped_privileges = privileges.strip.split(/\s*,\s*(?![^(]*\))/).map do |priv| + # split and sort the column_privileges in the parentheses and rejoin + if priv.include?('(') + type, col=priv.strip.split(/\s+|\b/,2) + type.upcase + " (" + col.slice(1...-1).strip.split(/\s*,\s*/).sort.join(', ') + ")" + else + # Once we split privileges up on the , we need to make sure we + # shortern ALL PRIVILEGES to just all. + priv == 'ALL PRIVILEGES' ? 'ALL' : priv.strip + end end # Same here, but to remove OPTION leaving just GRANT. options = ['GRANT'] if rest.match(/WITH\sGRANT\sOPTION/) @@ -89,13 +98,15 @@ def revoke(user, table) user_string = self.class.cmd_user(user) table_string = self.class.cmd_table(table) - query = "REVOKE ALL ON #{table_string} FROM #{user_string}" - mysql([defaults_file, '-e', query].compact) # revoke grant option needs to be a extra query, because # "REVOKE ALL PRIVILEGES, GRANT OPTION [..]" is only valid mysql syntax # if no ON clause is used. 
+ # It hast to be executed before "REVOKE ALL [..]" since a GRANT has to + # exist to be executed successfully query = "REVOKE GRANT OPTION ON #{table_string} FROM #{user_string}" mysql([defaults_file, '-e', query].compact) + query = "REVOKE ALL ON #{table_string} FROM #{user_string}" + mysql([defaults_file, '-e', query].compact) end def destroy diff --git a/mysql/lib/puppet/type/database.rb b/mysql/lib/puppet/type/database.rb deleted file mode 100644 index b02fb1099..000000000 --- a/mysql/lib/puppet/type/database.rb +++ /dev/null @@ -1,21 +0,0 @@ -# This has to be a separate type to enable collecting -Puppet::Type.newtype(:database) do - @doc = 'Manage databases.' - - ensurable - - newparam(:name, :namevar=>true) do - desc 'The name of the database.' - validate do |value| - Puppet.warning("database has been deprecated in favor of mysql_database.") - true - end - end - - newproperty(:charset) do - desc 'The characterset to use for a database' - defaultto :utf8 - newvalue(/^\S+$/) - end - -end diff --git a/mysql/lib/puppet/type/database_grant.rb b/mysql/lib/puppet/type/database_grant.rb deleted file mode 100644 index 7fdad8231..000000000 --- a/mysql/lib/puppet/type/database_grant.rb +++ /dev/null @@ -1,79 +0,0 @@ -# This has to be a separate type to enable collecting -Puppet::Type.newtype(:database_grant) do - @doc = "Manage a database user's rights." - #ensurable - - autorequire :database do - # puts "Starting db autoreq for %s" % self[:name] - reqs = [] - matches = self[:name].match(/^([^@]+)@([^\/]+)\/(.+)$/) - unless matches.nil? - reqs << matches[3] - end - # puts "Autoreq: '%s'" % reqs.join(" ") - reqs - end - - autorequire :database_user do - # puts "Starting user autoreq for %s" % self[:name] - reqs = [] - matches = self[:name].match(/^([^@]+)@([^\/]+).*$/) - unless matches.nil? - reqs << '%s@%s' % [ matches[1], matches[2] ] - end - # puts "Autoreq: '%s'" % reqs.join(" ") - reqs - end - - newparam(:name, :namevar=>true) do - desc 'The primary key: either user@host for global privilges or user@host/database for database specific privileges' - validate do |value| - Puppet.warning("database_grant has been deprecated in favor of mysql_grant.") - true - end - end - - newproperty(:privileges, :array_matching => :all) do - desc 'The privileges the user should have. The possible values are implementation dependent.' - - def should_to_s(newvalue = @should) - if newvalue - unless newvalue.is_a?(Array) - newvalue = [ newvalue ] - end - newvalue.collect do |v| v.downcase end.sort.join ', ' - else - nil - end - end - - def is_to_s(currentvalue = @is) - if currentvalue - unless currentvalue.is_a?(Array) - currentvalue = [ currentvalue ] - end - currentvalue.collect do |v| v.downcase end.sort.join ', ' - else - nil - end - end - - # use the sorted outputs for comparison - def insync?(is) - if defined? @should and @should - case self.should_to_s - when 'all' - self.provider.all_privs_set? - when self.is_to_s(is) - true - else - false - end - else - true - end - end - end - -end - diff --git a/mysql/lib/puppet/type/database_user.rb b/mysql/lib/puppet/type/database_user.rb deleted file mode 100644 index 24abc70d5..000000000 --- a/mysql/lib/puppet/type/database_user.rb +++ /dev/null @@ -1,36 +0,0 @@ -# This has to be a separate type to enable collecting -Puppet::Type.newtype(:database_user) do - @doc = 'Manage a database user. This includes management of users password as well as privileges' - - ensurable - - newparam(:name, :namevar=>true) do - desc "The name of the user. 
This uses the 'username@hostname' or username@hostname." - validate do |value| - Puppet.warning("database has been deprecated in favor of mysql_user.") - # https://dev.mysql.com/doc/refman/5.1/en/account-names.html - # Regex should problably be more like this: /^[`'"]?[^`'"]*[`'"]?@[`'"]?[\w%\.]+[`'"]?$/ - raise(ArgumentError, "Invalid database user #{value}") unless value =~ /[\w-]*@[\w%\.:]+/ - username = value.split('@')[0] - if username.size > 16 - raise ArgumentError, 'MySQL usernames are limited to a maximum of 16 characters' - end - end - - munge do |value| - user_part, host_part = value.split('@') - "#{user_part}@#{host_part.downcase}" - end - end - - newproperty(:password_hash) do - desc 'The password hash of the user. Use mysql_password() for creating such a hash.' - newvalue(/\w+/) - end - - newproperty(:max_user_connections) do - desc "Max concurrent connections for the user. 0 means no (or global) limit." - newvalue(/\d+/) - end - -end diff --git a/mysql/lib/puppet/type/mysql_database.rb b/mysql/lib/puppet/type/mysql_database.rb index 3e8518c96..5df43218f 100644 --- a/mysql/lib/puppet/type/mysql_database.rb +++ b/mysql/lib/puppet/type/mysql_database.rb @@ -3,6 +3,8 @@ ensurable + autorequire(:file) { '/root/.my.cnf' } + newparam(:name, :namevar => true) do desc 'The name of the MySQL database to manage.' end diff --git a/mysql/lib/puppet/type/mysql_grant.rb b/mysql/lib/puppet/type/mysql_grant.rb index a268e4cb5..8262e1e53 100644 --- a/mysql/lib/puppet/type/mysql_grant.rb +++ b/mysql/lib/puppet/type/mysql_grant.rb @@ -17,7 +17,15 @@ def initialize(*args) # Sort the privileges array in order to ensure the comparision in the provider # self.instances method match. Otherwise this causes it to keep resetting the # privileges. - self[:privileges] = Array(self[:privileges]).map(&:upcase).uniq.reject{|k| k == 'GRANT' or k == 'GRANT OPTION'}.sort! + self[:privileges] = Array(self[:privileges]).map{ |priv| + # split and sort the column_privileges in the parentheses and rejoin + if priv.include?('(') + type, col=priv.strip.split(/\s+|\b/,2) + type.upcase + " (" + col.slice(1...-1).strip.split(/\s*,\s*/).sort.join(', ') + ")" + else + priv.strip.upcase + end + }.uniq.reject{|k| k == 'GRANT' or k == 'GRANT OPTION'}.sort! end validate do @@ -37,10 +45,6 @@ def initialize(*args) newproperty(:privileges, :array_matching => :all) do desc 'Privileges for user' - - munge do |value| - value.upcase - end end newproperty(:table) do diff --git a/mysql/lib/puppet/type/mysql_user.rb b/mysql/lib/puppet/type/mysql_user.rb index 2d059ce3f..759eb52c6 100644 --- a/mysql/lib/puppet/type/mysql_user.rb +++ b/mysql/lib/puppet/type/mysql_user.rb @@ -4,6 +4,8 @@ ensurable + autorequire(:file) { '/root/.my.cnf' } + newparam(:name, :namevar => true) do desc "The name of the user. This uses the 'username@hostname' or username@hostname." 
validate do |value| diff --git a/mysql/manifests/backup.pp b/mysql/manifests/backup.pp deleted file mode 100644 index 680a5744d..000000000 --- a/mysql/manifests/backup.pp +++ /dev/null @@ -1,31 +0,0 @@ -# Deprecated class -class mysql::backup ( - $backupuser, - $backuppassword, - $backupdir, - $backupcompress = true, - $backuprotate = 30, - $delete_before_dump = false, - $backupdatabases = [], - $file_per_database = false, - $ensure = 'present', - $time = ['23', '5'], -) { - - crit("This class has been deprecated and callers should directly call - mysql::server::backup now.") - - class { 'mysql::server::backup': - ensure => $ensure, - backupuser => $backupuser, - backuppassword => $backuppassword, - backupdir => $backupdir, - backupcompress => $backupcompress, - backuprotate => $backuprotate, - delete_before_dump => $delete_before_dump, - backupdatabases => $backupdatabases, - file_per_database => $file_per_database, - time => $time, - } - -} diff --git a/mysql/manifests/db.pp b/mysql/manifests/db.pp index 69e2081fb..29181eee9 100644 --- a/mysql/manifests/db.pp +++ b/mysql/manifests/db.pp @@ -7,9 +7,10 @@ $collate = 'utf8_general_ci', $host = 'localhost', $grant = 'ALL', - $sql = '', + $sql = undef, $enforce_sql = false, - $ensure = 'present' + $ensure = 'present', + $import_timeout = 300, ) { #input validation validate_re($ensure, '^(present|absent)$', @@ -54,6 +55,7 @@ refreshonly => $refresh, require => Mysql_grant["${user}@${host}/${table}"], subscribe => Mysql_database[$dbname], + timeout => $import_timeout, } } } diff --git a/mysql/manifests/init.pp b/mysql/manifests/init.pp deleted file mode 100644 index eba5c2063..000000000 --- a/mysql/manifests/init.pp +++ /dev/null @@ -1,100 +0,0 @@ -# -class mysql( - $basedir = '', - $bind_address = '', - $client_package_ensure = '', - $client_package_name = '', - $config_file = '', - $config_template = '', - $datadir = '', - $default_engine = '', - $etc_root_password = '', - $log_error = '', - $manage_config_file = '', - $manage_service = '', - $max_allowed_packet = '', - $max_connections = '', - $old_root_password = '', - $package_ensure = '', - $php_package_name = '', - $pidfile = '', - $port = '', - $purge_conf_dir = '', - $restart = '', - $root_group = '', - $root_password = '', - $server_package_name = '', - $service_name = '', - $service_provider = '', - $socket = '', - $ssl = '', - $ssl_ca = '', - $ssl_cert = '', - $ssl_key = '', - $tmpdir = '', - $attempt_compatibility_mode = false, -) { - - if $attempt_compatibility_mode { - notify { "An attempt has been made below to automatically apply your custom - settings to mysql::server. 
Please verify this works in a safe test - environment.": } - - $override_options = { - 'client' => { - 'port' => $port, - 'socket' => $socket - }, - 'mysqld_safe' => { - 'log_error' => $log_error, - 'socket' => $socket, - }, - 'mysqld' => { - 'basedir' => $basedir, - 'bind_address' => $bind_address, - 'datadir' => $datadir, - 'log_error' => $log_error, - 'max_allowed_packet' => $max_allowed_packet, - 'max_connections' => $max_connections, - 'pid_file' => $pidfile, - 'port' => $port, - 'socket' => $socket, - 'ssl-ca' => $ssl_ca, - 'ssl-cert' => $ssl_cert, - 'ssl-key' => $ssl_key, - 'tmpdir' => $tmpdir, - }, - 'mysqldump' => { - 'max_allowed_packet' => $max_allowed_packet, - }, - 'config_file' => $config_file, - 'etc_root_password' => $etc_root_password, - 'manage_config_file' => $manage_config_file, - 'old_root_password' => $old_root_password, - 'purge_conf_dir' => $purge_conf_dir, - 'restart' => $restart, - 'root_group' => $root_group, - 'root_password' => $root_password, - 'service_name' => $service_name, - 'ssl' => $ssl - } - $filtered_options = mysql_strip_hash($override_options) - validate_hash($filtered_options) - notify { $filtered_options: } - class { 'mysql::server': - override_options => $filtered_options, - } - - } else { - fail("ERROR: This class has been deprecated and the functionality moved - into mysql::server. If you run mysql::server without correctly calling - mysql:: server with the new override_options hash syntax you will revert - your MySQL to the stock settings. Do not proceed without removing this - class and using mysql::server correctly. - - If you are brave you may set attempt_compatibility_mode in this class which - attempts to automap the previous settings to appropriate calls to - mysql::server") - } - -} diff --git a/mysql/manifests/params.pp b/mysql/manifests/params.pp index 68742e643..e46ab98ef 100644 --- a/mysql/manifests/params.pp +++ b/mysql/manifests/params.pp @@ -38,7 +38,7 @@ $provider = 'mysql' } } - /^(RedHat|CentOS|Scientific)$/: { + /^(RedHat|CentOS|Scientific|OracleLinux)$/: { if $::operatingsystemmajrelease >= 7 { $provider = 'mariadb' } else { @@ -184,6 +184,29 @@ $ruby_package_name = 'mysql-ruby' } + 'Gentoo': { + $client_package_name = 'virtual/mysql' + $server_package_name = 'virtual/mysql' + $basedir = '/usr' + $config_file = '/etc/mysql/my.cnf' + $datadir = '/var/lib/mysql' + $log_error = '/var/log/mysql/mysqld.err' + $pidfile = '/run/mysqld/mysqld.pid' + $root_group = 'root' + $server_service_name = 'mysql' + $socket = '/run/mysqld/mysqld.sock' + $ssl_ca = '/etc/mysql/cacert.pem' + $ssl_cert = '/etc/mysql/server-cert.pem' + $ssl_key = '/etc/mysql/server-key.pem' + $tmpdir = '/tmp' + # mysql::bindings + $java_package_name = 'dev-java/jdbc-mysql' + $perl_package_name = 'dev-perl/DBD-mysql' + $php_package_name = undef + $python_package_name = 'dev-python/mysql-python' + $ruby_package_name = 'dev-ruby/mysql-ruby' + } + 'FreeBSD': { $client_package_name = 'databases/mysql55-client' $server_package_name = 'databases/mysql55-server' diff --git a/mysql/manifests/server/service.pp b/mysql/manifests/server/service.pp index 7dab2b56b..95618fb8d 100644 --- a/mysql/manifests/server/service.pp +++ b/mysql/manifests/server/service.pp @@ -1,23 +1,25 @@ # class mysql::server::service { - if $mysql::server::real_service_enabled { - $service_ensure = 'running' - } else { - $service_ensure = 'stopped' - } - if $mysql::server::real_service_manage { - file { $mysql::params::log_error: - owner => 'mysql', - group => 'mysql', - } - service { 'mysqld': - ensure 
=> $service_ensure, - name => $mysql::server::service_name, - enable => $mysql::server::real_service_enabled, - provider => $mysql::server::service_provider, + if $mysql::server::real_service_enabled { + $service_ensure = 'running' + } else { + $service_ensure = 'stopped' } } + file { $mysql::params::log_error: + ensure => present, + owner => 'mysql', + group => 'mysql', + } + + service { 'mysqld': + ensure => $service_ensure, + name => $mysql::server::service_name, + enable => $mysql::server::real_service_enabled, + provider => $mysql::server::service_provider, + } + } diff --git a/mysql/metadata.json b/mysql/metadata.json index 0e9cfe536..f00ff1818 100644 --- a/mysql/metadata.json +++ b/mysql/metadata.json @@ -1,6 +1,6 @@ { "name": "puppetlabs-mysql", - "version": "2.3.1", + "version": "3.0.0", "author": "Puppet Labs", "summary": "Mysql module", "license": "Apache 2.0", @@ -43,6 +43,7 @@ { "operatingsystem": "SLES", "operatingsystemrelease": [ + "10 SP4", "11 SP1" ] }, @@ -65,7 +66,7 @@ "requirements": [ { "name": "pe", - "version_requirement": ">= 3.2.0 < 3.4.0" + "version_requirement": "3.x" }, { "name": "puppet", diff --git a/mysql/spec/acceptance/mysql_backup_spec.rb b/mysql/spec/acceptance/mysql_backup_spec.rb index 5b67c8b25..7f6e322ab 100644 --- a/mysql/spec/acceptance/mysql_backup_spec.rb +++ b/mysql/spec/acceptance/mysql_backup_spec.rb @@ -61,4 +61,72 @@ class { 'mysql::server::backup': end end end + + context 'with one file per database' do + context 'should work with no errors' do + it 'when configuring mysql backups' do + pp = <<-EOS + class { 'mysql::server': root_password => 'password' } + mysql::db { [ + 'backup1', + 'backup2' + ]: + user => 'backup', + password => 'secret', + } + + class { 'mysql::server::backup': + backupuser => 'myuser', + backuppassword => 'mypassword', + backupdir => '/tmp/backups', + backupcompress => true, + file_per_database => true, + postscript => [ + 'rm -rf /var/tmp/mysqlbackups', + 'rm -f /var/tmp/mysqlbackups.done', + 'cp -r /tmp/backups /var/tmp/mysqlbackups', + 'touch /var/tmp/mysqlbackups.done', + ], + execpath => '/usr/bin:/usr/sbin:/bin:/sbin:/opt/zimbra/bin', + } + EOS + + apply_manifest(pp, :catch_failures => true) + apply_manifest(pp, :catch_failures => true) + end + end + + describe 'mysqlbackup.sh' do + it 'should run mysqlbackup.sh with no errors without root credentials' do + shell("HOME=/tmp/dontreadrootcredentials /usr/local/sbin/mysqlbackup.sh") do |r| + expect(r.stderr).to eq("") + end + end + + it 'should create one file per database' do + ['backup1', 'backup2'].each do |database| + shell("ls -l /tmp/backups/mysql_backup_#{database}_*-*.sql.bz2 | wc -l") do |r| + expect(r.stdout).to match(/1/) + expect(r.exit_code).to be_zero + end + end + end + + context 'should create one file per database per run' do + it 'executes mysqlbackup.sh a second time' do + shell('sleep 1') + shell('HOME=/tmp/dontreadrootcredentials /usr/local/sbin/mysqlbackup.sh') + end + + it 'has one file per database per run' do + ['backup1', 'backup2'].each do |database| + shell("ls -l /tmp/backups/mysql_backup_#{database}_*-*.sql.bz2 | wc -l") do |r| + expect(r.stdout).to match(/2/) + expect(r.exit_code).to be_zero + end + end + end + end + end + end end diff --git a/mysql/spec/classes/graceful_failures_spec.rb b/mysql/spec/classes/graceful_failures_spec.rb index d8f22b76e..7f0781bcc 100644 --- a/mysql/spec/classes/graceful_failures_spec.rb +++ b/mysql/spec/classes/graceful_failures_spec.rb @@ -7,7 +7,7 @@ let(:facts) { facts } context 'should gracefully 
fail' do - it { expect { should compile}.to raise_error(Puppet::Error, /Unsupported platform:/) } + it { expect { is_expected.to compile}.to raise_error(Puppet::Error, /Unsupported platform:/) } end end end diff --git a/mysql/spec/classes/mycnf_template_spec.rb b/mysql/spec/classes/mycnf_template_spec.rb new file mode 100644 index 000000000..c0607fb0a --- /dev/null +++ b/mysql/spec/classes/mycnf_template_spec.rb @@ -0,0 +1,78 @@ +require 'spec_helper' + +describe 'mysql::server' do + context 'my.cnf template' do + on_pe_supported_platforms(PLATFORMS).each do |pe_version,pe_platforms| + pe_platforms.each do |pe_platform,facts| + describe "on #{pe_version} #{pe_platform}" do + let(:facts) { facts } + + context 'normal entry' do + let(:params) {{ :override_options => { 'mysqld' => { 'socket' => '/var/lib/mysql/mysql.sock' } } }} + it do + is_expected.to contain_file('mysql-config-file').with({ + :mode => '0644', + }).with_content(/socket = \/var\/lib\/mysql\/mysql.sock/) + end + end + + describe 'array entry' do + let(:params) {{ :override_options => { 'mysqld' => { 'replicate-do-db' => ['base1', 'base2'], } }}} + it do + is_expected.to contain_file('mysql-config-file').with_content( + /.*replicate-do-db = base1\nreplicate-do-db = base2.*/ + ) + end + end + + describe 'ssl set to true' do + let(:params) {{ :override_options => { 'mysqld' => { 'ssl' => true }}}} + it { is_expected.to contain_file('mysql-config-file').with_content(/ssl/) } + it { is_expected.to contain_file('mysql-config-file').without_content(/ssl = true/) } + end + + describe 'ssl set to false' do + let(:params) {{ :override_options => { 'mysqld' => { 'ssl' => false }}}} + it { is_expected.to contain_file('mysql-config-file').with_content(/ssl = false/) } + end + + # ssl-disable (and ssl) are special cased within mysql. 
+ describe 'possibility of disabling ssl completely' do + let(:params) {{ :override_options => { 'mysqld' => { 'ssl' => true, 'ssl-disable' => true }}}} + it { is_expected.to contain_file('mysql-config-file').without_content(/ssl = true/) } + end + + describe 'a non ssl option set to true' do + let(:params) {{ :override_options => { 'mysqld' => { 'test' => true }}}} + it { is_expected.to contain_file('mysql-config-file').with_content(/^test$/) } + it { is_expected.to contain_file('mysql-config-file').without_content(/test = true/) } + end + + context 'with includedir' do + let(:params) {{ :includedir => '/etc/my.cnf.d' }} + it 'makes the directory' do + is_expected.to contain_file('/etc/my.cnf.d').with({ + :ensure => :directory, + :mode => '0755', + }) + end + + it { is_expected.to contain_file('mysql-config-file').with_content(/!includedir/) } + end + + context 'without includedir' do + let(:params) {{ :includedir => '' }} + it 'shouldnt contain the directory' do + is_expected.not_to contain_file('mysql-config-file').with({ + :ensure => :directory, + :mode => '0755', + }) + end + + it { is_expected.to contain_file('mysql-config-file').without_content(/!includedir/) } + end + end + end + end + end +end diff --git a/mysql/spec/classes/mysql_bindings_spec.rb b/mysql/spec/classes/mysql_bindings_spec.rb index 0ee2e89b2..805817956 100644 --- a/mysql/spec/classes/mysql_bindings_spec.rb +++ b/mysql/spec/classes/mysql_bindings_spec.rb @@ -1,74 +1,30 @@ require 'spec_helper' describe 'mysql::bindings' do - let(:params) {{ - 'java_enable' => true, - 'perl_enable' => true, - 'php_enable' => true, - 'python_enable' => true, - 'ruby_enable' => true, - 'client_dev' => true, - 'daemon_dev' => true, - }} - - shared_examples 'bindings' do |osfamily, operatingsystem, operatingsystemrelease, java_name, perl_name, php_name, python_name, ruby_name, client_dev_name, daemon_dev_name| - let :facts do - { :osfamily => osfamily, :operatingsystem => operatingsystem, - :operatingsystemrelease => operatingsystemrelease, :root_home => '/root', - } - end - it { should contain_package('mysql-connector-java').with( - :name => java_name, - :ensure => 'present' - )} - it { should contain_package('perl_mysql').with( - :name => perl_name, - :ensure => 'present' - )} - it { should contain_package('python-mysqldb').with( - :name => python_name, - :ensure => 'present' - )} - it { should contain_package('ruby_mysql').with( - :name => ruby_name, - :ensure => 'present' - )} - if client_dev_name - it { should contain_package('mysql-client_dev').with( - :name => client_dev_name, - :ensure => 'present' - )} + on_pe_supported_platforms(PLATFORMS).each do |pe_version,pe_platforms| + pe_platforms.each do |pe_platform,facts| + describe "on #{pe_version} #{pe_platform}" do + let(:facts) { facts } + + let(:params) {{ + 'java_enable' => true, + 'perl_enable' => true, + 'php_enable' => true, + 'python_enable' => true, + 'ruby_enable' => true, + 'client_dev' => true, + 'daemon_dev' => true, + 'client_dev_package_name' => 'libmysqlclient-devel', + 'daemon_dev_package_name' => 'mysql-devel', + }} + + it { is_expected.to contain_package('mysql-connector-java') } + it { is_expected.to contain_package('perl_mysql') } + it { is_expected.to contain_package('python-mysqldb') } + it { is_expected.to contain_package('ruby_mysql') } + it { is_expected.to contain_package('mysql-client_dev') } + it { is_expected.to contain_package('mysql-daemon_dev') } + end end - if daemon_dev_name - it { should contain_package('mysql-daemon_dev').with( - :name => 
daemon_dev_name, - :ensure => 'present' - )} - end - end - - context 'Debian' do - it_behaves_like 'bindings', 'Debian', 'Debian', '7.4','libmysql-java', 'libdbd-mysql-perl', 'php5-mysql', 'python-mysqldb', 'libmysql-ruby', 'libmysqlclient-dev', 'libmysqld-dev' - it_behaves_like 'bindings', 'Debian', 'Ubuntu', '14.04', 'libmysql-java', 'libdbd-mysql-perl', 'php5-mysql', 'python-mysqldb', 'libmysql-ruby', 'libmysqlclient-dev', 'libmysqld-dev' - end - - context 'freebsd' do - it_behaves_like 'bindings', 'FreeBSD', 'FreeBSD', '10.0', 'databases/mysql-connector-java', 'p5-DBD-mysql', 'databases/php5-mysql', 'databases/py-MySQLdb', 'databases/ruby-mysql' end - - context 'redhat' do - it_behaves_like 'bindings', 'RedHat', 'RedHat', '6.5', 'mysql-connector-java', 'perl-DBD-MySQL', 'php-mysql', 'MySQL-python', 'ruby-mysql', nil, 'mysql-devel' - it_behaves_like 'bindings', 'RedHat', 'OpenSuSE', '11.3', 'mysql-connector-java', 'perl-DBD-MySQL', 'php-mysql', 'MySQL-python', 'ruby-mysql', nil, 'mysql-devel' - end - - describe 'on any other os' do - let :facts do - {:osfamily => 'foo', :root_home => '/root'} - end - - it 'should fail' do - expect { subject }.to raise_error(/Unsupported platform:/) - end - end - end diff --git a/mysql/spec/classes/mysql_client_spec.rb b/mysql/spec/classes/mysql_client_spec.rb index d5e63e27e..7f67f49cd 100644 --- a/mysql/spec/classes/mysql_client_spec.rb +++ b/mysql/spec/classes/mysql_client_spec.rb @@ -7,15 +7,15 @@ let(:facts) { facts } context 'with defaults' do - it { should_not contain_class('mysql::bindings') } - it { should contain_package('mysql_client') } + it { is_expected.not_to contain_class('mysql::bindings') } + it { is_expected.to contain_package('mysql_client') } end context 'with bindings enabled' do let(:params) {{ :bindings_enable => true }} - it { should contain_class('mysql::bindings') } - it { should contain_package('mysql_client') } + it { is_expected.to contain_class('mysql::bindings') } + it { is_expected.to contain_package('mysql_client') } end end end diff --git a/mysql/spec/classes/mysql_server_account_security_spec.rb b/mysql/spec/classes/mysql_server_account_security_spec.rb index 855856fce..9bd65082f 100644 --- a/mysql/spec/classes/mysql_server_account_security_spec.rb +++ b/mysql/spec/classes/mysql_server_account_security_spec.rb @@ -6,33 +6,29 @@ describe "on #{pe_version} #{pe_platform}" do let(:facts) { facts.merge({:fqdn => 'myhost.mydomain', :hostname => 'myhost'}) } - it 'should remove Mysql_User[root@myhost.mydomain]' do - should contain_mysql_user('root@myhost.mydomain').with_ensure('absent') + [ 'root@myhost.mydomain', + 'root@127.0.0.1', + 'root@::1', + '@myhost.mydomain', + '@localhost', + '@%', + ].each do |user| + it "removes Mysql_User[#{user}]" do + is_expected.to contain_mysql_user(user).with_ensure('absent') + end end - it 'should remove Mysql_User[root@myhost]' do - should contain_mysql_user('root@myhost').with_ensure('absent') - end - it 'should remove Mysql_User[root@127.0.0.1]' do - should contain_mysql_user('root@127.0.0.1').with_ensure('absent') - end - it 'should remove Mysql_User[root@::1]' do - should contain_mysql_user('root@::1').with_ensure('absent') - end - it 'should remove Mysql_User[@myhost.mydomain]' do - should contain_mysql_user('@myhost.mydomain').with_ensure('absent') - end - it 'should remove Mysql_User[@myhost]' do - should contain_mysql_user('@myhost').with_ensure('absent') - end - it 'should remove Mysql_User[@localhost]' do - should contain_mysql_user('@localhost').with_ensure('absent') - end -
it 'should remove Mysql_User[@%]' do - should contain_mysql_user('@%').with_ensure('absent') + + # When the hostname doesn't match the fqdn we also remove these. + # We don't need to test the inverse as when they match they are + # covered by the above list. + [ 'root@myhost', '@myhost' ].each do |user| + it "removes Mysql_User[#{user}]" do + is_expected.to contain_mysql_user(user).with_ensure('absent') + end end it 'should remove Mysql_database[test]' do - should contain_mysql_database('test').with_ensure('absent') + is_expected.to contain_mysql_database('test').with_ensure('absent') end end end diff --git a/mysql/spec/classes/mysql_server_backup_spec.rb b/mysql/spec/classes/mysql_server_backup_spec.rb index d244756d4..c7e2583c2 100644 --- a/mysql/spec/classes/mysql_server_backup_spec.rb +++ b/mysql/spec/classes/mysql_server_backup_spec.rb @@ -19,47 +19,47 @@ context 'standard conditions' do let(:params) { default_params } - it { should contain_mysql_user('testuser@localhost').with( - :require => 'Class[Mysql::Server::Root_password]' - )} + # Cannot use that_requires here, doesn't work on classes. + it { is_expected.to contain_mysql_user('testuser@localhost').with( + :require => 'Class[Mysql::Server::Root_password]') } - it { should contain_mysql_grant('testuser@localhost/*.*').with( - :privileges => ["SELECT", "RELOAD", "LOCK TABLES", "SHOW VIEW", "PROCESS"] - )} + it { is_expected.to contain_mysql_grant('testuser@localhost/*.*').with( + :privileges => ['SELECT', 'RELOAD', 'LOCK TABLES', 'SHOW VIEW', 'PROCESS'] + ).that_requires('Mysql_user[testuser@localhost]') } - it { should contain_cron('mysql-backup').with( + it { is_expected.to contain_cron('mysql-backup').with( :command => '/usr/local/sbin/mysqlbackup.sh', :ensure => 'present' )} - it { should contain_file('mysqlbackup.sh').with( + it { is_expected.to contain_file('mysqlbackup.sh').with( :path => '/usr/local/sbin/mysqlbackup.sh', :ensure => 'present' ) } - it { should contain_file('mysqlbackupdir').with( + it { is_expected.to contain_file('mysqlbackupdir').with( :path => '/tmp', :ensure => 'directory' )} it 'should have compression by default' do - verify_contents(subject, 'mysqlbackup.sh', [ - ' --all-databases | bzcat -zc > ${DIR}/${PREFIX}`date +%Y%m%d-%H%M%S`.sql.bz2', - ]) + is_expected.to contain_file('mysqlbackup.sh').with( + :content => /bzcat -zc/ + ) end it 'should skip backing up events table by default' do - verify_contents(subject, 'mysqlbackup.sh', [ - 'EVENTS="--ignore-table=mysql.event"', - ]) + is_expected.to contain_file('mysqlbackup.sh').with( + :content => /EVENTS="--ignore-table=mysql.event"/ + ) end it 'should have 25 days of rotation' do - # MySQL counts from 0 I guess.
- should contain_file('mysqlbackup.sh').with_content(/.*ROTATE=24.*/) + # MySQL counts from 0 + is_expected.to contain_file('mysqlbackup.sh').with_content(/.*ROTATE=24.*/) end it 'should have a standard PATH' do - should contain_file('mysqlbackup.sh').with_content(%r{PATH=/usr/bin:/usr/sbin:/bin:/sbin:/opt/zimbra/bin}) + is_expected.to contain_file('mysqlbackup.sh').with_content(%r{PATH=/usr/bin:/usr/sbin:/bin:/sbin:/opt/zimbra/bin}) end end @@ -71,7 +71,7 @@ }.merge(default_params) end - it { should contain_file('mysqlbackupdir').with( + it { is_expected.to contain_file('mysqlbackupdir').with( :path => '/tmp', :ensure => 'directory', :mode => '0750', @@ -85,15 +85,15 @@ { :backupcompress => false }.merge(default_params) end - it { should contain_file('mysqlbackup.sh').with( + it { is_expected.to contain_file('mysqlbackup.sh').with( :path => '/usr/local/sbin/mysqlbackup.sh', :ensure => 'present' ) } it 'should be able to disable compression' do - verify_contents(subject, 'mysqlbackup.sh', [ - ' --all-databases > ${DIR}/${PREFIX}`date +%Y%m%d-%H%M%S`.sql', - ]) + is_expected.to contain_file('mysqlbackup.sh').without_content( + /.*bzcat -zc.*/ + ) end end @@ -102,36 +102,34 @@ { :ignore_events => false }.merge(default_params) end - it { should contain_file('mysqlbackup.sh').with( + it { is_expected.to contain_file('mysqlbackup.sh').with( :path => '/usr/local/sbin/mysqlbackup.sh', :ensure => 'present' ) } it 'should be able to backup events table' do - verify_contents(subject, 'mysqlbackup.sh', [ - 'EVENTS="--events"', - ]) + is_expected.to contain_file('mysqlbackup.sh').with_content( + /EVENTS="--events"/ + ) end end - context 'with database list specified' do let(:params) do { :backupdatabases => ['mysql'] }.merge(default_params) end - it { should contain_file('mysqlbackup.sh').with( + it { is_expected.to contain_file('mysqlbackup.sh').with( :path => '/usr/local/sbin/mysqlbackup.sh', :ensure => 'present' - ) } + ) + } it 'should have a backup file for each database' do - content = subject.resource('file','mysqlbackup.sh').send(:parameters)[:content] - content.should match(' mysql | bzcat -zc \${DIR}\\\${PREFIX}mysql_`date') - # verify_contents(subject, 'mysqlbackup.sh', [ - # ' mysql | bzcat -zc ${DIR}/${PREFIX}mysql_`date +%Y%m%d-%H%M%S`.sql', - # ]) - end + is_expected.to contain_file('mysqlbackup.sh').with_content( + /mysql | bzcat -zc \${DIR}\\\${PREFIX}mysql_`date'/ + ) + end end context 'with file per database' do @@ -140,14 +138,7 @@ end it 'should loop through backup all databases' do - verify_contents(subject, 'mysqlbackup.sh', [ - 'mysql -s -r -N -e \'SHOW DATABASES\' | while read dbname', - 'do', - ' mysqldump -u${USER} -p${PASS} --opt --flush-logs --single-transaction \\', - ' ${EVENTS} \\', - ' ${dbname} | bzcat -zc > ${DIR}/${PREFIX}${dbname}_`date +%Y%m%d-%H%M%S`.sql.bz2', - 'done', - ]) + is_expected.to contain_file('mysqlbackup.sh').with_content(/.*SHOW DATABASES.*/) end context 'with compression disabled' do @@ -156,9 +147,12 @@ end it 'should loop through backup all databases without compression' do - verify_contents(subject, 'mysqlbackup.sh', [ - ' ${dbname} > ${DIR}/${PREFIX}${dbname}_`date +%Y%m%d-%H%M%S`.sql', - ]) + is_expected.to contain_file('mysqlbackup.sh').with_content( + /.*SHOW DATABASES.*/ + ) + is_expected.to contain_file('mysqlbackup.sh').without_content( + /.*bzcat -zc.*/ + ) end end end @@ -169,9 +163,9 @@ end it 'should be add postscript' do - verify_contents(subject, 'mysqlbackup.sh', [ - 'rsync -a /tmp backup01.local-lan:', - ]) + is_expected.to 
contain_file('mysqlbackup.sh').with_content( + /rsync -a \/tmp backup01.local-lan:/ + ) end end @@ -184,10 +178,9 @@ end it 'should be add postscript' do - verify_contents(subject, 'mysqlbackup.sh', [ - 'rsync -a /tmp backup01.local-lan:', - 'rsync -a /tmp backup02.local-lan:', - ]) + is_expected.to contain_file('mysqlbackup.sh').with_content( + /.*rsync -a \/tmp backup01.local-lan:\n\nrsync -a \/tmp backup02.local-lan:.*/ + ) end end end diff --git a/mysql/spec/classes/mysql_server_monitor_spec.rb b/mysql/spec/classes/mysql_server_monitor_spec.rb index 79ad33dd6..d9eea001d 100644 --- a/mysql/spec/classes/mysql_server_monitor_spec.rb +++ b/mysql/spec/classes/mysql_server_monitor_spec.rb @@ -20,9 +20,9 @@ default_params end - it { should contain_mysql_user('monitoruser@monitorhost')} + it { is_expected.to contain_mysql_user('monitoruser@monitorhost')} - it { should contain_mysql_grant('monitoruser@monitorhost/*.*').with( + it { is_expected.to contain_mysql_grant('monitoruser@monitorhost/*.*').with( :ensure => 'present', :user => 'monitoruser@monitorhost', :table => '*.*', diff --git a/mysql/spec/classes/mysql_server_mysqltuner_spec.rb b/mysql/spec/classes/mysql_server_mysqltuner_spec.rb index 7645983d3..51d2a7aa0 100644 --- a/mysql/spec/classes/mysql_server_mysqltuner_spec.rb +++ b/mysql/spec/classes/mysql_server_mysqltuner_spec.rb @@ -6,7 +6,7 @@ describe "on #{pe_version} #{pe_platform}" do let(:facts) { facts } - it { should contain_file('/usr/local/bin/mysqltuner') } + it { is_expected.to contain_file('/usr/local/bin/mysqltuner') } end end end diff --git a/mysql/spec/classes/mysql_server_spec.rb b/mysql/spec/classes/mysql_server_spec.rb index b79e9f1ed..21efa1170 100644 --- a/mysql/spec/classes/mysql_server_spec.rb +++ b/mysql/spec/classes/mysql_server_spec.rb @@ -7,142 +7,40 @@ let(:facts) { facts } context 'with defaults' do - it { should contain_class('mysql::server::install') } - it { should contain_class('mysql::server::config') } - it { should contain_class('mysql::server::service') } - it { should contain_class('mysql::server::root_password') } - it { should contain_class('mysql::server::providers') } - end - - # make sure that overriding the mysqld settings keeps the defaults for everything else - context 'with overrides' do - let(:params) {{ :override_options => { 'mysqld' => { 'socket' => '/var/lib/mysql/mysql.sock' } } }} - it do - should contain_file('mysql-config-file').with({ - :mode => '0644', - }).with_content(/basedir/) - end - end - - describe 'with multiple instance of an option' do - let(:params) {{ :override_options => { 'mysqld' => { 'replicate-do-db' => ['base1', 'base2', 'base3'], } }}} - it do - should contain_file('mysql-config-file').with_content( - /^replicate-do-db = base1$/ - ).with_content( - /^replicate-do-db = base2$/ - ).with_content( - /^replicate-do-db = base3$/ - ) - end - end - - describe 'an option set to true' do - let(:params) { - { :override_options => { 'mysqld' => { 'ssl' => true } }} - } - it do - should contain_file('mysql-config-file').with_content(/^\s*ssl\s*(?:$|= true)/m) - end - end - - describe 'an option set to false' do - let(:params) { - { :override_options => { 'mysqld' => { 'ssl' => false } }} - } - it do - should contain_file('mysql-config-file').with_content(/^\s*ssl = false/m) - end + it { is_expected.to contain_class('mysql::server::install') } + it { is_expected.to contain_class('mysql::server::config') } + it { is_expected.to contain_class('mysql::server::service') } + it { is_expected.to 
contain_class('mysql::server::root_password') } + it { is_expected.to contain_class('mysql::server::providers') } end context 'with remove_default_accounts set' do - let (:params) {{ :remove_default_accounts => true }} - it { should contain_class('mysql::server::account_security') } - end - - describe 'possibility of disabling ssl completely' do - let(:params) { - { :override_options => { 'mysqld' => { 'ssl' => true, 'ssl-disable' => true } }} - } - it do - should contain_file('mysql-config-file').without_content(/^\s*ssl\s*(?:$|= true)/m) - end + let(:params) {{ :remove_default_accounts => true }} + it { is_expected.to contain_class('mysql::server::account_security') } end context 'mysql::server::install' do - let(:params) {{ :package_ensure => 'present', :name => 'mysql-server' }} - it do - should contain_package('mysql-server').with({ - :ensure => :present, - :name => 'mysql-server', - }) - end - end - - if pe_platform =~ /redhat-7/ - context 'mysql::server::install on RHEL 7' do - let(:params) {{ :package_ensure => 'present', :name => 'mariadb-server' }} - it do - should contain_package('mysql-server').with({ + it 'contains the package' do + is_expected.to contain_package('mysql-server').with({ :ensure => :present, - :name => 'mariadb-server', }) - end end - end - - context 'mysql::server::config' do - context 'with includedir' do - let(:params) {{ :includedir => '/etc/my.cnf.d' }} - it do - should contain_file('/etc/my.cnf.d').with({ - :ensure => :directory, - :mode => '0755', - }) - end - - it do - should contain_file('mysql-config-file').with({ - :mode => '0644', - }) - end - - it do - should contain_file('mysql-config-file').with_content(/!includedir/) - end - end - - context 'without includedir' do - let(:params) {{ :includedir => '' }} - it do - should_not contain_file('mysql-config-file').with({ - :ensure => :directory, - :mode => '0755', - }) - end - - it do - should contain_file('mysql-config-file').with({ - :mode => '0644', - }) - end - - it do - should contain_file('mysql-config-file').without_content(/!includedir/) - end + context 'with datadir overridden' do + let(:params) {{ :override_options => { 'mysqld' => { 'datadir' => '/tmp' }} }} + it { is_expected.to contain_exec('mysql_install_db') } end end context 'mysql::server::service' do context 'with defaults' do - it { should contain_service('mysqld') } + it { is_expected.to contain_service('mysqld') } end context 'service_enabled set to false' do let(:params) {{ :service_enabled => false }} it do - should contain_service('mysqld').with({ + is_expected.to contain_service('mysqld').with({ :ensure => :stopped }) end @@ -151,15 +49,14 @@ context 'mysql::server::root_password' do describe 'when defaults' do - it { should_not contain_mysql_user('root@localhost') } - it { should_not contain_file('/root/.my.cnf') } + it { is_expected.not_to contain_mysql_user('root@localhost') } + it { is_expected.not_to contain_file('/root/.my.cnf') } end describe 'when set' do let(:params) {{:root_password => 'SET' }} - it { should contain_mysql_user('root@localhost') } - it { should contain_file('/root/.my.cnf') } + it { is_expected.to contain_mysql_user('root@localhost') } + it { is_expected.to contain_file('/root/.my.cnf') } end - end context 'mysql::server::providers' do @@ -174,14 +71,14 @@ }, 'foo2@localhost' => {} }}} - it { should contain_mysql_user('foo@localhost').with( + it { is_expected.to contain_mysql_user('foo@localhost').with( :max_connections_per_hour => '1', :max_queries_per_hour => '2', :max_updates_per_hour => '3', 
:max_user_connections => '4', :password_hash => '*F3A2A51A9B0F2BE2468926B4132313728C250DBF' )} - it { should contain_mysql_user('foo2@localhost').with( + it { is_expected.to contain_mysql_user('foo2@localhost').with( :max_connections_per_hour => nil, :max_queries_per_hour => nil, :max_updates_per_hour => nil, @@ -204,13 +101,13 @@ 'privileges' => ["SELECT"], }, }}} - it { should contain_mysql_grant('foo@localhost/somedb.*').with( + it { is_expected.to contain_mysql_grant('foo@localhost/somedb.*').with( :user => 'foo@localhost', :table => 'somedb.*', :privileges => ["SELECT", "UPDATE"], :options => ["GRANT"] )} - it { should contain_mysql_grant('foo2@localhost/*.*').with( + it { is_expected.to contain_mysql_grant('foo2@localhost/*.*').with( :user => 'foo2@localhost', :table => '*.*', :privileges => ["SELECT"], @@ -226,11 +123,11 @@ }, 'somedb2' => {} }}} - it { should contain_mysql_database('somedb').with( + it { is_expected.to contain_mysql_database('somedb').with( :charset => 'latin1', :collate => 'latin1' )} - it { should contain_mysql_database('somedb2')} + it { is_expected.to contain_mysql_database('somedb2')} end end end diff --git a/mysql/spec/defines/mysql_db_spec.rb b/mysql/spec/defines/mysql_db_spec.rb index 01ec80733..15f433bb5 100644 --- a/mysql/spec/defines/mysql_db_spec.rb +++ b/mysql/spec/defines/mysql_db_spec.rb @@ -17,33 +17,33 @@ end it 'should not notify the import sql exec if no sql script was provided' do - should contain_mysql_database('test_db').without_notify + is_expected.to contain_mysql_database('test_db').without_notify end it 'should subscribe to database if sql script is given' do params.merge!({'sql' => 'test_sql'}) - should contain_exec('test_db-import').with_subscribe('Mysql_database[test_db]') + is_expected.to contain_exec('test_db-import').with_subscribe('Mysql_database[test_db]') end it 'should only import sql script on creation if not enforcing' do params.merge!({'sql' => 'test_sql', 'enforce_sql' => false}) - should contain_exec('test_db-import').with_refreshonly(true) + is_expected.to contain_exec('test_db-import').with_refreshonly(true) end it 'should import sql script on creation if enforcing' do params.merge!({'sql' => 'test_sql', 'enforce_sql' => true}) - should contain_exec('test_db-import').with_refreshonly(false) + is_expected.to contain_exec('test_db-import').with_refreshonly(false) end it 'should not create database and database user' do params.merge!({'ensure' => 'absent', 'host' => 'localhost'}) - should contain_mysql_database('test_db').with_ensure('absent') - should contain_mysql_user('testuser@localhost').with_ensure('absent') + is_expected.to contain_mysql_database('test_db').with_ensure('absent') + is_expected.to contain_mysql_user('testuser@localhost').with_ensure('absent') end it 'should create with an appropriate collate and charset' do params.merge!({'charset' => 'utf8', 'collate' => 'utf8_danish_ci'}) - should contain_mysql_database('test_db').with({ + is_expected.to contain_mysql_database('test_db').with({ 'charset' => 'utf8', 'collate' => 'utf8_danish_ci', }) @@ -51,6 +51,6 @@ it 'should use dbname parameter as database name instead of name' do params.merge!({'dbname' => 'real_db'}) - should contain_mysql_database('real_db') + is_expected.to contain_mysql_database('real_db') end end diff --git a/mysql/spec/spec_helper_acceptance.rb b/mysql/spec/spec_helper_acceptance.rb index 28bcdd60c..cfae833bd 100644 --- a/mysql/spec/spec_helper_acceptance.rb +++ b/mysql/spec/spec_helper_acceptance.rb @@ -3,11 +3,12 @@ UNSUPPORTED_PLATFORMS = 
[ 'Windows', 'Solaris', 'AIX' ] unless ENV['RS_PROVISION'] == 'no' or ENV['BEAKER_provision'] == 'no' - if hosts.first.is_pe? - install_pe - else - install_puppet - end + # This will install the latest available package on el and deb based + # systems fail on windows and osx, and install via gem on other *nixes + foss_opts = { :default_action => 'gem_install' } + + if default.is_pe?; then install_pe; else install_puppet( foss_opts ); end + hosts.each do |host| on hosts, "mkdir -p #{host['distmoduledir']}" end @@ -29,6 +30,9 @@ if fact('osfamily') == 'RedHat' version = fact("operatingsystemmajrelease") shell("yum localinstall -y http://yum.puppetlabs.com/puppetlabs-release-el-#{version}.noarch.rpm") + if fact('operatingsystemmajrelease') =~ /7/ || fact('operatingsystem') =~ /Fedora/ + shell("yum install -y bzip2") + end end shell("/bin/touch #{default['puppetpath']}/hiera.yaml") diff --git a/mysql/spec/unit/puppet/functions/mysql_deepmerge_spec.rb b/mysql/spec/unit/puppet/functions/mysql_deepmerge_spec.rb index fa9c72b78..18cb26bfa 100644 --- a/mysql/spec/unit/puppet/functions/mysql_deepmerge_spec.rb +++ b/mysql/spec/unit/puppet/functions/mysql_deepmerge_spec.rb @@ -7,7 +7,7 @@ describe 'when calling mysql_deepmerge from puppet' do it "should not compile when no arguments are passed" do - pending("Fails on 2.6.x, see bug #15912") if Puppet.version =~ /^2\.6\./ + skip("Fails on 2.6.x, see bug #15912") if Puppet.version =~ /^2\.6\./ Puppet[:code] = '$x = mysql_deepmerge()' expect { scope.compiler.compile @@ -15,7 +15,7 @@ end it "should not compile when 1 argument is passed" do - pending("Fails on 2.6.x, see bug #15912") if Puppet.version =~ /^2\.6\./ + skip("Fails on 2.6.x, see bug #15912") if Puppet.version =~ /^2\.6\./ Puppet[:code] = "$my_hash={'one' => 1}\n$x = mysql_deepmerge($my_hash)" expect { scope.compiler.compile @@ -35,57 +35,57 @@ it 'should be able to mysql_deepmerge two hashes' do new_hash = scope.function_mysql_deepmerge([{'one' => '1', 'two' => '1'}, {'two' => '2', 'three' => '2'}]) - new_hash['one'].should == '1' - new_hash['two'].should == '2' - new_hash['three'].should == '2' + expect(new_hash['one']).to eq('1') + expect(new_hash['two']).to eq('2') + expect(new_hash['three']).to eq('2') end it 'should mysql_deepmerge multiple hashes' do hash = scope.function_mysql_deepmerge([{'one' => 1}, {'one' => '2'}, {'one' => '3'}]) - hash['one'].should == '3' + expect(hash['one']).to eq('3') end it 'should accept empty hashes' do - scope.function_mysql_deepmerge([{},{},{}]).should == {} + expect(scope.function_mysql_deepmerge([{},{},{}])).to eq({}) end it 'should mysql_deepmerge subhashes' do hash = scope.function_mysql_deepmerge([{'one' => 1}, {'two' => 2, 'three' => { 'four' => 4 } }]) - hash['one'].should == 1 - hash['two'].should == 2 - hash['three'].should == { 'four' => 4 } + expect(hash['one']).to eq(1) + expect(hash['two']).to eq(2) + expect(hash['three']).to eq({ 'four' => 4 }) end it 'should append to subhashes' do hash = scope.function_mysql_deepmerge([{'one' => { 'two' => 2 } }, { 'one' => { 'three' => 3 } }]) - hash['one'].should == { 'two' => 2, 'three' => 3 } + expect(hash['one']).to eq({ 'two' => 2, 'three' => 3 }) end it 'should append to subhashes 2' do hash = scope.function_mysql_deepmerge([{'one' => 1, 'two' => 2, 'three' => { 'four' => 4 } }, {'two' => 'dos', 'three' => { 'five' => 5 } }]) - hash['one'].should == 1 - hash['two'].should == 'dos' - hash['three'].should == { 'four' => 4, 'five' => 5 } + expect(hash['one']).to eq(1) + expect(hash['two']).to eq('dos') 
+ expect(hash['three']).to eq({ 'four' => 4, 'five' => 5 }) end it 'should append to subhashes 3' do hash = scope.function_mysql_deepmerge([{ 'key1' => { 'a' => 1, 'b' => 2 }, 'key2' => { 'c' => 3 } }, { 'key1' => { 'b' => 99 } }]) - hash['key1'].should == { 'a' => 1, 'b' => 99 } - hash['key2'].should == { 'c' => 3 } + expect(hash['key1']).to eq({ 'a' => 1, 'b' => 99 }) + expect(hash['key2']).to eq({ 'c' => 3 }) end it 'should equate keys mod dash and underscore' do hash = scope.function_mysql_deepmerge([{ 'a-b-c' => 1 } , { 'a_b_c' => 10 }]) - hash['a_b_c'].should == 10 - hash.should_not have_key('a-b-c') + expect(hash['a_b_c']).to eq(10) + expect(hash).not_to have_key('a-b-c') end it 'should keep style of the last when keys are euqal mod dash and underscore' do hash = scope.function_mysql_deepmerge([{ 'a-b-c' => 1, 'b_c_d' => { 'c-d-e' => 2, 'e-f-g' => 3 }} , { 'a_b_c' => 10, 'b-c-d' => { 'c_d_e' => 12 } }]) - hash['a_b_c'].should == 10 - hash.should_not have_key('a-b-c') - hash['b-c-d'].should == { 'e-f-g' => 3, 'c_d_e' => 12 } - hash.should_not have_key('b_c_d') + expect(hash['a_b_c']).to eq(10) + expect(hash).not_to have_key('a-b-c') + expect(hash['b-c-d']).to eq({ 'e-f-g' => 3, 'c_d_e' => 12 }) + expect(hash).not_to have_key('b_c_d') end end end diff --git a/mysql/spec/unit/puppet/functions/mysql_password_spec.rb b/mysql/spec/unit/puppet/functions/mysql_password_spec.rb index 073691004..77726d13f 100644 --- a/mysql/spec/unit/puppet/functions/mysql_password_spec.rb +++ b/mysql/spec/unit/puppet/functions/mysql_password_spec.rb @@ -8,20 +8,20 @@ let(:scope) { PuppetlabsSpec::PuppetInternals.scope } it 'should exist' do - Puppet::Parser::Functions.function('mysql_password').should == 'function_mysql_password' + expect(Puppet::Parser::Functions.function('mysql_password')).to eq('function_mysql_password') end it 'should raise a ParseError if there is less than 1 arguments' do - lambda { scope.function_mysql_password([]) }.should( raise_error(Puppet::ParseError)) + expect { scope.function_mysql_password([]) }.to( raise_error(Puppet::ParseError)) end it 'should raise a ParseError if there is more than 1 arguments' do - lambda { scope.function_mysql_password(%w(foo bar)) }.should( raise_error(Puppet::ParseError)) + expect { scope.function_mysql_password(%w(foo bar)) }.to( raise_error(Puppet::ParseError)) end it 'should convert password into a hash' do result = scope.function_mysql_password(%w(password)) - result.should(eq('*2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19')) + expect(result).to(eq('*2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19')) end end diff --git a/mysql/spec/unit/puppet/provider/database/mysql_spec.rb b/mysql/spec/unit/puppet/provider/database/mysql_spec.rb deleted file mode 100644 index e2557fc35..000000000 --- a/mysql/spec/unit/puppet/provider/database/mysql_spec.rb +++ /dev/null @@ -1,86 +0,0 @@ -require 'spec_helper' - -provider_class = Puppet::Type.type(:database).provider(:mysql) - -describe provider_class do - subject { provider_class } - - let(:root_home) { '/root' } - let(:defaults_file) { '--defaults-extra-file=/root/.my.cnf' } - - let(:raw_databases) do - <<-SQL_OUTPUT -information_schema -mydb -mysql -performance_schema -test - SQL_OUTPUT - end - - let(:parsed_databases) { %w(information_schema mydb mysql performance_schema test) } - - before :each do - @resource = Puppet::Type::Database.new( - { :charset => 'utf8', :name => 'new_database' } - ) - @provider = provider_class.new(@resource) - Facter.stubs(:value).with(:root_home).returns(root_home) - 
Puppet::Util.stubs(:which).with('mysql').returns('/usr/bin/mysql') - subject.stubs(:which).with('mysql').returns('/usr/bin/mysql') - subject.stubs(:defaults_file).returns('--defaults-extra-file=/root/.my.cnf') - end - - describe 'self.instances' do - it 'returns an array of databases' do - subject.stubs(:mysql).with([defaults_file, '-NBe', 'show databases']).returns(raw_databases) - - databases = subject.instances.collect {|x| x.name } - parsed_databases.should match_array(databases) - end - end - - describe 'create' do - it 'makes a user' do - subject.expects(:mysql).with([defaults_file, '-NBe', "create database `#{@resource[:name]}` character set #{@resource[:charset]}"]) - @provider.create - end - end - - describe 'destroy' do - it 'removes a user if present' do - subject.expects(:mysqladmin).with([defaults_file, '-f', 'drop', "#{@resource[:name]}"]) - @provider.destroy - end - end - - describe 'charset' do - it 'returns a charset' do - subject.expects(:mysql).with([defaults_file, '-NBe', "show create database `#{@resource[:name]}`"]).returns('mydbCREATE DATABASE `mydb` /*!40100 DEFAULT CHARACTER SET utf8 */') - @provider.charset.should == 'utf8' - end - end - - describe 'charset=' do - it 'changes the charset' do - subject.expects(:mysql).with([defaults_file, '-NBe', "alter database `#{@resource[:name]}` CHARACTER SET blah"]).returns('0') - - @provider.charset=('blah') - end - end - - describe 'exists?' do - it 'checks if user exists' do - subject.expects(:mysql).with([defaults_file, '-NBe', 'show databases']).returns('information_schema\nmydb\nmysql\nperformance_schema\ntest') - @provider.exists? - end - end - - describe 'self.defaults_file' do - it 'sets --defaults-extra-file' do - File.stubs(:file?).with('#{root_home}/.my.cnf').returns(true) - @provider.defaults_file.should == '--defaults-extra-file=/root/.my.cnf' - end - end - -end diff --git a/mysql/spec/unit/puppet/provider/database_grant/mysql_spec.rb b/mysql/spec/unit/puppet/provider/database_grant/mysql_spec.rb deleted file mode 100644 index 4d9484d04..000000000 --- a/mysql/spec/unit/puppet/provider/database_grant/mysql_spec.rb +++ /dev/null @@ -1,95 +0,0 @@ -require 'puppet' -require 'mocha/api' -require 'spec_helper' -RSpec.configure do |config| - config.mock_with :mocha -end -provider_class = Puppet::Type.type(:database_grant).provider(:mysql) -describe provider_class do - let(:root_home) { '/root' } - - before :each do - @resource = Puppet::Type::Database_grant.new( - { :privileges => 'all', :provider => 'mysql', :name => 'user@host'} - ) - @provider = provider_class.new(@resource) - Facter.stubs(:value).with(:root_home).returns(root_home) - File.stubs(:file?).with("#{root_home}/.my.cnf").returns(true) - end - - it 'should query privileges from the database' do - provider_class.expects(:mysql) .with(["--defaults-extra-file=#{root_home}/.my.cnf", 'mysql', '-Be', 'describe user']).returns <<-EOT -Field Type Null Key Default Extra -Host char(60) NO PRI -User char(16) NO PRI -Password char(41) NO -Select_priv enum('N','Y') NO N -Insert_priv enum('N','Y') NO N -Update_priv enum('N','Y') NO N -EOT - provider_class.expects(:mysql).with(["--defaults-extra-file=#{root_home}/.my.cnf", 'mysql', '-Be', 'describe db']).returns <<-EOT -Field Type Null Key Default Extra -Host char(60) NO PRI -Db char(64) NO PRI -User char(16) NO PRI -Select_priv enum('N','Y') NO N -Insert_priv enum('N','Y') NO N -Update_priv enum('N','Y') NO N -EOT - provider_class.user_privs.should == %w(Select_priv Insert_priv Update_priv) - 
provider_class.db_privs.should == %w(Select_priv Insert_priv Update_priv) - end - - it 'should query set privileges' do - provider_class.expects(:mysql).with(["--defaults-extra-file=#{root_home}/.my.cnf", 'mysql', '-Be', "select * from mysql.user where user='user' and host='host'"]).returns <<-EOT -Host User Password Select_priv Insert_priv Update_priv -host user Y N Y -EOT - @provider.privileges.should == %w(Select_priv Update_priv) - end - - it 'should recognize when all privileges are set' do - provider_class.expects(:mysql).with(["--defaults-extra-file=#{root_home}/.my.cnf", 'mysql', '-Be', "select * from mysql.user where user='user' and host='host'"]).returns <<-EOT -Host User Password Select_priv Insert_priv Update_priv -host user Y Y Y -EOT - @provider.all_privs_set?.should == true - end - - it 'should recognize when all privileges are not set' do - provider_class.expects(:mysql).with(["--defaults-extra-file=#{root_home}/.my.cnf", 'mysql', '-Be', "select * from mysql.user where user='user' and host='host'"]).returns <<-EOT -Host User Password Select_priv Insert_priv Update_priv -host user Y N Y -EOT - @provider.all_privs_set?.should == false - end - - it 'should be able to set all privileges' do - provider_class.expects(:mysql).with(["--defaults-extra-file=#{root_home}/.my.cnf", 'mysql', '-NBe', "SELECT '1' FROM user WHERE user='user' AND host='host'"]).returns "1\n" - provider_class.expects(:mysql).with(["--defaults-extra-file=#{root_home}/.my.cnf", 'mysql', '-Be', "update user set Select_priv = 'Y', Insert_priv = 'Y', Update_priv = 'Y' where user='user' and host='host'"]) - provider_class.expects(:mysqladmin).with(%W(--defaults-extra-file=#{root_home}/.my.cnf flush-privileges)) - @provider.privileges=(%w(all)) - end - - it 'should be able to set partial privileges' do - provider_class.expects(:mysql).with(["--defaults-extra-file=#{root_home}/.my.cnf", 'mysql', '-NBe', "SELECT '1' FROM user WHERE user='user' AND host='host'"]).returns "1\n" - provider_class.expects(:mysql).with(["--defaults-extra-file=#{root_home}/.my.cnf", 'mysql', '-Be', "update user set Select_priv = 'Y', Insert_priv = 'N', Update_priv = 'Y' where user='user' and host='host'"]) - provider_class.expects(:mysqladmin).with(%W(--defaults-extra-file=#{root_home}/.my.cnf flush-privileges)) - @provider.privileges=(%w(Select_priv Update_priv)) - end - - it 'should be case insensitive' do - provider_class.expects(:mysql).with(["--defaults-extra-file=#{root_home}/.my.cnf", 'mysql', '-NBe', "SELECT '1' FROM user WHERE user='user' AND host='host'"]).returns "1\n" - provider_class.expects(:mysql).with(["--defaults-extra-file=#{root_home}/.my.cnf", 'mysql', '-Be', "update user set Select_priv = 'Y', Insert_priv = 'Y', Update_priv = 'Y' where user='user' and host='host'"]) - provider_class.expects(:mysqladmin).with(["--defaults-extra-file=#{root_home}/.my.cnf", 'flush-privileges']) - @provider.privileges=(%w(SELECT_PRIV insert_priv UpDaTe_pRiV)) - end - - it 'should not pass --defaults-extra-file if $root_home/.my.cnf is absent' do - File.stubs(:file?).with("#{root_home}/.my.cnf").returns(false) - provider_class.expects(:mysql).with(['mysql', '-NBe', "SELECT '1' FROM user WHERE user='user' AND host='host'"]).returns "1\n" - provider_class.expects(:mysql).with(['mysql', '-Be', "update user set Select_priv = 'Y', Insert_priv = 'N', Update_priv = 'Y' where user='user' and host='host'"]) - provider_class.expects(:mysqladmin).with(%w(flush-privileges)) - @provider.privileges=(%w(Select_priv Update_priv)) - end -end diff --git 
a/mysql/spec/unit/puppet/provider/database_user/mysql_spec.rb b/mysql/spec/unit/puppet/provider/database_user/mysql_spec.rb deleted file mode 100644 index d85306822..000000000 --- a/mysql/spec/unit/puppet/provider/database_user/mysql_spec.rb +++ /dev/null @@ -1,119 +0,0 @@ -require 'spec_helper' - -provider_class = Puppet::Type.type(:database_user).provider(:mysql) - -describe provider_class do - subject { provider_class } - - let(:root_home) { '/root' } - let(:defaults_file) { '--defaults-extra-file=/root/.my.cnf' } - let(:newhash) { '*6C8989366EAF75BB670AD8EA7A7FC1176A95CEF5' } - - let(:raw_users) do - <<-SQL_OUTPUT -root@127.0.0.1 -root@::1 -@localhost -debian-sys-maint@localhost -root@localhost -usvn_user@localhost -@vagrant-ubuntu-raring-64 - SQL_OUTPUT - end - - let(:parsed_users) { %w(root@127.0.0.1 root@::1 debian-sys-maint@localhost root@localhost usvn_user@localhost) } - - before :each do - # password hash = mypass - @resource = Puppet::Type::Database_user.new( - { :password_hash => '*6C8989366EAF75BB670AD8EA7A7FC1176A95CEF4', - :name => 'joe@localhost', - :max_user_connections => '10' - } - ) - @provider = provider_class.new(@resource) - Facter.stubs(:value).with(:root_home).returns(root_home) - Puppet::Util.stubs(:which).with('mysql').returns('/usr/bin/mysql') - subject.stubs(:which).with('mysql').returns('/usr/bin/mysql') - subject.stubs(:defaults_file).returns('--defaults-extra-file=/root/.my.cnf') - end - - describe 'self.instances' do - it 'returns an array of users' do - subject.stubs(:mysql).with([defaults_file, 'mysql', "-BNeselect concat(User, '@',Host) as User from mysql.user"]).returns(raw_users) - - usernames = subject.instances.collect {|x| x.name } - parsed_users.should match_array(usernames) - end - end - - describe 'create' do - it 'makes a user' do - subject.expects(:mysql).with([defaults_file, 'mysql', '-e', "grant usage on *.* to 'joe'@'localhost' identified by PASSWORD - '*6C8989366EAF75BB670AD8EA7A7FC1176A95CEF4' with max_user_connections 10"]) - @provider.expects(:exists?).returns(true) - @provider.create.should be_truthy - end - end - - describe 'destroy' do - it 'removes a user if present' do - subject.expects(:mysql).with([defaults_file, 'mysql', '-e', "drop user 'joe'@'localhost'"]) - @provider.expects(:exists?).returns(false) - @provider.destroy.should be_truthy - end - end - - describe 'password_hash' do - it 'returns a hash' do - subject.expects(:mysql).with([defaults_file, 'mysql', '-NBe', "select password from mysql.user where CONCAT(user, '@', host) = 'joe@localhost'"]).returns('*6C8989366EAF75BB670AD8EA7A7FC1176A95CEF4') - @provider.password_hash.should == '*6C8989366EAF75BB670AD8EA7A7FC1176A95CEF4' - end - end - - describe 'password_hash=' do - it 'changes the hash' do - subject.expects(:mysql).with([defaults_file, 'mysql', '-e', "SET PASSWORD FOR 'joe'@'localhost' = '*6C8989366EAF75BB670AD8EA7A7FC1176A95CEF5'"]).returns('0') - - @provider.expects(:password_hash).returns('*6C8989366EAF75BB670AD8EA7A7FC1176A95CEF5') - @provider.password_hash=('*6C8989366EAF75BB670AD8EA7A7FC1176A95CEF5') - end - end - - describe 'max_user_connections' do - it 'returns max user connections' do - subject.expects(:mysql).with([defaults_file, 'mysql', '-NBe', "select max_user_connections from mysql.user where CONCAT(user, '@', host) = 'joe@localhost'"]).returns('10') - @provider.max_user_connections.should == '10' - end - end - - describe 'max_user_connections=' do - it 'changes max user connections' do - subject.expects(:mysql).with([defaults_file, 'mysql', '-e', 
"grant usage on *.* to 'joe'@'localhost' with max_user_connections 42"]).returns('0') - @provider.expects(:max_user_connections).returns('42') - @provider.max_user_connections=('42') - end - end - - describe 'exists?' do - it 'checks if user exists' do - subject.expects(:mysql).with([defaults_file, 'mysql', '-NBe', "select '1' from mysql.user where CONCAT(user, '@', host) = 'joe@localhost'"]).returns('1') - @provider.exists?.should be_truthy - end - end - - describe 'flush' do - it 'removes cached privileges' do - subject.expects(:mysqladmin).with([defaults_file, 'flush-privileges']) - @provider.flush - end - end - - describe 'self.defaults_file' do - it 'sets --defaults-extra-file' do - File.stubs(:file?).with('#{root_home}/.my.cnf').returns(true) - @provider.defaults_file.should == '--defaults-extra-file=/root/.my.cnf' - end - end - -end diff --git a/mysql/spec/unit/puppet/provider/mysql_database/mysql_spec.rb b/mysql/spec/unit/puppet/provider/mysql_database/mysql_spec.rb index 4bc24b46c..465e59dd5 100644 --- a/mysql/spec/unit/puppet/provider/mysql_database/mysql_spec.rb +++ b/mysql/spec/unit/puppet/provider/mysql_database/mysql_spec.rb @@ -43,7 +43,7 @@ provider.class.stubs(:mysql).with([defaults_file, '-NBe', "show variables like '%_database'", db.chomp]).returns("character_set_database latin1\ncollation_database latin1_swedish_ci\nskip_show_database OFF") end databases = provider.class.instances.collect {|x| x.name } - parsed_databases.should match_array(databases) + expect(parsed_databases).to match_array(databases) end end @@ -56,9 +56,9 @@ describe 'create' do it 'makes a database' do - provider.expects(:mysql).with([defaults_file, '-NBe', "create database if not exists `#{resource[:name]}` character set #{resource[:charset]} collate #{resource[:collate]}"]) + provider.expects(:mysql).with([defaults_file, '-NBe', "create database if not exists `#{resource[:name]}` character set `#{resource[:charset]}` collate `#{resource[:collate]}`"]) provider.expects(:exists?).returns(true) - provider.create.should be_truthy + expect(provider.create).to be_truthy end end @@ -66,30 +66,30 @@ it 'removes a database if present' do provider.expects(:mysql).with([defaults_file, '-NBe', "drop database if exists `#{resource[:name]}`"]) provider.expects(:exists?).returns(false) - provider.destroy.should be_truthy + expect(provider.destroy).to be_truthy end end describe 'exists?' 
do it 'checks if database exists' do - instance.exists?.should be_truthy + expect(instance.exists?).to be_truthy end end describe 'self.defaults_file' do it 'sets --defaults-extra-file' do File.stubs(:file?).with('/root/.my.cnf').returns(true) - provider.defaults_file.should eq '--defaults-extra-file=/root/.my.cnf' + expect(provider.defaults_file).to eq '--defaults-extra-file=/root/.my.cnf' end it 'fails if file missing' do File.stubs(:file?).with('/root/.my.cnf').returns(false) - provider.defaults_file.should be_nil + expect(provider.defaults_file).to be_nil end end describe 'charset' do it 'returns a charset' do - instance.charset.should == 'latin1' + expect(instance.charset).to eq('latin1') end end @@ -103,7 +103,7 @@ describe 'collate' do it 'returns a collate' do - instance.collate.should == 'latin1_swedish_ci' + expect(instance.collate).to eq('latin1_swedish_ci') end end diff --git a/mysql/spec/unit/puppet/provider/mysql_user/mysql_spec.rb b/mysql/spec/unit/puppet/provider/mysql_user/mysql_spec.rb index 50f0c3bd2..dacbae4b0 100644 --- a/mysql/spec/unit/puppet/provider/mysql_user/mysql_spec.rb +++ b/mysql/spec/unit/puppet/provider/mysql_user/mysql_spec.rb @@ -50,7 +50,7 @@ end usernames = provider.class.instances.collect {|x| x.name } - parsed_users.should match_array(usernames) + expect(parsed_users).to match_array(usernames) end end @@ -65,7 +65,7 @@ it 'makes a user' do provider.expects(:mysql).with([defaults_file, '-e', "GRANT USAGE ON *.* TO 'joe'@'localhost' IDENTIFIED BY PASSWORD '*6C8989366EAF75BB670AD8EA7A7FC1176A95CEF4' WITH MAX_USER_CONNECTIONS 10 MAX_CONNECTIONS_PER_HOUR 10 MAX_QUERIES_PER_HOUR 10 MAX_UPDATES_PER_HOUR 10"]) provider.expects(:exists?).returns(true) - provider.create.should be_truthy + expect(provider.create).to be_truthy end end @@ -73,30 +73,30 @@ it 'removes a user if present' do provider.expects(:mysql).with([defaults_file, '-e', "DROP USER 'joe'@'localhost'"]) provider.expects(:exists?).returns(false) - provider.destroy.should be_truthy + expect(provider.destroy).to be_truthy end end describe 'exists?' 
do it 'checks if user exists' do - instance.exists?.should be_truthy + expect(instance.exists?).to be_truthy end end describe 'self.defaults_file' do it 'sets --defaults-extra-file' do File.stubs(:file?).with('/root/.my.cnf').returns(true) - provider.defaults_file.should eq '--defaults-extra-file=/root/.my.cnf' + expect(provider.defaults_file).to eq '--defaults-extra-file=/root/.my.cnf' end it 'fails if file missing' do File.expects(:file?).with('/root/.my.cnf').returns(false) - provider.defaults_file.should be_nil + expect(provider.defaults_file).to be_nil end end describe 'password_hash' do it 'returns a hash' do - instance.password_hash.should == '*6C8989366EAF75BB670AD8EA7A7FC1176A95CEF4' + expect(instance.password_hash).to eq('*6C8989366EAF75BB670AD8EA7A7FC1176A95CEF4') end end @@ -114,7 +114,7 @@ describe property do it "returns #{property}" do - instance.send("#{property}".to_sym).should == '10' + expect(instance.send("#{property}".to_sym)).to eq('10') end end diff --git a/mysql/spec/unit/puppet/type/mysql_database_spec.rb b/mysql/spec/unit/puppet/type/mysql_database_spec.rb index e2ebd90d4..7897d8109 100644 --- a/mysql/spec/unit/puppet/type/mysql_database_spec.rb +++ b/mysql/spec/unit/puppet/type/mysql_database_spec.rb @@ -7,17 +7,17 @@ end it 'should accept a database name' do - @user[:name].should == 'test' + expect(@user[:name]).to eq('test') end it 'should accept a charset' do @user[:charset] = 'latin1' - @user[:charset].should == 'latin1' + expect(@user[:charset]).to eq('latin1') end it 'should accept a collate' do @user[:collate] = 'latin1_swedish_ci' - @user[:collate].should == 'latin1_swedish_ci' + expect(@user[:collate]).to eq('latin1_swedish_ci') end it 'should require a name' do diff --git a/mysql/spec/unit/puppet/type/mysql_grant_spec.rb b/mysql/spec/unit/puppet/type/mysql_grant_spec.rb index 4171ab28e..9b33058bc 100644 --- a/mysql/spec/unit/puppet/type/mysql_grant_spec.rb +++ b/mysql/spec/unit/puppet/type/mysql_grant_spec.rb @@ -7,32 +7,32 @@ end it 'should accept a grant name' do - @user[:name].should == 'foo@localhost/*.*' + expect(@user[:name]).to eq('foo@localhost/*.*') end it 'should accept ALL privileges' do @user[:privileges] = 'ALL' - @user[:privileges].should == ['ALL'] + expect(@user[:privileges]).to eq(['ALL']) end it 'should accept PROXY privilege' do @user[:privileges] = 'PROXY' - @user[:privileges].should == ['PROXY'] + expect(@user[:privileges]).to eq(['PROXY']) end it 'should accept a table' do @user[:table] = '*.*' - @user[:table].should == '*.*' + expect(@user[:table]).to eq('*.*') end it 'should accept @ for table' do @user[:table] = '@' - @user[:table].should == '@' + expect(@user[:table]).to eq('@') end it 'should accept a user' do @user[:user] = 'foo@localhost' - @user[:user].should == 'foo@localhost' + expect(@user[:user]).to eq('foo@localhost') end it 'should require a name' do @@ -47,4 +47,28 @@ }.to raise_error /name must match user and table parameters/ end + describe 'it should munge privileges' do + + it 'to just ALL' do + @user = Puppet::Type.type(:mysql_grant).new( + :name => 'foo@localhost/*.*', :table => ['*.*','@'], :user => 'foo@localhost', + :privileges => ['ALL', 'PROXY'] ) + expect(@user[:privileges]).to eq(['ALL']) + end + + it 'to upcase and ordered' do + @user = Puppet::Type.type(:mysql_grant).new( + :name => 'foo@localhost/*.*', :table => ['*.*','@'], :user => 'foo@localhost', + :privileges => ['select', 'Insert'] ) + expect(@user[:privileges]).to eq(['INSERT', 'SELECT']) + end + + it 'ordered including column privileges' do + 
@user = Puppet::Type.type(:mysql_grant).new( + :name => 'foo@localhost/*.*', :table => ['*.*','@'], :user => 'foo@localhost', + :privileges => ['SELECT(Host,Address)', 'Insert'] ) + expect(@user[:privileges]).to eq(['INSERT', 'SELECT (Address, Host)']) + end + end + end diff --git a/mysql/spec/unit/puppet/type/mysql_user_spec.rb b/mysql/spec/unit/puppet/type/mysql_user_spec.rb index 7ffc801b6..f66741c99 100644 --- a/mysql/spec/unit/puppet/type/mysql_user_spec.rb +++ b/mysql/spec/unit/puppet/type/mysql_user_spec.rb @@ -20,12 +20,12 @@ end it 'should accept a user name' do - @user[:name].should == 'foo@localhost' + expect(@user[:name]).to eq('foo@localhost') end it 'should accept a password' do @user[:password_hash] = 'foo' - @user[:password_hash].should == 'foo' + expect(@user[:password_hash]).to eq('foo') end end @@ -35,7 +35,7 @@ end it 'should lowercase the user name' do - @user[:name].should == 'foo@localhost' + expect(@user[:name]).to eq('foo@localhost') end end diff --git a/mysql/templates/my.cnf.erb b/mysql/templates/my.cnf.erb index 8c2001ce8..f84f45c3b 100644 --- a/mysql/templates/my.cnf.erb +++ b/mysql/templates/my.cnf.erb @@ -10,7 +10,7 @@ <% vi.each do |vii| -%> <%= ki %> = <%= vii %> <% end -%> -<% elsif vi != :undef -%> +<% elsif ![nil, '', :undef].include?(vi) -%> <%= ki %> = <%= vi %> <% end -%> <% end -%> diff --git a/mysql/templates/mysqlbackup.sh.erb b/mysql/templates/mysqlbackup.sh.erb index 68c911fd5..e3ab7cc28 100755 --- a/mysql/templates/mysqlbackup.sh.erb +++ b/mysql/templates/mysqlbackup.sh.erb @@ -40,7 +40,7 @@ cleanup <% end -%> <% if @backupdatabases.empty? -%> <% if @file_per_database -%> -mysql -s -r -N -e 'SHOW DATABASES' | while read dbname +mysql -u${USER} -p${PASS} -s -r -N -e 'SHOW DATABASES' | while read dbname do mysqldump -u${USER} -p${PASS} --opt --flush-logs --single-transaction \ ${EVENTS} \ diff --git a/mysql/tests/mysql_database.pp b/mysql/tests/mysql_database.pp index 8747f707d..1ba24dfd9 100644 --- a/mysql/tests/mysql_database.pp +++ b/mysql/tests/mysql_database.pp @@ -10,3 +10,8 @@ ensure => present, charset => 'latin1', } +database{ 'test5': + ensure => present, + charset => 'binary', + collate => 'binary', +} diff --git a/neutron/Modulefile b/neutron/Modulefile deleted file mode 100644 index 74c6b7c83..000000000 --- a/neutron/Modulefile +++ /dev/null @@ -1,16 +0,0 @@ -name 'puppetlabs-neutron' -version '4.0.0' -author 'StackForge Contributors' -license 'Apache License 2.0' -summary 'Puppet module for OpenStack Neutron' -description 'Installs and configures OpenStack Neutron (Networking).' -project_page 'https://launchpad.net/puppet-neutron' -source 'https://github.com/stackforge/puppet-neutron' - -dependency 'puppetlabs/inifile', '>=1.0.0 <2.0.0' -dependency 'puppetlabs/keystone', '>=4.0.0 <5.0.0' -dependency 'puppetlabs/nova', '>=4.0.0 <5.0.0' -dependency 'puppetlabs/stdlib', '>=4.0.0 <5.0.0' -dependency 'puppetlabs/vswitch', '>=0.2.0 <1.0.0' -dependency 'duritong/sysctl', '>=0.0.1 <1.0.0' -dependency 'stackforge/openstacklib', '>=5.0.0' diff --git a/neutron/manifests/agents/l3.pp b/neutron/manifests/agents/l3.pp index 5465d6118..1748c1217 100644 --- a/neutron/manifests/agents/l3.pp +++ b/neutron/manifests/agents/l3.pp @@ -76,6 +76,29 @@ # (optional) namespaces can be deleted cleanly on the host running the L3 agent # Defaults to False # +# [*ha_enabled*] +# (optional) Enabled or not HA for L3 agent. +# Defaults to false +# +# [*ha_vrrp_auth_type*] +# (optional) VRRP authentication type. Can be AH or PASS. 
+# Defaults to "PASS" +# +# [*ha_vrrp_auth_password*] +# (optional) VRRP authentication password. Required if ha_enabled = true. +# Defaults to undef +# +# [*ha_vrrp_advert_int*] +# (optional) The advertisement interval in seconds. +# Defaults to '2' +# +# [*agent_mode*] +# (optional) The working mode for the agent. +# 'legacy': default behavior (without DVR) +# 'dvr': enable DVR for an L3 agent running on compute node (DVR in production) +# 'dvr_snat': enable DVR with centralized SNAT support (DVR for single-host, for testing only) +# Defaults to 'legacy' +# class neutron::agents::l3 ( $package_ensure = 'present', $enabled = true, @@ -93,7 +116,12 @@ $periodic_fuzzy_delay = '5', $enable_metadata_proxy = true, $network_device_mtu = undef, - $router_delete_namespaces = false + $router_delete_namespaces = false, + $ha_enabled = false, + $ha_vrrp_auth_type = 'PASS', + $ha_vrrp_auth_password = undef, + $ha_vrrp_advert_int = '3', + $agent_mode = 'legacy', ) { include neutron::params @@ -101,6 +129,14 @@ Neutron_config<||> ~> Service['neutron-l3'] Neutron_l3_agent_config<||> ~> Service['neutron-l3'] + if $ha_enabled { + neutron_l3_agent_config { + 'DEFAULT/ha_vrrp_auth_type': value => $ha_vrrp_auth_type; + 'DEFAULT/ha_vrrp_auth_password': value => $ha_vrrp_auth_password; + 'DEFAULT/ha_vrrp_advert_int': value => $ha_vrrp_advert_int; + } + } + neutron_l3_agent_config { 'DEFAULT/debug': value => $debug; 'DEFAULT/external_network_bridge': value => $external_network_bridge; @@ -115,6 +151,7 @@ 'DEFAULT/periodic_fuzzy_delay': value => $periodic_fuzzy_delay; 'DEFAULT/enable_metadata_proxy': value => $enable_metadata_proxy; 'DEFAULT/router_delete_namespaces': value => $router_delete_namespaces; + 'DEFAULT/agent_mode': value => $agent_mode; } if $network_device_mtu { diff --git a/neutron/manifests/agents/lbaas.pp b/neutron/manifests/agents/lbaas.pp index c330c5d4a..ac967ca65 100644 --- a/neutron/manifests/agents/lbaas.pp +++ b/neutron/manifests/agents/lbaas.pp @@ -29,7 +29,8 @@ # Defaults to true. # # [*user_group*] -# (optional) The user group. Defaults to nogroup. +# (optional) The user group. +# Defaults to $::neutron::params::nobody_user_group # # [*manage_haproxy_package*] # (optional) Whether to manage the haproxy package. @@ -44,7 +45,7 @@ $interface_driver = 'neutron.agent.linux.interface.OVSInterfaceDriver', $device_driver = 'neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver', $use_namespaces = true, - $user_group = 'nogroup', + $user_group = $::neutron::params::nobody_user_group, $manage_haproxy_package = true, ) { diff --git a/neutron/manifests/agents/ml2/ovs.pp b/neutron/manifests/agents/ml2/ovs.pp index 34c1ddbad..709a87fb7 100644 --- a/neutron/manifests/agents/ml2/ovs.pp +++ b/neutron/manifests/agents/ml2/ovs.pp @@ -83,21 +83,27 @@ # (optional) Firewall driver for realizing neutron security group function. # Defaults to 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver'. # +# [*enable_distributed_routing*] +# (optional) Set to True on L2 agents to enable support +# for distributed virtual routing. 
+# Defaults to false +# class neutron::agents::ml2::ovs ( - $package_ensure = 'present', - $enabled = true, - $bridge_uplinks = [], - $bridge_mappings = [], - $integration_bridge = 'br-int', - $enable_tunneling = false, - $tunnel_types = [], - $local_ip = false, - $tunnel_bridge = 'br-tun', - $vxlan_udp_port = 4789, - $polling_interval = 2, - $l2_population = false, - $arp_responder = false, - $firewall_driver = 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver' + $package_ensure = 'present', + $enabled = true, + $bridge_uplinks = [], + $bridge_mappings = [], + $integration_bridge = 'br-int', + $enable_tunneling = false, + $tunnel_types = [], + $local_ip = false, + $tunnel_bridge = 'br-tun', + $vxlan_udp_port = 4789, + $polling_interval = 2, + $l2_population = false, + $arp_responder = false, + $firewall_driver = 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver', + $enable_distributed_routing = false, ) { include neutron::params @@ -107,6 +113,10 @@ fail('Local ip for ovs agent must be set when tunneling is enabled') } + if $enable_distributed_routing and ! $l2_population { + fail('L2 population must be enabled when DVR is enabled') + } + Neutron_plugin_ml2<||> ~> Service['neutron-ovs-agent-service'] if ($bridge_mappings != []) { @@ -137,10 +147,11 @@ } neutron_plugin_ml2 { - 'agent/polling_interval': value => $polling_interval; - 'agent/l2_population': value => $l2_population; - 'agent/arp_responder': value => $arp_responder; - 'ovs/integration_bridge': value => $integration_bridge; + 'agent/polling_interval': value => $polling_interval; + 'agent/l2_population': value => $l2_population; + 'agent/arp_responder': value => $arp_responder; + 'agent/enable_distributed_routing': value => $enable_distributed_routing; + 'ovs/integration_bridge': value => $integration_bridge; } if ($firewall_driver) { diff --git a/neutron/manifests/params.pp b/neutron/manifests/params.pp index c143bf890..4268616f9 100644 --- a/neutron/manifests/params.pp +++ b/neutron/manifests/params.pp @@ -2,6 +2,8 @@ class neutron::params { if($::osfamily == 'Redhat') { + $nobody_user_group = 'nobody' + $package_name = 'openstack-neutron' $server_package = false $server_service = 'neutron-server' @@ -43,7 +45,11 @@ $vpnaas_agent_package = 'openstack-neutron-vpn-agent' $vpnaas_agent_service = 'neutron-vpn-agent' - $openswan_package = 'openswan' + if $::operatingsystemrelease =~ /^7.*/ { + $openswan_package = 'libreswan' + } else { + $openswan_package = 'openswan' + } $l3_agent_package = false $l3_agent_service = 'neutron-l3-agent' @@ -56,6 +62,8 @@ } elsif($::osfamily == 'Debian') { + $nobody_user_group = 'nogroup' + $package_name = 'neutron-common' $server_package = 'neutron-server' $server_service = 'neutron-server' diff --git a/neutron/manifests/plugins/cisco.pp b/neutron/manifests/plugins/cisco.pp index 3db672e25..10ae87437 100644 --- a/neutron/manifests/plugins/cisco.pp +++ b/neutron/manifests/plugins/cisco.pp @@ -171,9 +171,14 @@ # In RH, this link is used to start Neutron process but in Debian, it's used only # to manage database synchronization. 
- ensure_resource('file', '/etc/neutron/plugin.ini', { - ensure => link, - target => '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini', - require => Package['neutron-plugin-ovs'] - }) + if defined(File['/etc/neutron/plugin.ini']) { + File <| path == '/etc/neutron/plugin.ini' |> { target => '/etc/neutron/plugins/cisco/cisco_plugins.ini' } + } + else { + file {'/etc/neutron/plugin.ini': + ensure => link, + target => '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini', + require => Package['neutron-plugin-ovs'], + } + } } diff --git a/neutron/manifests/plugins/ml2.pp b/neutron/manifests/plugins/ml2.pp index 127a053b5..9e4a3967d 100644 --- a/neutron/manifests/plugins/ml2.pp +++ b/neutron/manifests/plugins/ml2.pp @@ -138,6 +138,7 @@ Package['neutron-plugin-ml2'] -> Neutron_plugin_ml2<||> Package['neutron-plugin-ml2'] -> File['/etc/neutron/plugin.ini'] } else { + Package <| title == 'neutron-server' |> -> Neutron_plugin_ml2<||> Package['neutron'] -> File['/etc/neutron/plugin.ini'] } diff --git a/neutron/manifests/policy.pp b/neutron/manifests/policy.pp new file mode 100644 index 000000000..434de7700 --- /dev/null +++ b/neutron/manifests/policy.pp @@ -0,0 +1,28 @@ +# == Class: neutron::policy +# +# Configure the neutron policies +# +# === Parameters +# +# [*policies*] +# (optional) Set of policies to configure for neutron +# Example : { 'neutron-context_is_admin' => {'context_is_admin' => 'true'}, 'neutron-default' => {'default' => 'rule:admin_or_owner'} } +# Defaults to empty hash. +# +# [*policy_path*] +# (optional) Path to the neutron policy.json file +# Defaults to /etc/neutron/policy.json +# +class neutron::policy ( + $policies = {}, + $policy_path = '/etc/neutron/policy.json', +) { + + Openstacklib::Policy::Base { + file_path => $policy_path, + } + class { 'openstacklib::policy' : + policies => $policies, + } + +} diff --git a/neutron/manifests/server.pp b/neutron/manifests/server.pp index 5325150c6..4921fca7e 100644 --- a/neutron/manifests/server.pp +++ b/neutron/manifests/server.pp @@ -153,52 +153,94 @@ # [*mysql_module*] # (optional) Deprecated. Does nothing. # +# [*router_distributed*] +# (optional) Setting the "router_distributed" flag to "True" will default to the creation +# of distributed tenant routers. +# Also can be the type of the router on the create request (admin-only attribute). +# Defaults to false +# +# [*l3_ha*] +# (optional) Enable high availability for virtual routers. +# Defaults to false +# +# [*max_l3_agents_per_router*] +# (optional) Maximum number of l3 agents which a HA router will be scheduled on. If set to '0', a router will be scheduled on every agent. +# Defaults to '3' +# +# [*min_l3_agents_per_router*] +# (optional) Minimum number of l3 agents which a HA router will be scheduled on. +# Defaults to '2' +# +# [*l3_ha_net_cidr*] +# (optional) CIDR of the administrative network if HA mode is enabled. 
+# Defaults to '169.254.192.0/18' +# class neutron::server ( - $package_ensure = 'present', - $enabled = true, - $manage_service = true, - $auth_password = false, - $auth_type = 'keystone', - $auth_host = 'localhost', - $auth_port = '35357', - $auth_admin_prefix = false, - $auth_tenant = 'services', - $auth_user = 'neutron', - $auth_protocol = 'http', - $auth_uri = false, - $database_connection = 'sqlite:////var/lib/neutron/ovs.sqlite', - $database_max_retries = 10, - $database_idle_timeout = 3600, - $database_retry_interval = 10, - $database_min_pool_size = 1, - $database_max_pool_size = 10, - $database_max_overflow = 20, - $sync_db = false, - $api_workers = $::processorcount, - $rpc_workers = $::processorcount, - $agent_down_time = '75', - $router_scheduler_driver = 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler', + $package_ensure = 'present', + $enabled = true, + $manage_service = true, + $auth_password = false, + $auth_type = 'keystone', + $auth_host = 'localhost', + $auth_port = '35357', + $auth_admin_prefix = false, + $auth_tenant = 'services', + $auth_user = 'neutron', + $auth_protocol = 'http', + $auth_uri = false, + $database_connection = 'sqlite:////var/lib/neutron/ovs.sqlite', + $database_max_retries = 10, + $database_idle_timeout = 3600, + $database_retry_interval = 10, + $database_min_pool_size = 1, + $database_max_pool_size = 10, + $database_max_overflow = 20, + $sync_db = false, + $api_workers = $::processorcount, + $rpc_workers = $::processorcount, + $agent_down_time = '75', + $router_scheduler_driver = 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler', + $router_distributed = false, + $l3_ha = false, + $max_l3_agents_per_router = 3, + $min_l3_agents_per_router = 2, + $l3_ha_net_cidr = '169.254.192.0/18', # DEPRECATED PARAMETERS - $mysql_module = undef, - $sql_connection = undef, - $connection = undef, - $sql_max_retries = undef, - $max_retries = undef, - $sql_idle_timeout = undef, - $idle_timeout = undef, - $sql_reconnect_interval = undef, - $retry_interval = undef, - $log_dir = undef, - $log_file = undef, - $report_interval = undef, + $mysql_module = undef, + $sql_connection = undef, + $connection = undef, + $sql_max_retries = undef, + $max_retries = undef, + $sql_idle_timeout = undef, + $idle_timeout = undef, + $sql_reconnect_interval = undef, + $retry_interval = undef, + $log_dir = undef, + $log_file = undef, + $report_interval = undef, ) { include neutron::params + include neutron::policy require keystone::python Nova_admin_tenant_id_setter<||> ~> Service['neutron-server'] Neutron_config<||> ~> Service['neutron-server'] Neutron_api_config<||> ~> Service['neutron-server'] + Class['neutron::policy'] ~> Service['neutron-server'] + + if $l3_ha { + if $min_l3_agents_per_router <= $max_l3_agents_per_router or $max_l3_agents_per_router == '0' { + neutron_config { + 'DEFAULT/ha_enabled': value => true; + 'DEFAULT/max_l3_agents_per_router': value => $max_l3_agents_per_router; + 'DEFAULT/min_l3_agents_per_router': value => $min_l3_agents_per_router; + 'DEFAULT/l3_ha_net_cidr': value => $l3_ha_net_cidr; + } + } else { + fail('min_l3_agents_per_router should be less than or equal to max_l3_agents_per_router.') + } + } if $mysql_module { warning('The mysql_module parameter is deprecated. 
The latest 2.x mysql module will be used.') @@ -296,6 +338,7 @@ 'DEFAULT/rpc_workers': value => $rpc_workers; 'DEFAULT/agent_down_time': value => $agent_down_time; 'DEFAULT/router_scheduler_driver': value => $router_scheduler_driver; + 'DEFAULT/router_distributed': value => $router_distributed; 'database/connection': value => $database_connection_real, secret => true; 'database/idle_timeout': value => $database_idle_timeout_real; 'database/retry_interval': value => $database_retry_interval_real; @@ -309,6 +352,7 @@ Package['neutron-server'] -> Neutron_api_config<||> Package['neutron-server'] -> Neutron_config<||> Package['neutron-server'] -> Service['neutron-server'] + Package['neutron-server'] -> Class['neutron::policy'] package { 'neutron-server': ensure => $package_ensure, name => $::neutron::params::server_package, @@ -316,6 +360,7 @@ } else { # Some platforms (RedHat) does not provide a neutron-server package. # The neutron api config file is provided by the neutron package. + Package['neutron'] -> Class['neutron::policy'] Package['neutron'] -> Neutron_api_config<||> } diff --git a/neutron/metadata.json b/neutron/metadata.json new file mode 100644 index 000000000..d24811ff2 --- /dev/null +++ b/neutron/metadata.json @@ -0,0 +1,42 @@ +{ + "name": "stackforge-neutron", + "version": "5.0.0", + "author": "StackForge Contributors", + "summary": "Puppet module for OpenStack Neutron", + "license": "Apache License 2.0", + "source": "git://github.com/stackforge/puppet-neutron.git", + "project_page": "https://launchpad.net/puppet-neutron", + "issues_url": "https://bugs.launchpad.net/puppet-neutron", + "requirements": [ + { "name": "pe","version_requirement": "3.x" }, + { "name": "puppet","version_requirement": "3.x" } + ], + "operatingsystem_support": [ + { + "operatingsystem": "Debian", + "operatingsystemrelease": ["7"] + }, + { + "operatingsystem": "Fedora", + "operatingsystemrelease": ["20"] + }, + { + "operatingsystem": "RedHat", + "operatingsystemrelease": ["6.5","7"] + }, + { + "operatingsystem": "Ubuntu", + "operatingsystemrelease": ["12.04","14.04"] + } + ], + "description": "Installs and configures OpenStack Neutron (Networking).", + "dependencies": [ + { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, + { "name": "stackforge/keystone", "version_requirement": ">=5.0.0 <6.0.0" }, + { "name": "stackforge/nova", "version_requirement": ">=5.0.0 <6.0.0" }, + { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, + { "name": "stackforge/vswitch", "version_requirement": ">=1.0.0 <2.0.0" }, + { "name": "duritong/sysctl", "version_requirement": ">=0.0.1 <1.0.0" }, + { "name": "stackforge/openstacklib", "version_requirement": ">=5.0.0" } + ] +} diff --git a/neutron/spec/classes/neutron_agents_l3_spec.rb b/neutron/spec/classes/neutron_agents_l3_spec.rb index b3d773703..a7301fd7b 100644 --- a/neutron/spec/classes/neutron_agents_l3_spec.rb +++ b/neutron/spec/classes/neutron_agents_l3_spec.rb @@ -22,7 +22,12 @@ :periodic_fuzzy_delay => '5', :enable_metadata_proxy => true, :network_device_mtu => nil, - :router_delete_namespaces => false } + :router_delete_namespaces => false, + :ha_enabled => false, + :ha_vrrp_auth_type => 'PASS', + :ha_vrrp_auth_password => nil, + :ha_vrrp_advert_int => '3', + :agent_mode => 'legacy' } end let :params do @@ -83,6 +88,27 @@ should contain_service('neutron-l3').without_ensure end end + + context 'with DVR' do + before :each do + params.merge!(:agent_mode => 'dvr') + end + it 'should enable DVR mode' do + should 
contain_neutron_l3_agent_config('DEFAULT/agent_mode').with_value(p[:agent_mode]) + end + end + + context 'with HA routers' do + before :each do + params.merge!(:ha_enabled => true, + :ha_vrrp_auth_password => 'secrete') + end + it 'should configure VRRP' do + should contain_neutron_l3_agent_config('DEFAULT/ha_vrrp_auth_type').with_value(p[:ha_vrrp_auth_type]) + should contain_neutron_l3_agent_config('DEFAULT/ha_vrrp_auth_password').with_value(p[:ha_vrrp_auth_password]) + should contain_neutron_l3_agent_config('DEFAULT/ha_vrrp_advert_int').with_value(p[:ha_vrrp_advert_int]) + end + end end shared_examples_for 'neutron l3 agent with network_device_mtu specified' do diff --git a/neutron/spec/classes/neutron_agents_lbaas_spec.rb b/neutron/spec/classes/neutron_agents_lbaas_spec.rb index 016581012..7f67953ae 100644 --- a/neutron/spec/classes/neutron_agents_lbaas_spec.rb +++ b/neutron/spec/classes/neutron_agents_lbaas_spec.rb @@ -17,7 +17,6 @@ :interface_driver => 'neutron.agent.linux.interface.OVSInterfaceDriver', :device_driver => 'neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver', :use_namespaces => true, - :user_group => 'nogroup', :manage_haproxy_package => true } end @@ -38,7 +37,7 @@ should contain_neutron_lbaas_agent_config('DEFAULT/interface_driver').with_value(p[:interface_driver]); should contain_neutron_lbaas_agent_config('DEFAULT/device_driver').with_value(p[:device_driver]); should contain_neutron_lbaas_agent_config('DEFAULT/use_namespaces').with_value(p[:use_namespaces]); - should contain_neutron_lbaas_agent_config('haproxy/user_group').with_value(p[:user_group]); + should contain_neutron_lbaas_agent_config('haproxy/user_group').with_value(platform_params[:nobody_user_group]); end it 'installs neutron lbaas agent package' do @@ -110,8 +109,9 @@ class { 'neutron': rabbit_password => 'passw0rd' }" end let :platform_params do - { :haproxy_package => 'haproxy', + { :haproxy_package => 'haproxy', :lbaas_agent_package => 'neutron-lbaas-agent', + :nobody_user_group => 'nogroup', :lbaas_agent_service => 'neutron-lbaas-agent' } end @@ -126,7 +126,8 @@ class { 'neutron': rabbit_password => 'passw0rd' }" end let :platform_params do - { :haproxy_package => 'haproxy', + { :haproxy_package => 'haproxy', + :nobody_user_group => 'nobody', :lbaas_agent_service => 'neutron-lbaas-agent' } end diff --git a/neutron/spec/classes/neutron_agents_ml2_ovs_spec.rb b/neutron/spec/classes/neutron_agents_ml2_ovs_spec.rb index a4e9f77a1..d9c9dc539 100644 --- a/neutron/spec/classes/neutron_agents_ml2_ovs_spec.rb +++ b/neutron/spec/classes/neutron_agents_ml2_ovs_spec.rb @@ -7,18 +7,19 @@ end let :default_params do - { :package_ensure => 'present', - :enabled => true, - :bridge_uplinks => [], - :bridge_mappings => [], - :integration_bridge => 'br-int', - :enable_tunneling => false, - :local_ip => false, - :tunnel_bridge => 'br-tun', - :polling_interval => 2, - :l2_population => false, - :arp_responder => false, - :firewall_driver => 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver' } + { :package_ensure => 'present', + :enabled => true, + :bridge_uplinks => [], + :bridge_mappings => [], + :integration_bridge => 'br-int', + :enable_tunneling => false, + :local_ip => false, + :tunnel_bridge => 'br-tun', + :polling_interval => 2, + :l2_population => false, + :arp_responder => false, + :enable_distributed_routing => false, + :firewall_driver => 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver' } end let :params do @@ -90,6 +91,16 @@ end end + 
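  # The DVR and HA contexts above imply neutron::agents::l3 now exposes
  # agent_mode, ha_enabled and the ha_vrrp_* settings; a declaration mirroring
  # those spec examples would presumably look like this (the password is an
  # assumed value):
  #
  #   class { 'neutron::agents::l3':
  #     ha_enabled            => true,
  #     ha_vrrp_auth_password => 'a_big_secret',
  #   }
  #
  # while a DVR compute node would instead set agent_mode => 'dvr'.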
context 'when enabling DVR' do + before :each do + params.merge!(:enable_distributed_routing => true, + :l2_population => true ) + end + it 'should enable DVR' do + should contain_neutron_plugin_ml2('agent/enable_distributed_routing').with_value(true) + end + end + context 'when supplying bridge mappings for provider networks' do before :each do params.merge!(:bridge_uplinks => ['br-ex:eth2'],:bridge_mappings => ['default:br-ex']) @@ -151,6 +162,18 @@ should contain_neutron_plugin_ml2('agent/vxlan_udp_port').with_value(params[:vxlan_udp_port]) end end + + context 'when l2 population is disabled and DVR enabled' do + before :each do + params.merge!(:enable_distributed_routing => true, + :l2_population => false ) + end + it 'should fail' do + expect do + subject + end.to raise_error(Puppet::Error, /L2 population must be enabled when DVR is enabled/) + end + end end end diff --git a/neutron/spec/classes/neutron_agents_vpnaas_spec.rb b/neutron/spec/classes/neutron_agents_vpnaas_spec.rb index ec6351136..8ecf1ff60 100644 --- a/neutron/spec/classes/neutron_agents_vpnaas_spec.rb +++ b/neutron/spec/classes/neutron_agents_vpnaas_spec.rb @@ -126,13 +126,31 @@ it_configures 'neutron vpnaas agent' end - context 'on RedHat platforms' do + context 'on RedHat 6 platforms' do let :facts do - { :osfamily => 'RedHat' } + { :osfamily => 'RedHat', + :operatingsystemrelease => '6.5', + :operatingsystemmajrelease => 6 } end let :platform_params do - { :openswan_package => 'openswan', + { :openswan_package => 'openswan', + :vpnaas_agent_package => 'openstack-neutron-vpn-agent', + :vpnaas_agent_service => 'neutron-vpn-agent'} + end + + it_configures 'neutron vpnaas agent' + end + + context 'on RedHat 7 platforms' do + let :facts do + { :osfamily => 'RedHat', + :operatingsystemrelease => '7.1.2', + :operatingsystemmajrelease => 7 } + end + + let :platform_params do + { :openswan_package => 'libreswan', :vpnaas_agent_package => 'openstack-neutron-vpn-agent', :vpnaas_agent_service => 'neutron-vpn-agent'} end diff --git a/neutron/spec/classes/neutron_policy_spec.rb b/neutron/spec/classes/neutron_policy_spec.rb new file mode 100644 index 000000000..4f30fae9e --- /dev/null +++ b/neutron/spec/classes/neutron_policy_spec.rb @@ -0,0 +1,40 @@ +require 'spec_helper' + +describe 'neutron::policy' do + + shared_examples_for 'neutron policies' do + let :params do + { + :policy_path => '/etc/neutron/policy.json', + :policies => { + 'context_is_admin' => { + 'key' => 'context_is_admin', + 'value' => 'foo:bar' + } + } + } + end + + it 'set up the policies' do + should contain_class('openstacklib::policy').with({ + :policies => params[:policies] + }) + end + end + + context 'on Debian platforms' do + let :facts do + { :osfamily => 'Debian' } + end + + it_configures 'neutron policies' + end + + context 'on RedHat platforms' do + let :facts do + { :osfamily => 'RedHat' } + end + + it_configures 'neutron policies' + end +end diff --git a/neutron/spec/classes/neutron_server_spec.rb b/neutron/spec/classes/neutron_server_spec.rb index a3f14f21f..878bee184 100644 --- a/neutron/spec/classes/neutron_server_spec.rb +++ b/neutron/spec/classes/neutron_server_spec.rb @@ -12,23 +12,28 @@ end let :default_params do - { :package_ensure => 'present', - :enabled => true, - :auth_type => 'keystone', - :auth_host => 'localhost', - :auth_port => '35357', - :auth_tenant => 'services', - :auth_user => 'neutron', - :database_connection => 'sqlite:////var/lib/neutron/ovs.sqlite', - :database_max_retries => '10', - :database_idle_timeout => '3600', - 
:database_retry_interval => '10', - :database_min_pool_size => '1', - :database_max_pool_size => '10', - :database_max_overflow => '20', - :sync_db => false, - :agent_down_time => '75', - :router_scheduler_driver => 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler', + { :package_ensure => 'present', + :enabled => true, + :auth_type => 'keystone', + :auth_host => 'localhost', + :auth_port => '35357', + :auth_tenant => 'services', + :auth_user => 'neutron', + :database_connection => 'sqlite:////var/lib/neutron/ovs.sqlite', + :database_max_retries => '10', + :database_idle_timeout => '3600', + :database_retry_interval => '10', + :database_min_pool_size => '1', + :database_max_pool_size => '10', + :database_max_overflow => '20', + :sync_db => false, + :agent_down_time => '75', + :router_scheduler_driver => 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler', + :router_distributed => false, + :l3_ha => false, + :max_l3_agents_per_router => '3', + :min_l3_agents_per_router => '2', + :l3_ha_net_cidr => '169.254.192.0/18' } end @@ -49,6 +54,7 @@ end it { should contain_class('neutron::params') } + it { should contain_class('neutron::policy') } it 'configures authentication middleware' do should contain_neutron_api_config('filter:authtoken/auth_host').with_value(p[:auth_host]); @@ -100,6 +106,48 @@ should contain_service('neutron-server').without_ensure end end + + context 'with DVR enabled' do + before :each do + params.merge!(:router_distributed => true) + end + it 'should enable DVR' do + should contain_neutron_config('DEFAULT/router_distributed').with_value(true) + end + end + + context 'with HA routers enabled' do + before :each do + params.merge!(:l3_ha => true) + end + it 'should enable HA routers' do + should contain_neutron_config('DEFAULT/ha_enabled').with_value(true) + should contain_neutron_config('DEFAULT/max_l3_agents_per_router').with_value('3') + should contain_neutron_config('DEFAULT/min_l3_agents_per_router').with_value('2') + should contain_neutron_config('DEFAULT/l3_ha_net_cidr').with_value('169.254.192.0/18') + end + end + + context 'with HA routers enabled with unlimited l3 agents per router' do + before :each do + params.merge!(:l3_ha => true, + :max_l3_agents_per_router => '0' ) + end + it 'should enable HA routers' do + should contain_neutron_config('DEFAULT/max_l3_agents_per_router').with_value('0') + end + end + + context 'with HA routers enabled and wrong parameters' do + before :each do + params.merge!(:l3_ha => true, + :max_l3_agents_per_router => '2', + :min_l3_agents_per_router => '3' ) + end + it 'should fail to configure HA routerd' do + expect { subject }.to raise_error(Puppet::Error, /min_l3_agents_per_router should be less than or equal to max_l3_agents_per_router./) + end + end end shared_examples_for 'a neutron server with auth_admin_prefix set' do diff --git a/nova/.fixtures.yml b/nova/.fixtures.yml index 6e01d4ee1..40b44d24c 100644 --- a/nova/.fixtures.yml +++ b/nova/.fixtures.yml @@ -5,7 +5,7 @@ fixtures: 'keystone': 'git://github.com/stackforge/puppet-keystone.git' 'mysql': repo: 'git://github.com/puppetlabs/puppetlabs-mysql.git' - ref: 'origin/2.x' + ref: 'origin/2.2.x' 'openstacklib': 'git://github.com/stackforge/puppet-openstacklib.git' 'postgresql': repo: "git://github.com/puppetlabs/puppet-postgresql.git" diff --git a/nova/Modulefile b/nova/Modulefile deleted file mode 100644 index adada319d..000000000 --- a/nova/Modulefile +++ /dev/null @@ -1,18 +0,0 @@ -name 'puppetlabs-nova' -version '4.0.0' -author 'Puppet Labs and StackForge Contributors' 
-license 'Apache License 2.0' -summary 'Puppet module for OpenStack Nova' -description 'Installs and configures OpenStack Nova (Compute).' -project_page 'https://launchpad.net/puppet-nova' -source 'https://github.com/stackforge/puppet-nova' - -dependency 'dprince/qpid', '>= 1.0.0 <2.0.0' -dependency 'duritong/sysctl', '>=0.0.1 <1.0.0' -dependency 'puppetlabs/cinder', '>=4.0.0 <5.0.0' -dependency 'puppetlabs/glance', '>=4.0.0 <5.0.0' -dependency 'puppetlabs/inifile', '>=1.0.0 <2.0.0' -dependency 'puppetlabs/keystone', '>=4.0.0 <5.0.0' -dependency 'puppetlabs/rabbitmq', '>=2.0.2 <4.0.0' -dependency 'puppetlabs/stdlib', '>=4.0.0 <5.0.0' -dependency 'stackforge/openstacklib', '>=5.0.0' diff --git a/nova/manifests/compute/libvirt.pp b/nova/manifests/compute/libvirt.pp index c4ddd28a4..c853c740a 100644 --- a/nova/manifests/compute/libvirt.pp +++ b/nova/manifests/compute/libvirt.pp @@ -65,7 +65,6 @@ # (optional) libvirt service name. # Defaults to $::nova::params::libvirt_service_name # - class nova::compute::libvirt ( $libvirt_virt_type = 'kvm', $vncserver_listen = '127.0.0.1', @@ -76,10 +75,10 @@ $remove_unused_kernels = undef, $remove_unused_resized_minimum_age_seconds = undef, $remove_unused_original_minimum_age_seconds = undef, - $libvirt_service_name = undef, + $libvirt_service_name = $::nova::params::libvirt_service_name, # DEPRECATED PARAMETER $libvirt_type = false -) { +) inherits nova::params { include nova::params @@ -137,16 +136,10 @@ name => $::nova::params::libvirt_package_name, } - if $libvirt_service_name { - $libvirt_service_name_real=$libvirt_service_name - } else { - $libvirt_service_name_real=$::nova::params::libvirt_service_name - } - service { 'libvirt' : ensure => running, enable => true, - name => $libvirt_service_name_real, + name => $libvirt_service_name, provider => $::nova::params::special_service_provider, require => Package['libvirt'], } diff --git a/nova/manifests/keystone/auth.pp b/nova/manifests/keystone/auth.pp index d72cf1d1e..4650c680a 100644 --- a/nova/manifests/keystone/auth.pp +++ b/nova/manifests/keystone/auth.pp @@ -71,6 +71,14 @@ # (optional) Whether to create the v3 endpoint. # Defaults to true # +# [*configure_user*] +# (optional) Whether to create the service user. +# Defaults to true +# +# [*configure_user_role*] +# (optional) Whether to configure the admin role for the service user. 
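# When the service user and its role grant are managed outside this class,
# both flags can be turned off while the service and endpoint are still
# created; a sketch, with an assumed password value:
#
#   class { 'nova::keystone::auth':
#     password            => 'nova_password',
#     configure_user      => false,
#     configure_user_role => false,
#   }
#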
+# Defaults to true +# # [*cinder*] # (optional) Deprecated and has no effect # Defaults to undef @@ -105,6 +113,8 @@ $public_protocol = 'http', $configure_endpoint = true, $configure_endpoint_v3 = true, + $configure_user = true, + $configure_user_role = true, $admin_protocol = 'http', $internal_protocol = 'http' ) { @@ -127,16 +137,22 @@ Keystone_endpoint["${region}/${real_service_name}"] ~> Service <| name == 'nova-api' |> - keystone_user { $auth_name: - ensure => present, - password => $password, - email => $email, - tenant => $tenant, + if $configure_user { + keystone_user { $auth_name: + ensure => present, + password => $password, + email => $email, + tenant => $tenant, + } } - keystone_user_role { "${auth_name}@${tenant}": - ensure => present, - roles => 'admin', + + if $configure_user_role { + keystone_user_role { "${auth_name}@${tenant}": + ensure => present, + roles => 'admin', + } } + keystone_service { $real_service_name: ensure => present, type => 'compute', diff --git a/nova/manifests/migration/libvirt.pp b/nova/manifests/migration/libvirt.pp index 8abeaba69..6ee441f42 100644 --- a/nova/manifests/migration/libvirt.pp +++ b/nova/manifests/migration/libvirt.pp @@ -30,9 +30,10 @@ } file_line { '/etc/sysconfig/libvirtd libvirtd args': - path => '/etc/sysconfig/libvirtd', - line => 'LIBVIRTD_ARGS="--listen"', - match => 'LIBVIRTD_ARGS=', + path => '/etc/sysconfig/libvirtd', + line => 'LIBVIRTD_ARGS="--listen"', + match => 'LIBVIRTD_ARGS=', + notify => Service['libvirt'], } Package['libvirt'] -> File_line<| path == '/etc/sysconfig/libvirtd' |> @@ -59,14 +60,14 @@ match => 'auth_tcp =', notify => Service['libvirt'], } - - file_line { '/etc/default/libvirt-bin libvirtd opts': - path => '/etc/default/libvirt-bin', - line => 'libvirtd_opts="-d -l"', - match => 'libvirtd_opts=', + file_line { "/etc/default/${::nova::compute::libvirt::libvirt_service_name} libvirtd opts": + path => "/etc/default/${::nova::compute::libvirt::libvirt_service_name}", + line => 'libvirtd_opts="-d -l"', + match => 'libvirtd_opts=', + notify => Service['libvirt'], } - Package['libvirt'] -> File_line<| path == '/etc/default/libvirt-bin' |> + Package['libvirt'] -> File_line<| path == "/etc/default/${::nova::compute::libvirt::libvirt_service_name}" |> } default: { diff --git a/nova/manifests/params.pp b/nova/manifests/params.pp index e62f58d52..e5255e181 100644 --- a/nova/manifests/params.pp +++ b/nova/manifests/params.pp @@ -41,7 +41,7 @@ $root_helper = 'sudo nova-rootwrap' $lock_path = '/var/lib/nova/tmp' case $::operatingsystem { - 'Fedora', 'RedHat': { + 'Fedora': { $special_service_provider = undef } 'RedHat', 'CentOS', 'Scientific': { diff --git a/nova/metadata.json b/nova/metadata.json new file mode 100644 index 000000000..9e7e50b78 --- /dev/null +++ b/nova/metadata.json @@ -0,0 +1,44 @@ +{ + "name": "stackforge-nova", + "version": "5.0.0", + "author": "Puppet Labs and StackForge Contributors", + "summary": "Puppet module for OpenStack Nova", + "license": "Apache License 2.0", + "source": "git://github.com/stackforge/puppet-nova.git", + "project_page": "https://launchpad.net/puppet-nova", + "issues_url": "https://bugs.launchpad.net/puppet-nova", + "requirements": [ + { "name": "pe","version_requirement": "3.x" }, + { "name": "puppet","version_requirement": "3.x" } + ], + "operatingsystem_support": [ + { + "operatingsystem": "Debian", + "operatingsystemrelease": ["7"] + }, + { + "operatingsystem": "Fedora", + "operatingsystemrelease": ["20"] + }, + { + "operatingsystem": "RedHat", + "operatingsystemrelease": 
["6.5","7"] + }, + { + "operatingsystem": "Ubuntu", + "operatingsystemrelease": ["12.04","14.04"] + } + ], + "description": "Installs and configures OpenStack Nova (Compute).", + "dependencies": [ + { "name": "dprince/qpid", "version_requirement": ">=1.0.0 <2.0.0" }, + { "name": "duritong/sysctl", "version_requirement": ">=0.0.1 <1.0.0" }, + { "name": "stackforge/cinder", "version_requirement": ">=5.0.0 <6.0.0" }, + { "name": "stackforge/glance", "version_requirement": ">=5.0.0 <6.0.0" }, + { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, + { "name": "stackforge/keystone", "version_requirement": ">=5.0.0 <6.0.0" }, + { "name": "puppetlabs/rabbitmq", "version_requirement": ">=2.0.2 <4.0.0" }, + { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, + { "name": "stackforge/openstacklib", "version_requirement": ">=5.0.0" } + ] +} diff --git a/nova/spec/classes/nova_compute_libvirt_spec.rb b/nova/spec/classes/nova_compute_libvirt_spec.rb index bb84b2e5d..bdf7362d7 100644 --- a/nova/spec/classes/nova_compute_libvirt_spec.rb +++ b/nova/spec/classes/nova_compute_libvirt_spec.rb @@ -94,6 +94,7 @@ it { should contain_class('nova::migration::libvirt')} it { should contain_nova_config('DEFAULT/vncserver_listen').with_value('0.0.0.0')} + it { should contain_file_line('/etc/default/libvirt-bin libvirtd opts').with(:line => 'libvirtd_opts="-d -l"') } end context 'with vncserver_listen not set to 0.0.0.0' do @@ -105,6 +106,16 @@ it { expect { should contain_class('nova::compute::libvirt') }.to \ raise_error(Puppet::Error, /For migration support to work, you MUST set vncserver_listen to '0.0.0.0'/) } end + + context 'with custom libvirt service name on Debian plateforms' do + let :params do + { :libvirt_service_name => 'libvirtd', + :vncserver_listen => '0.0.0.0', + :migration_support => true } + end + it { should contain_file_line('/etc/default/libvirtd libvirtd opts').with(:line => 'libvirtd_opts="-d -l"') } + + end end end diff --git a/nova/spec/classes/nova_keystone_endpoint_spec.rb b/nova/spec/classes/nova_keystone_endpoint_spec.rb index 6beb46cd9..7b515e4f0 100644 --- a/nova/spec/classes/nova_keystone_endpoint_spec.rb +++ b/nova/spec/classes/nova_keystone_endpoint_spec.rb @@ -137,6 +137,42 @@ it { should_not contain_keystone_endpoint('RegionOne/nova_ec2') } end + describe 'when disabling user configuration' do + before do + params.merge!( :configure_user => false ) + end + + it { should_not contain_keystone_user('nova') } + + it { should contain_keystone_user_role('nova@services') } + + it { should contain_keystone_service('nova').with( + :ensure => 'present', + :type => 'compute', + :description => 'Openstack Compute Service' + )} + end + + describe 'when disabling user and user role configuration' do + let :params do + { + :configure_user => false, + :configure_user_role => false, + :password => 'nova_password' + } + end + + it { should_not contain_keystone_user('nova') } + + it { should_not contain_keystone_user_role('nova@services') } + + it { should contain_keystone_service('nova').with( + :ensure => 'present', + :type => 'compute', + :description => 'Openstack Compute Service' + )} + end + describe 'when configuring nova-api and the keystone endpoint' do let :pre_condition do "class { 'nova::api': admin_password => 'test' } diff --git a/openstacklib/.fixtures.yml b/openstacklib/.fixtures.yml index d33ced52d..4ae8866e8 100644 --- a/openstacklib/.fixtures.yml +++ b/openstacklib/.fixtures.yml @@ -1,5 +1,6 @@ fixtures: repositories: + aviator: 
git://github.com/aimonb/puppet_aviator.git mysql: git://github.com/puppetlabs/puppetlabs-mysql.git stdlib: git://github.com/puppetlabs/puppetlabs-stdlib.git symlinks: diff --git a/openstacklib/.gitignore b/openstacklib/.gitignore index e2f8e3382..8a0be9a46 100644 --- a/openstacklib/.gitignore +++ b/openstacklib/.gitignore @@ -1,4 +1,5 @@ *.swp -spec/fixtures/ +spec/fixtures/* +!spec/fixtures/vcr/ pkg Gemfile.lock diff --git a/openstacklib/Gemfile b/openstacklib/Gemfile index d965fa900..330d9ba56 100644 --- a/openstacklib/Gemfile +++ b/openstacklib/Gemfile @@ -4,9 +4,11 @@ group :development, :test do gem 'puppetlabs_spec_helper', :require => false gem 'puppet-lint', '~> 0.3.2' gem 'rake', '10.1.1' - gem 'rspec', '< 2.99' + gem 'rspec' + gem 'mocha' gem 'json' - gem 'webmock' + gem 'faraday', '0.8.8', :require => false + gem 'vcr', :require => false end if puppetversion = ENV['PUPPET_GEM_VERSION'] diff --git a/openstacklib/Modulefile b/openstacklib/Modulefile deleted file mode 100644 index 7c73b3ec2..000000000 --- a/openstacklib/Modulefile +++ /dev/null @@ -1,11 +0,0 @@ -name 'puppetlabs-openstacklib' -version '5.0.0-devel' -source 'https://github.com/stackforge/puppet-openstacklib' -author 'Puppet Labs' -license 'Apache License 2.0' -summary 'Puppet Labs OpenStackLib Module' -description 'Puppet module library to expose common functionality between OpenStack modules' -project_page 'https://launchpad.net/puppet-openstacklib' - -dependency 'puppetlabs/mysql', '>=2.2.0 <3.0.0' -dependency 'puppetlabs/stdlib', '>=3.2.0' diff --git a/openstacklib/README.md b/openstacklib/README.md index fee750957..79fc5d69a 100644 --- a/openstacklib/README.md +++ b/openstacklib/README.md @@ -119,6 +119,61 @@ array or string; optional; default to undef Privileges given to the database user; string or array of strings; optional; default to 'ALL' +### Types and Providers + +#### Aviator + +#####`Puppet::add_aviator_params` + +The aviator type is not a real type, but it serves to simulate a mixin model, +whereby other types can call out to the Puppet::add\_aviator\_params method in +order to add aviator-specific parameters to themselves. Currently this adds the +auth parameter to the given type. The method must be called after the type is +declared, e.g.: + +```puppet +require 'puppet/type/aviator' +Puppet::Type.newtype(:my_type) do +# ... +end +Puppet::add_aviator_params(:my_type) +``` + +#####`Puppet::Provider::Aviator` + +The aviator provider is a parent provider intended to serve as a base for other +providers that need to authenticate against keystone in order to accomplish a +task. + +**`Puppet::Provider::Aviator#authenticate`** + +Either creates an authenticated session or sets up an unauthenticated session +with instance variables initialized with a token to inject into the next request. +It takes as arguments a set of authentication parameters as a hash and a path +to a log file. Puppet::Provider::Aviator#authencate looks for five different +possible methods of authenticating, in the following order: + +1) Username and password credentials in the auth parameters +2) The path to an openrc file containing credentials to read in the auth + parameters +3) A service token in the auth parameters +4) Environment variables set for the environment in which Puppet is running +5) A service token in /etc/keystone/keystone.conf. This option provides + backwards compatibility with earlier keystone providers. + +If the provider has password credentials, it can create an authenticated +session. 
If it only has a service token, it initializes an unauthenciated +session and a hash of session data that can be injected into a future request. + +**`Puppet::Provider::Aviator#make_request`** + +After creating a session, the make\_request method provides an interface that +providers can use to make requests without worrying about whether they have an +authenticated or unauthenticated session. It takes as arguments the +Aviator::Service it is making a request at (for example, keystone), a symbol for +the request (for example, :list\_tenants), and optionally a block to execute +that will set parameters for an update request. + Implementation -------------- diff --git a/openstacklib/lib/puppet/provider/aviator.rb b/openstacklib/lib/puppet/provider/aviator.rb new file mode 100644 index 000000000..8de1bf201 --- /dev/null +++ b/openstacklib/lib/puppet/provider/aviator.rb @@ -0,0 +1,297 @@ +require 'puppet' +require 'puppet/feature/aviator' +require 'puppet/util/inifile' + +class Puppet::Provider::Aviator < Puppet::Provider + + def session + @session ||= authenticate(resource[:auth], resource[:log_file]) + end + + def self.session + @session ||= authenticate(nil, nil) + end + + def request(service, request, &block) + self.class.make_request(service, request, session_data, &block) + end + + def self.request(service, request, &block) + self.make_request(service, request, session_data, &block) + end + + # needed for tests + def session_data + @session_data + end + + def self.session_data + @session_data + end + + def session_data=(data) + @session_data=data + end + + def self.session_data=(data) + @session_data=data + end + + private + + # Attempt to find credentials in this order: + # 1. username,password,tenant,host set in type parameters + # 2. openrc file path set in type parameters + # 3. service token and host set in type parameters + # 4. username,password,tenant,host set in environment variables + # 5. service token and host set in keystone.conf (backwards compatible version) + def authenticate(auth_params, log_file) + auth_params ||= {} + if password_credentials_set?(auth_params) + @session = get_authenticated_session(auth_params, log_file) + + elsif openrc_set?(auth_params) + credentials = get_credentials_from_openrc(auth_params['openrc']) + @session = get_authenticated_session(credentials, log_file) + + elsif service_credentials_set?(auth_params) + session_hash = get_unauthenticated_session(auth_params, log_file) + @session_data = session_hash[:data] + @session = session_hash[:session] + + elsif env_vars_set? + credentials = get_credentials_from_env + @session = get_authenticated_session(credentials, log_file) + + else # Last effort: try to get the token from keystone.conf + session_hash = self.class.try_auth_with_token(keystone_file, log_file) + @session_data = session_hash[:data] + @session = session_hash[:session] + end + end + + def self.authenticate(auth_params, log_file) + auth_params = {} unless auth_params + if env_vars_set? 
+ credentials = get_credentials_from_env + @session = get_authenticated_session(credentials, log_file) + + else # Last effort: try to get the token from keystone.conf + session_hash = try_auth_with_token(keystone_file, log_file) + @session_data = session_hash[:data] + @session = session_hash[:session] + end + end + + + def self.try_auth_with_token(conf_file, log_file) + service_token = get_admin_token_from_keystone_file(conf_file) + auth_url = get_auth_url_from_keystone_file(conf_file) + session_hash = {} + if service_token + credentials = { + 'service_token' => service_token, + 'host_uri' => auth_url, + } + session_hash = get_unauthenticated_session(credentials, log_file) + else # All authentication efforts failed + raise(Puppet::Error, 'No credentials provided.') + end + end + + + def self.make_request(service, request, session_data, &block) + response = nil + if service && service.default_session_data + response = service.request(request, :endpoint_type => 'admin') do |params| + yield(params) if block + end + elsif session_data + response = service.request(request, :endpoint_type => 'admin', + :session_data => session_data) do |params| + yield(params) if block + end + else + raise(Puppet::Error, 'Cannot make a request with no session data.') + end + if response.body.hash['error'] + raise(Puppet::Error, "Error making request: #{response.body.hash['error']['code']} #{response.body.hash['error']['title']}") + end + response + end + + + def password_credentials_set?(auth_params) + auth_params['username'] && auth_params['password'] && auth_params['tenant_name'] && auth_params['host_uri'] + end + + + def openrc_set?(auth_params) + auth_params['openrc'] + end + + + def service_credentials_set?(auth_params) + auth_params['service_token'] && auth_params['host_uri'] + end + + + def self.env_vars_set? + ENV['OS_USERNAME'] && ENV['OS_PASSWORD'] && ENV['OS_TENANT_NAME'] && ENV['OS_AUTH_URL'] + end + + + def env_vars_set? + self.class.env_vars_set? + end + + + def get_credentials_from_openrc(file) + creds = {} + begin + File.open(file).readlines.delete_if{|l| l=~ /^#/}.each do |line| + key, value = line.split('=') + key = key.split(' ').last + value = value.chomp.gsub(/'/, '') + creds[key] = value + end + return creds + rescue Exception => error + return {} + end + end + + + def self.get_credentials_from_env + ENV.to_hash.dup.delete_if { |key, _| ! 
(key =~ /^OS/) } # Ruby 1.8.7 + end + + def get_credentials_from_env + self.class.get_credentials_from_env + end + + + def self.keystone_file + keystone_file = Puppet::Util::IniConfig::File.new + keystone_file.read('/etc/keystone/keystone.conf') + keystone_file + end + + def keystone_file + return @keystone_file if @keystone_file + @keystone_file = Puppet::Util::IniConfig::File.new + @keystone_file.read('/etc/keystone/keystone.conf') + @keystone_file + end + + + def self.get_admin_token_from_keystone_file(conf_file) + if conf_file and conf_file['DEFAULT'] and conf_file['DEFAULT']['admin_token'] + return "#{conf_file['DEFAULT']['admin_token'].strip}" + else + return nil + end + end + + def get_admin_token_from_keystone_file + conf_file = keystone_file + self.class.get_admin_token_from_keystone_file(conf_file) + end + + + def self.get_auth_url_from_keystone_file(conf_file) + if conf_file + if conf_file['DEFAULT'] + if conf_file['DEFAULT']['admin_endpoint'] + auth_url = conf_file['DEFAULT']['admin_endpoint'].strip + return versioned_endpoint(auth_url) + end + + if conf_file['DEFAULT']['admin_port'] + admin_port = conf_file['DEFAULT']['admin_port'].strip + else + admin_port = '35357' + end + + if conf_file['DEFAULT']['admin_bind_host'] + host = conf_file['DEFAULT']['admin_bind_host'].strip + if host == "0.0.0.0" + host = "127.0.0.1" + end + else + host = "127.0.0.1" + end + end + + if conf_file['ssl'] && conf_file['ssl']['enable'] && conf_file['ssl']['enable'].strip.downcase == 'true' + protocol = 'https' + else + protocol = 'http' + end + end + + "#{protocol}://#{host}:#{admin_port}/v2.0/" + end + + def get_auth_url_from_keystone_file + self.class.get_auth_url_from_keystone_file(keystone_file) + end + + + def self.make_configuration(credentials) + host_uri = versioned_endpoint(credentials['host_uri'] || credentials['OS_AUTH_URL'], credentials['api_version']) + { + :provider => 'openstack', + :auth_service => { + :name => 'identity', + :host_uri => host_uri, + :request => 'create_token', + :validator => 'list_tenants', + }, + :auth_credentials => { + :username => credentials['username'] || credentials['OS_USERNAME'], + :password => credentials['password'] || credentials['OS_PASSWORD'], + :tenant_name => credentials['tenant_name'] || credentials['OS_TENANT_NAME'] + } + } + end + + + def self.get_authenticated_session(credentials, log_file) + configuration = make_configuration(credentials) + session = ::Aviator::Session.new(:config => configuration, :log_file => log_file) + session.authenticate + session + end + + def get_authenticated_session(credentials, log_file) + self.class.get_authenticated_session(credentials, log_file) + end + + + def self.get_unauthenticated_session(credentials, log_file) + configuration = { + :provider => 'openstack', + } + session_data = { + :base_url => credentials['host_uri'], + :service_token => credentials['service_token'] + } + session = ::Aviator::Session.new(:config => configuration, :log_file => log_file) + { :session => session, :data => session_data } + end + + def get_unauthenticated_session(credentials, log_file) + self.class.get_unauthenticated_session(credentials, log_file) + end + + + def self.versioned_endpoint(endpoint, version = 'v2.0') + version = 'v2.0' if version.nil? 
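    # Illustrative behaviour, assuming the branches below: an endpoint such as
    # 'http://127.0.0.1:35357' is returned as 'http://127.0.0.1:35357/v2.0',
    # while an endpoint already ending in /v2.0 or /v3 (with or without a
    # trailing slash) is returned unchanged.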
+ if endpoint =~ /\/#{version}\/?$/ || endpoint =~ /\/v2.0\/?$/ || endpoint =~ /\/v3\/?$/ + endpoint + else + "#{endpoint.chomp('/')}/#{version}" + end + end +end diff --git a/openstacklib/lib/puppet/util/aviator.rb b/openstacklib/lib/puppet/util/aviator.rb new file mode 100644 index 000000000..bc24b025d --- /dev/null +++ b/openstacklib/lib/puppet/util/aviator.rb @@ -0,0 +1,46 @@ +# Add the auth parameter to whatever type is given +module Puppet::Util::Aviator + def self.add_aviator_params(type) + + type.newparam(:auth) do + + desc < { + 'username' => 'test', + 'password' => 'passw0rd', + 'tenant_name' => 'test', + 'host_uri' => 'http://localhost:35357/v2.0', +} + +or a path to an openrc file containing these credentials, e.g.: + +auth => { + 'openrc' => '/root/openrc', +} + +or a service token and host, e.g.: + +auth => { + 'service_token' => 'ADMIN', + 'host_uri' => 'http://localhost:35357/v2.0', +} + +If not present, the provider will first look for environment variables +for password credentials and then to /etc/keystone/keystone.conf for a +service token. +EOT + + validate do |value| + raise(Puppet::Error, 'This property must be a hash') unless value.is_a?(Hash) + end + end + + type.newparam(:log_file) do + desc 'Log file. Defaults to no logging.' + defaultto('/dev/null') + end + end +end diff --git a/openstacklib/manifests/db/mysql.pp b/openstacklib/manifests/db/mysql.pp index ac504e4ae..2f01625ca 100644 --- a/openstacklib/manifests/db/mysql.pp +++ b/openstacklib/manifests/db/mysql.pp @@ -43,7 +43,7 @@ $host = '127.0.0.1', $charset = 'utf8', $collate = 'utf8_unicode_ci', - $allowed_hosts = undef, + $allowed_hosts = [], $privileges = 'ALL', ) { @@ -56,35 +56,13 @@ require => [ Class['mysql::server'], Class['mysql::client'] ], } - mysql_user { "${user}@${host}": - ensure => present, - password_hash => $password_hash, - require => Class['mysql::server'], - } - - mysql_grant { "${user}@${host}/${dbname}.*": - privileges => $privileges, - user => "${user}@${host}", - table => "${dbname}.*", - require => [Mysql_database[$dbname], Mysql_user["${user}@${host}"], Class['mysql::server'] ], - } + $allowed_hosts_list = unique(concat(any2array($allowed_hosts), [$host])) + $real_allowed_hosts = prefix($allowed_hosts_list, "${dbname}_") - # Check allowed_hosts to avoid duplicate resource declarations - if is_array($allowed_hosts) and delete($allowed_hosts,$host) != [] { - $real_allowed_hosts = delete($allowed_hosts,$host) - $unique_real_allowed_hosts = prefix($real_allowed_hosts, "${dbname}_") - } elsif is_string($allowed_hosts) and ($allowed_hosts != $host) { - $real_allowed_hosts = $allowed_hosts - $unique_real_allowed_hosts = "${dbname}_${real_allowed_hosts}" - } - - if $real_allowed_hosts { - openstacklib::db::mysql::host_access { $unique_real_allowed_hosts: - user => $user, - password_hash => $password_hash, - database => $dbname, - privileges => $privileges, - } + openstacklib::db::mysql::host_access { $real_allowed_hosts: + user => $user, + password_hash => $password_hash, + database => $dbname, + privileges => $privileges, } - } diff --git a/openstacklib/manifests/policy.pp b/openstacklib/manifests/policy.pp new file mode 100644 index 000000000..b11142757 --- /dev/null +++ b/openstacklib/manifests/policy.pp @@ -0,0 +1,19 @@ +# == Class: openstacklib::policies +# +# This resource is an helper to call the policy definition +# +# == Parameters: +# +# [*policies*] +# Hash of policies one would like to set to specific values +# hash; optional +# +class openstacklib::policy ( + $policies = {}, +) 
{ + + validate_hash($policies) + + create_resources('openstacklib::policy::base', $policies) + +} diff --git a/openstacklib/manifests/policy/base.pp b/openstacklib/manifests/policy/base.pp new file mode 100644 index 000000000..48925ddbe --- /dev/null +++ b/openstacklib/manifests/policy/base.pp @@ -0,0 +1,31 @@ +# == Definition: openstacklib::policy::base +# +# This resource configures the policy.json file for an OpenStack service +# +# == Parameters: +# +# [*file_path*] +# Path to the policy.json file +# string; required +# +# [*key*] +# The key to replace the value for +# string; required; the key to replace the value for +# +# [*value*] +# The value to set +# string; optional; the value to set +# +define openstacklib::policy::base ( + $file_path, + $key, + $value = '', +) { + + augeas { "${file_path}-${key}-${value}" : + lens => 'Json.lns', + incl => $file_path, + changes => "set dict/entry[*][.=\"${key}\"]/string ${value}" + } + +} diff --git a/openstacklib/metadata.json b/openstacklib/metadata.json new file mode 100644 index 000000000..42b743e65 --- /dev/null +++ b/openstacklib/metadata.json @@ -0,0 +1,38 @@ +{ + "name": "stackforge-openstacklib", + "version": "5.0.0", + "author": "Puppet Labs and StackForge Contributors", + "summary": "Puppet OpenStack Libraries", + "license": "Apache License 2.0", + "source": "git://github.com/stackforge/puppet-openstacklib.git", + "project_page": "https://launchpad.net/puppet-openstacklib", + "issues_url": "https://bugs.launchpad.net/puppet-openstacklib", + "requirements": [ + { "name": "pe","version_requirement": "3.x" }, + { "name": "puppet","version_requirement": "3.x" } + ], + "operatingsystem_support": [ + { + "operatingsystem": "Debian", + "operatingsystemrelease": ["7"] + }, + { + "operatingsystem": "Fedora", + "operatingsystemrelease": ["20"] + }, + { + "operatingsystem": "RedHat", + "operatingsystemrelease": ["6.5","7"] + }, + { + "operatingsystem": "Ubuntu", + "operatingsystemrelease": ["12.04","14.04"] + } + ], + "description": "Puppet module library to expose common functionality between OpenStack modules.", + "dependencies": [ + { "name": "aimonb/aviator", "version_requirement": ">=0.4.2 <1.0.0" }, + { "name": "puppetlabs/mysql", "version_requirement": ">=2.2.0 <3.0.0" }, + { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" } + ] +} diff --git a/openstacklib/spec/classes/openstacklib_policy_spec.rb b/openstacklib/spec/classes/openstacklib_policy_spec.rb new file mode 100644 index 000000000..9d6927c52 --- /dev/null +++ b/openstacklib/spec/classes/openstacklib_policy_spec.rb @@ -0,0 +1,25 @@ +require 'spec_helper' + +describe 'openstacklib::policy' do + + let :params do + { + :policies => { + 'foo' => { + 'file_path' => '/etc/nova/policy.json', + 'key' => 'context_is_admin', + 'value' => 'foo:bar' + } + } + } + end + + it 'configures the proper policy' do + should contain_openstacklib__policy__base('foo').with( + :file_path => '/etc/nova/policy.json', + :key => 'context_is_admin', + :value => 'foo:bar' + ) + end + +end diff --git a/openstacklib/spec/defines/openstacklib_db_mysql_host_access_spec.rb b/openstacklib/spec/defines/openstacklib_db_mysql_host_access_spec.rb new file mode 100644 index 000000000..8f47bfa13 --- /dev/null +++ b/openstacklib/spec/defines/openstacklib_db_mysql_host_access_spec.rb @@ -0,0 +1,50 @@ +require 'spec_helper' + +describe 'openstacklib::db::mysql::host_access' do + + let :pre_condition do + "include mysql::server\n" + + "openstacklib::db::mysql { 'nova':\n" + + " password_hash => 
'AA1420F182E88B9E5F874F6FBE7459291E8F4601'}" + end + + shared_examples 'openstacklib::db::mysql::host_access examples' do + + context 'with required parameters' do + let (:title) { 'nova_10.0.0.1' } + let :params do + { :user => 'foobar', + :password_hash => 'AA1420F182E88B9E5F874F6FBE7459291E8F4601', + :database => 'nova', + :privileges => 'ALL' } + end + + it { should contain_mysql_user("#{params[:user]}@10.0.0.1").with( + :password_hash => params[:password_hash] + )} + + it { should contain_mysql_grant("#{params[:user]}@10.0.0.1/#{params[:database]}.*").with( + :user => "#{params[:user]}@10.0.0.1", + :privileges => 'ALL', + :table => "#{params[:database]}.*" + )} + end + + end + + context 'on a Debian osfamily' do + let :facts do + { :osfamily => "Debian" } + end + + include_examples 'openstacklib::db::mysql::host_access examples' + end + + context 'on a RedHat osfamily' do + let :facts do + { :osfamily => 'RedHat' } + end + + include_examples 'openstacklib::db::mysql::host_access examples' + end +end diff --git a/openstacklib/spec/defines/openstacklib_db_mysql_spec.rb b/openstacklib/spec/defines/openstacklib_db_mysql_spec.rb index 9f4edfb0b..91852a56e 100644 --- a/openstacklib/spec/defines/openstacklib_db_mysql_spec.rb +++ b/openstacklib/spec/defines/openstacklib_db_mysql_spec.rb @@ -6,17 +6,13 @@ 'include mysql::server' end - password_hash = 'AA1420F182E88B9E5F874F6FBE7459291E8F4601' + let (:title) { 'nova' } + let :required_params do - { :password_hash => password_hash } + { :password_hash => 'AA1420F182E88B9E5F874F6FBE7459291E8F4601' } end - title = 'nova' - let (:title) { title } - context 'on a Debian osfamily' do - let :facts do - { :osfamily => "Debian" } - end + shared_examples 'openstacklib::db::mysql examples' do context 'with only required parameters' do let :params do @@ -27,17 +23,46 @@ :charset => 'utf8', :collate => 'utf8_unicode_ci' )} - it { should contain_mysql_user("#{title}@127.0.0.1").with( - :password_hash => password_hash + it { should contain_openstacklib__db__mysql__host_access("#{title}_127.0.0.1").with( + :user => title, + :database => title, + :privileges => 'ALL' + )} + end + + context 'with overriding dbname parameter' do + let :params do + { :dbname => 'foobar' }.merge(required_params) + end + + it { should contain_mysql_database(params[:dbname]).with( + :charset => 'utf8', + :collate => 'utf8_unicode_ci' + )} + it { should contain_openstacklib__db__mysql__host_access("#{params[:dbname]}_127.0.0.1").with( + :user => title, + :database => params[:dbname], + :privileges => 'ALL' + )} + end + + context 'with overriding user parameter' do + let :params do + { :user => 'foobar' }.merge(required_params) + end + + it { should contain_mysql_database(title).with( + :charset => 'utf8', + :collate => 'utf8_unicode_ci' )} - it { should contain_mysql_grant("#{title}@127.0.0.1/#{title}.*").with( - :user => "#{title}@127.0.0.1", - :privileges => 'ALL', - :table => "#{title}.*" + it { should contain_openstacklib__db__mysql__host_access("#{title}_127.0.0.1").with( + :user => params[:user], + :database => title, + :privileges => 'ALL' )} end - context 'when overriding charset' do + context 'when overriding charset parameter' do let :params do { :charset => 'latin1' }.merge(required_params) end @@ -77,153 +102,62 @@ it { should contain_service('keystone').that_requires("Openstacklib::Db::Mysql[keystone]") } end - context "overriding allowed_hosts param to array" do + context "overriding allowed_hosts parameter with array value" do let :params do { :allowed_hosts => 
['127.0.0.1','%'] }.merge(required_params) end - it {should_not contain_openstacklib__db__mysql__host_access("#{title}_127.0.0.1").with( + it {should contain_openstacklib__db__mysql__host_access("#{title}_127.0.0.1").with( :user => title, - :password_hash => password_hash, + :password_hash => params[:password_hash], :database => title )} it {should contain_openstacklib__db__mysql__host_access("#{title}_%").with( :user => title, - :password_hash => password_hash, + :password_hash => params[:password_hash], :database => title )} end - context "overriding allowed_hosts param to string" do + context "overriding allowed_hosts parameter with string value" do let :params do - { - :password_hash => password_hash, - :allowed_hosts => '192.168.1.1' - } + { :allowed_hosts => '192.168.1.1' }.merge(required_params) end it {should contain_openstacklib__db__mysql__host_access("#{title}_192.168.1.1").with( :user => title, - :password_hash => password_hash, + :password_hash => params[:password_hash], :database => title )} end - context "overriding allowed_hosts param equals to host param " do + context "overriding allowed_hosts parameter equals to host param " do let :params do - { - :password_hash => password_hash, - :allowed_hosts => '127.0.0.1' - } + { :allowed_hosts => '127.0.0.1' }.merge(required_params) end - it {should_not contain_openstacklib__db__mysql__host_access("#{title}_127.0.0.1").with( + it {should contain_openstacklib__db__mysql__host_access("#{title}_127.0.0.1").with( :user => title, - :password_hash => password_hash, + :password_hash => params[:password_hash], :database => title )} end + end - context 'on a RedHat osfamily' do + context 'on a Debian osfamily' do let :facts do - { :osfamily => 'RedHat' } - end - - context 'with only required parameters' do - let :params do - required_params - end - - it { should contain_mysql_database(title).with( - :charset => 'utf8', - :collate => 'utf8_unicode_ci' - )} - it { should contain_mysql_user("#{title}@127.0.0.1").with( - :password_hash => password_hash - )} - it { should contain_mysql_grant("#{title}@127.0.0.1/#{title}.*").with( - :user => "#{title}@127.0.0.1", - :privileges => 'ALL', - :table => "#{title}.*" - )} - end - - context 'when overriding charset' do - let :params do - { :charset => 'latin1' }.merge(required_params) - end - - it { should contain_mysql_database(title).with_charset(params[:charset]) } - end - - context 'when omitting the required parameter password' do - let :params do - required_params.delete(:password) - end - it { expect { should raise_error(Puppet::Error) } } - end - - context 'when notifying other resources' do - let(:pre_condition) { 'exec {"nova-db-sync":}' } - let(:params) { { :notify => 'Exec[nova-db-sync]'}.merge(required_params) } - - it { should contain_exec('nova-db-sync').that_subscribes_to("Openstacklib::Db::Mysql[#{title}]") } - end - - context 'when required for other openstack services' do - let(:pre_condition) { 'service {"keystone":}' } - let(:title) { 'keystone' } - let(:params) { { :before => 'Service[keystone]'}.merge(required_params) } - - it { should contain_service('keystone').that_requires("Openstacklib::Db::Mysql[keystone]") } - end - - context "overriding allowed_hosts param to array" do - let :params do - { :allowed_hosts => ['127.0.0.1','%'] }.merge(required_params) - end - - it {should_not contain_openstacklib__db__mysql__host_access("#{title}_127.0.0.1").with( - :user => title, - :password_hash => password_hash, - :database => title - )} - it {should 
contain_openstacklib__db__mysql__host_access("#{title}_%").with( - :user => title, - :password_hash => password_hash, - :database => title - )} + { :osfamily => "Debian" } end - context "overriding allowed_hosts param to string" do - let :params do - { - :password_hash => password_hash, - :allowed_hosts => '192.168.1.1' - } - end + include_examples 'openstacklib::db::mysql examples' + end - it {should contain_openstacklib__db__mysql__host_access("#{title}_192.168.1.1").with( - :user => title, - :password_hash => password_hash, - :database => title - )} + context 'on a RedHat osfamily' do + let :facts do + { :osfamily => 'RedHat' } end - context "overriding allowed_hosts param equals to host param " do - let :params do - { - :password_hash => password_hash, - :allowed_hosts => '127.0.0.1' - } - end - - it {should_not contain_openstacklib__db__mysql__host_access("#{title}_127.0.0.1").with( - :user => title, - :password_hash => password_hash, - :database => title - )} - end + include_examples 'openstacklib::db::mysql examples' end end diff --git a/openstacklib/spec/defines/openstacklib_policy_spec.rb b/openstacklib/spec/defines/openstacklib_policy_spec.rb new file mode 100644 index 000000000..8a0423441 --- /dev/null +++ b/openstacklib/spec/defines/openstacklib_policy_spec.rb @@ -0,0 +1,23 @@ +require 'spec_helper' + +describe 'openstacklib::policy::base' do + + let :title do + 'nova-contest_is_admin' + end + + let :params do + {:file_path => '/etc/nova/policy.json', + :key => 'context_is_admin', + :value => 'foo:bar'} + end + + it 'configures the proper policy' do + should contain_augeas('/etc/nova/policy.json-context_is_admin-foo:bar').with( + 'lens' => 'Json.lns', + 'incl' => '/etc/nova/policy.json', + 'changes' => 'set dict/entry[*][.="context_is_admin"]/string foo:bar' + ) + end + +end diff --git a/openstacklib/spec/fixtures/vcr/aviator/request/with_session.yml b/openstacklib/spec/fixtures/vcr/aviator/request/with_session.yml new file mode 100644 index 000000000..2d052df25 --- /dev/null +++ b/openstacklib/spec/fixtures/vcr/aviator/request/with_session.yml @@ -0,0 +1,67 @@ +--- + http_interactions: + - request: + method: post + uri: "http://192.168.11.4:35357/v2.0/tokens" + body: + encoding: UTF-8 + string: "{\x22auth\x22:{\x22passwordCredentials\x22:{\x22username\x22:\x22admin\x22,\x22password\x22:\x22fyby-tet\x22},\x22tenantName\x22:\x22admin\x22}}" + headers: + Content-Type: + - application/json + User-Agent: + - "Faraday v0.8.8" + response: + status: + code: 200 + message: + headers: + vary: + - X-Auth-Token + content-type: + - application/json + content-length: + - "9780" + date: + - "Tue, 30 Sep 2014 06:59:48 GMT" + connection: + - close + body: + encoding: UTF-8 + string: "{\x22access\x22: {\x22token\x22: {\x22issued_at\x22: \x222014-09-30T06:59:48.338940\x22, \x22expires\x22: \x222014-09-30T07:59:48Z\x22, \x22id\x22: 
\x22MIIRIAYJKoZIhvcNAQcCoIIRETCCEQ0CAQExCTAHBgUrDgMCGjCCD3YGCSqGSIb3DQEHAaCCD2cEgg9jeyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAiMjAxNC0wOS0zMFQwNjo1OTo0OC4zMzg5NDAiLCAiZXhwaXJlcyI6ICIyMDE0LTA5LTMwVDA3OjU5OjQ4WiIsICJpZCI6ICJwbGFjZWhvbGRlciIsICJ0ZW5hbnQiOiB7ImRlc2NyaXB0aW9uIjogImFkbWluIHRlbmFudCIsICJlbmFibGVkIjogdHJ1ZSwgImlkIjogImM1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgIm5hbWUiOiAiYWRtaW4ifX0sICJzZXJ2aWNlQ2F0YWxvZyI6IFt7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc0L3YyL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzQvdjIvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUiLCAiaWQiOiAiMWJiNzU4NWIzMzgxNGI4Mjk4NzJlYjQ0MjAyMTg5OGEiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3NC92Mi9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJjb21wdXRlIiwgIm5hbWUiOiAibm92YSJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo5Njk2LyIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo5Njk2LyIsICJpZCI6ICJhYTdkNDU2NTNhYjI0ZGY2YmE5ZDE4NGE1ZWRkNGYxMyIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE5Mi4xNjguMTEuNDo5Njk2LyJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJuZXR3b3JrIiwgIm5hbWUiOiAibmV1dHJvbiJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc2L3YyL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzYvdjIvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUiLCAiaWQiOiAiMmJmNGQ5YzZiYTgwNDM1M2JjZGNlZGZjNTAxMDNiYjYiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3Ni92Mi9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJ2b2x1bWV2MiIsICJuYW1lIjogImNpbmRlcnYyIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzQvdjMiLCAicmVnaW9uIjogIm9wZW5zdGFjayIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODc3NC92MyIsICJpZCI6ICIzZmQ4NDBjYmFkZDM0NmFiOTE2YjA2YWYxZjRlNWJmMCIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE5Mi4xNjguMTEuNDo4Nzc0L3YzIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImNvbXB1dGV2MyIsICJuYW1lIjogIm5vdmF2MyJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo5MjkyIiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40OjkyOTIiLCAiaWQiOiAiMTJjOTM1NzkwYmM5NGE3ODljNzJmOWJiYjIxZjM5YmMiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6OTI5MiJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJpbWFnZSIsICJuYW1lIjogImdsYW5jZSJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc3IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzciLCAiaWQiOiAiMWNlNDkwMWQ4OTE1NDIzZTk2ZGFiM2ZlMWZiMzY2M2MiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3NyJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJtZXRlcmluZyIsICJuYW1lIjogImNlaWxvbWV0ZXIifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODAwMC92MS8iLCAicmVnaW9uIjogIm9wZW5zdGFjayIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODAwMC92MS8iLCAiaWQiOiAiNWU4ZjExYzk3ZTgwNDNiNWJkZTA1YmVhMmRlNDYxNmMiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODAwMC92MS8ifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiY2xvdWRmb3JtYXRpb24iLCAibmFtZSI6ICJoZWF0LWNmbiJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc2L3YxL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzYvdjEvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUiLCAiaWQiOiAiNTRiOWVjMjhiYzYwNDI1MmIwMGNmZTZlMGU
0NGFhOTQiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3Ni92MS9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJ2b2x1bWUiLCAibmFtZSI6ICJjaW5kZXIifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODc3My9zZXJ2aWNlcy9BZG1pbiIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4NzczL3NlcnZpY2VzL0Nsb3VkIiwgImlkIjogIjA3NWVjNzliMGJlMzQxYmFhYmUyZTliMTIwNTkxZjQ4IiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTkyLjE2OC4xMS40Ojg3NzMvc2VydmljZXMvQ2xvdWQifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiZWMyIiwgIm5hbWUiOiAibm92YV9lYzIifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODAwNC92MS9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4MDA0L3YxL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgImlkIjogIjI2MmM0YjM3MzY3ODQ2OGE5ODU1YTNlYmM2MDE1OTYwIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTkyLjE2OC4xMS40OjgwMDQvdjEvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAib3JjaGVzdHJhdGlvbiIsICJuYW1lIjogImhlYXQifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6MzUzNTcvdjIuMCIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo1MDAwL3YyLjAiLCAiaWQiOiAiMDg5ZmRiMjIyNDk1NDZiOTlhMWU1N2FlYzBiMWU3NmMiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6NTAwMC92Mi4wIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImlkZW50aXR5IiwgIm5hbWUiOiAia2V5c3RvbmUifV0sICJ1c2VyIjogeyJ1c2VybmFtZSI6ICJhZG1pbiIsICJyb2xlc19saW5rcyI6IFtdLCAiaWQiOiAiM2Y3NmI5NDY2NzQzNGNmM2JkYzMyM2NmMDIxYzUwZjgiLCAicm9sZXMiOiBbeyJuYW1lIjogImFkbWluIn1dLCAibmFtZSI6ICJhZG1pbiJ9LCAibWV0YWRhdGEiOiB7ImlzX2FkbWluIjogMCwgInJvbGVzIjogWyIxZThiYzVkMWYzYTQ0OGU5YTJhMWNkYTU4ZDk3ZjkyYiJdfX19MYIBgTCCAX0CAQEwXDBXMQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVW5zZXQxDjAMBgNVBAcMBVVuc2V0MQ4wDAYDVQQKDAVVbnNldDEYMBYGA1UEAwwPd3d3LmV4YW1wbGUuY29tAgEBMAcGBSsOAwIaMA0GCSqGSIb3DQEBAQUABIIBAHGQ0NFb0OcE74KIU9DmmvgVyYCrNwwWrwG1CObr9111AHfEr+bn6YfX1ePRUhB2KpcuBPLeIfM-RlLHNwpLzYtvKIwdj0TxIecbF9PuTkWMEZ9Kxl+KE8F4dJOnv0XnAiWZ8QzrMZOo4d+owLJmNNLE1TKfGqv8ughdcrjHtUicHT2E0AOfO3ylEhJPsazUl8XIIWQ4sMWTrs0ROMiZnWPWbomYb49LIaREHD6nDfZX+EDZbHSfPVLTYVL-+qkiIH52-lXqz-OKPCn+Lt3RzXYDzapZd8cpzVgJpTuq2YKMZ+H06yvHFCTZNN49j6kZHz0Qkn2MjbwU8sH10wA7W6k=\x22, \x22tenant\x22: {\x22description\x22: \x22admin tenant\x22, \x22enabled\x22: true, \x22id\x22: \x22c518b36fa220499b85ba9a71014ce2a5\x22, \x22name\x22: \x22admin\x22}}, \x22serviceCatalog\x22: [{\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8774/v2/c518b36fa220499b85ba9a71014ce2a5\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:8774/v2/c518b36fa220499b85ba9a71014ce2a5\x22, \x22id\x22: \x221bb7585b33814b829872eb442021898a\x22, \x22publicURL\x22: \x22http://192.168.11.4:8774/v2/c518b36fa220499b85ba9a71014ce2a5\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22compute\x22, \x22name\x22: \x22nova\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:9696/\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:9696/\x22, \x22id\x22: \x22aa7d45653ab24df6ba9d184a5edd4f13\x22, \x22publicURL\x22: \x22http://192.168.11.4:9696/\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22network\x22, \x22name\x22: \x22neutron\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8776/v2/c518b36fa220499b85ba9a71014ce2a5\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: 
\x22http://172.16.33.4:8776/v2/c518b36fa220499b85ba9a71014ce2a5\x22, \x22id\x22: \x222bf4d9c6ba804353bcdcedfc50103bb6\x22, \x22publicURL\x22: \x22http://192.168.11.4:8776/v2/c518b36fa220499b85ba9a71014ce2a5\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22volumev2\x22, \x22name\x22: \x22cinderv2\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8774/v3\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:8774/v3\x22, \x22id\x22: \x223fd840cbadd346ab916b06af1f4e5bf0\x22, \x22publicURL\x22: \x22http://192.168.11.4:8774/v3\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22computev3\x22, \x22name\x22: \x22novav3\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:9292\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:9292\x22, \x22id\x22: \x2212c935790bc94a789c72f9bbb21f39bc\x22, \x22publicURL\x22: \x22http://192.168.11.4:9292\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22image\x22, \x22name\x22: \x22glance\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8777\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:8777\x22, \x22id\x22: \x221ce4901d8915423e96dab3fe1fb3663c\x22, \x22publicURL\x22: \x22http://192.168.11.4:8777\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22metering\x22, \x22name\x22: \x22ceilometer\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8000/v1/\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:8000/v1/\x22, \x22id\x22: \x225e8f11c97e8043b5bde05bea2de4616c\x22, \x22publicURL\x22: \x22http://192.168.11.4:8000/v1/\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22cloudformation\x22, \x22name\x22: \x22heat-cfn\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8776/v1/c518b36fa220499b85ba9a71014ce2a5\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:8776/v1/c518b36fa220499b85ba9a71014ce2a5\x22, \x22id\x22: \x2254b9ec28bc604252b00cfe6e0e44aa94\x22, \x22publicURL\x22: \x22http://192.168.11.4:8776/v1/c518b36fa220499b85ba9a71014ce2a5\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22volume\x22, \x22name\x22: \x22cinder\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8773/services/Admin\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:8773/services/Cloud\x22, \x22id\x22: \x22075ec79b0be341baabe2e9b120591f48\x22, \x22publicURL\x22: \x22http://192.168.11.4:8773/services/Cloud\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22ec2\x22, \x22name\x22: \x22nova_ec2\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8004/v1/c518b36fa220499b85ba9a71014ce2a5\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:8004/v1/c518b36fa220499b85ba9a71014ce2a5\x22, \x22id\x22: \x22262c4b373678468a9855a3ebc6015960\x22, \x22publicURL\x22: \x22http://192.168.11.4:8004/v1/c518b36fa220499b85ba9a71014ce2a5\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22orchestration\x22, \x22name\x22: \x22heat\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:35357/v2.0\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:5000/v2.0\x22, \x22id\x22: \x22089fdb22249546b99a1e57aec0b1e76c\x22, \x22publicURL\x22: \x22http://192.168.11.4:5000/v2.0\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22identity\x22, \x22name\x22: \x22keystone\x22}], \x22user\x22: {\x22username\x22: 
\x22admin\x22, \x22roles_links\x22: [], \x22id\x22: \x223f76b94667434cf3bdc323cf021c50f8\x22, \x22roles\x22: [{\x22name\x22: \x22admin\x22}], \x22name\x22: \x22admin\x22}, \x22metadata\x22: {\x22is_admin\x22: 0, \x22roles\x22: [\x221e8bc5d1f3a448e9a2a1cda58d97f92b\x22]}}}" + http_version: + recorded_at: "Tue, 30 Sep 2014 06:59:48 GMT" + - request: + method: get + uri: "http://172.16.33.4:35357/v2.0/tenants" + body: + encoding: US-ASCII + string: "" + headers: + Content-Type: + - application/json + User-Agent: + - "Faraday v0.8.8" + X-Auth-Token: + - "MIIRIAYJKoZIhvcNAQcCoIIRETCCEQ0CAQExCTAHBgUrDgMCGjCCD3YGCSqGSIb3DQEHAaCCD2cEgg9jeyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAiMjAxNC0wOS0zMFQwNjo1OTo0OC4zMzg5NDAiLCAiZXhwaXJlcyI6ICIyMDE0LTA5LTMwVDA3OjU5OjQ4WiIsICJpZCI6ICJwbGFjZWhvbGRlciIsICJ0ZW5hbnQiOiB7ImRlc2NyaXB0aW9uIjogImFkbWluIHRlbmFudCIsICJlbmFibGVkIjogdHJ1ZSwgImlkIjogImM1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgIm5hbWUiOiAiYWRtaW4ifX0sICJzZXJ2aWNlQ2F0YWxvZyI6IFt7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc0L3YyL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzQvdjIvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUiLCAiaWQiOiAiMWJiNzU4NWIzMzgxNGI4Mjk4NzJlYjQ0MjAyMTg5OGEiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3NC92Mi9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJjb21wdXRlIiwgIm5hbWUiOiAibm92YSJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo5Njk2LyIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo5Njk2LyIsICJpZCI6ICJhYTdkNDU2NTNhYjI0ZGY2YmE5ZDE4NGE1ZWRkNGYxMyIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE5Mi4xNjguMTEuNDo5Njk2LyJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJuZXR3b3JrIiwgIm5hbWUiOiAibmV1dHJvbiJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc2L3YyL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzYvdjIvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUiLCAiaWQiOiAiMmJmNGQ5YzZiYTgwNDM1M2JjZGNlZGZjNTAxMDNiYjYiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3Ni92Mi9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJ2b2x1bWV2MiIsICJuYW1lIjogImNpbmRlcnYyIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzQvdjMiLCAicmVnaW9uIjogIm9wZW5zdGFjayIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODc3NC92MyIsICJpZCI6ICIzZmQ4NDBjYmFkZDM0NmFiOTE2YjA2YWYxZjRlNWJmMCIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE5Mi4xNjguMTEuNDo4Nzc0L3YzIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImNvbXB1dGV2MyIsICJuYW1lIjogIm5vdmF2MyJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo5MjkyIiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40OjkyOTIiLCAiaWQiOiAiMTJjOTM1NzkwYmM5NGE3ODljNzJmOWJiYjIxZjM5YmMiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6OTI5MiJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJpbWFnZSIsICJuYW1lIjogImdsYW5jZSJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc3IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzciLCAiaWQiOiAiMWNlNDkwMWQ4OTE1NDIzZTk2ZGFiM2ZlMWZiMzY2M2MiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3NyJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJtZXRlcmluZyIsICJuYW1lIjogImNlaWxvbWV0ZXIifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODAwMC92MS8iLCAicmVnaW9uIjogIm9wZW5zdGFjayIsICJpbnRlcm5hbFVSTCI6IC
JodHRwOi8vMTcyLjE2LjMzLjQ6ODAwMC92MS8iLCAiaWQiOiAiNWU4ZjExYzk3ZTgwNDNiNWJkZTA1YmVhMmRlNDYxNmMiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODAwMC92MS8ifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiY2xvdWRmb3JtYXRpb24iLCAibmFtZSI6ICJoZWF0LWNmbiJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc2L3YxL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzYvdjEvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUiLCAiaWQiOiAiNTRiOWVjMjhiYzYwNDI1MmIwMGNmZTZlMGU0NGFhOTQiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3Ni92MS9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJ2b2x1bWUiLCAibmFtZSI6ICJjaW5kZXIifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODc3My9zZXJ2aWNlcy9BZG1pbiIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4NzczL3NlcnZpY2VzL0Nsb3VkIiwgImlkIjogIjA3NWVjNzliMGJlMzQxYmFhYmUyZTliMTIwNTkxZjQ4IiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTkyLjE2OC4xMS40Ojg3NzMvc2VydmljZXMvQ2xvdWQifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiZWMyIiwgIm5hbWUiOiAibm92YV9lYzIifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODAwNC92MS9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4MDA0L3YxL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgImlkIjogIjI2MmM0YjM3MzY3ODQ2OGE5ODU1YTNlYmM2MDE1OTYwIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTkyLjE2OC4xMS40OjgwMDQvdjEvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAib3JjaGVzdHJhdGlvbiIsICJuYW1lIjogImhlYXQifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6MzUzNTcvdjIuMCIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo1MDAwL3YyLjAiLCAiaWQiOiAiMDg5ZmRiMjIyNDk1NDZiOTlhMWU1N2FlYzBiMWU3NmMiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6NTAwMC92Mi4wIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImlkZW50aXR5IiwgIm5hbWUiOiAia2V5c3RvbmUifV0sICJ1c2VyIjogeyJ1c2VybmFtZSI6ICJhZG1pbiIsICJyb2xlc19saW5rcyI6IFtdLCAiaWQiOiAiM2Y3NmI5NDY2NzQzNGNmM2JkYzMyM2NmMDIxYzUwZjgiLCAicm9sZXMiOiBbeyJuYW1lIjogImFkbWluIn1dLCAibmFtZSI6ICJhZG1pbiJ9LCAibWV0YWRhdGEiOiB7ImlzX2FkbWluIjogMCwgInJvbGVzIjogWyIxZThiYzVkMWYzYTQ0OGU5YTJhMWNkYTU4ZDk3ZjkyYiJdfX19MYIBgTCCAX0CAQEwXDBXMQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVW5zZXQxDjAMBgNVBAcMBVVuc2V0MQ4wDAYDVQQKDAVVbnNldDEYMBYGA1UEAwwPd3d3LmV4YW1wbGUuY29tAgEBMAcGBSsOAwIaMA0GCSqGSIb3DQEBAQUABIIBAHGQ0NFb0OcE74KIU9DmmvgVyYCrNwwWrwG1CObr9111AHfEr+bn6YfX1ePRUhB2KpcuBPLeIfM-RlLHNwpLzYtvKIwdj0TxIecbF9PuTkWMEZ9Kxl+KE8F4dJOnv0XnAiWZ8QzrMZOo4d+owLJmNNLE1TKfGqv8ughdcrjHtUicHT2E0AOfO3ylEhJPsazUl8XIIWQ4sMWTrs0ROMiZnWPWbomYb49LIaREHD6nDfZX+EDZbHSfPVLTYVL-+qkiIH52-lXqz-OKPCn+Lt3RzXYDzapZd8cpzVgJpTuq2YKMZ+H06yvHFCTZNN49j6kZHz0Qkn2MjbwU8sH10wA7W6k=" + response: + status: + code: 200 + message: + headers: + vary: + - X-Auth-Token + content-type: + - application/json + content-length: + - "491" + date: + - "Tue, 30 Sep 2014 06:59:48 GMT" + connection: + - close + body: + encoding: UTF-8 + string: "{\x22tenants_links\x22: [], \x22tenants\x22: [{\x22description\x22: \x22Test tenant\x22, \x22enabled\x22: true, \x22id\x22: \x2234e463e2bab24f78990ca864e4a28ba2\x22, \x22name\x22: \x22test2\x22}, {\x22description\x22: \x22Tenant for the openstack services\x22, \x22enabled\x22: true, \x22id\x22: \x2268c8fcf77aff4b409cc158c0f6cbff7b\x22, \x22name\x22: \x22services\x22}, {\x22description\x22: \x22Test tenant\x22, \x22enabled\x22: true, 
\x22id\x22: \x22c330f1bc663648df9c1e7835a1e7a955\x22, \x22name\x22: \x22test\x22}, {\x22description\x22: \x22admin tenant\x22, \x22enabled\x22: true, \x22id\x22: \x22c518b36fa220499b85ba9a71014ce2a5\x22, \x22name\x22: \x22admin\x22}]}" + http_version: + recorded_at: "Tue, 30 Sep 2014 06:59:48 GMT" + recorded_with: "VCR 2.9.3" \ No newline at end of file diff --git a/openstacklib/spec/fixtures/vcr/aviator/request/without_session.yml b/openstacklib/spec/fixtures/vcr/aviator/request/without_session.yml new file mode 100644 index 000000000..784fb2fea --- /dev/null +++ b/openstacklib/spec/fixtures/vcr/aviator/request/without_session.yml @@ -0,0 +1,36 @@ +--- + http_interactions: + - request: + method: get + uri: "http://192.168.11.4:35357/v2.0/tenants" + body: + encoding: US-ASCII + string: "" + headers: + Content-Type: + - application/json + User-Agent: + - "Faraday v0.8.8" + X-Auth-Token: + - sosp-kyl + response: + status: + code: 200 + message: + headers: + vary: + - X-Auth-Token + content-type: + - application/json + content-length: + - "491" + date: + - "Tue, 30 Sep 2014 06:59:48 GMT" + connection: + - close + body: + encoding: UTF-8 + string: "{\x22tenants_links\x22: [], \x22tenants\x22: [{\x22description\x22: \x22Test tenant\x22, \x22enabled\x22: true, \x22id\x22: \x2234e463e2bab24f78990ca864e4a28ba2\x22, \x22name\x22: \x22test2\x22}, {\x22description\x22: \x22Tenant for the openstack services\x22, \x22enabled\x22: true, \x22id\x22: \x2268c8fcf77aff4b409cc158c0f6cbff7b\x22, \x22name\x22: \x22services\x22}, {\x22description\x22: \x22Test tenant\x22, \x22enabled\x22: true, \x22id\x22: \x22c330f1bc663648df9c1e7835a1e7a955\x22, \x22name\x22: \x22test\x22}, {\x22description\x22: \x22admin tenant\x22, \x22enabled\x22: true, \x22id\x22: \x22c518b36fa220499b85ba9a71014ce2a5\x22, \x22name\x22: \x22admin\x22}]}" + http_version: + recorded_at: "Tue, 30 Sep 2014 06:59:48 GMT" + recorded_with: "VCR 2.9.3" \ No newline at end of file diff --git a/openstacklib/spec/fixtures/vcr/aviator/session/with_password.yml b/openstacklib/spec/fixtures/vcr/aviator/session/with_password.yml new file mode 100644 index 000000000..3faa47433 --- /dev/null +++ b/openstacklib/spec/fixtures/vcr/aviator/session/with_password.yml @@ -0,0 +1,67 @@ +--- + http_interactions: + - request: + method: post + uri: "http://192.168.11.4:35357/v2.0/tokens" + body: + encoding: UTF-8 + string: "{\x22auth\x22:{\x22passwordCredentials\x22:{\x22username\x22:\x22admin\x22,\x22password\x22:\x22fyby-tet\x22},\x22tenantName\x22:\x22admin\x22}}" + headers: + Content-Type: + - application/json + User-Agent: + - "Faraday v0.8.8" + response: + status: + code: 200 + message: + headers: + vary: + - X-Auth-Token + content-type: + - application/json + content-length: + - "9780" + date: + - "Tue, 30 Sep 2014 07:16:15 GMT" + connection: + - close + body: + encoding: UTF-8 + string: "{\x22access\x22: {\x22token\x22: {\x22issued_at\x22: \x222014-09-30T07:16:15.042778\x22, \x22expires\x22: \x222014-09-30T08:16:15Z\x22, \x22id\x22: 
\x22MIIRIAYJKoZIhvcNAQcCoIIRETCCEQ0CAQExCTAHBgUrDgMCGjCCD3YGCSqGSIb3DQEHAaCCD2cEgg9jeyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAiMjAxNC0wOS0zMFQwNzoxNjoxNS4wNDI3NzgiLCAiZXhwaXJlcyI6ICIyMDE0LTA5LTMwVDA4OjE2OjE1WiIsICJpZCI6ICJwbGFjZWhvbGRlciIsICJ0ZW5hbnQiOiB7ImRlc2NyaXB0aW9uIjogImFkbWluIHRlbmFudCIsICJlbmFibGVkIjogdHJ1ZSwgImlkIjogImM1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgIm5hbWUiOiAiYWRtaW4ifX0sICJzZXJ2aWNlQ2F0YWxvZyI6IFt7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc0L3YyL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzQvdjIvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUiLCAiaWQiOiAiMWJiNzU4NWIzMzgxNGI4Mjk4NzJlYjQ0MjAyMTg5OGEiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3NC92Mi9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJjb21wdXRlIiwgIm5hbWUiOiAibm92YSJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo5Njk2LyIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo5Njk2LyIsICJpZCI6ICJhYTdkNDU2NTNhYjI0ZGY2YmE5ZDE4NGE1ZWRkNGYxMyIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE5Mi4xNjguMTEuNDo5Njk2LyJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJuZXR3b3JrIiwgIm5hbWUiOiAibmV1dHJvbiJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc2L3YyL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzYvdjIvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUiLCAiaWQiOiAiMmJmNGQ5YzZiYTgwNDM1M2JjZGNlZGZjNTAxMDNiYjYiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3Ni92Mi9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJ2b2x1bWV2MiIsICJuYW1lIjogImNpbmRlcnYyIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzQvdjMiLCAicmVnaW9uIjogIm9wZW5zdGFjayIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODc3NC92MyIsICJpZCI6ICIzZmQ4NDBjYmFkZDM0NmFiOTE2YjA2YWYxZjRlNWJmMCIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE5Mi4xNjguMTEuNDo4Nzc0L3YzIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImNvbXB1dGV2MyIsICJuYW1lIjogIm5vdmF2MyJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo5MjkyIiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40OjkyOTIiLCAiaWQiOiAiMTJjOTM1NzkwYmM5NGE3ODljNzJmOWJiYjIxZjM5YmMiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6OTI5MiJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJpbWFnZSIsICJuYW1lIjogImdsYW5jZSJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc3IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzciLCAiaWQiOiAiMWNlNDkwMWQ4OTE1NDIzZTk2ZGFiM2ZlMWZiMzY2M2MiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3NyJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJtZXRlcmluZyIsICJuYW1lIjogImNlaWxvbWV0ZXIifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODAwMC92MS8iLCAicmVnaW9uIjogIm9wZW5zdGFjayIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODAwMC92MS8iLCAiaWQiOiAiNWU4ZjExYzk3ZTgwNDNiNWJkZTA1YmVhMmRlNDYxNmMiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODAwMC92MS8ifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiY2xvdWRmb3JtYXRpb24iLCAibmFtZSI6ICJoZWF0LWNmbiJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc2L3YxL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzYvdjEvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUiLCAiaWQiOiAiNTRiOWVjMjhiYzYwNDI1MmIwMGNmZTZlMGU
0NGFhOTQiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3Ni92MS9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJ2b2x1bWUiLCAibmFtZSI6ICJjaW5kZXIifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODc3My9zZXJ2aWNlcy9BZG1pbiIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4NzczL3NlcnZpY2VzL0Nsb3VkIiwgImlkIjogIjA3NWVjNzliMGJlMzQxYmFhYmUyZTliMTIwNTkxZjQ4IiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTkyLjE2OC4xMS40Ojg3NzMvc2VydmljZXMvQ2xvdWQifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiZWMyIiwgIm5hbWUiOiAibm92YV9lYzIifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODAwNC92MS9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4MDA0L3YxL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgImlkIjogIjI2MmM0YjM3MzY3ODQ2OGE5ODU1YTNlYmM2MDE1OTYwIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTkyLjE2OC4xMS40OjgwMDQvdjEvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAib3JjaGVzdHJhdGlvbiIsICJuYW1lIjogImhlYXQifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6MzUzNTcvdjIuMCIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo1MDAwL3YyLjAiLCAiaWQiOiAiMDg5ZmRiMjIyNDk1NDZiOTlhMWU1N2FlYzBiMWU3NmMiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6NTAwMC92Mi4wIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImlkZW50aXR5IiwgIm5hbWUiOiAia2V5c3RvbmUifV0sICJ1c2VyIjogeyJ1c2VybmFtZSI6ICJhZG1pbiIsICJyb2xlc19saW5rcyI6IFtdLCAiaWQiOiAiM2Y3NmI5NDY2NzQzNGNmM2JkYzMyM2NmMDIxYzUwZjgiLCAicm9sZXMiOiBbeyJuYW1lIjogImFkbWluIn1dLCAibmFtZSI6ICJhZG1pbiJ9LCAibWV0YWRhdGEiOiB7ImlzX2FkbWluIjogMCwgInJvbGVzIjogWyIxZThiYzVkMWYzYTQ0OGU5YTJhMWNkYTU4ZDk3ZjkyYiJdfX19MYIBgTCCAX0CAQEwXDBXMQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVW5zZXQxDjAMBgNVBAcMBVVuc2V0MQ4wDAYDVQQKDAVVbnNldDEYMBYGA1UEAwwPd3d3LmV4YW1wbGUuY29tAgEBMAcGBSsOAwIaMA0GCSqGSIb3DQEBAQUABIIBAEOAl3MRmSUB+J+kRi+qRhwOrNRmj-wDqT5nJTlbafOjofSXsHG683LaipA7oPoH-ARUPDxXIZfevRue7bQQB3I4cWIUSItIPyW4xDpF+iHy3QOm+I-3v3ctze2Z3Rp0TRaYFsItTZZETsSXI28yBt9+3Dsk3a7Vv10HAZetbV1i6qu6avLcJsmN-1J3KLwCDSEvpMgDAcpzAnhba1fi+X8GrCCTz4c1uIcPfsHxX4g8gNkB4-VT0lkRmfSxdrGeRz0uN12oDqgCL64IV1mJ6Bi9Unh15QFcLwU0F8ote+joG9G29fw3WDzneXHIeEIZSBXmOAX2kQBOnZa2sZg8gls=\x22, \x22tenant\x22: {\x22description\x22: \x22admin tenant\x22, \x22enabled\x22: true, \x22id\x22: \x22c518b36fa220499b85ba9a71014ce2a5\x22, \x22name\x22: \x22admin\x22}}, \x22serviceCatalog\x22: [{\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8774/v2/c518b36fa220499b85ba9a71014ce2a5\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:8774/v2/c518b36fa220499b85ba9a71014ce2a5\x22, \x22id\x22: \x221bb7585b33814b829872eb442021898a\x22, \x22publicURL\x22: \x22http://192.168.11.4:8774/v2/c518b36fa220499b85ba9a71014ce2a5\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22compute\x22, \x22name\x22: \x22nova\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:9696/\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:9696/\x22, \x22id\x22: \x22aa7d45653ab24df6ba9d184a5edd4f13\x22, \x22publicURL\x22: \x22http://192.168.11.4:9696/\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22network\x22, \x22name\x22: \x22neutron\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8776/v2/c518b36fa220499b85ba9a71014ce2a5\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: 
\x22http://172.16.33.4:8776/v2/c518b36fa220499b85ba9a71014ce2a5\x22, \x22id\x22: \x222bf4d9c6ba804353bcdcedfc50103bb6\x22, \x22publicURL\x22: \x22http://192.168.11.4:8776/v2/c518b36fa220499b85ba9a71014ce2a5\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22volumev2\x22, \x22name\x22: \x22cinderv2\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8774/v3\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:8774/v3\x22, \x22id\x22: \x223fd840cbadd346ab916b06af1f4e5bf0\x22, \x22publicURL\x22: \x22http://192.168.11.4:8774/v3\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22computev3\x22, \x22name\x22: \x22novav3\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:9292\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:9292\x22, \x22id\x22: \x2212c935790bc94a789c72f9bbb21f39bc\x22, \x22publicURL\x22: \x22http://192.168.11.4:9292\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22image\x22, \x22name\x22: \x22glance\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8777\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:8777\x22, \x22id\x22: \x221ce4901d8915423e96dab3fe1fb3663c\x22, \x22publicURL\x22: \x22http://192.168.11.4:8777\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22metering\x22, \x22name\x22: \x22ceilometer\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8000/v1/\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:8000/v1/\x22, \x22id\x22: \x225e8f11c97e8043b5bde05bea2de4616c\x22, \x22publicURL\x22: \x22http://192.168.11.4:8000/v1/\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22cloudformation\x22, \x22name\x22: \x22heat-cfn\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8776/v1/c518b36fa220499b85ba9a71014ce2a5\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:8776/v1/c518b36fa220499b85ba9a71014ce2a5\x22, \x22id\x22: \x2254b9ec28bc604252b00cfe6e0e44aa94\x22, \x22publicURL\x22: \x22http://192.168.11.4:8776/v1/c518b36fa220499b85ba9a71014ce2a5\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22volume\x22, \x22name\x22: \x22cinder\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8773/services/Admin\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:8773/services/Cloud\x22, \x22id\x22: \x22075ec79b0be341baabe2e9b120591f48\x22, \x22publicURL\x22: \x22http://192.168.11.4:8773/services/Cloud\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22ec2\x22, \x22name\x22: \x22nova_ec2\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:8004/v1/c518b36fa220499b85ba9a71014ce2a5\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:8004/v1/c518b36fa220499b85ba9a71014ce2a5\x22, \x22id\x22: \x22262c4b373678468a9855a3ebc6015960\x22, \x22publicURL\x22: \x22http://192.168.11.4:8004/v1/c518b36fa220499b85ba9a71014ce2a5\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22orchestration\x22, \x22name\x22: \x22heat\x22}, {\x22endpoints\x22: [{\x22adminURL\x22: \x22http://172.16.33.4:35357/v2.0\x22, \x22region\x22: \x22openstack\x22, \x22internalURL\x22: \x22http://172.16.33.4:5000/v2.0\x22, \x22id\x22: \x22089fdb22249546b99a1e57aec0b1e76c\x22, \x22publicURL\x22: \x22http://192.168.11.4:5000/v2.0\x22}], \x22endpoints_links\x22: [], \x22type\x22: \x22identity\x22, \x22name\x22: \x22keystone\x22}], \x22user\x22: {\x22username\x22: 
\x22admin\x22, \x22roles_links\x22: [], \x22id\x22: \x223f76b94667434cf3bdc323cf021c50f8\x22, \x22roles\x22: [{\x22name\x22: \x22admin\x22}], \x22name\x22: \x22admin\x22}, \x22metadata\x22: {\x22is_admin\x22: 0, \x22roles\x22: [\x221e8bc5d1f3a448e9a2a1cda58d97f92b\x22]}}}" + http_version: + recorded_at: "Tue, 30 Sep 2014 07:16:15 GMT" + - request: + method: get + uri: "http://192.168.11.4:5000/v2.0/tenants" + body: + encoding: US-ASCII + string: "" + headers: + Content-Type: + - application/json + User-Agent: + - "Faraday v0.8.8" + X-Auth-Token: + - "MIIRIAYJKoZIhvcNAQcCoIIRETCCEQ0CAQExCTAHBgUrDgMCGjCCD3YGCSqGSIb3DQEHAaCCD2cEgg9jeyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAiMjAxNC0wOS0zMFQwNzoxNjoxNS4wNDI3NzgiLCAiZXhwaXJlcyI6ICIyMDE0LTA5LTMwVDA4OjE2OjE1WiIsICJpZCI6ICJwbGFjZWhvbGRlciIsICJ0ZW5hbnQiOiB7ImRlc2NyaXB0aW9uIjogImFkbWluIHRlbmFudCIsICJlbmFibGVkIjogdHJ1ZSwgImlkIjogImM1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgIm5hbWUiOiAiYWRtaW4ifX0sICJzZXJ2aWNlQ2F0YWxvZyI6IFt7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc0L3YyL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzQvdjIvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUiLCAiaWQiOiAiMWJiNzU4NWIzMzgxNGI4Mjk4NzJlYjQ0MjAyMTg5OGEiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3NC92Mi9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJjb21wdXRlIiwgIm5hbWUiOiAibm92YSJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo5Njk2LyIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo5Njk2LyIsICJpZCI6ICJhYTdkNDU2NTNhYjI0ZGY2YmE5ZDE4NGE1ZWRkNGYxMyIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE5Mi4xNjguMTEuNDo5Njk2LyJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJuZXR3b3JrIiwgIm5hbWUiOiAibmV1dHJvbiJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc2L3YyL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzYvdjIvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUiLCAiaWQiOiAiMmJmNGQ5YzZiYTgwNDM1M2JjZGNlZGZjNTAxMDNiYjYiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3Ni92Mi9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJ2b2x1bWV2MiIsICJuYW1lIjogImNpbmRlcnYyIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzQvdjMiLCAicmVnaW9uIjogIm9wZW5zdGFjayIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODc3NC92MyIsICJpZCI6ICIzZmQ4NDBjYmFkZDM0NmFiOTE2YjA2YWYxZjRlNWJmMCIsICJwdWJsaWNVUkwiOiAiaHR0cDovLzE5Mi4xNjguMTEuNDo4Nzc0L3YzIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImNvbXB1dGV2MyIsICJuYW1lIjogIm5vdmF2MyJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo5MjkyIiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40OjkyOTIiLCAiaWQiOiAiMTJjOTM1NzkwYmM5NGE3ODljNzJmOWJiYjIxZjM5YmMiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6OTI5MiJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJpbWFnZSIsICJuYW1lIjogImdsYW5jZSJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc3IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzciLCAiaWQiOiAiMWNlNDkwMWQ4OTE1NDIzZTk2ZGFiM2ZlMWZiMzY2M2MiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3NyJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJtZXRlcmluZyIsICJuYW1lIjogImNlaWxvbWV0ZXIifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODAwMC92MS8iLCAicmVnaW9uIjogIm9wZW5zdGFjayIsICJpbnRlcm5hbFVSTCI6IC
JodHRwOi8vMTcyLjE2LjMzLjQ6ODAwMC92MS8iLCAiaWQiOiAiNWU4ZjExYzk3ZTgwNDNiNWJkZTA1YmVhMmRlNDYxNmMiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODAwMC92MS8ifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiY2xvdWRmb3JtYXRpb24iLCAibmFtZSI6ICJoZWF0LWNmbiJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4Nzc2L3YxL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgInJlZ2lvbiI6ICJvcGVuc3RhY2siLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4xNi4zMy40Ojg3NzYvdjEvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUiLCAiaWQiOiAiNTRiOWVjMjhiYzYwNDI1MmIwMGNmZTZlMGU0NGFhOTQiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6ODc3Ni92MS9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJ2b2x1bWUiLCAibmFtZSI6ICJjaW5kZXIifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODc3My9zZXJ2aWNlcy9BZG1pbiIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4NzczL3NlcnZpY2VzL0Nsb3VkIiwgImlkIjogIjA3NWVjNzliMGJlMzQxYmFhYmUyZTliMTIwNTkxZjQ4IiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTkyLjE2OC4xMS40Ojg3NzMvc2VydmljZXMvQ2xvdWQifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiZWMyIiwgIm5hbWUiOiAibm92YV9lYzIifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6ODAwNC92MS9jNTE4YjM2ZmEyMjA0OTliODViYTlhNzEwMTRjZTJhNSIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo4MDA0L3YxL2M1MThiMzZmYTIyMDQ5OWI4NWJhOWE3MTAxNGNlMmE1IiwgImlkIjogIjI2MmM0YjM3MzY3ODQ2OGE5ODU1YTNlYmM2MDE1OTYwIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTkyLjE2OC4xMS40OjgwMDQvdjEvYzUxOGIzNmZhMjIwNDk5Yjg1YmE5YTcxMDE0Y2UyYTUifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAib3JjaGVzdHJhdGlvbiIsICJuYW1lIjogImhlYXQifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjE2LjMzLjQ6MzUzNTcvdjIuMCIsICJyZWdpb24iOiAib3BlbnN0YWNrIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMTYuMzMuNDo1MDAwL3YyLjAiLCAiaWQiOiAiMDg5ZmRiMjIyNDk1NDZiOTlhMWU1N2FlYzBiMWU3NmMiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjExLjQ6NTAwMC92Mi4wIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImlkZW50aXR5IiwgIm5hbWUiOiAia2V5c3RvbmUifV0sICJ1c2VyIjogeyJ1c2VybmFtZSI6ICJhZG1pbiIsICJyb2xlc19saW5rcyI6IFtdLCAiaWQiOiAiM2Y3NmI5NDY2NzQzNGNmM2JkYzMyM2NmMDIxYzUwZjgiLCAicm9sZXMiOiBbeyJuYW1lIjogImFkbWluIn1dLCAibmFtZSI6ICJhZG1pbiJ9LCAibWV0YWRhdGEiOiB7ImlzX2FkbWluIjogMCwgInJvbGVzIjogWyIxZThiYzVkMWYzYTQ0OGU5YTJhMWNkYTU4ZDk3ZjkyYiJdfX19MYIBgTCCAX0CAQEwXDBXMQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVW5zZXQxDjAMBgNVBAcMBVVuc2V0MQ4wDAYDVQQKDAVVbnNldDEYMBYGA1UEAwwPd3d3LmV4YW1wbGUuY29tAgEBMAcGBSsOAwIaMA0GCSqGSIb3DQEBAQUABIIBAEOAl3MRmSUB+J+kRi+qRhwOrNRmj-wDqT5nJTlbafOjofSXsHG683LaipA7oPoH-ARUPDxXIZfevRue7bQQB3I4cWIUSItIPyW4xDpF+iHy3QOm+I-3v3ctze2Z3Rp0TRaYFsItTZZETsSXI28yBt9+3Dsk3a7Vv10HAZetbV1i6qu6avLcJsmN-1J3KLwCDSEvpMgDAcpzAnhba1fi+X8GrCCTz4c1uIcPfsHxX4g8gNkB4-VT0lkRmfSxdrGeRz0uN12oDqgCL64IV1mJ6Bi9Unh15QFcLwU0F8ote+joG9G29fw3WDzneXHIeEIZSBXmOAX2kQBOnZa2sZg8gls=" + response: + status: + code: 200 + message: + headers: + vary: + - X-Auth-Token + content-type: + - application/json + content-length: + - "143" + date: + - "Tue, 30 Sep 2014 07:16:15 GMT" + connection: + - close + body: + encoding: UTF-8 + string: "{\x22tenants_links\x22: [], \x22tenants\x22: [{\x22description\x22: \x22admin tenant\x22, \x22enabled\x22: true, \x22id\x22: \x22c518b36fa220499b85ba9a71014ce2a5\x22, \x22name\x22: \x22admin\x22}]}" + http_version: + recorded_at: "Tue, 30 Sep 2014 07:16:15 GMT" + recorded_with: "VCR 2.9.3" \ No newline at end of file diff --git a/openstacklib/spec/fixtures/vcr/aviator/session/with_token.yml 
b/openstacklib/spec/fixtures/vcr/aviator/session/with_token.yml new file mode 100644 index 000000000..784fb2fea --- /dev/null +++ b/openstacklib/spec/fixtures/vcr/aviator/session/with_token.yml @@ -0,0 +1,36 @@ +--- + http_interactions: + - request: + method: get + uri: "http://192.168.11.4:35357/v2.0/tenants" + body: + encoding: US-ASCII + string: "" + headers: + Content-Type: + - application/json + User-Agent: + - "Faraday v0.8.8" + X-Auth-Token: + - sosp-kyl + response: + status: + code: 200 + message: + headers: + vary: + - X-Auth-Token + content-type: + - application/json + content-length: + - "491" + date: + - "Tue, 30 Sep 2014 06:59:48 GMT" + connection: + - close + body: + encoding: UTF-8 + string: "{\x22tenants_links\x22: [], \x22tenants\x22: [{\x22description\x22: \x22Test tenant\x22, \x22enabled\x22: true, \x22id\x22: \x2234e463e2bab24f78990ca864e4a28ba2\x22, \x22name\x22: \x22test2\x22}, {\x22description\x22: \x22Tenant for the openstack services\x22, \x22enabled\x22: true, \x22id\x22: \x2268c8fcf77aff4b409cc158c0f6cbff7b\x22, \x22name\x22: \x22services\x22}, {\x22description\x22: \x22Test tenant\x22, \x22enabled\x22: true, \x22id\x22: \x22c330f1bc663648df9c1e7835a1e7a955\x22, \x22name\x22: \x22test\x22}, {\x22description\x22: \x22admin tenant\x22, \x22enabled\x22: true, \x22id\x22: \x22c518b36fa220499b85ba9a71014ce2a5\x22, \x22name\x22: \x22admin\x22}]}" + http_version: + recorded_at: "Tue, 30 Sep 2014 06:59:48 GMT" + recorded_with: "VCR 2.9.3" \ No newline at end of file diff --git a/openstacklib/spec/spec_helper.rb b/openstacklib/spec/spec_helper.rb index 2c6f56649..ecd609ae7 100644 --- a/openstacklib/spec/spec_helper.rb +++ b/openstacklib/spec/spec_helper.rb @@ -1 +1,7 @@ require 'puppetlabs_spec_helper/module_spec_helper' +require 'vcr' + +VCR.configure do |c| + c.cassette_library_dir = 'spec/fixtures/vcr' + c.hook_into :faraday +end diff --git a/openstacklib/spec/unit/provider/aviator_spec.rb b/openstacklib/spec/unit/provider/aviator_spec.rb new file mode 100644 index 000000000..35564e64a --- /dev/null +++ b/openstacklib/spec/unit/provider/aviator_spec.rb @@ -0,0 +1,320 @@ +# Load libraries from aviator here to simulate how they live together in a real puppet run +$LOAD_PATH.push(File.join(File.dirname(__FILE__), '..', '..', 'fixtures', 'modules', 'aviator', 'lib')) +require 'puppet' +require 'vcr' +require 'spec_helper' +require 'puppet/provider/aviator' + + +describe Puppet::Provider::Aviator do + + before(:each) do + ENV['OS_USERNAME'] = nil + ENV['OS_PASSWORD'] = nil + ENV['OS_TENANT_NAME'] = nil + ENV['OS_AUTH_URL'] = nil + end + + let(:log_file) { '/tmp/aviator_spec.log' } + + let(:type) do + Puppet::Type.newtype(:test_resource) do + newparam(:name, :namevar => true) + newparam(:auth) + newparam(:log_file) + end + end + + + shared_examples 'creating a session using environment variables' do + it 'creates an authenticated session' do + ENV['OS_USERNAME'] = 'admin' + ENV['OS_PASSWORD'] = 'fyby-tet' + ENV['OS_TENANT_NAME'] = 'admin' + ENV['OS_AUTH_URL'] = 'http://192.168.11.4:35357/v2.0' + response = nil + VCR.use_cassette('aviator/session/with_password') do + session = provider.session + response = session.identity_service.request(:list_tenants, :session_data => provider.session_data) + end + expect(response.status).to eq(200) + end + end + + shared_examples 'creating a session using a service token from keystone.conf' do + it 'creates an unauthenticated session' do + data = "[DEFAULT]\nadmin_token=sosp-kyl\nadmin_endpoint=http://192.168.11.4:35357/v2.0" + response = 
nil + VCR.use_cassette('aviator/session/with_token') do + # Stubbing File.read produces inconsistent results because of how IniConfig + # overrides the File class in some versions of Puppet. + # Stubbing FileType.filetype(:flat) simplifies working with IniConfig + Puppet::Util::FileType.filetype(:flat).any_instance.expects(:read).returns(StringIO.new(data).read) + session = provider.session + Puppet::Util::FileType.filetype(:flat).any_instance.unstub(:read) + response = session.identity_service.request(:list_tenants, :session_data => provider.session_data) + end + + expect(response.status).to eq(200) + end + end + + shared_examples 'it has no credentials' do + it 'fails to authenticate' do + expect{ provider.session }.to raise_error(Puppet::Error, /No credentials provided/) + end + end + + shared_examples 'making request with an existing session' do + it 'makes a successful request' do + VCR.use_cassette('aviator/request/with_session') do + session = provider.session + response = provider.request(session.identity_service, :list_tenants) + expect(response.status).to eq(200) + end + end + end + + shared_examples 'making request with injected session data' do + it 'makes a successful request' do + VCR.use_cassette('aviator/request/without_session') do + session = provider.session + response = provider.request(session.identity_service, :list_tenants) + expect(response.status).to eq(200) + end + end + end + + shared_examples 'making request with no session or session data' do + it 'fails to make a request' do + expect{ provider.request(nil, :list_tenants) }.to raise_error(Puppet::Error, /Cannot make a request/) + end + end + + describe '#session' do + + context 'with valid password credentials in parameters' do + let(:resource_attrs) do + { + :name => 'stubresource', + :auth => { + 'username' => 'admin', + 'password' => 'fyby-tet', + 'tenant_name' => 'admin', + 'host_uri' => 'http://192.168.11.4:35357/v2.0', + } + } + end + + it 'creates a session' do + provider = Puppet::Provider::Aviator.new(type.new(resource_attrs)) + response = nil + VCR.use_cassette('aviator/session/with_password') do + session = provider.session + response = session.identity_service.request(:list_tenants) + end + expect(response.status).to eq(200) + end + end + + context 'with valid openrc file in parameters' do + data = "export OS_USERNAME='admin'\nexport OS_PASSWORD='fyby-tet'\nexport OS_TENANT_NAME='admin'\nexport OS_AUTH_URL='http://192.168.11.4:35357/v2.0'" + let(:resource_attrs) do + { + :name => 'stubresource', + :auth => { + 'openrc' => '/root/openrc' + } + } + end + + it 'creates a session' do + provider = Puppet::Provider::Aviator.new(type.new(resource_attrs)) + response = nil + VCR.use_cassette('aviator/session/with_password') do + File.expects(:open).with('/root/openrc').returns(StringIO.new(data)) + session = provider.session + File.unstub(:open) # Ignore File.open calls to cassette file + response = session.identity_service.request(:list_tenants) + end + expect(response.status).to eq(200) + end + end + + context 'with valid service token in parameters' do + let(:resource_attrs) do + { + :name => 'stubresource', + :auth => { + 'service_token' => 'sosp-kyl', + 'host_uri' => 'http://192.168.11.4:35357/v2.0' + } + } + end + + subject(:session) do + provider = Puppet::Provider::Aviator.new(type.new(resource_attrs)) + VCR.use_cassette('aviator/session/with_token') do + session = provider.session + response = session.identity_service.request(:list_tenants, :session_data => provider.session_data) + end + end + + it 
'creates a session' do + expect(session.status).to eq(200) + end + + end + + context 'with valid password credentials in environment variables' do + it_behaves_like 'creating a session using environment variables' do + let(:resource_attrs) do + { + :name => 'stubresource', + } + end + let(:provider) do + Puppet::Provider::Aviator.new(type.new(resource_attrs)) + end + end + end + + context 'with valid service token in keystone.conf' do + it_behaves_like 'creating a session using a service token from keystone.conf' do + let(:resource_attrs) do + { + :name => 'stubresource', + } + end + let(:provider) do + Puppet::Provider::Aviator.new(type.new(resource_attrs)) + end + end + + end + + context 'with no valid credentials' do + it_behaves_like 'it has no credentials' do + let(:resource_attrs) do + { + :name => 'stubresource', + } + end + let(:provider) { Puppet::Provider::Aviator.new(type.new(resource_attrs)) } + end + end + + end + + + describe '::session' do + + context 'with valid password credentials in environment variables' do + it_behaves_like 'creating a session using environment variables' do + let(:provider) { Puppet::Provider::Aviator.dup } + end + end + + context 'with valid service token in keystone.conf' do + it_behaves_like 'creating a session using a service token from keystone.conf' do + let(:provider) { Puppet::Provider::Aviator.dup } + end + end + + context 'with no valid credentials' do + it_behaves_like 'it has no credentials' do + let(:provider) { Puppet::Provider::Aviator.dup } + end + end + end + + describe '#request' do + context 'when a session exists' do + it_behaves_like 'making request with an existing session' do + let(:resource_attrs) do + { + :name => 'stubresource', + :auth => { + 'username' => 'admin', + 'password' => 'fyby-tet', + 'tenant_name' => 'admin', + 'host_uri' => 'http://192.168.11.4:35357/v2.0', + } + } + end + let (:provider) { Puppet::Provider::Aviator.new(type.new(resource_attrs)) } + end + end + + context 'when injecting session data' do + let(:resource_attrs) do + { + :name => 'stubresource', + :auth => { + 'service_token' => 'sosp-kyl', + 'host_uri' => 'http://192.168.11.4:35357/v2.0' + } + } + end + let(:provider) { Puppet::Provider::Aviator.new(type.new(resource_attrs)) } + it 'makes a successful request' do + provider = Puppet::Provider::Aviator.new(type.new(resource_attrs)) + VCR.use_cassette('aviator/request/without_session') do + session = provider.session + response = provider.request(session.identity_service, :list_tenants) + expect(response.status).to eq(200) + end + end + end + + context 'when there is no session or session data' do + it_behaves_like 'making request with no session or session data' do + let(:resource_attrs) do + { + :name => 'stubresource', + } + end + let(:provider) {Puppet::Provider::Aviator.new(type.new(resource_attrs)) } + end + end + end + + describe '::request' do + context 'when a session exists' do + + it_behaves_like 'making request with an existing session' do + let(:provider) { provider = Puppet::Provider::Aviator.dup } + before(:each) do + ENV['OS_USERNAME'] = 'admin' + ENV['OS_PASSWORD'] = 'fyby-tet' + ENV['OS_TENANT_NAME'] = 'admin' + ENV['OS_AUTH_URL'] = 'http://192.168.11.4:35357/v2.0' + end + end + end + + context 'when injecting session data' do + let(:session_data) do + { + :base_url => 'http://192.168.11.4:35357/v2.0', + :service_token => 'sosp-kyl' + } + end + it 'makes a successful request' do + provider = Puppet::Provider::Aviator.dup + VCR.use_cassette('aviator/request/without_session') do + 
session = ::Aviator::Session.new(:config => { :provider => 'openstack' }, :log_file => log_file) + provider.session_data = session_data + response = provider.request(session.identity_service, :list_tenants) + expect(response.status).to eq(200) + end + end + end + + context 'when there is no session or session data' do + it_behaves_like 'making request with no session or session data' do + let(:provider) { Puppet::Provider::Aviator.dup } + end + end + end +end diff --git a/qpid/manifests/server.pp b/qpid/manifests/server.pp index 4c688bd19..6f70835b6 100644 --- a/qpid/manifests/server.pp +++ b/qpid/manifests/server.pp @@ -4,7 +4,7 @@ class qpid::server( $config_file = '/etc/qpidd.conf', $package_name = 'qpid-cpp-server', - $package_ensure = present, + $package_ensure = latest, $service_name = 'qpidd', $service_ensure = running, $service_enable = true, diff --git a/rabbitmq/.travis.yml b/rabbitmq/.travis.yml index 079e4fc64..a40ae502e 100644 --- a/rabbitmq/.travis.yml +++ b/rabbitmq/.travis.yml @@ -1,31 +1,17 @@ --- -branches: - only: - - master language: ruby bundler_args: --without development -script: bundle exec rake spec SPEC_OPTS='--format documentation' -after_success: - - git clone -q git://github.com/puppetlabs/ghpublisher.git .forge-releng - - .forge-releng/publish -rvm: - - 1.8.7 - - 1.9.3 - - 2.0.0 - - 2.1.1 -env: - matrix: - - PUPPET_GEM_VERSION="~> 3.4.0" - - PUPPET_GEM_VERSION="~> 3.5.0" - global: - - PUBLISHER_LOGIN=puppetlabs - - secure: |- - mSHk+9ReN6wxJ9sOgySOSKEjaXOdtT9B1BtvsOGcm/FQI3p57meid3B6z37r\nNv9PbxwGb - mcZafMxNAfwr8nUb/bqxeXnZaGiY0E9pvg9aRr3g4knczn+siW+\nDkKlvM0rex2zzF0PDc - 495e61kP5cxcKNVdDJSiUPPK6fNGQ1xX8= +script: "bundle exec rake validate && bundle exec rake lint && bundle exec rake spec SPEC_OPTS='--format documentation'" matrix: - exclude: - - rvm: 2.1.1 - env: PUPPET_GEM_VERSION="~> 3.4.0" + fast_finish: true + include: + - rvm: 1.8.7 + env: PUPPET_GEM_VERSION="~> 2.7.0" FACTER_GEM_VERSION="~> 1.6.0" + - rvm: 1.8.7 + env: PUPPET_GEM_VERSION="~> 2.7.0" FACTER_GEM_VERSION="~> 1.7.0" + - rvm: 1.9.3 + env: PUPPET_GEM_VERSION="~> 3.0" + - rvm: 2.0.0 + env: PUPPET_GEM_VERSION="~> 3.0" notifications: email: false diff --git a/rabbitmq/README.md b/rabbitmq/README.md index 228340f4a..ebf879083 100644 --- a/rabbitmq/README.md +++ b/rabbitmq/README.md @@ -59,6 +59,23 @@ class { '::rabbitmq': } ``` +Or such as offline installation from intranet or local mirrors: + +```puppet +class { '::rabbitmq': + key_content => template('openstack/rabbit.pub.key'), + package_gpg_key => '/tmp/rabbit.pub.key', +} +``` + +And this one will use external package key source for any (apt/rpm) package provider: + +```puppet +class { '::rabbitmq': + package_gpg_key => 'http://www.some_site.some_domain/some_key.pub.key', +} +``` + ### Environment Variables To use RabbitMQ Environment Variables, use the parameters `environment_variables` e.g.: @@ -205,6 +222,11 @@ RabbitMQ Environment Variables in rabbitmq_env.config The erlang cookie to use for clustering - must be the same between all nodes. +###`key_content` + +Uses content method for Debian OS family. Should be a template for apt::source +class. Overrides `package_gpg_key` behavior, if enabled. Undefined by default. + ####`ldap_auth` Boolean, set to true to enable LDAP auth. @@ -248,7 +270,10 @@ be changed to latest. ####`package_gpg_key` -RPM package GPG key to import. +RPM package GPG key to import. Uses source method. Should be a URL for Debian/RedHat +OS family, or a file name for RedHat OS family. 
+Set to http://www.rabbitmq.com/rabbitmq-signing-key-public.asc by default. +Note, that `key_content`, if specified, would override this parameter for Debian OS family. ####`package_name` diff --git a/rabbitmq/lib/puppet/provider/rabbitmq_exchange/rabbitmqadmin.rb b/rabbitmq/lib/puppet/provider/rabbitmq_exchange/rabbitmqadmin.rb index 8e0d6ab86..0fa354db0 100644 --- a/rabbitmq/lib/puppet/provider/rabbitmq_exchange/rabbitmqadmin.rb +++ b/rabbitmq/lib/puppet/provider/rabbitmq_exchange/rabbitmqadmin.rb @@ -1,9 +1,16 @@ require 'puppet' Puppet::Type.type(:rabbitmq_exchange).provide(:rabbitmqadmin) do - commands :rabbitmqctl => '/usr/sbin/rabbitmqctl' - has_command(:rabbitmqadmin, '/usr/local/bin/rabbitmqadmin') do - environment( { 'HOME' => '' }) + if Puppet::PUPPETVERSION.to_f < 3 + commands :rabbitmqctl => 'rabbitmqctl' + commands :rabbitmqadmin => '/usr/local/bin/rabbitmqadmin' + else + has_command(:rabbitmqctl, 'rabbitmqctl') do + environment :HOME => "/tmp" + end + has_command(:rabbitmqadmin, '/usr/local/bin/rabbitmqadmin') do + environment :HOME => "/tmp" + end end defaultfor :feature => :posix diff --git a/rabbitmq/manifests/init.pp b/rabbitmq/manifests/init.pp index f91e1afc2..aa42b8127 100644 --- a/rabbitmq/manifests/init.pp +++ b/rabbitmq/manifests/init.pp @@ -53,6 +53,7 @@ $environment_variables = $rabbitmq::params::environment_variables, $config_variables = $rabbitmq::params::config_variables, $config_kernel_variables = $rabbitmq::params::config_kernel_variables, + $key_content = undef, ) inherits rabbitmq::params { validate_bool($admin_enable) @@ -113,6 +114,10 @@ fail('$ssl_only => true requires that $ssl => true') } + if $config_stomp and $ssl_stomp_port and ! $ssl { + warning('$ssl_stomp_port requires that $ssl => true and will be ignored') + } + include '::rabbitmq::install' include '::rabbitmq::config' include '::rabbitmq::service' @@ -122,8 +127,12 @@ case $::osfamily { 'RedHat', 'SUSE': { include '::rabbitmq::repo::rhel' } - 'Debian': - { include '::rabbitmq::repo::apt' } + 'Debian': { + class { '::rabbitmq::repo::apt' : + key_source => $package_gpg_key, + key_content => $key_content, + } + } default: { } } diff --git a/rabbitmq/manifests/repo/apt.pp b/rabbitmq/manifests/repo/apt.pp index 28635f678..56d08af8a 100644 --- a/rabbitmq/manifests/repo/apt.pp +++ b/rabbitmq/manifests/repo/apt.pp @@ -8,6 +8,7 @@ $include_src = false, $key = '056E8E56', $key_source = 'http://www.rabbitmq.com/rabbitmq-signing-key-public.asc', + $key_content = undef, ) { $pin = $rabbitmq::package_apt_pin @@ -21,6 +22,7 @@ include_src => $include_src, key => $key, key_source => $key_source, + key_content => $key_content, } if $pin { diff --git a/rabbitmq/spec/classes/rabbitmq_spec.rb b/rabbitmq/spec/classes/rabbitmq_spec.rb index 8c9b39a26..e82a64e6b 100644 --- a/rabbitmq/spec/classes/rabbitmq_spec.rb +++ b/rabbitmq/spec/classes/rabbitmq_spec.rb @@ -17,22 +17,12 @@ end describe 'apt::source default values' do - let(:facts) {{ :osfamily => 'Debian' }} it 'should add a repo with defaults values' do - contain_file('/etc/apt/sources.list.d/rabbitmq.list')\ - .with_content(%r|deb http\://www\.rabbitmq.com/debian/ testing main|) - end - end - - describe 'apt::source custom values' do - let(:params) { - { :location => 'http://www.foorepo.com/debian', - :release => 'unstable', - :repos => 'main' - }} - it 'should add a repo with custom new values' do - contain_file('/etc/apt/sources.list.d/rabbitmq.list')\ - .with_content(%r|deb http\://www\.foorepo.com/debian/ unstable main|) + should 
contain_apt__source('rabbitmq').with( {
+              :location => 'http://www.rabbitmq.com/debian/',
+              :release  => 'testing',
+              :repos    => 'main',
+            })
           end
         end
       end
@@ -235,31 +225,38 @@
   describe 'node_ip_address when set' do
     let(:params) {{ :node_ip_address => '172.0.0.1' }}
     it 'should set RABBITMQ_NODE_IP_ADDRESS to specified value' do
-      contain_file('rabbitmq-env.config').with({
-        'content' => 'RABBITMQ_NODE_IP_ADDRESS=172.0.0.1',
-      })
+      should contain_file('rabbitmq-env.config').
+        with_content(%r{RABBITMQ_NODE_IP_ADDRESS=172\.0\.0\.1})
     end
   end
   describe 'stomp by default' do
     it 'should not specify stomp parameters in rabbitmq.config' do
-      contain_file('rabbitmq.config').without({
+      should contain_file('rabbitmq.config').without({
         'content' => /stomp/,})
     end
   end
   describe 'stomp when set' do
     let(:params) {{ :config_stomp => true, :stomp_port => 5679 }}
     it 'should specify stomp port in rabbitmq.config' do
-      contain_file('rabbitmq.config').with({
-        'content' => /rabbitmq_stomp.*tcp_listeners, \[5679\]/,
+      should contain_file('rabbitmq.config').with({
+        'content' => /rabbitmq_stomp.*tcp_listeners, \[5679\]/m,
+      })
+    end
+  end
+  describe 'stomp when set ssl port w/o ssl enabled' do
+    let(:params) {{ :config_stomp => true, :stomp_port => 5679, :ssl => false, :ssl_stomp_port => 5680 }}
+    it 'should not configure ssl_listeners in rabbitmq.config' do
+      should contain_file('rabbitmq.config').without({
+        'content' => /rabbitmq_stomp.*ssl_listeners, \[5680\]/m,
       })
     end
   end
   describe 'stomp when set with ssl' do
-    let(:params) {{ :config_stomp => true, :stomp_port => 5679, :ssl_stomp_port => 5680 }}
+    let(:params) {{ :config_stomp => true, :stomp_port => 5679, :ssl => true, :ssl_stomp_port => 5680 }}
     it 'should specify stomp port and ssl stomp port in rabbitmq.config' do
-      contain_file('rabbitmq.config').with({
-        'content' => /rabbitmq_stomp.*tcp_listeners, \[5679\].*ssl_listeners, \[5680\]/,
+      should contain_file('rabbitmq.config').with({
+        'content' => /rabbitmq_stomp.*tcp_listeners, \[5679\].*ssl_listeners, \[5680\]/m,
       })
     end
   end
@@ -316,8 +313,8 @@
   describe 'default_user and default_pass set' do
     let(:params) {{ :default_user => 'foo', :default_pass => 'bar' }}
     it 'should set default_user and default_pass to specified values' do
-      contain_file('rabbitmq.config').with({
-        'content' => /default_user, <<"foo">>.*default_pass, <<"bar">>/,
+      should contain_file('rabbitmq.config').with({
+        'content' => /default_user, <<"foo">>.*default_pass, <<"bar">>/m,
       })
     end
   end
@@ -332,12 +329,18 @@
       }
     }
     it 'should set ssl options to specified values' do
-      contain_file('rabbitmq.config').with({
-        'content' => %r|ssl_listeners, \[3141\].*
-          ssl_options, \[\{cacertfile,"/path/to/cacert".*
-          certfile="/path/to/cert".*
-          keyfile,"/path/to/key|,
-      })
+      should contain_file('rabbitmq.config').with_content(
+        %r{ssl_listeners, \[3141\]}
+      )
+      should contain_file('rabbitmq.config').with_content(
+        %r{ssl_options, \[\{cacertfile,"/path/to/cacert"}
+      )
+      should contain_file('rabbitmq.config').with_content(
+        %r{certfile,"/path/to/cert"}
+      )
+      should contain_file('rabbitmq.config').with_content(
+        %r{keyfile,"/path/to/key"}
+      )
     end
   end
@@ -345,20 +348,18 @@
     let(:params) {
       { :ssl => true,
        :ssl_only => true,
-       :ssl_management_port => 3141,
+       :ssl_port => 3141,
        :ssl_cacert => '/path/to/cacert',
        :ssl_cert => '/path/to/cert',
        :ssl_key => '/path/to/key'
      }
    }
    it 'should set ssl options to specified values' do
-      contain_file('rabbitmq.config').with({
-        'content' => %r|tcp_listeners, \[\].*
-          ssl_listeners, \[3141\].*
-          ssl_options, \[\{cacertfile,"/path/to/cacert".*
-          certfile="/path/to/cert".*
-          keyfile,"/path/to/key|,
-      })
+      should contain_file('rabbitmq.config').with_content(%r{tcp_listeners, \[\]})
+      should contain_file('rabbitmq.config').with_content(%r{ssl_listeners, \[3141\]})
+      should contain_file('rabbitmq.config').with_content(%r{ssl_options, \[\{cacertfile,"/path/to/cacert"})
+      should contain_file('rabbitmq.config').with_content(%r{certfile,"/path/to/cert"})
+      should contain_file('rabbitmq.config').with_content(%r{keyfile,"/path/to/key})
    end
  end
@@ -373,16 +374,13 @@
      }
    }
    it 'should set rabbitmq_management ssl options to specified values' do
-      contain_file('rabbitmq.config').with({
-        'content' => %r|\{rabbitmq_management, \[.*
-          \{listener, \[.*
-          \{port, 3141\},.*
-          \{ssl, true\},.*
-          \{ssl_opts, \[\{cacertfile, "/path/to/cacert"\},.*
-          \{certfile, "/path/to/cert"\},.*
-          \{keyfile, "/path/to/key"\}\]\}.*
-          \]\}|,
-      })
+      should contain_file('rabbitmq.config').with_content(%r{rabbitmq_management, \[})
+      should contain_file('rabbitmq.config').with_content(%r{listener, \[})
+      should contain_file('rabbitmq.config').with_content(%r{port, 3141\}})
+      should contain_file('rabbitmq.config').with_content(%r{ssl, true\}})
+      should contain_file('rabbitmq.config').with_content(%r{ssl_opts, \[\{cacertfile, "/path/to/cacert"\},})
+      should contain_file('rabbitmq.config').with_content(%r{certfile, "/path/to/cert"\},})
+      should contain_file('rabbitmq.config').with_content(%r{keyfile, "/path/to/key"\}\]\}})
    end
  end
@@ -394,12 +392,9 @@
      }
    }
    it 'should set rabbitmq_management options to specified values' do
-      contain_file('rabbitmq.config').with({
-        'content' => /\{rabbitmq_management, \[.*
-          \{listener, \[.*
-          \{port, 3141\},.*
-          \]\}/,
-      })
+      should contain_file('rabbitmq.config').with_content(%r{rabbitmq_management, \[})
+      should contain_file('rabbitmq.config').with_content(%r{listener, \[})
+      should contain_file('rabbitmq.config').with_content(%r{port, 3141\}})
    end
  end
@@ -414,16 +409,13 @@
      }
    }
    it 'should set rabbitmq_management ssl options to specified values' do
-      contain_file('rabbitmq.config').with({
-        'content' => %r|\{rabbitmq_management, \[.*
-          \{listener, \[.*
-          \{port, 3141\},.*
-          \{ssl, true\},.*
-          \{ssl_opts, \[\{cacertfile, "/path/to/cacert"\},.*
-          \{certfile, "/path/to/cert"\},.*
-          \{keyfile, "/path/to/key"\}\]\}.*
-          \]\}|,
-      })
+      should contain_file('rabbitmq.config').with_content(%r{rabbitmq_management, \[})
+      should contain_file('rabbitmq.config').with_content(%r{listener, \[})
+      should contain_file('rabbitmq.config').with_content(%r{port, 3141\},})
+      should contain_file('rabbitmq.config').with_content(%r{ssl, true\},})
+      should contain_file('rabbitmq.config').with_content(%r{ssl_opts, \[\{cacertfile, "/path/to/cacert"\},})
+      should contain_file('rabbitmq.config').with_content(%r{certfile, "/path/to/cert"\},})
+      should contain_file('rabbitmq.config').with_content(%r{keyfile, "/path/to/key"\}\]\}})
    end
  end
@@ -435,12 +427,10 @@
      }
    }
    it 'should set rabbitmq_management options to specified values' do
-      contain_file('rabbitmq.config').with({
-        'content' => /\{rabbitmq_management, \[.*
-          \{listener, \[.*
-          \{port, 3141\},.*
-          \]\}/,
-      })
+      should contain_file('rabbitmq.config') \
+        .with_content(/\{rabbitmq_management, \[/) \
+        .with_content(/\{listener, \[/) \
+        .with_content(/\{port, 3141\}/)
    end
  end
diff --git a/rabbitmq/templates/rabbitmq.config.erb b/rabbitmq/templates/rabbitmq.config.erb
index 5f0bef51b..8f6b14f75 100644
--- a/rabbitmq/templates/rabbitmq.config.erb
+++ b/rabbitmq/templates/rabbitmq.config.erb
@@ -54,7 +54,7 @@
 % Configure the Stomp Plugin listening port
 {rabbitmq_stomp, [
   {tcp_listeners, [<%= @stomp_port %>]}
-  <%- if @ssl_stomp_port -%>,
+  <%- if @ssl && @ssl_stomp_port -%>,
   {ssl_listeners, [<%= @ssl_stomp_port %>]}
   <%- end -%>
   ]}
diff --git a/sahara/.fixtures.yml b/sahara/.fixtures.yml
new file mode 100644
index 000000000..27ce6bb1e
--- /dev/null
+++ b/sahara/.fixtures.yml
@@ -0,0 +1,19 @@
+fixtures:
+  repositories:
+    'inifile': 'git://github.com/puppetlabs/puppetlabs-inifile'
+    'keystone': 'git://github.com/stackforge/puppet-keystone.git'
+    'mysql':
+      repo: 'git://github.com/puppetlabs/puppetlabs-mysql.git'
+      ref: 'origin/2.2.x'
+    'openstacklib': 'git://github.com/stackforge/puppet-openstacklib.git'
+    'postgresql':
+      repo: "git://github.com/puppetlabs/puppet-postgresql.git"
+      ref: '2.5.0'
+    'qpid': 'git://github.com/dprince/puppet-qpid.git'
+    'rabbitmq':
+      repo: 'git://github.com/puppetlabs/puppetlabs-rabbitmq'
+      ref: 'origin/2.x'
+    'stdlib': 'git://github.com/puppetlabs/puppetlabs-stdlib.git'
+    'sysctl': 'git://github.com/duritong/puppet-sysctl.git'
+  symlinks:
+    'sahara': "#{source_dir}"
diff --git a/sahara/Gemfile b/sahara/Gemfile
index 64123b053..d965fa900 100644
--- a/sahara/Gemfile
+++ b/sahara/Gemfile
@@ -3,7 +3,10 @@ source 'https://rubygems.org'
 group :development, :test do
   gem 'puppetlabs_spec_helper', :require => false
   gem 'puppet-lint', '~> 0.3.2'
-  gem 'rake', '~> 10.1.1'
+  gem 'rake', '10.1.1'
+  gem 'rspec', '< 2.99'
+  gem 'json'
+  gem 'webmock'
 end
 
 if puppetversion = ENV['PUPPET_GEM_VERSION']
@@ -11,3 +14,5 @@ if puppetversion = ENV['PUPPET_GEM_VERSION']
 else
   gem 'puppet', :require => false
 end
+
+# vim:ft=ruby
diff --git a/sahara/Modulefile b/sahara/Modulefile
deleted file mode 100644
index 550de0195..000000000
--- a/sahara/Modulefile
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2013 Zürcher Hochschule für Angewandte Wissenschaften
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-name 'puppet-sahara'
-version '0.0.1'
-source 'https://github.com/stackforge/puppet-sahara'
-author 'andy@edmonds.be'
-license 'Apache License, Version 2.0'
-summary 'Installs the sahara backend'
-description 'Installs the sahara backend and can also install the sahara UI in OpenStack horizon'
-project_page 'https://github.com/stackforge/puppet-sahara'
-
-## Add dependencies, if any:
-# dependency 'username/name', '>= 1.2.0'
diff --git a/sahara/lib/puppet/provider/sahara_config/ini_settings.rb b/sahara/lib/puppet/provider/sahara_config/ini_settings.rb
new file mode 100644
index 000000000..b7826f4b5
--- /dev/null
+++ b/sahara/lib/puppet/provider/sahara_config/ini_settings.rb
@@ -0,0 +1,32 @@
+Puppet::Type.type(:sahara_config).provide(
+  :ini_setting,
+  :parent => Puppet::Type.type(:ini_setting).provider(:ruby)
+) do
+
+  # the setting is always default
+  # this if for backwards compat with the old puppet providers for sahara_config
+  def section
+    resource[:name].split('/', 2)[0]
+  end
+
+  # assumes that the name was the setting
+  # this is to maintain backwards compat with the the older
+  # stuff
+  def setting
+    resource[:name].split('/', 2)[1]
+  end
+
+  def separator
+    '='
+  end
+
+  def self.file_path
+    '/etc/sahara/sahara.conf'
+  end
+
+  # this needs to be removed. This has been replaced with the class method
+  def file_path
+    self.class.file_path
+  end
+
+end
diff --git a/sahara/lib/puppet/type/sahara_config.rb b/sahara/lib/puppet/type/sahara_config.rb
new file mode 100644
index 000000000..eae05a719
--- /dev/null
+++ b/sahara/lib/puppet/type/sahara_config.rb
@@ -0,0 +1,42 @@
+Puppet::Type.newtype(:sahara_config) do
+
+  ensurable
+
+  newparam(:name, :namevar => true) do
+    newvalues(/\S+\/\S+/)
+  end
+
+  newproperty(:value) do
+    desc 'The value of the setting to be defined.'
+    munge do |value|
+      value = value.to_s.strip
+      value.capitalize! if value =~ /^(true|false)$/i
+      value
+    end
+    newvalues(/^[\S ]*$/)
+
+    def is_to_s( currentvalue )
+      if resource.secret?
+        return '[old secret redacted]'
+      else
+        return currentvalue
+      end
+    end
+
+    def should_to_s( newvalue )
+      if resource.secret?
+        return '[new secret redacted]'
+      else
+        return newvalue
+      end
+    end
+  end
+
+  newparam(:secret, :boolean => true) do
+    desc 'Whether to hide the value from Puppet logs. Defaults to `false`.'
+
+    newvalues(:true, :false)
+
+    defaultto false
+  end
+end
diff --git a/sahara/metadata.json b/sahara/metadata.json
index 0ac1f7837..58a3564b9 100644
--- a/sahara/metadata.json
+++ b/sahara/metadata.json
@@ -1,13 +1,33 @@
 {
-  "name": "puppet-sahara",
-  "version": "0.0.1",
-  "summary": "Installs the sahara backend",
-  "author": "andy@edmonds.be",
-  "description": "Installs the sahara backend and can also install the sahara UI in OpenStack horizon",
-  "dependencies": [],
-  "types": [],
-  "checksums": {},
-  "source": "https://github.com/stackforge/puppet-sahara",
-  "project_page": "https://github.com/stackforge/puppet-sahara",
-  "license": "Apache License, Version 2.0"
-}
\ No newline at end of file
+  "name": "stackforge-sahara",
+  "version": "4.0.0",
+  "author": "StackForge Contributors",
+  "summary": "Puppet module for OpenStack Sahara",
+  "license": "Apache License 2.0",
+  "source": "git://github.com/stackforge/puppet-sahara.git",
+  "project_page": "https://launchpad.net/puppet-sahara",
+  "issues_url": "https://bugs.launchpad.net/puppet-sahara",
+  "requirements": [
+    { "name": "pe","version_requirement": "3.2.x" },
+    { "name": "puppet","version_requirement": "3.x" }
+  ],
+  "operatingsystem_support": [
+    {
+      "operatingsystem": "Debian",
+      "operatingsystemrelease": ["7"]
+    },
+    {
+      "operatingsystem": "RedHat",
+      "operatingsystemrelease": ["6","7"]
+    },
+    {
+      "operatingsystem": "Ubuntu",
+      "operatingsystemrelease": ["14.04"]
+    }
+  ],
+  "description": "Installs and configures OpenStack Sahara (Data Processing).",
+  "dependencies": [
+    { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" },
+    { "name": "stackforge/openstacklib", "version_requirement": ">=5.0.0" }
+  ]
+}
diff --git a/sahara/spec/spec_helper.rb b/sahara/spec/spec_helper.rb
index eb3ad3df1..33926ec44 100644
--- a/sahara/spec/spec_helper.rb
+++ b/sahara/spec/spec_helper.rb
@@ -12,21 +12,12 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+#
-dir = File.expand_path(File.dirname(__FILE__))
-$LOAD_PATH.unshift File.join(dir, 'lib')
-
-require 'mocha'
-require 'puppet'
-require 'rspec'
-require 'spec/autorun'
+require 'puppetlabs_spec_helper/module_spec_helper'
-Spec::Runner.configure do |config|
-  config.mock_with :mocha
+RSpec.configure do |c|
+  c.alias_it_should_behave_like_to :it_configures, 'configures'
+  c.alias_it_should_behave_like_to :it_raises, 'raises'
 end
-
-# We need this because the RAL uses 'should' as a method. This
-# allows us the same behaviour but with a different method name.
-class Object
-  alias :must :should
-end
diff --git a/sahara/spec/unit/provider/sahara_config/ini_setting_spec.rb b/sahara/spec/unit/provider/sahara_config/ini_setting_spec.rb
new file mode 100644
index 000000000..8bc6ec4db
--- /dev/null
+++ b/sahara/spec/unit/provider/sahara_config/ini_setting_spec.rb
@@ -0,0 +1,38 @@
+#
+# these tests are a little concerning b/c they are hacking around the
+# modulepath, so these tests will not catch issues that may eventually arise
+# related to loading these plugins.
+# I could not, for the life of me, figure out how to programatcally set the modulepath
+$LOAD_PATH.push(
+  File.join(
+    File.dirname(__FILE__),
+    '..',
+    '..',
+    '..',
+    'fixtures',
+    'modules',
+    'inifile',
+    'lib')
+)
+require 'spec_helper'
+provider_class = Puppet::Type.type(:sahara_config).provider(:ini_setting)
+describe provider_class do
+
+  it 'should default to the default setting when no other one is specified' do
+    resource = Puppet::Type::Sahara_config.new(
+      {:name => 'DEFAULT/foo', :value => 'bar'}
+    )
+    provider = provider_class.new(resource)
+    provider.section.should == 'DEFAULT'
+    provider.setting.should == 'foo'
+  end
+
+  it 'should allow setting to be set explicitly' do
+    resource = Puppet::Type::Sahara_config.new(
+      {:name => 'dude/whoa', :value => 'bar'}
+    )
+    provider = provider_class.new(resource)
+    provider.section.should == 'dude'
+    provider.setting.should == 'whoa'
+  end
+end
diff --git a/sahara/spec/unit/type/sahara_config_spec.rb b/sahara/spec/unit/type/sahara_config_spec.rb
new file mode 100644
index 000000000..20937eab4
--- /dev/null
+++ b/sahara/spec/unit/type/sahara_config_spec.rb
@@ -0,0 +1,52 @@
+require 'puppet'
+require 'puppet/type/sahara_config'
+describe 'Puppet::Type.type(:sahara_config)' do
+  before :each do
+    @sahara_config = Puppet::Type.type(:sahara_config).new(:name => 'DEFAULT/foo', :value => 'bar')
+  end
+
+  it 'should require a name' do
+    expect {
+      Puppet::Type.type(:sahara_config).new({})
+    }.to raise_error(Puppet::Error, 'Title or name must be provided')
+  end
+
+  it 'should not expect a name with whitespace' do
+    expect {
+      Puppet::Type.type(:sahara_config).new(:name => 'f oo')
+    }.to raise_error(Puppet::Error, /Parameter name failed/)
+  end
+
+  it 'should fail when there is no section' do
+    expect {
+      Puppet::Type.type(:sahara_config).new(:name => 'foo')
+    }.to raise_error(Puppet::Error, /Parameter name failed/)
+  end
+
+  it 'should not require a value when ensure is absent' do
+    Puppet::Type.type(:sahara_config).new(:name => 'DEFAULT/foo', :ensure => :absent)
+  end
+
+  it 'should accept a valid value' do
+    @sahara_config[:value] = 'bar'
+    @sahara_config[:value].should == 'bar'
+  end
+
+  it 'should not accept a value with whitespace' do
+    @sahara_config[:value] = 'b ar'
+    @sahara_config[:value].should == 'b ar'
+  end
+
+  it 'should accept valid ensure values' do
+    @sahara_config[:ensure] = :present
+    @sahara_config[:ensure].should == :present
+    @sahara_config[:ensure] = :absent
+    @sahara_config[:ensure].should == :absent
+  end
+
+  it 'should not accept invalid ensure values' do
+    expect {
+      @sahara_config[:ensure] = :latest
+    }.to raise_error(Puppet::Error, /Invalid value/)
+  end
+end
diff --git a/swift/Modulefile b/swift/Modulefile
deleted file mode 100644
index 40aa14bdf..000000000
--- a/swift/Modulefile
+++ /dev/null
@@ -1,18 +0,0 @@
-name 'puppetlabs-swift'
-version '4.0.0'
-author 'Puppet Labs and StackForge Contributors'
-license 'Apache License 2.0'
-summary 'Puppet module for OpenStack Swift'
-description 'Installs and configures OpenStack Swift (Object Storage).'
-project_page 'https://launchpad.net/puppet-swift'
-source 'https://github.com/stackforge/puppet-swift'
-
-dependency 'puppetlabs/inifile', '>=1.0.0 <2.0.0'
-dependency 'puppetlabs/keystone', '>=4.0.0 <5.0.0'
-dependency 'puppetlabs/mysql','>=0.6.1 <3.0.0'
-dependency 'puppetlabs/rsync', '>=0.2.0 <1.0.0'
-dependency 'puppetlabs/stdlib', '>=3.2.0'
-dependency 'puppetlabs/xinetd', '>=1.0.1 <2.0.0'
-dependency 'puppetlabs/concat', '>=1.0.0 <2.0.0'
-dependency 'saz/memcached', '>=2.0.2 <3.0.0'
-dependency 'saz/ssh', '>=1.0.2 <2.0.0'
diff --git a/swift/manifests/proxy.pp b/swift/manifests/proxy.pp
index 41b1463bd..ff7405cb6 100644
--- a/swift/manifests/proxy.pp
+++ b/swift/manifests/proxy.pp
@@ -34,6 +34,9 @@
 #   Optional but requires write_affinity to be set. Defaults to undef.
 # [*package_ensure*] Ensure state of the swift proxy package.
 #   Optional. Defaults to present.
+# [*log_name*]
+#   Configures log_name for swift proxy-server.
+#   Optional. Defaults to proxy-server
 #
 # == Examples
 #
@@ -59,6 +62,7 @@
   $log_level = 'INFO',
   $log_facility = 'LOG_LOCAL1',
   $log_handoffs = true,
+  $log_name = 'proxy-server',
   $read_affinity = undef,
   $write_affinity = undef,
   $write_affinity_node_count = undef,
diff --git a/swift/manifests/storage/server.pp b/swift/manifests/storage/server.pp
index 80f85db1d..06d68aa7f 100644
--- a/swift/manifests/storage/server.pp
+++ b/swift/manifests/storage/server.pp
@@ -21,6 +21,7 @@
   $log_facility = 'LOG_LOCAL2',
   $log_level = 'INFO',
   $log_address = '/dev/log',
+  $log_name = "${type}-server",
   # this parameters needs to be specified after type and name
   $config_file_path = "${type}-server/${name}.conf"
 ) {
diff --git a/swift/metadata.json b/swift/metadata.json
new file mode 100644
index 000000000..4273eeb0d
--- /dev/null
+++ b/swift/metadata.json
@@ -0,0 +1,43 @@
+{
+  "name": "stackforge-swift",
+  "version": "5.0.0",
+  "author": "Puppet Labs and StackForge Contributors",
+  "summary": "Puppet module for OpenStack Swift",
+  "license": "Apache License 2.0",
+  "source": "git://github.com/stackforge/puppet-swift.git",
+  "project_page": "https://launchpad.net/puppet-swift",
+  "issues_url": "https://bugs.launchpad.net/puppet-swift",
+  "requirements": [
+    { "name": "pe","version_requirement": "3.x" },
+    { "name": "puppet","version_requirement": "3.x" }
+  ],
+  "operatingsystem_support": [
+    {
+      "operatingsystem": "Debian",
+      "operatingsystemrelease": ["7"]
+    },
+    {
+      "operatingsystem": "Fedora",
+      "operatingsystemrelease": ["20"]
+    },
+    {
+      "operatingsystem": "RedHat",
+      "operatingsystemrelease": ["6.5","7"]
+    },
+    {
+      "operatingsystem": "Ubuntu",
+      "operatingsystemrelease": ["12.04","14.04"]
+    }
+  ],
+  "description": "Installs and configures OpenStack Swift (Object Storage).",
+  "dependencies": [
+    { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" },
+    { "name": "stackforge/keystone", "version_requirement": ">=5.0.0 <6.0.0" },
+    { "name": "puppetlabs/rsync", "version_requirement": ">=0.2.0 <1.0.0" },
+    { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" },
+    { "name": "puppetlabs/xinetd", "version_requirement": ">=1.0.1 <2.0.0" },
+    { "name": "puppetlabs/concat", "version_requirement": ">=1.0.0 <2.0.0" },
+    { "name": "saz/memcached", "version_requirement": ">=2.0.2 <3.0.0" },
+    { "name": "saz/ssh", "version_requirement": ">=1.0.2 <2.0.0" }
+  ]
+}
diff --git a/swift/spec/classes/swift_proxy_spec.rb b/swift/spec/classes/swift_proxy_spec.rb
index 7fcd9d52b..a0b6f64b8 100644
--- a/swift/spec/classes/swift_proxy_spec.rb
+++ b/swift/spec/classes/swift_proxy_spec.rb
@@ -99,6 +99,7 @@ class { swift: swift_hash_suffix => string }"
        :allow_account_management => false,
        :account_autocreate => false,
        :log_level => 'DEBUG',
+       :log_name => 'swift-proxy-server',
        :read_affinity => 'r1z1=100, r1=200',
        :write_affinity => 'r1',
        :write_affinity_node_count => '2 * replicas',
@@ -116,6 +117,7 @@ class { swift: swift_hash_suffix => string }"
        'pipeline = swauth proxy-server',
        '[app:proxy-server]',
        'use = egg:swift#proxy',
+       'set log_name = swift-proxy-server',
        'allow_account_management = false',
        'account_autocreate = false',
        'read_affinity = r1z1=100, r1=200',
diff --git a/swift/spec/defines/swift_storage_server_spec.rb b/swift/spec/defines/swift_storage_server_spec.rb
index fd78ef602..2376e7673 100644
--- a/swift/spec/defines/swift_storage_server_spec.rb
+++ b/swift/spec/defines/swift_storage_server_spec.rb
@@ -151,6 +151,9 @@ class { 'swift::storage': storage_local_net_ip => '10.0.0.1' }"
      it { should contain_file(fragment_file) \
        .with_content(/^user\s*=\s*swift\s*$/)
      }
+      it { should contain_file(fragment_file) \
+        .with_content(/^set log_name\s*=\s*#{t}-server\s*$/)
+      }
      it { should contain_file(fragment_file) \
        .with_content(/^set log_facility\s*=\s*LOG_LOCAL2\s*$/)
      }
diff --git a/swift/templates/account-server.conf.erb b/swift/templates/account-server.conf.erb
index d6adaa05e..ae625f293 100644
--- a/swift/templates/account-server.conf.erb
+++ b/swift/templates/account-server.conf.erb
@@ -12,7 +12,7 @@ pipeline = <%= @pipeline.to_a.join(' ') %>
 [app:account-server]
 use = egg:swift#account
-set log_name = account-server
+set log_name = <%= @log_name %>
 set log_facility = <%= @log_facility %>
 set log_level = <%= @log_level %>
 set log_requests = True
diff --git a/swift/templates/container-server.conf.erb b/swift/templates/container-server.conf.erb
index 23f2998cd..f5bd84466 100644
--- a/swift/templates/container-server.conf.erb
+++ b/swift/templates/container-server.conf.erb
@@ -14,7 +14,7 @@ pipeline = <%= @pipeline.to_a.join(' ') %>
 [app:container-server]
 allow_versions = <%= @allow_versions %>
 use = egg:swift#container
-set log_name = @container-server
+set log_name = <%= @log_name %>
 set log_facility = <%= @log_facility %>
 set log_level = <%= @log_level %>
 set log_requests = True
diff --git a/swift/templates/object-server.conf.erb b/swift/templates/object-server.conf.erb
index 2a47c990b..47ce5e32c 100644
--- a/swift/templates/object-server.conf.erb
+++ b/swift/templates/object-server.conf.erb
@@ -12,7 +12,7 @@ pipeline = <%= @pipeline.to_a.join(' ') %>
 [app:object-server]
 use = egg:swift#object
-set log_name = object-server
+set log_name = <%= @log_name %>
 set log_facility = <%= @log_facility %>
 set log_level = <%= @log_level %>
 set log_requests = True
diff --git a/swift/templates/proxy-server.conf.erb b/swift/templates/proxy-server.conf.erb
index 08454b8f9..d59cb22ef 100644
--- a/swift/templates/proxy-server.conf.erb
+++ b/swift/templates/proxy-server.conf.erb
@@ -25,7 +25,7 @@ pipeline = <%= @pipeline.to_a.join(' ') %>
 [app:proxy-server]
 use = egg:swift#proxy
-set log_name = proxy-server
+set log_name = <%= @log_name %>
 set log_facility = <%= @log_facility %>
 set log_level = <%= @log_level %>
 set log_address = <%= @log_address %>
diff --git a/vswitch/Modulefile b/vswitch/Modulefile
deleted file mode 100644
index 5a6665a94..000000000
--- a/vswitch/Modulefile
+++ /dev/null
@@ -1,8 +0,0 @@
-name 'puppetlabs-vswitch'
-version '0.3.0'
-source 'https://github.com/stackforge/puppet-vswitch'
-author 'Endre Karlson, Dan Bode, Ian Wells, Gilles Dubreuil'
-license 'Apache License 2.0'
-summary 'Puppet module for OpenVSwitch and other vSwitches'
-description 'Puppet module to install and configure the OpenVSwitch and other vSwitches'
-project_page 'https://github.com/stackforge/puppet-vswitch'
diff --git a/vswitch/metadata.json b/vswitch/metadata.json
new file mode 100644
index 000000000..69c3b704e
--- /dev/null
+++ b/vswitch/metadata.json
@@ -0,0 +1,38 @@
+{
+  "name": "stackforge-vswitch",
+  "version": "1.0.0",
+  "source": "https://github.com/stackforge/puppet-vswitch",
+  "author": "Endre Karlson, Dan Bode and StackForge Contributors",
+  "license": "Apache License 2.0",
+  "source": "git://github.com/stackforge/puppet-vswitch.git",
+  "project_page": "https://launchpad.net/puppet-vswitch",
+  "issues_url": "https://bugs.launchpad.net/puppet-vswitch",
+  "summary": "A module for providing things (ports, bridges) to vSwitches (OVS)",
+  "operatingsystem_support": [
+    {
+      "operatingsystem":"Fedora",
+      "operatingsystemrelease": [ "20" ]
+    },
+    {
+      "operatingsystem": "RedHat",
+      "operatingsystemrelease": [ "6", "7" ]
+    },
+    {
+      "operatingsystem": "CentOS",
+      "operatingsystemrelease": [ "6", "7" ]
+    },
+    {
+      "operatingsystem": "Debian",
+      "operatingsystemrelease": [ "6", "7" ]
+    },
+    {
+      "operatingsystem": "Ubuntu",
+      "operatingsystemrelease": [ "12.04", "14.04" ]
+    }
+  ],
+  "requirements": [
+    { "name": "pe","version_requirement": "3.x" },
+    { "name": "puppet","version_requirement": "3.x" }
+  ],
+  "dependencies": []
+}
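
Note on the sahara_config type and ini_setting provider added above (illustrative sketch, not part of the patch): the resource title follows the usual 'section/setting' convention and the provider writes to /etc/sahara/sahara.conf. A minimal manifest snippet might look like the following; the setting names and values here are made up for the example:

    # hypothetical example usage of the new sahara_config type
    sahara_config { 'DEFAULT/use_syslog':
      value => true,
    }

    sahara_config { 'database/connection':
      value  => 'mysql://sahara:secret@127.0.0.1/sahara',
      secret => true,
    }

Per the type definition, secret => true only redacts the value in Puppet logs and reports; it does not change what is written to the configuration file.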