diff --git a/packaging/openstack-neutron/0089-Fix-errors-in-lbaas-L7-policy-implemetation.patch b/packaging/openstack-neutron/0089-Fix-errors-in-lbaas-L7-policy-implemetation.patch new file mode 100644 index 0000000..7e5827b --- /dev/null +++ b/packaging/openstack-neutron/0089-Fix-errors-in-lbaas-L7-policy-implemetation.patch @@ -0,0 +1,42 @@ +From 5eba9bb33fe64268951cd749296c176676c0ab0e Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Tue, 7 Mar 2017 10:10:02 +0800 +Subject: [PATCH 89/89] Fix errors in lbaas L7 policy implemetation + +Fixes: 67091dd5a ("Implement lbaas L7 policy rule model") + +Signed-off-by: Hunt Xu +--- + .../alembic_migrations/versions/222931b3859d_add_lbaas_l7_tables.py | 2 +- + neutron/extensions/loadbalancer_l7.py | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/neutron/db/migration/alembic_migrations/versions/222931b3859d_add_lbaas_l7_tables.py b/neutron/db/migration/alembic_migrations/versions/222931b3859d_add_lbaas_l7_tables.py +index b517e3c0e..5c4a547d6 100644 +--- a/neutron/db/migration/alembic_migrations/versions/222931b3859d_add_lbaas_l7_tables.py ++++ b/neutron/db/migration/alembic_migrations/versions/222931b3859d_add_lbaas_l7_tables.py +@@ -38,7 +38,7 @@ def upgrade(): + 'l7policies', + sa.Column('tenant_id', sa.String(length=255), nullable=False), + sa.Column('id', sa.String(length=36), nullable=False), +- sa.Column('pool_id', sa.String(length=36), nullable=False), ++ sa.Column('pool_id', sa.String(length=36), nullable=True), + sa.Column('priority', sa.Integer, nullable=False), + sa.Column('action', sa.Enum(*actions), nullable=False), + sa.Column('key', sa.String(length=255), nullable=True), +diff --git a/neutron/extensions/loadbalancer_l7.py b/neutron/extensions/loadbalancer_l7.py +index 909f1b25b..57aa51202 100644 +--- a/neutron/extensions/loadbalancer_l7.py ++++ b/neutron/extensions/loadbalancer_l7.py +@@ -230,7 +230,7 @@ class Loadbalancer_l7(extensions.ExtensionDescriptor): + return resources + + def update_attributes_map(self, attributes): +- super(Loadbalancer, self).update_attributes_map( ++ super(Loadbalancer_l7, self).update_attributes_map( + attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) + + def get_extended_resources(self, version): +-- +2.12.0 + diff --git a/packaging/openstack-neutron/0090-iptables_firewall-use-wrap-chains-and-rules-for-mete.patch b/packaging/openstack-neutron/0090-iptables_firewall-use-wrap-chains-and-rules-for-mete.patch new file mode 100644 index 0000000..476e57e --- /dev/null +++ b/packaging/openstack-neutron/0090-iptables_firewall-use-wrap-chains-and-rules-for-mete.patch @@ -0,0 +1,121 @@ +From 9c324b301aa51bbea0860643202ccd7b85f78da7 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Thu, 9 Mar 2017 12:04:21 +0800 +Subject: [PATCH 90/94] iptables_firewall: use wrap chains and rules for + metering + +Non-wrap chains/rules may cause problems. 
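For illustration only (not part of the patch): chains added with wrap=True get the agent's per-binary prefix and name-length handling from IptablesManager, and a `$chain` target in a rule is expanded to the wrapped name when rules are applied, which is why the patch can write `-j $%s` instead of calling `_wrap_target_chain()` by hand. A minimal self-contained sketch of that convention follows; the prefix and length limit are assumptions for illustration, not taken from Neutron's code.

```python
# Minimal sketch of the wrap convention -- NOT Neutron's real IptablesManager.
# The prefix and length limit below are assumed values for illustration.
WRAP_PREFIX = 'neutron-openvswi-'   # per-agent prefix added to wrapped chains
MAX_CHAIN_LEN_WRAP = 11             # wrapped chain names are truncated


def wrap_chain(chain_name):
    """Return the chain name iptables actually sees for a wrapped chain."""
    return WRAP_PREFIX + chain_name[:MAX_CHAIN_LEN_WRAP]


def render_rule(rule):
    """Expand '$chain' targets the way wrapped rules are rendered."""
    return ' '.join(
        wrap_chain(token[1:]) if token.startswith('$') else token
        for token in rule.split())


if __name__ == '__main__':
    # The metering chain jumps to the shared security-group chain.
    print(render_rule('-j $sg-chain'))   # -> -j neutron-openvswi-sg-chain
```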
+ +Fixes: redmine #9154 +Fixes: deaf40836 ("iptables_firewall: add firewall rules to meter instance +port stats") + +Signed-off-by: Hunt Xu +--- + neutron/agent/linux/iptables_firewall.py | 44 ++++++++++++++------------------ + 1 file changed, 19 insertions(+), 25 deletions(-) + +diff --git a/neutron/agent/linux/iptables_firewall.py b/neutron/agent/linux/iptables_firewall.py +index e0a1da757..c755cb99b 100644 +--- a/neutron/agent/linux/iptables_firewall.py ++++ b/neutron/agent/linux/iptables_firewall.py +@@ -185,46 +185,40 @@ class IptablesFirewallDriver(firewall.FirewallDriver): + # Only support IPv4 + chains = self._metering_chain_names(port, direction) + for m_chain_name in chains: +- self.iptables.ipv4['filter'].add_chain(m_chain_name, wrap=False) ++ self.iptables.ipv4['filter'].add_chain(m_chain_name) + + metering_chain, counting_in_chain, counting_chain = chains + # Jump to the original security group chain +- orig_chain_name = self.iptables.ipv4['filter']._wrap_target_chain( +- '$' + chain_name, True) +- jump_rule = '-j %s' % orig_chain_name +- self.iptables.ipv4['filter'].add_rule(metering_chain, jump_rule, +- wrap=False) ++ jump_rule = '-j $%s' % chain_name ++ self.iptables.ipv4['filter'].add_rule(metering_chain, jump_rule) ++ + # Jump to the counting chains + counting_rules = [] + tmp_direction = IPSET_DIRECTION[direction] + if self.enable_ipset: + counting_rules += [ +- '-m set --match-set %s %s -j %s' % ( ++ '-m set --match-set %s %s -j $%s' % ( + PRIVATE_IPSET_NAME, tmp_direction, counting_in_chain + ) + ] + else: + counting_rules += [ +- '--%s %s -j %s' % ( ++ '--%s %s -j $%s' % ( + tmp_direction, private_net, counting_in_chain + ) + for private_net in self.private_nets + ] +- counting_rules += ['-j %s' % counting_chain] ++ counting_rules += ['-j $%s' % counting_chain] + for rule in counting_rules: +- self.iptables.ipv4['filter'].add_rule(metering_chain, rule, +- wrap=False) ++ self.iptables.ipv4['filter'].add_rule(metering_chain, rule) + # Count the counting chain +- self.iptables.ipv4['filter'].add_rule(counting_in_chain, '', +- wrap=False) +- self.iptables.ipv4['filter'].add_rule(counting_chain, '', +- wrap=False) ++ self.iptables.ipv4['filter'].add_rule(counting_in_chain, '') ++ self.iptables.ipv4['filter'].add_rule(counting_chain, '') + return metering_chain + + def _remove_metering_chains(self, port, direction): + for m_chain_name in self._metering_chain_names(port, direction): +- self.iptables.ipv4['filter'].ensure_remove_chain( +- m_chain_name, wrap=False) ++ self.iptables.ipv4['filter'].ensure_remove_chain(m_chain_name) + + def _add_chain(self, port, direction): + chain_name = self._port_chain_name(port, direction) +@@ -247,10 +241,10 @@ class IptablesFirewallDriver(firewall.FirewallDriver): + + # jump to the chain based on the device + jump_rules = [ +- ['-m physdev --%s %s --physdev-is-bridged -j %s' % ( ++ ['-m physdev --%s %s --physdev-is-bridged -j $%s' % ( + self.IPTABLES_DIRECTION[direction], device, j_chain_name) + ] +- for j_chain_name in (metering_chain_name, '$' + chain_name)] ++ for j_chain_name in (metering_chain_name, chain_name)] + self._add_rule_to_chain_v4v6(SG_CHAIN, *jump_rules) + + if direction == EGRESS_DIRECTION: +@@ -573,9 +567,9 @@ class IptablesFirewallDriver(firewall.FirewallDriver): + def _metering_chain_names(self, port, direction): + return [ + iptables_manager.get_chain_name( +- '%s%s' % (prefix + direction + '-', port['device'][3:]), +- wrap=False +- ) for prefix in ('metering-', 'counting-in-', 'counting-')] ++ '%s%s%s' % ( ++ prefix, 
CHAIN_NAME_PREFIX[direction], port['device'][3:]) ++ ) for prefix in ('m', 'c', 'C')] + + def filter_defer_apply_on(self): + if not self._defer_apply: +@@ -637,9 +631,9 @@ class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver): + def _metering_chain_names(self, port, direction): + return [ + iptables_manager.get_chain_name( +- '%s%s' % (prefix + direction + '-', port['device']), +- wrap=False +- ) for prefix in ('metering-', 'counting-in-', 'counting-')] ++ '%s%s%s' % ( ++ prefix, CHAIN_NAME_PREFIX[direction], port['device']) ++ ) for prefix in ('m', 'c', 'C')] + + def _get_device_name(self, port): + return (self.OVS_HYBRID_TAP_PREFIX + port['device'])[:LINUX_DEV_LEN] +-- +2.12.1 + diff --git a/packaging/openstack-neutron/0091-firewall_l3_agent-only-get-hosted-routers-info.patch b/packaging/openstack-neutron/0091-firewall_l3_agent-only-get-hosted-routers-info.patch new file mode 100644 index 0000000..ed4b852 --- /dev/null +++ b/packaging/openstack-neutron/0091-firewall_l3_agent-only-get-hosted-routers-info.patch @@ -0,0 +1,37 @@ +From 003a20ecc07831fc2f3059e9cb8d76d2108851d8 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Fri, 10 Mar 2017 10:02:18 +0800 +Subject: [PATCH 91/94] firewall_l3_agent: only get hosted routers' info + +Fixes: redmine #9588 + +Signed-off-by: Hunt Xu +--- + .../services/firewall/agents/l3reference/firewall_l3_agent.py | 11 +++++++---- + 1 file changed, 7 insertions(+), 4 deletions(-) + +diff --git a/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py b/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py +index 0d994ef5a..bedabe2bc 100644 +--- a/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py ++++ b/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py +@@ -119,10 +119,13 @@ class FWaaSL3AgentRpcCallback(api.FWaaSAgentRpcCallbackMixin): + LOG.debug(_("%(func_name)s from agent for fw: %(fwid)s"), + {'func_name': func_name, 'fwid': fw['id']}) + try: +- routers = self.plugin_rpc.get_routers(context) +- router_info_list = self._get_router_info_list_for_tenant( +- routers, +- fw['tenant_id']) ++ router_ids = self.router_info.keys() ++ router_info_list = [] ++ if router_ids: ++ routers = self.plugin_rpc.get_routers(context, router_ids) ++ router_info_list = self._get_router_info_list_for_tenant( ++ routers, ++ fw['tenant_id']) + if not router_info_list: + LOG.debug(_('No Routers on tenant: %s'), fw['tenant_id']) + # fw was created before any routers were added, and if a +-- +2.12.1 + diff --git a/packaging/openstack-neutron/0092-metering-update-external-device-of-metering-iptables.patch b/packaging/openstack-neutron/0092-metering-update-external-device-of-metering-iptables.patch new file mode 100644 index 0000000..d77db24 --- /dev/null +++ b/packaging/openstack-neutron/0092-metering-update-external-device-of-metering-iptables.patch @@ -0,0 +1,43 @@ +From 1e238adafcad8e91c2080a95a0d5d3b97a39befd Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Thu, 16 Mar 2017 14:07:57 +0800 +Subject: [PATCH 92/94] metering: update external device of metering iptables + rules + +This is required by the new mechanism introduced in commit ec41bdd6f ( +"l3_agent: implement EayunStack floating ip mechanism"). There will be +multiple neutron ports connected to the external network in a router +with the new mechanism. So neutron-metering iptables rules should handle +those ports of floatingips as well. 
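For illustration (not part of the patch): replacing the per-gateway-port device name with the bare prefix plus a trailing '+' turns the iptables match into a wildcard over every external device in the router namespace. A small sketch of the before/after rule, assuming the usual values ('qg-' prefix, 14-character device-name limit); the label chain name is a placeholder.

```python
# Sketch of the device-match change; 'qg-' and the 14-character limit are
# assumed here for illustration.
EXTERNAL_DEV_PREFIX = 'qg-'
DEV_NAME_LEN = 14


def old_external_device(gw_port_id):
    # Before: rules matched only the router's single gateway device.
    return (EXTERNAL_DEV_PREFIX + gw_port_id)[:DEV_NAME_LEN]


def new_external_device():
    # After: the trailing '+' is an iptables interface wildcard, so every
    # qg-* device in the namespace (including floating-ip ports created by
    # the ES fip mechanism) is metered as well.
    return '%s+' % EXTERNAL_DEV_PREFIX


gw_port_id = '0a1b2c3d-4e5f-6789-abcd-ef0123456789'
print('-o %s -j <label-chain>' % old_external_device(gw_port_id))
# -o qg-0a1b2c3d-4e -j <label-chain>
print('-o %s -j <label-chain>' % new_external_device())
# -o qg-+ -j <label-chain>
```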
+ +Fixes: redmine #9641 + +Signed-off-by: Hunt Xu +--- + neutron/services/metering/drivers/iptables/iptables_driver.py | 5 +---- + 1 file changed, 1 insertion(+), 4 deletions(-) + +diff --git a/neutron/services/metering/drivers/iptables/iptables_driver.py b/neutron/services/metering/drivers/iptables/iptables_driver.py +index 9137530bf..1610bbe71 100644 +--- a/neutron/services/metering/drivers/iptables/iptables_driver.py ++++ b/neutron/services/metering/drivers/iptables/iptables_driver.py +@@ -133,15 +133,12 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + if router_id in self.routers: + del self.routers[router_id] + +- def get_external_device_name(self, port_id): +- return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] +- + def _process_metering_label_rules(self, rm, rules, label_chain, + rules_chain): + im = rm.iptables_manager + if not rm.router['gw_port_id']: + return +- ext_dev = self.get_external_device_name(rm.router['gw_port_id']) ++ ext_dev = "%s+" % EXTERNAL_DEV_PREFIX + + for rule in rules: + remote_ip = rule['remote_ip_prefix'] +-- +2.12.1 + diff --git a/packaging/openstack-neutron/0093-Fix-lbaas-l7-implement-many-errors.patch b/packaging/openstack-neutron/0093-Fix-lbaas-l7-implement-many-errors.patch new file mode 100644 index 0000000..855bd2e --- /dev/null +++ b/packaging/openstack-neutron/0093-Fix-lbaas-l7-implement-many-errors.patch @@ -0,0 +1,476 @@ +From 1eb4c1307a1083be9c114f638eb941dfd7344274 Mon Sep 17 00:00:00 2001 +From: "cheng.tang" +Date: Thu, 9 Mar 2017 11:48:14 +0800 +Subject: [PATCH 93/94] Fix lbaas l7 implement many errors + +Fixes: redmin #9518 +Fixes: 67091dd5a ("Implement lbaas L7 policy rule model") + +Signed-off-by: cheng.tang +Signed-off-by: Hunt Xu +--- + neutron/db/loadbalancer/loadbalancer_db.py | 42 +++++++-------- + neutron/extensions/loadbalancer_l7.py | 28 ++++------ + .../services/loadbalancer/agent/agent_manager.py | 4 +- + .../drivers/common/agent_driver_base.py | 4 +- + .../services/loadbalancer/drivers/haproxy/cfg.py | 29 ++++++++--- + .../drivers/haproxy/namespace_driver.py | 2 +- + neutron/services/loadbalancer/plugin.py | 60 +++++++++++----------- + 7 files changed, 87 insertions(+), 82 deletions(-) + +diff --git a/neutron/db/loadbalancer/loadbalancer_db.py b/neutron/db/loadbalancer/loadbalancer_db.py +index c00a68a97..c4addd0b5 100644 +--- a/neutron/db/loadbalancer/loadbalancer_db.py ++++ b/neutron/db/loadbalancer/loadbalancer_db.py +@@ -919,19 +919,19 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + ] + return self._fields(res, fields) + +- def create_l7policy(self, context, policy): +- p = policy['l7policy'] ++ def create_l7policy(self, context, l7policy): ++ p = l7policy['l7policy'] + + tenant_id = self._get_tenant_id_for_create(context, p) + with context.session.begin(subtransactions=True): + policy_db = L7policy(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + pool_id=p['pool_id'], +- prority=p['prority'], ++ priority=p['priority'], + action=p['action'], + key=p['key'], + value=p['value'], +- admin_state_up=v['admin_state_up']) ++ admin_state_up=p['admin_state_up']) + context.session.add(policy_db) + + return self._make_l7policy_dict(policy_db) +@@ -940,8 +940,8 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + policy = self._get_resource(context, L7policy, id) + return self._make_l7policy_dict(policy, fields) + +- def update_l7policy(self, context, id, policy): +- p = policy['l7policy'] ++ def update_l7policy(self, context, id, l7policy): ++ p = 
l7policy['l7policy'] + with context.session.begin(subtransactions=True): + db = self._get_resource(context, L7policy, id) + if p: +@@ -982,8 +982,8 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + + return self._fields(res, fields) + +- def create_l7rule(self, context, rule): +- r = rule['l7rule'] ++ def create_l7rule(self, context, l7rule): ++ r = l7rule['l7rule'] + + tenant_id = self._get_tenant_id_for_create(context, r) + with context.session.begin(subtransactions=True): +@@ -994,7 +994,7 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + compare_value=r['compare_value'], + key=r['key'], + value=r['value'], +- admin_state_up=v['admin_state_up']) ++ admin_state_up=r['admin_state_up']) + context.session.add(rule_db) + + return self._make_l7rule_dict(rule_db) +@@ -1003,11 +1003,11 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + rule = self._get_resource(context, L7rule, id) + return self._make_l7rule_dict(rule, fields) + +- def update_l7rule(self, context, id, rule): +- r = rule['l7rule'] ++ def update_l7rule(self, context, id, l7rule): ++ r = l7rule['l7rule'] + with context.session.begin(subtransactions=True): + db = self._get_resource(context, L7rule, id) +- if p: ++ if r: + db.update(r) + + return self._make_l7rule_dict(db) +@@ -1025,11 +1025,11 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + raise loadbalancer_l7.L7ruleInUse(l7rule_id=id) + context.session.delete(db) + +- def create_l7policy_l7rule(self, context, rule, l7policy_id): +- add_rule = rule['rule'] ++ def create_l7policy_l7rule(self, context, l7rule, l7policy_id): ++ add_rule = l7rule['l7rule'] + tenant_id = self._get_tenant_id_for_create(context, add_rule) + with context.session.begin(subtransactions=True): +- assoc_qry = context.session.query(L7policyRuleAssociation) ++ assoc_qry = context.session.query(L7policyL7ruleAssociation) + assoc = assoc_qry.filter_by(policy_id=l7policy_id, + rule_id=add_rule['id']).first() + if assoc: +@@ -1041,13 +1041,9 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + if l7policy['tenant_id'] != tenant_id: + raise n_exc.NotAuthorized() + +- assoc = L7policyRuleAssociation(policy_id=l7policy_id, +- rule_id=add_rule['id']) ++ assoc = L7policyL7ruleAssociation(policy_id=l7policy_id, ++ rule_id=add_rule['id']) + context.session.add(assoc) +- rules = [ +- policy_rule_assoc['rule_id'] +- for policy_rule_assoc in l7policy['policy_rule_assoc'] +- ] + + res = {'policy_id': l7policy_id, + 'rule_id': add_rule['id'], +@@ -1056,7 +1052,7 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + + def _get_l7policy_l7rule(self, context, id, policy_id): + try: +- assoc_qry = context.session.query(L7policyRuleAssociation) ++ assoc_qry = context.session.query(L7policyL7ruleAssociation) + return assoc_qry.filter_by(policy_id=policy_id, rule_id=id).one() + except exc.NoResultFound: + raise loadbalancer_l7.L7policyRuleAssociationNotFound( +@@ -1068,7 +1064,7 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + context.session.delete(assoc) + + def get_l7policy_l7rule(self, context, id, l7policy_id, fields=None): +- policy_rule = self._get_l7policy_l7rule(context, id, l7policy_id) ++ self._get_l7policy_l7rule(context, id, l7policy_id) + # need to add tenant_id for admin_or_owner policy check to pass + rule = self.get_l7rule(context, id) + res = {'policy_id': l7policy_id, +diff --git a/neutron/extensions/loadbalancer_l7.py b/neutron/extensions/loadbalancer_l7.py +index 57aa51202..f3d369a18 
100644 +--- a/neutron/extensions/loadbalancer_l7.py ++++ b/neutron/extensions/loadbalancer_l7.py +@@ -57,7 +57,7 @@ class L7ruleTypeKeyValueNotSupport(qexception.BadRequest): + + class L7ruleCompareTypeValueNotSupport(qexception.BadRequest): + message = _("L7rule compare_type %(l7rule_compare_type)s with " +- "compare value %(l7_rule_compare_value)s dose not support") ++ "compare value %(l7rule_compare_value)s dose not support") + + + class L7policyRuleAssociationExists(qexception.BadRequest): +@@ -80,15 +80,9 @@ RESOURCE_ATTRIBUTE_MAP = { + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True}, +- 'name': {'allow_post': True, 'allow_put': True, +- 'validate': {'type:string': None}, +- 'default': '', +- 'is_visible': True}, +- 'description': {'allow_post': True, 'allow_put': True, +- 'validate': {'type:string': None}, +- 'is_visible': True, 'default': ''}, + 'pool_id': {'allow_post': True, 'allow_put': True, +- 'validate': {'type:uuid': None}, ++ 'validate': {'type:uuid_or_none': None}, ++ 'default': None, + 'is_visible': True}, + 'priority': {'allow_post': True, 'allow_put': True, + 'validate': {'type:range': [0, 255]}, +@@ -108,6 +102,8 @@ RESOURCE_ATTRIBUTE_MAP = { + 'default': True, + 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, ++ 'rules': {'allow_post': False, 'allow_put': False, ++ 'is_visible': True}, + }, + 'l7rules': { + 'id': {'allow_post': False, 'allow_put': False, +@@ -246,11 +242,11 @@ class LoadbalancerL7Base(object): + """ + + @abc.abstractmethod +- def create_l7policy(self, context, policy): ++ def create_l7policy(self, context, l7policy): + pass + + @abc.abstractmethod +- def update_l7policy(self, context, id, policy): ++ def update_l7policy(self, context, id, l7policy): + pass + + @abc.abstractmethod +@@ -266,11 +262,11 @@ class LoadbalancerL7Base(object): + pass + + @abc.abstractmethod +- def create_l7rule(self, context, rule): ++ def create_l7rule(self, context, l7rule): + pass + + @abc.abstractmethod +- def update_l7rule(self, context, id, policy): ++ def update_l7rule(self, context, id, l7rule): + pass + + @abc.abstractmethod +@@ -286,11 +282,7 @@ class LoadbalancerL7Base(object): + pass + + @abc.abstractmethod +- def delete_l7rule(self, context, id): +- pass +- +- @abc.abstractmethod +- def create_l7policy_l7rule(self, context, rule, l7policy_id): ++ def create_l7policy_l7rule(self, context, l7rule, l7policy_id): + pass + + @abc.abstractmethod +diff --git a/neutron/services/loadbalancer/agent/agent_manager.py b/neutron/services/loadbalancer/agent/agent_manager.py +index e675cee14..3ae4aa07c 100644 +--- a/neutron/services/loadbalancer/agent/agent_manager.py ++++ b/neutron/services/loadbalancer/agent/agent_manager.py +@@ -345,9 +345,9 @@ class LbaasAgentManager(n_rpc.RpcCallback, periodic_task.PeriodicTasks): + driver = self._get_driver(l7policy['pool_id']) + driver.delete_l7policy(l7policy) + +- def update_l7rule(self, context, old_rule, rule, pool_id): ++ def update_l7rule(self, context, old_l7rule, l7rule, pool_id): + driver = self._get_driver(pool_id) +- driver.update_l7rule(old_rule, rule. 
pool_id) ++ driver.update_l7rule(old_l7rule, l7rule, pool_id) + + def create_l7policy_l7rule(self, context, l7policy): + driver = self._get_driver(l7policy['pool_id']) +diff --git a/neutron/services/loadbalancer/drivers/common/agent_driver_base.py b/neutron/services/loadbalancer/drivers/common/agent_driver_base.py +index d2453084d..5c24f30c3 100644 +--- a/neutron/services/loadbalancer/drivers/common/agent_driver_base.py ++++ b/neutron/services/loadbalancer/drivers/common/agent_driver_base.py +@@ -122,7 +122,7 @@ class LoadBalancerCallbacks(n_rpc.RpcCallback): + # policy and rules + retval['l7policies'] = [ + { +- 'policy': self.plugin._make_policy_dict(policy), ++ 'policy': self.plugin._make_l7policy_dict(policy), + 'rules': [ + self.plugin._make_l7rule_dict( + policy_rule_assoc.rule +@@ -540,7 +540,7 @@ class AgentDriverBase(abstract_driver.LoadBalancerAbstractDriver): + if ((old_policy['pool_id'] or policy['pool_id']) and + old_policy['pool_id'] == policy['pool_id']): + agent = self.get_pool_agent(context, policy['pool_id']) +- return self.agent_rpc.update_l7policy(context, policy, ++ return self.agent_rpc.update_l7policy(context, old_policy, policy, + agent['host']) + + # l7policy pool change, delete and create +diff --git a/neutron/services/loadbalancer/drivers/haproxy/cfg.py b/neutron/services/loadbalancer/drivers/haproxy/cfg.py +index a450a4217..0cee42c46 100644 +--- a/neutron/services/loadbalancer/drivers/haproxy/cfg.py ++++ b/neutron/services/loadbalancer/drivers/haproxy/cfg.py +@@ -47,7 +47,7 @@ STATS_MAP = { + } + + ACL_TYPE_MAP = { +- 'backServerId': 'be_id %(value)s', ++ 'backendServerId': 'srv_id', + } + + ACL_COMPARE_MAP = { +@@ -62,7 +62,7 @@ POLICY_ACTION_MAP = { + + ACTIVE_PENDING_STATUSES = qconstants.ACTIVE_PENDING_STATUSES + INACTIVE = qconstants.INACTIVE +-ACL_RULE_ID_LENGTH = 10 ++ACL_RULE_ID_LENGTH = 9 + ACL_RULE_NAME_LENGTH = 12 + + +@@ -148,13 +148,18 @@ def _get_acl_name(rule): + return ('acl_' + rule['id'])[:ACL_RULE_NAME_LENGTH] + + +-def _get_acl_member_id(rule): +- return int(('0x' + rule['value'])[:ACL_RULE_ID_LENGTH]) ++def _get_acl_member_id(id): ++ # Max id is 2**31 -1 ++ return int(('0x' + id)[:ACL_RULE_ID_LENGTH], base=16) ++ ++ ++def _update_backserver_value(rule): ++ rule['value'] = _get_acl_member_id(rule['value']) + + + def _build_acl(rule): + type_value_convert_map = { +- 'backServerId': _get_acl_member_id, ++ 'backendServerId': _update_backserver_value, + } + + acl_name = 'acl %s' % _get_acl_name(rule) +@@ -165,7 +170,7 @@ def _build_acl(rule): + acl_match = ACL_TYPE_MAP[rule['type']] % rule + acl_compare = ACL_COMPARE_MAP[rule['compare_type']] % rule + +- return ' '.jion([acl_name, acl_match, acl_compare]) ++ return ' '.join([acl_name, acl_match, acl_compare]) + + + def _build_policy_action(policy, rule): +@@ -179,13 +184,23 @@ def _build_policy_action(policy, rule): + return acl + + ++def _sort_policy_by_priority(policies): ++ def _cmp_policies(a, b): ++ return int(a['policy']['priority']) - int(b['policy']['priority']) ++ ++ policies.sort(cmp=_cmp_policies) ++ return policies ++ ++ + def _build_policy_and_acl(config): + opts = [] + need_add_server_id = False + policies = config['l7policies'] ++ policies = _sort_policy_by_priority(policies) ++ + for policy in policies: + for rule in policy['rules']: +- if rule['type'] == 'backServerId': ++ if rule['type'] == 'backendServerId': + need_add_server_id = True + + opts.append(_build_acl(rule)) +diff --git a/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py 
b/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py +index 2db456399..7a5b315f6 100644 +--- a/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py ++++ b/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py +@@ -427,7 +427,7 @@ class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver): + def delete_l7policy(self, l7policy): + self._refresh_device(l7policy['pool_id']) + +- def update_l7rule(self, old_rule, rule, pool_id): ++ def update_l7rule(self, old_l7rule, l7rule, pool_id): + self._refresh_device(pool_id) + + def create_l7policy_l7rule(self, l7policy): +diff --git a/neutron/services/loadbalancer/plugin.py b/neutron/services/loadbalancer/plugin.py +index 733930f0a..bc994e14a 100644 +--- a/neutron/services/loadbalancer/plugin.py ++++ b/neutron/services/loadbalancer/plugin.py +@@ -19,6 +19,7 @@ from neutron import context + from neutron.db.loadbalancer import loadbalancer_db as ldb + from neutron.db import servicetype_db as st_db + from neutron.extensions import loadbalancer ++from neutron.extensions import loadbalancer_l7 + from neutron.openstack.common import excutils + from neutron.openstack.common import log as logging + from neutron.plugins.common import constants +@@ -356,34 +357,34 @@ class LoadBalancerPlugin(ldb.LoadBalancerPluginDb, + # check by action + if not action_check_map.get(policy['action'])(policy): + raise loadbalancer_l7.L7policyActionKeyValueNotSupport( +- l7policy_action=p['action'], +- l7policy_key=p['key'], +- l7policy_value=p['value'] ++ l7policy_action=policy['action'], ++ l7policy_key=policy['key'], ++ l7policy_value=policy['value'] + ) + +- def create_l7policy(self, context, policy): +- p = policy['l7policy'] ++ def create_l7policy(self, context, l7policy): ++ p = l7policy['l7policy'] + # check policy action and key/value + self._check_policy_action_key_value(p) +- p = super(LoadBalancerPlugin, self).create_l7policy(context, policy) ++ p = super(LoadBalancerPlugin, self).create_l7policy(context, l7policy) + if p['pool_id']: + driver = self._get_driver_for_pool(context, p['pool_id']) + driver.create_l7policy(context, p, p['pool_id']) + return p + +- def update_l7policy(self, context, id, policy): ++ def update_l7policy(self, context, id, l7policy): + # TODO only allow update for same pool provider +- old_policy = self.get_l7policy(context, id) +- update_policy = super(LoadBalancerPlugin, self).update_l7policy( +- context, policy) +- if update_policy['pool_id'] or old_policy['pool_id']: ++ old_l7policy = self.get_l7policy(context, id) ++ update_l7policy = super(LoadBalancerPlugin, self).update_l7policy( ++ context, id, l7policy) ++ if update_l7policy['pool_id'] or old_l7policy['pool_id']: + driver = self._get_driver_for_pool(context, +- update_policy['pool_id']) +- driver.update_l7policy(context, old_policy, update_policy) +- return update_policy ++ update_l7policy['pool_id']) ++ driver.update_l7policy(context, old_l7policy, update_l7policy) ++ return update_l7policy + + def delete_l7policy(self, context, id): +- policy = self.get_l7policy(id) ++ policy = self.get_l7policy(context, id) + super(LoadBalancerPlugin, self).delete_l7policy(context, id) + if policy['pool_id']: + driver = self._get_driver_for_pool(context, policy['pool_id']) +@@ -429,26 +430,27 @@ class LoadBalancerPlugin(ldb.LoadBalancerPluginDb, + l7rule_compare_value=r['compare_value'] + ) + +- def create_l7rule(self, context, rule): +- r = rule['rule'] ++ def create_l7rule(self, context, l7rule): ++ r = l7rule['l7rule'] + 
self._check_rule_type_key_value(context, r) + self._check_rule_compare_type_and_value(r) +- return super(LoadBalancerPlugin, self).create_l7rule(context, rule) ++ return super(LoadBalancerPlugin, self).create_l7rule(context, l7rule) + +- def update_l7rule(self, context, id, rule): +- rule_res = self.get_rule(id) +- if 'compare_value' in rule: +- rule_res['compare_value'] = rule['compare_value'] ++ def update_l7rule(self, context, id, l7rule): ++ rule_res = self.get_l7rule(context, id) ++ if 'compare_value' in l7rule['l7rule']: ++ rule_res['compare_value'] = l7rule['l7rule']['compare_value'] + self._check_rule_compare_type_and_value(rule_res) + +- if 'value' in rule: +- rule_res['value'] = rule['value'] ++ if 'value' in l7rule['l7rule']: ++ rule_res['value'] = l7rule['l7rule']['value'] + self._check_rule_type_key_value(context, rule_res) +- res = super(LoadBalancerPlugin, self).update_l7rule(context, id, rule) ++ res = super(LoadBalancerPlugin, self).update_l7rule(context, id, ++ l7rule) + + with context.session.begin(subtransactions=True): + qry = context.session.query( +- ldb.L7policyRuleAssociation ++ ldb.L7policyL7ruleAssociation + ).filter_by(rule_id=id).join(ldb.L7policy) + for assoc in qry: + if assoc.policy['pool_id']: +@@ -458,9 +460,9 @@ class LoadBalancerPlugin(ldb.LoadBalancerPluginDb, + assoc.policy['pool_id']) + return res + +- def create_l7policy_l7rule(self, context, rule, l7policy_id): ++ def create_l7policy_l7rule(self, context, l7rule, l7policy_id): + res = super(LoadBalancerPlugin, self).create_l7policy_l7rule( +- context, rule, l7policy_id) ++ context, l7rule, l7policy_id) + + policy = self.get_l7policy(context, l7policy_id) + if policy['pool_id']: +@@ -473,5 +475,5 @@ class LoadBalancerPlugin(ldb.LoadBalancerPluginDb, + context, id, l7policy_id) + policy = self.get_l7policy(context, l7policy_id) + if policy['pool_id']: +- driver = self._get_driver_for_pool(policy['pool_id']) ++ driver = self._get_driver_for_pool(context, policy['pool_id']) + driver.delete_l7policy_l7rule(context, policy, policy['pool_id']) +-- +2.12.1 + diff --git a/packaging/openstack-neutron/0094-Enable-egress-qos-to-be-set-on-floatingip-ports.patch b/packaging/openstack-neutron/0094-Enable-egress-qos-to-be-set-on-floatingip-ports.patch new file mode 100644 index 0000000..29d107f --- /dev/null +++ b/packaging/openstack-neutron/0094-Enable-egress-qos-to-be-set-on-floatingip-ports.patch @@ -0,0 +1,88 @@ +From 77c4fda6aca7685c92e465a5d09e3730045cfc56 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Wed, 8 Mar 2017 18:27:44 +0800 +Subject: [PATCH 94/94] Enable egress qos to be set on floatingip ports + +Fixes: redmine #9642 + +Signed-off-by: Hunt Xu +--- + neutron/db/qos/qos_db.py | 34 ++++++++++++++++++++++++++-------- + 1 file changed, 26 insertions(+), 8 deletions(-) + +diff --git a/neutron/db/qos/qos_db.py b/neutron/db/qos/qos_db.py +index e0b6aac67..d3c463f5d 100644 +--- a/neutron/db/qos/qos_db.py ++++ b/neutron/db/qos/qos_db.py +@@ -217,10 +217,17 @@ class QosDb(ext_qos.QosPluginBase, base_db.CommonDbMixin): + try: + if target_type == 'port': + target = self._core_plugin._get_port(context, target_id) +- if not target['device_owner'].startswith('compute'): ++ device_owner = target['device_owner'] ++ valid_port_target = False ++ if device_owner == n_constants.DEVICE_OWNER_FLOATINGIP: ++ if qos_direction == 'egress': ++ valid_port_target = True ++ elif device_owner.startswith('compute'): ++ valid_port_target = True ++ if not valid_port_target: + raise ext_qos.QosInvalidPortType( + port_id=target_id, +- 
port_type=target['device_owner']) ++ port_type=device_owner) + ret['port_id'] = target_id + elif target_type == 'router': + target = self._l3_plugin._get_router(context, target_id) +@@ -660,6 +667,10 @@ class QosDb(ext_qos.QosPluginBase, base_db.CommonDbMixin): + + class QosPluginRpcDbMixin(object): + ++ @staticmethod ++ def _is_owner_floatingip(device_owner): ++ return device_owner == n_constants.DEVICE_OWNER_FLOATINGIP ++ + def _get_devices_for_qos(self, qos): + if qos.router: + if qos.direction == 'egress': +@@ -677,7 +688,10 @@ class QosPluginRpcDbMixin(object): + ] + elif qos.port: + ports = [qos.port_id] +- prefix = 'qvb' if qos.direction == 'egress' else 'qvo' ++ if self._is_owner_floatingip(qos.port.device_owner): ++ prefix = 'qg-' ++ else: ++ prefix = 'qvb' if qos.direction == 'egress' else 'qvo' + return [("%s%s" % (prefix, port))[:NIC_NAME_LEN] for port in ports] + + def _make_qos_filter_dict_for_agent(self, qos_filter): +@@ -805,16 +819,20 @@ class QosPluginRpcDbMixin(object): + namespace = None + if qos.router_id: + namespace = 'qrouter-' + qos.router_id +- if namespace not in qoss_on_host: +- qoss_on_host[namespace] = [] + elif qos.port_id: +- if '_root' not in qoss_on_host: +- qoss_on_host['_root'] = [] +- namespace = '_root' ++ if self._is_owner_floatingip(qos.port.device_owner): ++ fips = self._l3_plugin.get_floatingips( ++ context, filters={'port_id': qos.port_id}) ++ if fips: ++ namespace = 'qrouter-' + fips[0]['router_id'] ++ else: ++ namespace = '_root' + + if namespace: + qos_for_agent = self._get_qos_for_agent(context, qos) + if qos_for_agent: ++ if namespace not in qoss_on_host: ++ qoss_on_host[namespace] = [] + qoss_on_host[namespace].append(qos_for_agent) + + return qoss_on_host +-- +2.12.1 + diff --git a/packaging/openstack-neutron/0095-Add-extra-action-for-lb-session-persistence.patch b/packaging/openstack-neutron/0095-Add-extra-action-for-lb-session-persistence.patch new file mode 100644 index 0000000..5d048a6 --- /dev/null +++ b/packaging/openstack-neutron/0095-Add-extra-action-for-lb-session-persistence.patch @@ -0,0 +1,317 @@ +From 95816af3ae49569e438f680757722241a0b89230 Mon Sep 17 00:00:00 2001 +From: "cheng.tang" +Date: Wed, 29 Mar 2017 16:13:12 +0800 +Subject: [PATCH 95/97] Add extra-action for lb session persistence + +Fixes: redmine #9667 + +Signed-off-by: cheng.tang +Signed-off-by: Hunt Xu +--- + neutron/api/v2/attributes.py | 15 ++++++ + neutron/db/loadbalancer/loadbalancer_db.py | 15 ++++-- + .../7dc5a7c3d759_add_extra_action_for_lb_vip.py | 40 +++++++++++++++ + .../db/migration/alembic_migrations/versions/HEAD | 2 +- + neutron/extensions/loadbalancer.py | 5 +- + .../services/loadbalancer/drivers/haproxy/cfg.py | 59 ++++++++++++++++++++-- + 6 files changed, 127 insertions(+), 9 deletions(-) + create mode 100644 neutron/db/migration/alembic_migrations/versions/7dc5a7c3d759_add_extra_action_for_lb_vip.py + +diff --git a/neutron/api/v2/attributes.py b/neutron/api/v2/attributes.py +index 267b7a428..c4a255d3a 100644 +--- a/neutron/api/v2/attributes.py ++++ b/neutron/api/v2/attributes.py +@@ -19,6 +19,7 @@ import re + from neutron.common import constants + from neutron.common import exceptions as n_exc + from neutron.openstack.common import log as logging ++from neutron.openstack.common import jsonutils + from neutron.openstack.common import uuidutils + + +@@ -103,6 +104,19 @@ def _validate_string(data, max_len=None): + return msg + + ++def _validate_json_string(data, max_len=None): ++ if data is not None: ++ msg = _validate_string(data, max_len=max_len) 
++ if not msg: ++ # valid json ++ try: ++ jsonutils.loads(data) ++ except (ValueError, TypeError): ++ msg = (_("'%(data)s' is not an json string format") % ++ {'data': data}) ++ return msg ++ ++ + def _validate_boolean(data, valid_values=None): + try: + convert_to_boolean(data) +@@ -582,6 +596,7 @@ validators = {'type:dict': _validate_dict, + 'type:regex_or_none': _validate_regex_or_none, + 'type:string': _validate_string, + 'type:string_or_none': _validate_string_or_none, ++ 'type:json_string': _validate_json_string, + 'type:not_empty_string': _validate_not_empty_string, + 'type:not_empty_string_or_none': + _validate_not_empty_string_or_none, +diff --git a/neutron/db/loadbalancer/loadbalancer_db.py b/neutron/db/loadbalancer/loadbalancer_db.py +index c4addd0b5..061735b24 100644 +--- a/neutron/db/loadbalancer/loadbalancer_db.py ++++ b/neutron/db/loadbalancer/loadbalancer_db.py +@@ -49,6 +49,7 @@ class SessionPersistence(model_base.BASEV2): + name="sesssionpersistences_type"), + nullable=False) + cookie_name = sa.Column(sa.String(1024)) ++ extra_actions = sa.Column(sa.String(1024), nullable=True) + + + class PoolStatistics(model_base.BASEV2): +@@ -320,6 +321,9 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + + if vip['session_persistence']['type'] == 'APP_COOKIE': + s_p['cookie_name'] = vip['session_persistence']['cookie_name'] ++ # Make PEP8 happy ++ vip_session_persistence = vip['session_persistence'] ++ s_p['extra_actions'] = vip_session_persistence['extra_actions'] + + res['session_persistence'] = s_p + +@@ -335,9 +339,10 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + raise ValueError(_("'cookie_name' should be specified for this" + " type of session persistence.")) + else: +- if 'cookie_name' in info: +- raise ValueError(_("'cookie_name' is not allowed for this type" +- " of session persistence")) ++ if 'cookie_name' in info or 'extra_actions' in info: ++ raise ValueError(_("'cookie_name' or 'extra_actions' is not" ++ "allowed for this type" ++ "of session persistence")) + + def _create_session_persistence_db(self, session_info, vip_id): + self._check_session_persistence_info(session_info) +@@ -345,6 +350,7 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + sesspersist_db = SessionPersistence( + type=session_info['type'], + cookie_name=session_info.get('cookie_name'), ++ extra_actions=session_info.get('extra_actions'), + vip_id=vip_id) + return sesspersist_db + +@@ -362,6 +368,8 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + # an existing value in the database. 
+ if 'cookie_name' not in info: + info['cookie_name'] = None ++ if 'extra_actions' not in info: ++ info['extra_actions'] = None + + if sesspersist_db: + sesspersist_db.update(info) +@@ -369,6 +377,7 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + sesspersist_db = SessionPersistence( + type=info['type'], + cookie_name=info['cookie_name'], ++ extra_actions=info['extra_actions'], + vip_id=vip_id) + context.session.add(sesspersist_db) + # Update vip table +diff --git a/neutron/db/migration/alembic_migrations/versions/7dc5a7c3d759_add_extra_action_for_lb_vip.py b/neutron/db/migration/alembic_migrations/versions/7dc5a7c3d759_add_extra_action_for_lb_vip.py +new file mode 100644 +index 000000000..fdf786c0e +--- /dev/null ++++ b/neutron/db/migration/alembic_migrations/versions/7dc5a7c3d759_add_extra_action_for_lb_vip.py +@@ -0,0 +1,40 @@ ++# Copyright 2017 OpenStack Foundation ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++ ++"""add extra_action for lb vip ++ ++Revision ID: 7dc5a7c3d759 ++Revises: 222931b3859d ++Create Date: 2017-02-29 23:57:10.409817 ++ ++""" ++ ++# revision identifiers, used by Alembic. ++revision = '7dc5a7c3d759' ++down_revision = '222931b3859d' ++ ++from alembic import op ++import sqlalchemy as sa ++ ++ ++def upgrade(): ++ op.add_column( ++ 'sessionpersistences', ++ sa.Column('extra_actions', sa.String(1024), nullable=True) ++ ) ++ ++ ++def downgrade(): ++ op.drop_column('sessionpersistences', 'extra_actions') +diff --git a/neutron/db/migration/alembic_migrations/versions/HEAD b/neutron/db/migration/alembic_migrations/versions/HEAD +index 34e14c767..81eed9065 100644 +--- a/neutron/db/migration/alembic_migrations/versions/HEAD ++++ b/neutron/db/migration/alembic_migrations/versions/HEAD +@@ -1 +1 @@ +-222931b3859d ++7dc5a7c3d759 +diff --git a/neutron/extensions/loadbalancer.py b/neutron/extensions/loadbalancer.py +index 1e5796788..c8a8ff1f3 100644 +--- a/neutron/extensions/loadbalancer.py ++++ b/neutron/extensions/loadbalancer.py +@@ -157,7 +157,10 @@ RESOURCE_ATTRIBUTE_MAP = { + 'SOURCE_IP'], + 'required': True}, + 'cookie_name': {'type:string': None, +- 'required': False}}}, ++ 'required': False}, ++ 'extra_actions': { ++ 'type:json_string': None, ++ 'required': False}}}, + 'is_visible': True}, + 'connection_limit': {'allow_post': True, 'allow_put': True, + 'default': -1, +diff --git a/neutron/services/loadbalancer/drivers/haproxy/cfg.py b/neutron/services/loadbalancer/drivers/haproxy/cfg.py +index 0cee42c46..ca9899ce7 100644 +--- a/neutron/services/loadbalancer/drivers/haproxy/cfg.py ++++ b/neutron/services/loadbalancer/drivers/haproxy/cfg.py +@@ -17,6 +17,7 @@ import netaddr + from six import moves + + from neutron.agent.linux import utils ++from neutron.openstack.common import jsonutils + from neutron.plugins.common import constants as qconstants + from neutron.services.loadbalancer import constants + +@@ -208,6 +209,35 @@ def _build_policy_and_acl(config): + return need_add_server_id, opts + + ++def 
_build_extra_action_for_member(extra_action, member): ++ opts = [] ++ ++ # extra_action format: {'set_cookie_for_member': {'max_age': 15}} ++ member_cookie_params = extra_action.get('set_cookie_for_member') ++ if member_cookie_params and 'max_age' in member_cookie_params: ++ # build acl and policy ++ # set cookie for member acl and policy template ++ rule = { ++ 'id': member['id'], ++ 'value': member['id'], ++ 'type': 'backendServerId', ++ 'compare_type': 'integerEq', ++ 'compare_value': _get_acl_member_id(member['id']), ++ } ++ policy = { ++ 'action': 'addHeader', ++ 'value': ( ++ 'Set-Cookie: %(cookie_name)s=%(id)s; Max-Age=%(max_age)s' % ++ {'cookie_name': extra_action['cookie_name'], ++ 'id': member['id'], ++ 'max_age': member_cookie_params['max_age']}), ++ } ++ opts.append(_build_acl(rule)) ++ opts.append(_build_policy_action(policy, rule)) ++ ++ return opts ++ ++ + def _build_backend(config): + protocol = config['pool']['protocol'] + lb_method = config['pool']['lb_method'] +@@ -216,6 +246,7 @@ def _build_backend(config): + 'mode %s' % PROTOCOL_MAP[protocol], + 'balance %s' % BALANCE_MAP.get(lb_method, 'roundrobin') + ] ++ extra_opts = [] + + if protocol == constants.PROTOCOL_HTTP: + opts.append('option forwardfor') +@@ -225,7 +256,7 @@ def _build_backend(config): + opts.extend(health_opts) + + # add session persistence (if available) +- persist_opts = _get_session_persistence(config) ++ extra_action, persist_opts = _get_session_persistence(config) + opts.extend(persist_opts) + + # backup members need resort +@@ -235,6 +266,7 @@ def _build_backend(config): + opts.extend(policy_opts) + + # add the members ++ member_opts = [] + for member in config['members']: + if ((member['status'] in ACTIVE_PENDING_STATUSES or + member['status'] == INACTIVE) +@@ -244,12 +276,23 @@ def _build_backend(config): + if member['priority'] < 256: + server += ' backup' + ++ if extra_action: ++ extra_opts.extend( ++ _build_extra_action_for_member(extra_action, member) ++ ) ++ need_server_id = True ++ + if need_server_id: + server += ' id %d' % _get_acl_member_id(member['id']) + + if _has_http_cookie_persistence(config): + server += ' cookie %d' % config['members'].index(member) +- opts.append(server) ++ member_opts.append(server) ++ ++ # add extra action opts ++ opts.extend(extra_opts) ++ # add member opts ++ opts.extend(member_opts) + + return itertools.chain( + ['backend %s' % config['pool']['id']], +@@ -295,8 +338,10 @@ def _get_server_health_option(config): + + def _get_session_persistence(config): + persistence = config['vip'].get('session_persistence') ++ extra_action = {} ++ + if not persistence: +- return [] ++ return extra_action, [] + + opts = [] + if persistence['type'] == constants.SESSION_PERSISTENCE_SOURCE_IP: +@@ -310,7 +355,13 @@ def _get_session_persistence(config): + opts.append('appsession %s len 56 timeout 3h' % + persistence['cookie_name']) + +- return opts ++ # convert to dict if exists ++ if persistence.get('extra_actions'): ++ extra_action = jsonutils.loads(persistence.get('extra_actions')) ++ # push cookie_name to extra_action ++ extra_action['cookie_name'] = persistence.get('cookie_name') ++ ++ return extra_action, opts + + + def _has_http_cookie_persistence(config): +-- +2.12.2 + diff --git a/packaging/openstack-neutron/0096-iptables_firewall-minor-fix-for-_setup_metering_chai.patch b/packaging/openstack-neutron/0096-iptables_firewall-minor-fix-for-_setup_metering_chai.patch new file mode 100644 index 0000000..6eb2390 --- /dev/null +++ 
b/packaging/openstack-neutron/0096-iptables_firewall-minor-fix-for-_setup_metering_chai.patch @@ -0,0 +1,31 @@ +From 76ec0802797e21aadc4c3d730b99d87223c4c9e9 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Thu, 23 Mar 2017 20:14:48 +0800 +Subject: [PATCH 96/97] iptables_firewall: minor fix for _setup_metering_chains + +Fixes: redmine #9154 +Fixes: deaf40836 ("iptables_firewall: add firewall rules to meter instance +port stats") +Fixes: 9c324b301 ("iptables_firewall: use wrap chains and rules for metering) + +Signed-off-by: Hunt Xu +--- + neutron/agent/linux/iptables_firewall.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/neutron/agent/linux/iptables_firewall.py b/neutron/agent/linux/iptables_firewall.py +index c755cb99b..b0bfb208a 100644 +--- a/neutron/agent/linux/iptables_firewall.py ++++ b/neutron/agent/linux/iptables_firewall.py +@@ -181,7 +181,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver): + def _setup_metering_chains(self, port, direction, chain_name): + if not port['device_owner'].startswith('compute:'): + # Only meter instances' ports +- return '$' + chain_name ++ return chain_name + # Only support IPv4 + chains = self._metering_chain_names(port, direction) + for m_chain_name in chains: +-- +2.12.2 + diff --git a/packaging/openstack-neutron/0097-Validate-http_method-and-url_path-for-lbaas-health-m.patch b/packaging/openstack-neutron/0097-Validate-http_method-and-url_path-for-lbaas-health-m.patch new file mode 100644 index 0000000..cb0f7e6 --- /dev/null +++ b/packaging/openstack-neutron/0097-Validate-http_method-and-url_path-for-lbaas-health-m.patch @@ -0,0 +1,85 @@ +From fe606e05ab63cfa566022bdea108e7b88d1c58df Mon Sep 17 00:00:00 2001 +From: "cheng.tang" +Date: Wed, 19 Apr 2017 11:17:48 +0800 +Subject: [PATCH 97/97] Validate http_method and url_path for lbaas health + monitor + +Inspiration from https://review.openstack.org/#/c/270629/ + +Fixes: redmine #9861 + +Signed-off-by: cheng.tang +Signed-off-by: Hunt Xu +--- + neutron/extensions/loadbalancer.py | 7 +++++-- + neutron/services/loadbalancer/constants.py | 26 ++++++++++++++++++++++++++ + 2 files changed, 31 insertions(+), 2 deletions(-) + +diff --git a/neutron/extensions/loadbalancer.py b/neutron/extensions/loadbalancer.py +index 1e5796788..cbdf495ae 100644 +--- a/neutron/extensions/loadbalancer.py ++++ b/neutron/extensions/loadbalancer.py +@@ -26,6 +26,7 @@ from neutron.common import exceptions as qexception + from neutron import manager + from neutron.plugins.common import constants + from neutron.services import service_base ++from neutron.services.loadbalancer import constants as lb_const + + + # Loadbalancer Exceptions +@@ -283,11 +284,13 @@ RESOURCE_ATTRIBUTE_MAP = { + 'convert_to': attr.convert_to_int, + 'is_visible': True}, + 'http_method': {'allow_post': True, 'allow_put': True, +- 'validate': {'type:string': None}, ++ 'validate': {'type:values': ++ lb_const.SUPPORTED_HTTP_METHODS}, + 'default': 'GET', + 'is_visible': True}, + 'url_path': {'allow_post': True, 'allow_put': True, +- 'validate': {'type:string': None}, ++ 'validate': {'type:regex_or_none': ++ lb_const.SUPPORTED_URL_PATH}, + 'default': '/', + 'is_visible': True}, + 'expected_codes': {'allow_post': True, 'allow_put': True, +diff --git a/neutron/services/loadbalancer/constants.py b/neutron/services/loadbalancer/constants.py +index 0f834467b..5e414fd43 100644 +--- a/neutron/services/loadbalancer/constants.py ++++ b/neutron/services/loadbalancer/constants.py +@@ -26,6 +26,32 @@ HEALTH_MONITOR_TCP = 'TCP' + 
HEALTH_MONITOR_HTTP = 'HTTP' + HEALTH_MONITOR_HTTPS = 'HTTPS' + ++HTTP_METHOD_GET = 'GET' ++HTTP_METHOD_HEAD = 'HEAD' ++HTTP_METHOD_POST = 'POST' ++HTTP_METHOD_PUT = 'PUT' ++HTTP_METHOD_DELETE = 'DELETE' ++HTTP_METHOD_TRACE = 'TRACE' ++HTTP_METHOD_OPTIONS = 'OPTIONS' ++HTTP_METHOD_CONNECT = 'CONNECT' ++HTTP_METHOD_PATCH = 'PATCH' ++ ++SUPPORTED_HTTP_METHODS = (HTTP_METHOD_GET, HTTP_METHOD_HEAD, HTTP_METHOD_POST, ++ HTTP_METHOD_PUT, HTTP_METHOD_DELETE, ++ HTTP_METHOD_TRACE, HTTP_METHOD_OPTIONS, ++ HTTP_METHOD_CONNECT, HTTP_METHOD_PATCH) ++ ++# URL path regex according to RFC 3986 ++# Format: path = "/" *( "/" segment ) ++# segment = *pchar ++# pchar = unreserved / pct-encoded / sub-delims / ":" / "@" ++# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" ++# pct-encoded = "%" HEXDIG HEXDIG ++# sub-delims = "!" / "$" / "&" / "'" / "(" / ")" ++# / "*" / "+" / "," / ";" / "=" ++SUPPORTED_URL_PATH = ( ++ "^(/([a-zA-Z0-9-._~!$&\'()*+,;=:@]|(%[a-fA-F0-9]{2}))*)+$") ++ + SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP' + SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE' + SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE' +-- +2.12.2 + diff --git a/packaging/openstack-neutron/0098-Enable-ES-port-metering-on-all-sg-enabled-ports.patch b/packaging/openstack-neutron/0098-Enable-ES-port-metering-on-all-sg-enabled-ports.patch new file mode 100644 index 0000000..51e0700 --- /dev/null +++ b/packaging/openstack-neutron/0098-Enable-ES-port-metering-on-all-sg-enabled-ports.patch @@ -0,0 +1,29 @@ +From 9703f20001559049153be54bdae656d082e54661 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Fri, 28 Apr 2017 09:45:47 +0800 +Subject: [PATCH] Enable ES port metering on all sg-enabled ports + +Fixes: redmine #9968 + +Signed-off-by: Hunt Xu +--- + neutron/agent/linux/iptables_firewall.py | 3 --- + 1 file changed, 3 deletions(-) + +diff --git a/neutron/agent/linux/iptables_firewall.py b/neutron/agent/linux/iptables_firewall.py +index b0bfb208a..8297fdf14 100644 +--- a/neutron/agent/linux/iptables_firewall.py ++++ b/neutron/agent/linux/iptables_firewall.py +@@ -179,9 +179,6 @@ class IptablesFirewallDriver(firewall.FirewallDriver): + return port['device'] + + def _setup_metering_chains(self, port, direction, chain_name): +- if not port['device_owner'].startswith('compute:'): +- # Only meter instances' ports +- return chain_name + # Only support IPv4 + chains = self._metering_chain_names(port, direction) + for m_chain_name in chains: +-- +2.12.2 + diff --git a/packaging/openstack-neutron/0099-Configuration-option-for-whether-to-use-ES-port-mete.patch b/packaging/openstack-neutron/0099-Configuration-option-for-whether-to-use-ES-port-mete.patch new file mode 100644 index 0000000..792aadb --- /dev/null +++ b/packaging/openstack-neutron/0099-Configuration-option-for-whether-to-use-ES-port-mete.patch @@ -0,0 +1,46 @@ +From 211de1c34cc858394a9bc0a6ac77b1891bf86f86 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Fri, 28 Apr 2017 11:24:21 +0800 +Subject: [PATCH] Configuration option for whether to use ES port metering + +Fixes: redmine #9970 + +Signed-off-by: Hunt Xu +--- + neutron/agent/linux/iptables_firewall.py | 2 ++ + neutron/agent/securitygroups_rpc.py | 6 +++++- + 2 files changed, 7 insertions(+), 1 deletion(-) + +diff --git a/neutron/agent/linux/iptables_firewall.py b/neutron/agent/linux/iptables_firewall.py +index 8297fdf14..a6254d88d 100644 +--- a/neutron/agent/linux/iptables_firewall.py ++++ b/neutron/agent/linux/iptables_firewall.py +@@ -179,6 +179,8 @@ class IptablesFirewallDriver(firewall.FirewallDriver): + return 
port['device'] + + def _setup_metering_chains(self, port, direction, chain_name): ++ if not cfg.CONF.SECURITYGROUP.enable_es_port_metering: ++ return chain_name + # Only support IPv4 + chains = self._metering_chain_names(port, direction) + for m_chain_name in chains: +diff --git a/neutron/agent/securitygroups_rpc.py b/neutron/agent/securitygroups_rpc.py +index 892011ab2..53f35d54b 100644 +--- a/neutron/agent/securitygroups_rpc.py ++++ b/neutron/agent/securitygroups_rpc.py +@@ -48,7 +48,11 @@ security_group_opts = [ + '10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16', # RFC 1918 + '169.254.0.0/16' # RFC 3927 + ], +- help=_('IP addresses that should be recognized as private.')) ++ help=_('IP addresses that should be recognized as private.')), ++ cfg.BoolOpt( ++ 'enable_es_port_metering', ++ default=False, ++ help=_('Whether to enable extra iptables rules for port metering.')), + ] + cfg.CONF.register_opts(security_group_opts, 'SECURITYGROUP') + +-- +2.12.2 + diff --git a/packaging/openstack-neutron/0100-Fix-enable-update-l7policy-value-attribute.patch b/packaging/openstack-neutron/0100-Fix-enable-update-l7policy-value-attribute.patch new file mode 100644 index 0000000..fff68b8 --- /dev/null +++ b/packaging/openstack-neutron/0100-Fix-enable-update-l7policy-value-attribute.patch @@ -0,0 +1,29 @@ +From 8f351075ad1f143d6201fc5059a3cd10d2de73f9 Mon Sep 17 00:00:00 2001 +From: "cheng.tang" +Date: Wed, 3 May 2017 16:19:23 +0800 +Subject: [PATCH] Fix enable update l7policy value attribute + +Fixes: redmine #9989 + +Signed-off-by: cheng.tang +Signed-off-by: Hunt Xu +--- + neutron/extensions/loadbalancer_l7.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/neutron/extensions/loadbalancer_l7.py b/neutron/extensions/loadbalancer_l7.py +index f3d369a18..7f92c9d0b 100644 +--- a/neutron/extensions/loadbalancer_l7.py ++++ b/neutron/extensions/loadbalancer_l7.py +@@ -95,7 +95,7 @@ RESOURCE_ATTRIBUTE_MAP = { + 'key': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string_or_none': None}, + 'default': None, 'is_visible': True}, +- 'value': {'allow_post': True, 'allow_put': False, ++ 'value': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string_or_none': None}, + 'default': None, 'is_visible': True}, + 'admin_state_up': {'allow_post': True, 'allow_put': True, +-- +2.12.2 + diff --git a/packaging/openstack-neutron/0101-l3_db-update-GatewayInUseByFloatingIp-check.patch b/packaging/openstack-neutron/0101-l3_db-update-GatewayInUseByFloatingIp-check.patch new file mode 100644 index 0000000..878cafe --- /dev/null +++ b/packaging/openstack-neutron/0101-l3_db-update-GatewayInUseByFloatingIp-check.patch @@ -0,0 +1,45 @@ +From 7df501d0af9055a8904433f6a18015f3d9837102 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Wed, 3 May 2017 18:38:17 +0800 +Subject: [PATCH] l3_db: update GatewayInUseByFloatingIp check + +With EayunStack floatingip mechanism, floatingip no longer depend on +router gateway. + +As with EayunStack floatingip mechanism, the floatingip port will be set +up on hosts and thus its status will be ACTIVE. So we do the filter +using these ports' statuses to identify whether a router gateway is +actually in use by any floatingip. 
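For illustration (not part of the patch): the new check only treats a floating IP as depending on the router gateway when its floating port is still DOWN. A plain-Python sketch of that filter; the real code expresses it as an SQLAlchemy query joining FloatingIP to its floating port, and the names below are illustrative.

```python
# Plain-Python sketch of the new gateway-in-use check (illustrative names).

def gateway_blocked_by_fips(floatingips, ports, router_id):
    """Return True if removing the router gateway should be refused.

    Only floating IPs whose floating port is still DOWN depend on the
    router gateway; ports brought up on compute hosts by the ES
    floating-ip mechanism (status ACTIVE) do not.
    """
    port_status = {p['id']: p['status'] for p in ports}
    return any(
        fip['router_id'] == router_id and
        port_status.get(fip['floating_port_id']) == 'DOWN'
        for fip in floatingips
    )


fips = [{'router_id': 'r1', 'floating_port_id': 'p1'},
        {'router_id': 'r1', 'floating_port_id': 'p2'}]
ports = [{'id': 'p1', 'status': 'ACTIVE'},   # handled on a compute host
         {'id': 'p2', 'status': 'DOWN'}]     # still relies on the gateway
print(gateway_blocked_by_fips(fips, ports, 'r1'))   # True
```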
+ +Fixes: redmine #9990 + +Signed-off-by: Hunt Xu +--- + neutron/db/l3_db.py | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/neutron/db/l3_db.py b/neutron/db/l3_db.py +index f535ac90e..d5ccc1d91 100644 +--- a/neutron/db/l3_db.py ++++ b/neutron/db/l3_db.py +@@ -342,9 +342,14 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): + if not port_requires_deletion: + return + admin_ctx = context.elevated() +- +- if self.get_floatingips_count( +- admin_ctx, {'router_id': [router_id]}): ++ fip_qry = context.session.query(FloatingIP) ++ fip_qry = fip_qry.join( ++ models_v2.Port, ++ FloatingIP.floating_port_id == models_v2.Port.id) ++ fip_qry = fip_qry.filter( ++ models_v2.Port.status == l3_constants.PORT_STATUS_DOWN, ++ FloatingIP.router_id == router_id) ++ if fip_qry.all(): + raise l3.RouterExternalGatewayInUseByFloatingIp( + router_id=router_id, net_id=router.gw_port['network_id']) + with context.session.begin(subtransactions=True): +-- +2.12.2 + diff --git a/packaging/openstack-neutron/0102-Fix-fip-port-qos-namespace-selection-in-sync_qos.patch b/packaging/openstack-neutron/0102-Fix-fip-port-qos-namespace-selection-in-sync_qos.patch new file mode 100644 index 0000000..3061039 --- /dev/null +++ b/packaging/openstack-neutron/0102-Fix-fip-port-qos-namespace-selection-in-sync_qos.patch @@ -0,0 +1,44 @@ +From c62b21be60a80a57475323433aa4060ba1aad5d8 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Fri, 5 May 2017 18:07:51 +0800 +Subject: [PATCH] Fix fip port qos namespace selection in sync_qos + +Fixes: redmine #10008 + +Signed-off-by: Hunt Xu +--- + neutron/db/qos/qos_db.py | 11 +++++++---- + 1 file changed, 7 insertions(+), 4 deletions(-) + +diff --git a/neutron/db/qos/qos_db.py b/neutron/db/qos/qos_db.py +index d3c463f5d..b3a465ccb 100644 +--- a/neutron/db/qos/qos_db.py ++++ b/neutron/db/qos/qos_db.py +@@ -25,6 +25,7 @@ from neutron.db import common_db_mixin as base_db + from neutron.db import l3_agentschedulers_db as l3_agent_db + from neutron.db import l3_db + from neutron.extensions import agent as ext_agent ++from neutron.extensions import l3 as ext_l3 + from neutron.extensions import qos as ext_qos + from neutron.openstack.common import uuidutils + from neutron.openstack.common import log as logging +@@ -821,10 +822,12 @@ class QosPluginRpcDbMixin(object): + namespace = 'qrouter-' + qos.router_id + elif qos.port_id: + if self._is_owner_floatingip(qos.port.device_owner): +- fips = self._l3_plugin.get_floatingips( +- context, filters={'port_id': qos.port_id}) +- if fips: +- namespace = 'qrouter-' + fips[0]['router_id'] ++ try: ++ fip = self._l3_plugin.get_floatingip( ++ context, qos.port.device_id) ++ except ext_l3.FloatingIPNotFound: ++ continue ++ namespace = 'qrouter-' + fip['router_id'] + else: + namespace = '_root' + +-- +2.12.2 + diff --git a/packaging/openstack-neutron/0103-ES-fip-setup-ip-rule-for-floatingip-itself.patch b/packaging/openstack-neutron/0103-ES-fip-setup-ip-rule-for-floatingip-itself.patch new file mode 100644 index 0000000..4580376 --- /dev/null +++ b/packaging/openstack-neutron/0103-ES-fip-setup-ip-rule-for-floatingip-itself.patch @@ -0,0 +1,90 @@ +From 59faba8b196cb3d70033ad4f24ee565be7d369c4 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Wed, 3 May 2017 12:33:12 +0800 +Subject: [PATCH] ES fip: setup ip rule for floatingip itself + +Floatingip needs to respond to arp requests, thus a specific ip rule is +needed for rp_filter to be passed even though the router is not +connected to the external network. 
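For illustration (not part of the patch): each floating IP's fixed address already selected a per-fip routing table; this change adds a second ip rule so traffic sourced from the floating address itself uses the same table. A sketch of the resulting rules, assuming the table number is derived from the fixed IP's integer value as in the agent code; the helper names are illustrative, the agent drives them through ip_lib.IpRule.

```python
# Sketch of the ip rules programmed per floating IP (illustrative helpers).
import socket
import struct


def table_for(fixed_ip):
    # The agent derives the per-fip table number from the integer value of
    # the fixed IP (netaddr.IPNetwork(ip).value in the patched code).
    return struct.unpack('!I', socket.inet_aton(fixed_ip))[0]


def rules_for_fip(fixed_ip, floating_ip):
    table = table_for(fixed_ip)
    return [
        # As before: traffic sourced from the instance's fixed IP.
        'ip rule add from %s lookup %s' % (fixed_ip, table),
        # New: traffic sourced from the floating IP itself, so strict
        # rp_filter can find a route for it even when the router has no
        # external gateway route in the main table.
        'ip rule add from %s lookup %s' % (floating_ip, table),
    ]


for cmd in rules_for_fip('192.168.0.5', '172.24.4.10'):
    print(cmd)
```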
+ +Before this commit, we set rp_filter to 1. However that is insufficient +because when the router is not connected to external, rp_filter will +still fail. + +As a result of this change, we can now use RFC3074 strict mode reverse +path filtering because packets using the floatingip as its source will +now lookup the route table for that floatingip. This brings some +security improvements. Also, sending gratuitous arp packets upon +floatingip set up is no longer required. + +Fixes: redmine #9982 + +Signed-off-by: Hunt Xu +--- + neutron/agent/l3_agent.py | 22 ++++++++++++---------- + 1 file changed, 12 insertions(+), 10 deletions(-) + +diff --git a/neutron/agent/l3_agent.py b/neutron/agent/l3_agent.py +index 413dbbd51..86d87735e 100644 +--- a/neutron/agent/l3_agent.py ++++ b/neutron/agent/l3_agent.py +@@ -731,9 +731,9 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, + # interfaces on the same subnet + ip_wrapper.netns.execute(['sysctl', '-w', + 'net.ipv4.conf.default.arp_ignore=1']) +- # RFC3704 Loose Reverse Path ++ # RFC3704 Strict Reverse Path + ip_wrapper.netns.execute(['sysctl', '-w', +- 'net.ipv4.conf.default.rp_filter=2']) ++ 'net.ipv4.conf.default.rp_filter=1']) + + def _create_router_namespace(self, ri): + self._create_namespace(ri.ns_name) +@@ -1236,17 +1236,19 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, + + return fip_statuses + +- def _es_process_ip_rules(self, ri, fixed_addrs): ++ def _es_process_ip_rules(self, ri, fip_map): + ns_ipr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + existing_ips = ns_ipr.list_from_rules() ++ fixed_ips = fip_map.keys() ++ floating_ips = fip_map.values() + +- for ip in existing_ips - fixed_addrs: +- table = netaddr.IPNetwork(ip).value +- ns_ipr.delete_rule_from(ip, table) ++ for ip in existing_ips - set(fixed_ips + floating_ips): ++ ns_ipr.delete_rule_from(ip, None) + +- for ip in fixed_addrs - existing_ips: ++ for ip in set(fixed_ips) - existing_ips: + table = netaddr.IPNetwork(ip).value + ns_ipr.add_rule_from(ip, table) ++ ns_ipr.add_rule_from(fip_map[ip], table) + + def _es_add_floating_ip(self, ri, fip): + addr_added = False +@@ -1295,8 +1297,6 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, + device.route.add_gateway(ex_gateway, table=table) + # Don't touch the main table + device.route.delete_onlink_route(fip_subnet) +- self._send_gratuitous_arp_packet( +- ri.ns_name, interface_name, fip['floating_ip_address']) + + ri.es_fips_dict[fip['id']] = fip + +@@ -1325,7 +1325,9 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, + floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, []) + + self._es_process_ip_rules( +- ri, set([fip['fixed_ip_address'] for fip in floating_ips])) ++ ri, { ++ fip['fixed_ip_address']: fip['floating_ip_address'] ++ for fip in floating_ips}) + + for fip in floating_ips: + fip_statuses[fip['id']] = self._es_add_floating_ip(ri, fip) +-- +2.12.2 + diff --git a/packaging/openstack-neutron/0104-Fix-error-when-update-l7policy-with-pool_id-None.patch b/packaging/openstack-neutron/0104-Fix-error-when-update-l7policy-with-pool_id-None.patch new file mode 100644 index 0000000..5b50e68 --- /dev/null +++ b/packaging/openstack-neutron/0104-Fix-error-when-update-l7policy-with-pool_id-None.patch @@ -0,0 +1,34 @@ +From 16f54067865ce61bb42046597403514623d8e120 Mon Sep 17 00:00:00 2001 +From: "cheng.tang" +Date: Thu, 4 May 2017 18:18:07 +0800 +Subject: [PATCH] Fix error when update l7policy with pool_id None + +Fixes: redmine #9998 + +Signed-off-by: cheng.tang 
+Signed-off-by: Hunt Xu +--- + neutron/services/loadbalancer/plugin.py | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/neutron/services/loadbalancer/plugin.py b/neutron/services/loadbalancer/plugin.py +index bc994e14a..a6bdbfd2b 100644 +--- a/neutron/services/loadbalancer/plugin.py ++++ b/neutron/services/loadbalancer/plugin.py +@@ -377,9 +377,10 @@ class LoadBalancerPlugin(ldb.LoadBalancerPluginDb, + old_l7policy = self.get_l7policy(context, id) + update_l7policy = super(LoadBalancerPlugin, self).update_l7policy( + context, id, l7policy) +- if update_l7policy['pool_id'] or old_l7policy['pool_id']: +- driver = self._get_driver_for_pool(context, +- update_l7policy['pool_id']) ++ ++ pool_id = update_l7policy['pool_id'] or old_l7policy['pool_id'] ++ if pool_id: ++ driver = self._get_driver_for_pool(context, pool_id) + driver.update_l7policy(context, old_l7policy, update_l7policy) + return update_l7policy + +-- +2.12.2 + diff --git a/packaging/openstack-neutron/0105-es-metering-fix-port-selection-when-tcp_port-is-spec.patch b/packaging/openstack-neutron/0105-es-metering-fix-port-selection-when-tcp_port-is-spec.patch new file mode 100644 index 0000000..a561bea --- /dev/null +++ b/packaging/openstack-neutron/0105-es-metering-fix-port-selection-when-tcp_port-is-spec.patch @@ -0,0 +1,38 @@ +From 3281feb860dde54befa07f0aacf99c2190a819fe Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Thu, 11 May 2017 11:46:20 +0800 +Subject: [PATCH] es-metering: fix port selection when tcp_port is specified + +Fixes: redmine #10055 + +Signed-off-by: Hunt Xu +--- + neutron/services/metering/drivers/iptables/es_iptables_driver.py | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/neutron/services/metering/drivers/iptables/es_iptables_driver.py b/neutron/services/metering/drivers/iptables/es_iptables_driver.py +index 5854a13f7..9c2040053 100644 +--- a/neutron/services/metering/drivers/iptables/es_iptables_driver.py ++++ b/neutron/services/metering/drivers/iptables/es_iptables_driver.py +@@ -109,15 +109,17 @@ class EsIptablesMeteringDriver(iptables_driver.IptablesMeteringDriver): + if label['direction'] == 'ingress': + rule_parts += ['-m mark --mark %s' % ES_METERING_MARK] + rule_dir = '-d' ++ port_selector = '--dport' + else: + rule_parts += ['-o %s+' % iptables_driver.EXTERNAL_DEV_PREFIX] + rule_dir = '-s' ++ port_selector = '--sport' + + if label['internal_ip'] is not None: + rule_parts += ['%s %s' % (rule_dir, label['internal_ip'])] + + if label['tcp_port'] is not None: +- rule_parts += ['-p tcp --dport %s' % label['tcp_port']] ++ rule_parts += ['-p tcp %s %s' % (port_selector, label['tcp_port'])] + + rule_parts += ['-j %s' % label_chain] + +-- +2.13.0 + diff --git a/packaging/openstack-neutron/0106-Optimize-haproxy-driver-port_to_pool_id-dict.patch b/packaging/openstack-neutron/0106-Optimize-haproxy-driver-port_to_pool_id-dict.patch new file mode 100644 index 0000000..aaca8d1 --- /dev/null +++ b/packaging/openstack-neutron/0106-Optimize-haproxy-driver-port_to_pool_id-dict.patch @@ -0,0 +1,40 @@ +From 50fba6053e3a90d75e39c9828c3e4ff08b388a64 Mon Sep 17 00:00:00 2001 +From: "cheng.tang" +Date: Thu, 11 May 2017 10:55:12 +0800 +Subject: [PATCH] Optimize haproxy driver port_to_pool_id dict + +Fixes: redmine #10056 + +Signed-off-by: cheng.tang +Signed-off-by: Hunt Xu +--- + neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git 
a/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py b/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py +index 7a5b315f6..1d72b3093 100644 +--- a/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py ++++ b/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py +@@ -121,8 +121,8 @@ class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver): + # remember the pool<>port mapping + self.pool_to_port_id[pool_id] = logical_config['vip']['port']['id'] + if port_id not in self.port_to_pool_id: +- self.port_to_pool_id[port_id] = [] +- self.port_to_pool_id[port_id].append(pool_id) ++ self.port_to_pool_id[port_id] = set() ++ self.port_to_pool_id[port_id].add(pool_id) + + @n_utils.synchronized('haproxy-driver') + def undeploy_instance(self, pool_id, cleanup_namespace=False): +@@ -138,7 +138,7 @@ class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver): + namespace = get_ns_name(port_id) + ns = ip_lib.IPWrapper(self.root_helper, namespace) + +- self.port_to_pool_id[port_id].remove(pool_id) ++ self.port_to_pool_id[port_id].discard(pool_id) + if not self.port_to_pool_id[port_id]: + # last pool deteled + self._unplug(namespace, port_id) +-- +2.13.0 + diff --git a/packaging/openstack-neutron/0107-OpenFlow-EW-DVR-be-more-torelant-when-syncing-dvr-po.patch b/packaging/openstack-neutron/0107-OpenFlow-EW-DVR-be-more-torelant-when-syncing-dvr-po.patch new file mode 100644 index 0000000..9cdb917 --- /dev/null +++ b/packaging/openstack-neutron/0107-OpenFlow-EW-DVR-be-more-torelant-when-syncing-dvr-po.patch @@ -0,0 +1,35 @@ +From b6086598bba039777d503d28772b0184d6731004 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Fri, 12 May 2017 11:06:30 +0800 +Subject: [PATCH] OpenFlow EW DVR: be more torelant when syncing dvr ports + +TrivialFix +Related: redmine #9118 + +Signed-off-by: Hunt Xu +--- + neutron/plugins/openvswitch/agent/openflow_ew_dvr_agent.py | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +diff --git a/neutron/plugins/openvswitch/agent/openflow_ew_dvr_agent.py b/neutron/plugins/openvswitch/agent/openflow_ew_dvr_agent.py +index c1cab1dfd..1dfce3c15 100644 +--- a/neutron/plugins/openvswitch/agent/openflow_ew_dvr_agent.py ++++ b/neutron/plugins/openvswitch/agent/openflow_ew_dvr_agent.py +@@ -373,8 +373,12 @@ class OFEWDVRAgent(object): + + LOG.debug("Started OpenFlow EW DVR sync_dvr_ports()") + +- sync_dvrs = self.plugin_rpc.get_openflow_ew_dvrs( +- self.context, self.host) ++ try: ++ sync_dvrs = self.plugin_rpc.get_openflow_ew_dvrs( ++ self.context, self.host) ++ except Exception: ++ LOG.exception("Error syncing dvr ports") ++ return + LOG.debug("L2 Agent OF-EW DVR: Received response for " + "get_openflow_ew_dvrs() from plugin: %r", sync_dvrs) + +-- +2.13.0 + diff --git a/packaging/openstack-neutron/0108-Add-check-if-extra-actions-params-is-correct.patch b/packaging/openstack-neutron/0108-Add-check-if-extra-actions-params-is-correct.patch new file mode 100644 index 0000000..24d2ee1 --- /dev/null +++ b/packaging/openstack-neutron/0108-Add-check-if-extra-actions-params-is-correct.patch @@ -0,0 +1,99 @@ +From 9e5332c0dec70a02535c1b00e14777f877731a11 Mon Sep 17 00:00:00 2001 +From: "cheng.tang" +Date: Wed, 31 May 2017 17:30:42 +0800 +Subject: [PATCH] Add check if extra actions params is correct + +Fixes: redmine #10217 + +Signed-off-by: cheng.tang +Signed-off-by: Hunt Xu +--- + neutron/db/loadbalancer/loadbalancer_db.py | 32 ++++++++++++++++++++++++++++++ + neutron/extensions/loadbalancer.py | 14 +++++++++++++ + 2 files changed, 46 
insertions(+) + +diff --git a/neutron/db/loadbalancer/loadbalancer_db.py b/neutron/db/loadbalancer/loadbalancer_db.py +index 061735b24..1b2a368a6 100644 +--- a/neutron/db/loadbalancer/loadbalancer_db.py ++++ b/neutron/db/loadbalancer/loadbalancer_db.py +@@ -29,6 +29,7 @@ from neutron.extensions import loadbalancer + from neutron.extensions import loadbalancer_l7 + from neutron import manager + from neutron.openstack.common import excutils ++from neutron.openstack.common import jsonutils + from neutron.openstack.common import log as logging + from neutron.openstack.common import uuidutils + from neutron.plugins.common import constants +@@ -329,6 +330,35 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + + return self._fields(res, fields) + ++ def _check_extra_action_info(self, action_info): ++ action_info = jsonutils.loads(action_info) ++ if not isinstance(action_info, dict): ++ raise loadbalancer.ExtraActionsInvalid() ++ ++ # no operation ++ def _noop_and_warning(key, value): ++ LOG.warning(_("Not supported action key %(key)s " ++ "and value %(value)s."), ++ {'key': key, 'value': value}) ++ ++ def _check_max_age(key, value): ++ if not isinstance(value, dict): ++ raise loadbalancer.ExtraActionsSetCookieForMemberInvalid() ++ if 'max_age' in value: ++ try: ++ int(value.get('max_age')) ++ except ValueError: ++ raise loadbalancer.ExtraActionsMaxAgeInvalid( ++ max_age=value.get('max_age')) ++ else: ++ raise loadbalancer.ExtraActionsSetCookieForMemberInvalid() ++ ++ support_actions = { ++ 'set_cookie_for_member': _check_max_age ++ } ++ for k, v in action_info.iteritems(): ++ support_actions.get(k, _noop_and_warning)(k, v) ++ + def _check_session_persistence_info(self, info): + """Performs sanity check on session persistence info. + +@@ -338,6 +368,8 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + if not info.get('cookie_name'): + raise ValueError(_("'cookie_name' should be specified for this" + " type of session persistence.")) ++ if info.get('extra_actions'): ++ self._check_extra_action_info(info.get('extra_actions')) + else: + if 'cookie_name' in info or 'extra_actions' in info: + raise ValueError(_("'cookie_name' or 'extra_actions' is not" +diff --git a/neutron/extensions/loadbalancer.py b/neutron/extensions/loadbalancer.py +index 3c6ecb8ff..5f3589681 100644 +--- a/neutron/extensions/loadbalancer.py ++++ b/neutron/extensions/loadbalancer.py +@@ -46,6 +46,20 @@ class VipExists(qexception.NeutronException): + message = _("Another Vip already exists for pool %(pool_id)s") + + ++class ExtraActionsInvalid(qexception.BadRequest): ++ message = _("Extra action shoud be a JSON object") ++ ++ ++class ExtraActionsSetCookieForMemberInvalid(qexception.BadRequest): ++ message = _("Extra action set_cookie_for_member " ++ "shoud be a JSON object") ++ ++ ++class ExtraActionsMaxAgeInvalid(qexception.BadRequest): ++ message = _("Extra action set_cookie_for_member " ++ "with invalid max_age %(max_age)s") ++ ++ + class PoolNotFound(qexception.NotFound): + message = _("Pool %(pool_id)s could not be found") + +-- +2.13.0 + diff --git a/packaging/openstack-neutron/0109-Add-status_changed-notification-for-some-components.patch b/packaging/openstack-neutron/0109-Add-status_changed-notification-for-some-components.patch new file mode 100644 index 0000000..662778b --- /dev/null +++ b/packaging/openstack-neutron/0109-Add-status_changed-notification-for-some-components.patch @@ -0,0 +1,225 @@ +From 05799039082848c3bbae36e9e8b3e89536ec0ced Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: 
Thu, 25 May 2017 18:58:04 +0800 +Subject: [PATCH 109/118] Add status_changed notification for some components + +Supported components: + * Firewall + * Loadbalancer Pool + * Loadbalancer VIP + * Loadbalancer Member + * Loadbalancer Health Monitor + * VPN Service + * IPsec Site Connection + * Port used for PPTP VPN + +Fixes: redmine #10220 + +Signed-off-by: Hunt Xu +--- + neutron/db/loadbalancer/loadbalancer_db.py | 2 + + neutron/db/vpn/vpn_db.py | 4 + + neutron/notifiers/eayun.py | 93 ++++++++++++++++++++++ + neutron/services/firewall/fwaas_plugin.py | 2 + + .../drivers/common/agent_driver_base.py | 2 + + 5 files changed, 103 insertions(+) + create mode 100644 neutron/notifiers/eayun.py + +diff --git a/neutron/db/loadbalancer/loadbalancer_db.py b/neutron/db/loadbalancer/loadbalancer_db.py +index 061735b24..9598a1233 100644 +--- a/neutron/db/loadbalancer/loadbalancer_db.py ++++ b/neutron/db/loadbalancer/loadbalancer_db.py +@@ -28,6 +28,7 @@ from neutron.db import servicetype_db as st_db + from neutron.extensions import loadbalancer + from neutron.extensions import loadbalancer_l7 + from neutron import manager ++from neutron.notifiers.eayun import eayun_notify + from neutron.openstack.common import excutils + from neutron.openstack.common import jsonutils + from neutron.openstack.common import log as logging +@@ -243,6 +244,7 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + def _core_plugin(self): + return manager.NeutronManager.get_plugin() + ++ @eayun_notify('LB_MEMBER', Member) + def update_status(self, context, model, id, status, + status_description=None): + with context.session.begin(subtransactions=True): +diff --git a/neutron/db/vpn/vpn_db.py b/neutron/db/vpn/vpn_db.py +index d50d4bd6f..6034bbc3a 100644 +--- a/neutron/db/vpn/vpn_db.py ++++ b/neutron/db/vpn/vpn_db.py +@@ -30,6 +30,7 @@ from neutron.db import servicetype_db as st_db + from neutron.db.vpn import vpn_validator + from neutron.extensions import vpnaas + from neutron import manager ++from neutron.notifiers.eayun import eayun_notify + from neutron.openstack.common import excutils + from neutron.openstack.common import log as logging + from neutron.openstack.common import uuidutils +@@ -805,6 +806,7 @@ class VPNPluginRpcDbMixin(): + l3_agent_db.RouterL3AgentBinding.l3_agent_id == agent.id) + return query + ++ @eayun_notify(constants.VPN) + def update_status_by_agent(self, context, service_status_info_list): + """Updating vpnservice and vpnconnection status. 
+ +@@ -842,6 +844,7 @@ class VPNPluginRpcDbMixin(): + context, conn_id, conn['status'], + conn['updated_pending_status']) + ++ @eayun_notify('PPTP') + def set_vpnservice_status(self, context, vpnservice_id, status): + with context.session.begin(subtransactions=True): + try: +@@ -851,6 +854,7 @@ class VPNPluginRpcDbMixin(): + LOG.warn(_('vpnservice %s in db is already deleted'), + vpnservice_db['id']) + ++ @eayun_notify('PPTP_ports') + def update_pptp_status_by_agent( + self, context, host, + pptp_processes_status, credentials, updated_ports, +diff --git a/neutron/notifiers/eayun.py b/neutron/notifiers/eayun.py +new file mode 100644 +index 000000000..5c09c4774 +--- /dev/null ++++ b/neutron/notifiers/eayun.py +@@ -0,0 +1,93 @@ ++from neutron.common import rpc as n_rpc ++from neutron.common import constants as n_constants ++from neutron.plugins.common import constants ++ ++ ++class Notifier(object): ++ ++ def __init__(self): ++ self._notifier = n_rpc.get_notifier('eayun') ++ ++ def status_changed(self, context, resource, resource_id, status): ++ self._notifier.info( ++ context, resource + '.status.changed', ++ {resource: {'id': resource_id, 'status': status}}) ++ ++ ++_notifier = Notifier() ++ ++ ++def eayun_notify(service, obj_model=None): ++ def handle_func(func): ++ def handle_firewall( ++ fw_rpc_callback, context, firewall_id, status, **kwargs ++ ): ++ ret = func( ++ fw_rpc_callback, context, firewall_id, status, **kwargs) ++ _notifier.status_changed(context, 'firewall', firewall_id, status) ++ return ret ++ ++ def handle_ipsec_vpns( ++ vpn_plugin, context, service_status_info_list ++ ): ++ func(vpn_plugin, context, service_status_info_list) ++ for vpnservice in service_status_info_list: ++ _notifier.status_changed( ++ context, 'vpnservice', ++ vpnservice['id'], vpnservice['status']) ++ for conn_id, conn in vpnservice[ ++ 'ipsec_site_connections' ++ ].items(): ++ _notifier.status_changed( ++ context, 'ipsec_site_connection', ++ conn_id, conn['status']) ++ ++ def handle_loadbalancer( ++ lb_rpc_callback, context, obj_type, obj_id, status ++ ): ++ func(lb_rpc_callback, context, obj_type, obj_id, status) ++ if obj_type != 'member': ++ _notifier.status_changed(context, obj_type, obj_id, status) ++ ++ def handle_lb_member( ++ lb_plugin, context, model, obj_id, status, **kwargs ++ ): ++ func(lb_plugin, context, model, obj_id, status, **kwargs) ++ if issubclass(model, obj_model): ++ _notifier.status_changed(context, 'member', obj_id, status) ++ ++ def handle_pptp_vpn( ++ vpn_plugin, context, vpnservice_id, status ++ ): ++ func(vpn_plugin, context, vpnservice_id, status) ++ _notifier.status_changed( ++ context, 'vpnservice', vpnservice_id, status) ++ ++ def handle_pptp_ports( ++ vpn_plugin, context, host, pptp_processes_status, ++ credentials, updated_ports, provider ++ ): ++ func(vpn_plugin, context, host, pptp_processes_status, ++ credentials, updated_ports, provider) ++ for port_id, status in updated_ports.iteritems(): ++ port_status = n_constants.PORT_STATUS_DOWN ++ if status: ++ port_status = n_constants.PORT_STATUS_ACTIVE ++ _notifier.status_changed( ++ context, 'pptp_port', port_id, port_status) ++ ++ if service == constants.FIREWALL: ++ return handle_firewall ++ elif service == constants.VPN: ++ return handle_ipsec_vpns ++ elif service == constants.LOADBALANCER: ++ return handle_loadbalancer ++ elif service == 'LB_MEMBER': ++ return handle_lb_member ++ elif service == 'PPTP': ++ return handle_pptp_vpn ++ elif service == 'PPTP_ports': ++ return handle_pptp_ports ++ else: ++ raise 
NotImplementedError ++ return handle_func +diff --git a/neutron/services/firewall/fwaas_plugin.py b/neutron/services/firewall/fwaas_plugin.py +index 4d7aaf833..e99a46f20 100644 +--- a/neutron/services/firewall/fwaas_plugin.py ++++ b/neutron/services/firewall/fwaas_plugin.py +@@ -24,6 +24,7 @@ from neutron.db.firewall import firewall_db + from neutron.db.firewall import targetrouters_db + from neutron.extensions import firewall as fw_ext + from neutron.extensions.firewall_target_routers import FW_TARGET_ROUTERS ++from neutron.notifiers.eayun import eayun_notify + from neutron.openstack.common import log as logging + from neutron.plugins.common import constants as const + +@@ -38,6 +39,7 @@ class FirewallCallbacks(n_rpc.RpcCallback): + super(FirewallCallbacks, self).__init__() + self.plugin = plugin + ++ @eayun_notify(const.FIREWALL) + def set_firewall_status(self, context, firewall_id, status, **kwargs): + """Agent uses this to set a firewall's status.""" + LOG.debug(_("set_firewall_status() called")) +diff --git a/neutron/services/loadbalancer/drivers/common/agent_driver_base.py b/neutron/services/loadbalancer/drivers/common/agent_driver_base.py +index 5c24f30c3..dd1028eb9 100644 +--- a/neutron/services/loadbalancer/drivers/common/agent_driver_base.py ++++ b/neutron/services/loadbalancer/drivers/common/agent_driver_base.py +@@ -25,6 +25,7 @@ from neutron.db import agents_db + from neutron.db.loadbalancer import loadbalancer_db + from neutron.extensions import lbaas_agentscheduler + from neutron.extensions import portbindings ++from neutron.notifiers.eayun import eayun_notify + from neutron.openstack.common import importutils + from neutron.openstack.common import log as logging + from neutron.plugins.common import constants +@@ -158,6 +159,7 @@ class LoadBalancerCallbacks(n_rpc.RpcCallback): + if hm.status in constants.ACTIVE_PENDING_STATUSES: + hm.status = constants.ACTIVE + ++ @eayun_notify(constants.LOADBALANCER) + def update_status(self, context, obj_type, obj_id, status): + model_mapping = { + 'pool': loadbalancer_db.Pool, +-- +2.13.3 + diff --git a/packaging/openstack-neutron/0110-FWaaS-apply-firewall-rules-to-router-ingress-traffic.patch b/packaging/openstack-neutron/0110-FWaaS-apply-firewall-rules-to-router-ingress-traffic.patch new file mode 100644 index 0000000..35a0619 --- /dev/null +++ b/packaging/openstack-neutron/0110-FWaaS-apply-firewall-rules-to-router-ingress-traffic.patch @@ -0,0 +1,51 @@ +From b4e776a67ec88c72b18d91ef6680aa9809751c5e Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Tue, 6 Jun 2017 11:33:04 +0800 +Subject: [PATCH 110/118] FWaaS: apply firewall rules to router ingress traffic + +Fixes: redmine #10238 + +Signed-off-by: Hunt Xu +--- + neutron/services/firewall/drivers/linux/iptables_fwaas.py | 11 +++++++++++ + 1 file changed, 11 insertions(+) + +diff --git a/neutron/services/firewall/drivers/linux/iptables_fwaas.py b/neutron/services/firewall/drivers/linux/iptables_fwaas.py +index b7d3a67f1..18e5fec6f 100644 +--- a/neutron/services/firewall/drivers/linux/iptables_fwaas.py ++++ b/neutron/services/firewall/drivers/linux/iptables_fwaas.py +@@ -39,6 +39,7 @@ IP_VER_TAG = {IPV4: 'v4', + IPV6: 'v6'} + + INTERNAL_DEV_PREFIX = 'qr-' ++EXTERNAL_DEV_PREFIX = 'qg-' + SNAT_INT_DEV_PREFIX = 'sg-' + ROUTER_2_FIP_DEV_PREFIX = 'rfp-' + +@@ -265,6 +266,11 @@ class IptablesFwaasDriver(fwaas_base.FwaasDriverBase): + if_prefix, bname, chain_name)] + self._add_rules_to_chain(ipt_mgr, + ver, 'FORWARD', jump_rule) ++ if direction == INGRESS_DIRECTION: ++ jump_rule = ['-i %s+ -j 
%s-%s' % ( ++ EXTERNAL_DEV_PREFIX, bname, chain_name)] ++ self._add_rules_to_chain( ++ ipt_mgr, ver, 'INPUT', jump_rule) + + #jump to DROP_ALL policy + chain_name = iptables_manager.get_chain_name(FWAAS_DEFAULT_CHAIN) +@@ -278,6 +284,11 @@ class IptablesFwaasDriver(fwaas_base.FwaasDriverBase): + self._add_rules_to_chain(ipt_mgr, IPV4, 'FORWARD', jump_rule) + self._add_rules_to_chain(ipt_mgr, IPV6, 'FORWARD', jump_rule) + ++ jump_rule = [ ++ '-i %s+ -j %s-%s' % (EXTERNAL_DEV_PREFIX, bname, chain_name)] ++ self._add_rules_to_chain(ipt_mgr, IPV4, 'INPUT', jump_rule) ++ self._add_rules_to_chain(ipt_mgr, IPV6, 'INPUT', jump_rule) ++ + def _convert_fwaas_to_iptables_rule(self, rule): + action = rule.get('action') == 'allow' and 'ACCEPT' or 'DROP' + args = [self._protocol_arg(rule.get('protocol')), +-- +2.13.3 + diff --git a/packaging/openstack-neutron/0111-FWaaS-support-some-more-protocols-in-FW-rules.patch b/packaging/openstack-neutron/0111-FWaaS-support-some-more-protocols-in-FW-rules.patch new file mode 100644 index 0000000..acd1930 --- /dev/null +++ b/packaging/openstack-neutron/0111-FWaaS-support-some-more-protocols-in-FW-rules.patch @@ -0,0 +1,65 @@ +From d26e41e4c070e0dd687bd960aaae893959b62c5b Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Tue, 6 Jun 2017 16:59:29 +0800 +Subject: [PATCH 111/118] FWaaS: support some more protocols in FW rules + +* GRE (47) +* ESP (50) +* AH (51) +* SCTP (132) + +Fixes: redmine #10240 + +Signed-off-by: Hunt Xu +--- + neutron/extensions/firewall.py | 4 +++- + neutron/plugins/common/constants.py | 4 ++++ + neutron/services/firewall/drivers/linux/iptables_fwaas.py | 2 +- + 3 files changed, 8 insertions(+), 2 deletions(-) + +diff --git a/neutron/extensions/firewall.py b/neutron/extensions/firewall.py +index eb038147e..02ac56078 100644 +--- a/neutron/extensions/firewall.py ++++ b/neutron/extensions/firewall.py +@@ -151,7 +151,9 @@ class FirewallRuleConflict(qexception.Conflict): + "another tenant %(tenant_id)s") + + +-fw_valid_protocol_values = [None, constants.TCP, constants.UDP, constants.ICMP] ++fw_valid_protocol_values = [None, constants.TCP, constants.UDP, constants.ICMP ++ constants.SCTP, constants.GRE, ++ constants.ESP, constants.AH] + fw_valid_action_values = [constants.FWAAS_ALLOW, constants.FWAAS_DENY] + + +diff --git a/neutron/plugins/common/constants.py b/neutron/plugins/common/constants.py +index 5e435ace7..110addb4d 100644 +--- a/neutron/plugins/common/constants.py ++++ b/neutron/plugins/common/constants.py +@@ -72,6 +72,10 @@ FWAAS_DENY = "deny" + TCP = "tcp" + UDP = "udp" + ICMP = "icmp" ++SCTP = "sctp" ++GRE = "gre" ++ESP = "esp" ++AH = "ah" + + # Network Type constants + TYPE_FLAT = 'flat' +diff --git a/neutron/services/firewall/drivers/linux/iptables_fwaas.py b/neutron/services/firewall/drivers/linux/iptables_fwaas.py +index b7d3a67f1..bae0c7ef6 100644 +--- a/neutron/services/firewall/drivers/linux/iptables_fwaas.py ++++ b/neutron/services/firewall/drivers/linux/iptables_fwaas.py +@@ -311,7 +311,7 @@ class IptablesFwaasDriver(fwaas_base.FwaasDriverBase): + return '' + + def _port_arg(self, direction, protocol, port): +- if not (protocol in ['udp', 'tcp'] and port): ++ if not (protocol in ['udp', 'tcp', 'sctp'] and port): + return '' + return '--%s %s' % (direction, port) + +-- +2.13.3 + diff --git a/packaging/openstack-neutron/0112-Fix-firewall-port-range-compare-error.patch b/packaging/openstack-neutron/0112-Fix-firewall-port-range-compare-error.patch new file mode 100644 index 0000000..95e85e9 --- /dev/null +++ 
b/packaging/openstack-neutron/0112-Fix-firewall-port-range-compare-error.patch @@ -0,0 +1,30 @@ +From f64cbcf2ae1e8ad96f99203945b87c8505ab489e Mon Sep 17 00:00:00 2001 +From: "cheng.tang" +Date: Wed, 7 Jun 2017 16:33:25 +0800 +Subject: [PATCH 112/118] Fix firewall port range compare error + +Fixes: redmine #10246 + +Signed-off-by: cheng.tang +Signed-off-by: Hunt Xu +--- + neutron/extensions/firewall.py | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/neutron/extensions/firewall.py b/neutron/extensions/firewall.py +index 02ac56078..e6e13db42 100644 +--- a/neutron/extensions/firewall.py ++++ b/neutron/extensions/firewall.py +@@ -206,7 +206,8 @@ def _validate_port_range(data, key_specs=None): + msg = _("Invalid port '%s'") % p + LOG.debug(msg) + return msg +- if len(ports) > 2 or ports[0] > ports[-1]: ++ ++ if len(ports) > 2 or int(ports[0]) > int(ports[-1]): + msg = _("Invalid port range '%s'") % ports + return msg + +-- +2.13.3 + diff --git a/packaging/openstack-neutron/0113-metering-properly-do-data-filtering-within-some-APIs.patch b/packaging/openstack-neutron/0113-metering-properly-do-data-filtering-within-some-APIs.patch new file mode 100644 index 0000000..1dcd1cb --- /dev/null +++ b/packaging/openstack-neutron/0113-metering-properly-do-data-filtering-within-some-APIs.patch @@ -0,0 +1,74 @@ +From d22988095ec094f9981d05bb5801874e9a5c7709 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Wed, 14 Jun 2017 12:19:09 +0800 +Subject: [PATCH 113/118] metering: properly do data filtering within some APIs + +This prevents getting and sending unrelated data to metering agents. + +Fixes: redmine #10261 + +Signed-off-by: Hunt Xu +--- + neutron/db/metering/metering_db.py | 1 + + neutron/services/metering/metering_plugin.py | 11 +++++------ + 2 files changed, 6 insertions(+), 6 deletions(-) + +diff --git a/neutron/db/metering/metering_db.py b/neutron/db/metering/metering_db.py +index adef1af6b..94d5d3ddc 100644 +--- a/neutron/db/metering/metering_db.py ++++ b/neutron/db/metering/metering_db.py +@@ -188,6 +188,7 @@ class MeteringDbMixin(metering.MeteringPluginBase, + raise metering.MeteringLabelRuleNotFound(rule_id=rule_id) + + context.session.delete(rule) ++ return rule + + def _get_metering_rules_dict(self, metering_label): + rules = [] +diff --git a/neutron/services/metering/metering_plugin.py b/neutron/services/metering/metering_plugin.py +index 6abb29708..cd4c564db 100644 +--- a/neutron/services/metering/metering_plugin.py ++++ b/neutron/services/metering/metering_plugin.py +@@ -42,7 +42,7 @@ class MeteringPlugin(metering_db.MeteringDbMixin, + context, metering_label) + + data = metering_db.MeteringDbMixin.get_sync_data_metering( +- self, context) ++ self, context, label['id']) + self.meter_rpc.add_metering_label(context, data) + + return label +@@ -62,7 +62,7 @@ class MeteringPlugin(metering_db.MeteringDbMixin, + context, metering_label_rule) + + data = metering_db.MeteringDbMixin.get_sync_data_metering( +- self, context) ++ self, context, rule['metering_label_id']) + self.meter_rpc.update_metering_label_rules(context, data) + + return rule +@@ -71,18 +71,17 @@ class MeteringPlugin(metering_db.MeteringDbMixin, + rule = super(MeteringPlugin, self).delete_metering_label_rule( + context, rule_id) + ++ label_id = rule['metering_label_id'] + data = metering_db.MeteringDbMixin.get_sync_data_metering( +- self, context) ++ self, context, label_id) + self.meter_rpc.update_metering_label_rules(context, data) + +- return rule +- + def create_es_metering_label(self, context, 
es_metering_label): + label = super(MeteringPlugin, self).create_es_metering_label( + context, es_metering_label) + + data = es_metering_db.EsMeteringDbMixin.get_sync_data_metering( +- self, context) ++ self, context, label_id=label['id']) + self.meter_rpc.add_es_metering_label(context, data) + + return label +-- +2.13.3 + diff --git a/packaging/openstack-neutron/0114-Loadbalancer-enable-a-created-free-port-to-be-used-b.patch b/packaging/openstack-neutron/0114-Loadbalancer-enable-a-created-free-port-to-be-used-b.patch new file mode 100644 index 0000000..bc43252 --- /dev/null +++ b/packaging/openstack-neutron/0114-Loadbalancer-enable-a-created-free-port-to-be-used-b.patch @@ -0,0 +1,125 @@ +From 6182b24aef2a650a96e16b9fe35fb4fec0714384 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Fri, 16 Jun 2017 18:25:32 +0800 +Subject: [PATCH 114/118] Loadbalancer: enable a created free port to be used + by new VIP + +With this commit, when creating a new loadbalancer VIP, an exsiting free +Neutron port can be used as the VIP port. +(Port is specified using subnet_id and ip_address.) + +Fixes: redmine #10286 + +Signed-off-by: Hunt Xu +--- + neutron/db/loadbalancer/loadbalancer_db.py | 65 ++++++++++++++++-------------- + neutron/extensions/loadbalancer.py | 4 ++ + 2 files changed, 39 insertions(+), 30 deletions(-) + +diff --git a/neutron/db/loadbalancer/loadbalancer_db.py b/neutron/db/loadbalancer/loadbalancer_db.py +index 3decaef83..d21ae2c6d 100644 +--- a/neutron/db/loadbalancer/loadbalancer_db.py ++++ b/neutron/db/loadbalancer/loadbalancer_db.py +@@ -423,49 +423,54 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + sess_qry = context.session.query(SessionPersistence) + sess_qry.filter_by(vip_id=vip_id).delete() + +- def _vip_port_has_exist(self, context, vip_db, ip_addr): +- port_filter = {'fixed_ips': {'ip_address': [ip_addr]}} ++ def _get_vip_port(self, context, vip_db, fixed_ip): ++ port_filter = {'fixed_ips': {'ip_address': [fixed_ip['ip_address']], ++ 'subnet_id': [fixed_ip['subnet_id']]}} + + ports = self._core_plugin.get_ports(context, filters=port_filter) + if ports: +- # verify port id has exist in VIP +- vips = self.get_vips(context, +- filters={'port_id': [ports[0]['id']]}) +- if vips: +- # verify vip listen on different L4 port +- for vip in vips: +- if vip_db.protocol_port == vip['protocol_port']: +- raise loadbalancer.ProtocolPortInUse( +- proto_port=vip['protocol_port'], vip=vip['id']) +- return ports[0] ++ port = ports[0] ++ port_id = port['id'] ++ device_owner = port['device_owner'] ++ ++ # port is free or port is owned by neutron:LOADBALANCER ++ valid_device_owners = ['', 'neutron:' + constants.LOADBALANCER] ++ if device_owner not in valid_device_owners: ++ raise loadbalancer.PortNotOwnedByLB(port_id=port_id) ++ ++ # verify vip listen on different L4 port ++ vips = self.get_vips(context, filters={'port_id': [port_id]}) ++ for vip in vips: ++ if vip_db.protocol_port == vip['protocol_port']: ++ raise loadbalancer.ProtocolPortInUse( ++ proto_port=vip['protocol_port'], vip=vip['id']) ++ return port + return None + + def _create_port_for_vip(self, context, vip_db, subnet_id, ip_address): + # resolve subnet and create port + subnet = self._core_plugin.get_subnet(context, subnet_id) + fixed_ip = {'subnet_id': subnet['id']} +- need_create_port = True ++ port = None + + if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED: + fixed_ip['ip_address'] = ip_address +- # check if vip port has exist +- port = self._vip_port_has_exist(context, vip_db, ip_address) +- if 
port: +- need_create_port = False +- +- port_data = { +- 'tenant_id': vip_db.tenant_id, +- 'name': 'vip-' + vip_db.id, +- 'network_id': subnet['network_id'], +- 'mac_address': attributes.ATTR_NOT_SPECIFIED, +- 'admin_state_up': False, +- 'device_id': '', +- 'device_owner': '', +- 'fixed_ips': [fixed_ip] +- } +- +- if need_create_port: ++ # Use an existing port if no confliction ++ port = self._get_vip_port(context, vip_db, fixed_ip) ++ ++ if not port: ++ port_data = { ++ 'tenant_id': vip_db.tenant_id, ++ 'name': 'vip-' + vip_db.id, ++ 'network_id': subnet['network_id'], ++ 'mac_address': attributes.ATTR_NOT_SPECIFIED, ++ 'admin_state_up': False, ++ 'device_id': '', ++ 'device_owner': '', ++ 'fixed_ips': [fixed_ip] ++ } + port = self._core_plugin.create_port(context, {'port': port_data}) ++ + vip_db.port_id = port['id'] + # explicitly sync session with db + context.session.flush() +diff --git a/neutron/extensions/loadbalancer.py b/neutron/extensions/loadbalancer.py +index 5f3589681..cc170d266 100644 +--- a/neutron/extensions/loadbalancer.py ++++ b/neutron/extensions/loadbalancer.py +@@ -113,6 +113,10 @@ class ProtocolPortInUse(qexception.BadRequest): + message = _("VIP %(vip)s has bound to the protocol port %(proto_port)s") + + ++class PortNotOwnedByLB(qexception.BadRequest): ++ message = _("Port %(port_id)s not owned by LOADBALANCER.") ++ ++ + class PoolNotBoundToAgent(qexception.BadRequest): + message = _("Pool %(pool)s has not bound to agent %(agent)s") + +-- +2.13.3 + diff --git a/packaging/openstack-neutron/0115-Fix-exception-message-format-error.patch b/packaging/openstack-neutron/0115-Fix-exception-message-format-error.patch new file mode 100644 index 0000000..845dd59 --- /dev/null +++ b/packaging/openstack-neutron/0115-Fix-exception-message-format-error.patch @@ -0,0 +1,51 @@ +From ac5cd299fe4c8552ff9f92d669227813a0b2bcde Mon Sep 17 00:00:00 2001 +From: "cheng.tang" +Date: Mon, 26 Jun 2017 16:12:01 +0800 +Subject: [PATCH 115/118] Fix exception message format error + +Fixes: redmine #10312 +Signed-off-by: Hunt Xu +--- + neutron/extensions/loadbalancer_l7.py | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/neutron/extensions/loadbalancer_l7.py b/neutron/extensions/loadbalancer_l7.py +index 7f92c9d0b..92f5a77fa 100644 +--- a/neutron/extensions/loadbalancer_l7.py ++++ b/neutron/extensions/loadbalancer_l7.py +@@ -39,7 +39,7 @@ class L7policyInUse(qexception.BadRequest): + + class L7policyActionKeyValueNotSupport(qexception.BadRequest): + message = _("L7policy action %(l7policy_action)s with key %(l7policy_key)s" +- "and value %(l7policy_value)s does not support") ++ " and value %(l7policy_value)s does not support") + + + class L7ruleNotFound(qexception.NotFound): +@@ -51,7 +51,7 @@ class L7ruleInUse(qexception.NotFound): + + + class L7ruleTypeKeyValueNotSupport(qexception.BadRequest): +- message = _("L7rule type %(l7rule_type)s with key %(l7rule_key)s" ++ message = _("L7rule type %(l7rule_type)s with key %(l7rule_key)s " + "and value %(l7rule_value)s dose not support") + + +@@ -61,12 +61,12 @@ class L7ruleCompareTypeValueNotSupport(qexception.BadRequest): + + + class L7policyRuleAssociationExists(qexception.BadRequest): +- message = _("L7policy %(policy_id)s is already associated" ++ message = _("L7policy %(policy_id)s is already associated " + "with L7rule %(rule_id)s") + + + class L7policyRuleAssociationNotFound(qexception.NotFound): +- message = _("L7policy %(policy_id)s is not associated" ++ message = _("L7policy %(policy_id)s is not associated " + "with 
L7rule %(rule_id)s") + + +-- +2.13.3 + diff --git a/packaging/openstack-neutron/0116-Add-monitor-address-and-port-for-lbaas-member.patch b/packaging/openstack-neutron/0116-Add-monitor-address-and-port-for-lbaas-member.patch new file mode 100644 index 0000000..e8082ce --- /dev/null +++ b/packaging/openstack-neutron/0116-Add-monitor-address-and-port-for-lbaas-member.patch @@ -0,0 +1,152 @@ +From 35f487ba73d7bcb98e6d975ccbaae41537a43d54 Mon Sep 17 00:00:00 2001 +From: "cheng.tang" +Date: Thu, 4 May 2017 14:19:01 +0800 +Subject: [PATCH 116/118] Add monitor address and port for lbaas member + +This allow lbaas member health check different +IP and Port tuple + +Fixes: redmine #9977 + +Signed-off-by: cheng.tang +Signed-off-by: Hunt Xu +--- + neutron/db/loadbalancer/loadbalancer_db.py | 6 +++ + ...tor_address_and_port_column_for_lbaas_member.py | 45 ++++++++++++++++++++++ + .../db/migration/alembic_migrations/versions/HEAD | 2 +- + neutron/extensions/loadbalancer.py | 11 +++++- + .../services/loadbalancer/drivers/haproxy/cfg.py | 6 +++ + 5 files changed, 68 insertions(+), 2 deletions(-) + create mode 100644 neutron/db/migration/alembic_migrations/versions/0ffcc7f9a449_add_monitor_address_and_port_column_for_lbaas_member.py + +diff --git a/neutron/db/loadbalancer/loadbalancer_db.py b/neutron/db/loadbalancer/loadbalancer_db.py +index 061735b24..a96187b39 100644 +--- a/neutron/db/loadbalancer/loadbalancer_db.py ++++ b/neutron/db/loadbalancer/loadbalancer_db.py +@@ -108,6 +108,8 @@ class Member(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant, + weight = sa.Column(sa.Integer, nullable=False) + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + priority = sa.Column(sa.Integer, nullable=False, default=256) ++ monitor_address = sa.Column(sa.String(64), nullable=True) ++ monitor_port = sa.Column(sa.Integer, nullable=True) + + + class Pool(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant, +@@ -778,6 +780,8 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + 'weight': member['weight'], + 'admin_state_up': member['admin_state_up'], + 'priority': member['priority'], ++ 'monitor_address': member['monitor_address'], ++ 'monitor_port': member['monitor_port'], + 'status': member['status'], + 'status_description': member['status_description']} + +@@ -797,6 +801,8 @@ class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + address=v['address'], + protocol_port=v['protocol_port'], + weight=v['weight'], ++ monitor_address=v['monitor_address'], ++ monitor_port=v['monitor_port'], + admin_state_up=v['admin_state_up'], + status=constants.PENDING_CREATE) + if attributes.is_attr_set(v['priority']): +diff --git a/neutron/db/migration/alembic_migrations/versions/0ffcc7f9a449_add_monitor_address_and_port_column_for_lbaas_member.py b/neutron/db/migration/alembic_migrations/versions/0ffcc7f9a449_add_monitor_address_and_port_column_for_lbaas_member.py +new file mode 100644 +index 000000000..bb7197c57 +--- /dev/null ++++ b/neutron/db/migration/alembic_migrations/versions/0ffcc7f9a449_add_monitor_address_and_port_column_for_lbaas_member.py +@@ -0,0 +1,45 @@ ++# Copyright 2017 OpenStack Foundation ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. 
You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++ ++"""add priority column for lbaas member table ++ ++Revision ID: 0ffcc7f9a449 ++Revises: 7dc5a7c3d759 ++Create Date: 2017-05-11 23:57:10.409817 ++ ++""" ++ ++# revision identifiers, used by Alembic. ++revision = '0ffcc7f9a449' ++down_revision = '7dc5a7c3d759' ++ ++from alembic import op ++import sqlalchemy as sa ++ ++ ++def upgrade(): ++ op.add_column( ++ 'members', ++ sa.Column('monitor_address', sa.String(64), nullable=True) ++ ) ++ op.add_column( ++ 'members', ++ sa.Column('monitor_port', sa.Integer, nullable=True) ++ ) ++ ++ ++def downgrade(): ++ op.drop_column('members', 'monitor_address') ++ op.drop_column('members', 'monitor_port') +diff --git a/neutron/db/migration/alembic_migrations/versions/HEAD b/neutron/db/migration/alembic_migrations/versions/HEAD +index 81eed9065..a2117f3c1 100644 +--- a/neutron/db/migration/alembic_migrations/versions/HEAD ++++ b/neutron/db/migration/alembic_migrations/versions/HEAD +@@ -1 +1 @@ +-7dc5a7c3d759 ++0ffcc7f9a449 +diff --git a/neutron/extensions/loadbalancer.py b/neutron/extensions/loadbalancer.py +index 3c6ecb8ff..7db9841c6 100644 +--- a/neutron/extensions/loadbalancer.py ++++ b/neutron/extensions/loadbalancer.py +@@ -260,7 +260,16 @@ RESOURCE_ATTRIBUTE_MAP = { + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'status_description': {'allow_post': False, 'allow_put': False, +- 'is_visible': True} ++ 'is_visible': True}, ++ 'monitor_address': {'allow_post': True, 'allow_put': True, ++ 'validate': {'type:ip_address_or_none': None}, ++ 'default': None, ++ 'is_visible': True}, ++ 'monitor_port': {'allow_post': True, 'allow_put': True, ++ 'validate': {'type:range_or_none': [1, 65535]}, ++ 'default': None, ++ 'convert_to': attr.convert_to_int_if_not_none, ++ 'is_visible': True} + }, + 'health_monitors': { + 'id': {'allow_post': False, 'allow_put': False, +diff --git a/neutron/services/loadbalancer/drivers/haproxy/cfg.py b/neutron/services/loadbalancer/drivers/haproxy/cfg.py +index ca9899ce7..cecac69b8 100644 +--- a/neutron/services/loadbalancer/drivers/haproxy/cfg.py ++++ b/neutron/services/loadbalancer/drivers/haproxy/cfg.py +@@ -285,6 +285,12 @@ def _build_backend(config): + if need_server_id: + server += ' id %d' % _get_acl_member_id(member['id']) + ++ # add health check address and port opt ++ if member['monitor_address'] is not None: ++ server += ' addr %s' % member['monitor_address'] ++ if member['monitor_port'] is not None: ++ server += ' port %s' % member['monitor_port'] ++ + if _has_http_cookie_persistence(config): + server += ' cookie %d' % config['members'].index(member) + member_opts.append(server) +-- +2.13.3 + diff --git a/packaging/openstack-neutron/0117-Fix-exception-error-when-l7rule-delete.patch b/packaging/openstack-neutron/0117-Fix-exception-error-when-l7rule-delete.patch new file mode 100644 index 0000000..db61682 --- /dev/null +++ b/packaging/openstack-neutron/0117-Fix-exception-error-when-l7rule-delete.patch @@ -0,0 +1,29 @@ +From c9dc08807c7dd61fb9e6e3a143648307ab9cc851 Mon Sep 17 00:00:00 2001 +From: "cheng.tang" +Date: Thu, 29 Jun 2017 13:01:28 +0800 +Subject: [PATCH 
117/118] Fix exception error when l7rule delete + +Fixes: redmine #10380 + +Signed-off-by: cheng.tang +Signed-off-by: Hunt Xu +--- + neutron/extensions/loadbalancer_l7.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/neutron/extensions/loadbalancer_l7.py b/neutron/extensions/loadbalancer_l7.py +index 7f92c9d0b..d0a00fb67 100644 +--- a/neutron/extensions/loadbalancer_l7.py ++++ b/neutron/extensions/loadbalancer_l7.py +@@ -46,7 +46,7 @@ class L7ruleNotFound(qexception.NotFound): + message = _("L7rule %(l7rule_id)s could not be found") + + +-class L7ruleInUse(qexception.NotFound): ++class L7ruleInUse(qexception.InUse): + message = _("L7rule %(l7rule_id)s still in use") + + +-- +2.13.3 + diff --git a/packaging/openstack-neutron/0118-Port-don-t-check-max-fixed_ips-quota-for-dhcp-agent-.patch b/packaging/openstack-neutron/0118-Port-don-t-check-max-fixed_ips-quota-for-dhcp-agent-.patch new file mode 100644 index 0000000..22040c9 --- /dev/null +++ b/packaging/openstack-neutron/0118-Port-don-t-check-max-fixed_ips-quota-for-dhcp-agent-.patch @@ -0,0 +1,222 @@ +From 9c722b0cb6dc7e1a32729c55585ba388fec370d8 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Tue, 4 Jul 2017 14:03:37 +0800 +Subject: [PATCH 118/118] Port: don't check max fixed_ips quota for dhcp-agent + callbacks + +DHCP ports may have more fixed_ips than the normal quota value set by +max_fixed_ips_per_port. This commit skips the check when port is created +or updated by the dhcp-agent. + +Fixes: redmine #10437 + +Signed-off-by: Hunt Xu +--- + neutron/api/rpc/handlers/dhcp_rpc.py | 12 ++++++++---- + neutron/db/db_base_plugin_v2.py | 38 ++++++++++++++++++++++++------------ + neutron/plugins/ml2/plugin.py | 12 +++++++----- + 3 files changed, 40 insertions(+), 22 deletions(-) + +diff --git a/neutron/api/rpc/handlers/dhcp_rpc.py b/neutron/api/rpc/handlers/dhcp_rpc.py +index 56016be70..045233844 100644 +--- a/neutron/api/rpc/handlers/dhcp_rpc.py ++++ b/neutron/api/rpc/handlers/dhcp_rpc.py +@@ -58,9 +58,11 @@ class DhcpRpcCallback(n_rpc.RpcCallback): + """Perform port operations taking care of concurrency issues.""" + try: + if action == 'create_port': +- return plugin.create_port(context, port) ++ return plugin.create_port(context, port, ++ check_fixed_ips_amount=False) + elif action == 'update_port': +- return plugin.update_port(context, port['id'], port['port']) ++ return plugin.update_port(context, port['id'], port['port'], ++ check_fixed_ips_amount=False) + else: + msg = _('Unrecognized action') + raise n_exc.Invalid(message=msg) +@@ -172,7 +174,8 @@ class DhcpRpcCallback(n_rpc.RpcCallback): + [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids]) + + retval = plugin.update_port(context, port['id'], +- dict(port=port)) ++ dict(port=port), ++ check_fixed_ips_amount=False) + + except n_exc.NotFound as e: + LOG.warning(e) +@@ -247,7 +250,8 @@ class DhcpRpcCallback(n_rpc.RpcCallback): + if fixed_ips[i]['subnet_id'] == subnet_id: + del fixed_ips[i] + break +- plugin.update_port(context, port['id'], dict(port=port)) ++ plugin.update_port(context, port['id'], dict(port=port), ++ check_fixed_ips_amount=False) + + def update_lease_expiration(self, context, **kwargs): + """Release the fixed_ip associated the subnet on a port.""" +diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py +index 9582efed3..f213438cc 100644 +--- a/neutron/db/db_base_plugin_v2.py ++++ b/neutron/db/db_base_plugin_v2.py +@@ -394,7 +394,8 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, + return True + return 
False + +- def _test_fixed_ips_for_port(self, context, network_id, fixed_ips): ++ def _test_fixed_ips_for_port(self, context, network_id, fixed_ips, ++ check_fixed_ips_amount=True): + """Test fixed IPs for port. + + Check that configured subnets are valid prior to allocating any +@@ -454,7 +455,10 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, + 'ip_address': fixed['ip_address']}) + else: + fixed_ip_set.append({'subnet_id': subnet_id}) +- if len(fixed_ip_set) > cfg.CONF.max_fixed_ips_per_port: ++ if ( ++ check_fixed_ips_amount and ++ len(fixed_ip_set) > cfg.CONF.max_fixed_ips_per_port ++ ): + msg = _('Exceeded maximim amount of fixed ips per port') + raise n_exc.InvalidInput(error_message=msg) + return fixed_ip_set +@@ -480,14 +484,17 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, + return ips + + def _update_ips_for_port(self, context, network_id, port_id, original_ips, +- new_ips): ++ new_ips, check_fixed_ips_amount=True): + """Add or remove IPs from the port.""" + ips = [] + # These ips are still on the port and haven't been removed + prev_ips = [] + + # the new_ips contain all of the fixed_ips that are to be updated +- if len(new_ips) > cfg.CONF.max_fixed_ips_per_port: ++ if ( ++ check_fixed_ips_amount and ++ len(new_ips) > cfg.CONF.max_fixed_ips_per_port ++ ): + msg = _('Exceeded maximim amount of fixed ips per port') + raise n_exc.InvalidInput(error_message=msg) + +@@ -501,7 +508,9 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, + prev_ips.append(original_ip) + + # Check if the IP's to add are OK +- to_add = self._test_fixed_ips_for_port(context, network_id, new_ips) ++ to_add = self._test_fixed_ips_for_port( ++ context, network_id, new_ips, ++ check_fixed_ips_amount=check_fixed_ips_amount) + for ip in original_ips: + LOG.debug(_("Port update. Hold %s"), ip) + NeutronDbPluginV2._delete_ip_allocation(context, +@@ -514,7 +523,8 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, + ips = self._allocate_fixed_ips(context, to_add) + return ips, prev_ips + +- def _allocate_ips_for_port(self, context, port): ++ def _allocate_ips_for_port(self, context, port, ++ check_fixed_ips_amount=True): + """Allocate IP addresses for the port. 
+ + If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP +@@ -526,9 +536,9 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, + + fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED + if fixed_configured: +- configured_ips = self._test_fixed_ips_for_port(context, +- p["network_id"], +- p['fixed_ips']) ++ configured_ips = self._test_fixed_ips_for_port( ++ context, p["network_id"], p['fixed_ips'], ++ check_fixed_ips_amount=check_fixed_ips_amount) + ips = self._allocate_fixed_ips(context, configured_ips) + else: + filter = {'network_id': [p['network_id']]} +@@ -1290,7 +1300,7 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, + def create_port_bulk(self, context, ports): + return self._create_bulk('port', context, ports) + +- def create_port(self, context, port): ++ def create_port(self, context, port, check_fixed_ips_amount=True): + p = port['port'] + port_id = p.get('id') or uuidutils.generate_uuid() + network_id = p['network_id'] +@@ -1338,7 +1348,8 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, + context.session.add(db_port) + + # Update the IP's for the port +- ips = self._allocate_ips_for_port(context, port) ++ ips = self._allocate_ips_for_port( ++ context, port, check_fixed_ips_amount=check_fixed_ips_amount) + if ips: + for ip in ips: + ip_address = ip['ip_address'] +@@ -1348,7 +1359,7 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, + + return self._make_port_dict(db_port, process_extensions=False) + +- def update_port(self, context, id, port): ++ def update_port(self, context, id, port, check_fixed_ips_amount=True): + p = port['port'] + + changed_ips = False +@@ -1378,7 +1389,8 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, + original = self._make_port_dict(port, process_extensions=False) + added_ips, prev_ips = self._update_ips_for_port( + context, port["network_id"], id, original["fixed_ips"], +- p['fixed_ips']) ++ p['fixed_ips'], ++ check_fixed_ips_amount=check_fixed_ips_amount) + + # Update ips if necessary + for ip in added_ips: +diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py +index d1b411cde..9d37c871e 100644 +--- a/neutron/plugins/ml2/plugin.py ++++ b/neutron/plugins/ml2/plugin.py +@@ -778,7 +778,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, + # the fact that an error occurred. 
+ LOG.error(_("mechanism_manager.delete_subnet_postcommit failed")) +- def create_port(self, context, port): ++ def create_port(self, context, port, check_fixed_ips_amount=True): + attrs = port['port'] + attrs['status'] = const.PORT_STATUS_DOWN + +@@ -787,7 +787,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, []) +- result = super(Ml2Plugin, self).create_port(context, port) ++ result = super(Ml2Plugin, self).create_port( ++ context, port, check_fixed_ips_amount=check_fixed_ips_amount) + self.extension_manager.process_create_port(session, attrs, result) + self._process_port_create_security_group(context, result, sgids) + network = self.get_network(context, result['network_id']) +@@ -829,7 +830,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, + self.delete_port(context, result['id']) + return bound_context._port + +- def update_port(self, context, id, port): ++ def update_port(self, context, id, port, check_fixed_ips_amount=True): + attrs = port['port'] + need_port_update_notify = False + +@@ -845,8 +846,9 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, + if not port_db: + raise exc.PortNotFound(port_id=id) + original_port = self._make_port_dict(port_db) +- updated_port = super(Ml2Plugin, self).update_port(context, id, +- port) ++ updated_port = super(Ml2Plugin, self).update_port( ++ context, id, port, ++ check_fixed_ips_amount=check_fixed_ips_amount) + self.extension_manager.process_update_port(session, attrs, + original_port) + if addr_pair.ADDRESS_PAIRS in port['port']: +-- +2.13.3 + diff --git a/packaging/openstack-neutron/0119-EW-DVR-fix-issues-related-to-hosted-ports.patch b/packaging/openstack-neutron/0119-EW-DVR-fix-issues-related-to-hosted-ports.patch new file mode 100644 index 0000000..dc7439c --- /dev/null +++ b/packaging/openstack-neutron/0119-EW-DVR-fix-issues-related-to-hosted-ports.patch @@ -0,0 +1,52 @@ +From a35c490e92ae06c43aa7255de17005833db122bf Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Thu, 13 Jul 2017 13:50:53 +0800 +Subject: [PATCH] EW DVR: fix issues related to hosted ports + +Hosted ports may not be ready when configuring rules. With this commit, the +rules for such hosted ports are given a chance to be re-configured during +the next sync. + +This commit also fixes a minor issue when deleting OpenFlow rules of +removed hosted ports.
+ +Fixes: 56cf4bd93 ("openvswitch-agent: implement EW DVR using OpenFlow rules") +Fixes: redmine #10558 + +Signed-off-by: Hunt Xu +--- + neutron/plugins/openvswitch/agent/openflow_ew_dvr_agent.py | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/neutron/plugins/openvswitch/agent/openflow_ew_dvr_agent.py b/neutron/plugins/openvswitch/agent/openflow_ew_dvr_agent.py +index 1dfce3c15..87056a50a 100644 +--- a/neutron/plugins/openvswitch/agent/openflow_ew_dvr_agent.py ++++ b/neutron/plugins/openvswitch/agent/openflow_ew_dvr_agent.py +@@ -232,8 +232,14 @@ class OFEWDVRAgent(object): + dl_dst=gateway_mac, proto='ip', ip_dst=port['ip']) + + def _add_flows_to_hosted_port(self, port, gateway_mac): ++ if port['host'] != self.host: ++ LOG.debug("Port %s is not hosted by this agent.", port['name']) ++ return + port_ofno = self.int_br.get_port_ofport(port['name']) +- if port_ofno == INVALID_OFPORT or port['host'] != self.host: ++ if port_ofno == INVALID_OFPORT: ++ LOG.warning("Port %s is not ready.", port['name']) ++ # Setting port['host'] to None for next sync. ++ port['host'] = None + return + actions = "strip_vlan,mod_dl_src:%s,dec_ttl,output:%s" % ( + gateway_mac, port_ofno) +@@ -289,7 +295,7 @@ class OFEWDVRAgent(object): + + old_port = old_subnet['ports'].pop(port_id) + if self._port_moved_out_of_host(old_port, port): +- self._del_flows_to_hosted_port(port) ++ self._del_flows_to_hosted_port(old_port) + elif self._port_moved_into_host(old_port, port): + self._add_flows_to_hosted_port(port, gateway_mac) + +-- +2.13.3 + diff --git a/packaging/openstack-neutron/0120-Fix-syntax-error.patch b/packaging/openstack-neutron/0120-Fix-syntax-error.patch new file mode 100644 index 0000000..b2b8074 --- /dev/null +++ b/packaging/openstack-neutron/0120-Fix-syntax-error.patch @@ -0,0 +1,29 @@ +From 65a4fd2eb8c60e7466c0a996629097a616abfa7c Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Wed, 19 Jul 2017 15:37:54 +0800 +Subject: [PATCH] Fix syntax error + +Fixes: d26e41e4c ("FWaaS: support some more protocols in FW rules") +Fixes: redmine #10240 + +Signed-off-by: Hunt Xu +--- + neutron/extensions/firewall.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/neutron/extensions/firewall.py b/neutron/extensions/firewall.py +index e6e13db42..fa4ed86fc 100644 +--- a/neutron/extensions/firewall.py ++++ b/neutron/extensions/firewall.py +@@ -151,7 +151,7 @@ class FirewallRuleConflict(qexception.Conflict): + "another tenant %(tenant_id)s") + + +-fw_valid_protocol_values = [None, constants.TCP, constants.UDP, constants.ICMP ++fw_valid_protocol_values = [None, constants.TCP, constants.UDP, constants.ICMP, + constants.SCTP, constants.GRE, + constants.ESP, constants.AH] + fw_valid_action_values = [constants.FWAAS_ALLOW, constants.FWAAS_DENY] +-- +2.13.3 + diff --git a/packaging/openstack-neutron/0121-Switch-to-use-classmethod-in-eayun-notifier.patch b/packaging/openstack-neutron/0121-Switch-to-use-classmethod-in-eayun-notifier.patch new file mode 100644 index 0000000..8de9f6a --- /dev/null +++ b/packaging/openstack-neutron/0121-Switch-to-use-classmethod-in-eayun-notifier.patch @@ -0,0 +1,106 @@ +From f308d28b84523a963ecdef1b12e276afc69b6139 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Wed, 19 Jul 2017 17:06:53 +0800 +Subject: [PATCH] Switch to use classmethod in eayun notifier + +Or it will cause problems when running neutron-db-manage. 
+ +Fixes: 057990390("Add status_changed notification for some components") +Fixes: #10220 + +Signed-off-by: Hunt Xu +--- + neutron/notifiers/eayun.py | 28 +++++++++++++--------------- + 1 file changed, 13 insertions(+), 15 deletions(-) + +diff --git a/neutron/notifiers/eayun.py b/neutron/notifiers/eayun.py +index 5c09c4774..4f9f0ecb3 100644 +--- a/neutron/notifiers/eayun.py ++++ b/neutron/notifiers/eayun.py +@@ -4,19 +4,17 @@ from neutron.plugins.common import constants + + + class Notifier(object): ++ __notifier = None + +- def __init__(self): +- self._notifier = n_rpc.get_notifier('eayun') +- +- def status_changed(self, context, resource, resource_id, status): +- self._notifier.info( ++ @classmethod ++ def status_changed(cls, context, resource, resource_id, status): ++ if not cls.__notifier: ++ Notifier.__notifier = n_rpc.get_notifier('eayun') ++ Notifier.__notifier.info( + context, resource + '.status.changed', + {resource: {'id': resource_id, 'status': status}}) + + +-_notifier = Notifier() +- +- + def eayun_notify(service, obj_model=None): + def handle_func(func): + def handle_firewall( +@@ -24,7 +22,7 @@ def eayun_notify(service, obj_model=None): + ): + ret = func( + fw_rpc_callback, context, firewall_id, status, **kwargs) +- _notifier.status_changed(context, 'firewall', firewall_id, status) ++ Notifier.status_changed(context, 'firewall', firewall_id, status) + return ret + + def handle_ipsec_vpns( +@@ -32,13 +30,13 @@ def eayun_notify(service, obj_model=None): + ): + func(vpn_plugin, context, service_status_info_list) + for vpnservice in service_status_info_list: +- _notifier.status_changed( ++ Notifier.status_changed( + context, 'vpnservice', + vpnservice['id'], vpnservice['status']) + for conn_id, conn in vpnservice[ + 'ipsec_site_connections' + ].items(): +- _notifier.status_changed( ++ Notifier.status_changed( + context, 'ipsec_site_connection', + conn_id, conn['status']) + +@@ -47,20 +45,20 @@ def eayun_notify(service, obj_model=None): + ): + func(lb_rpc_callback, context, obj_type, obj_id, status) + if obj_type != 'member': +- _notifier.status_changed(context, obj_type, obj_id, status) ++ Notifier.status_changed(context, obj_type, obj_id, status) + + def handle_lb_member( + lb_plugin, context, model, obj_id, status, **kwargs + ): + func(lb_plugin, context, model, obj_id, status, **kwargs) + if issubclass(model, obj_model): +- _notifier.status_changed(context, 'member', obj_id, status) ++ Notifier.status_changed(context, 'member', obj_id, status) + + def handle_pptp_vpn( + vpn_plugin, context, vpnservice_id, status + ): + func(vpn_plugin, context, vpnservice_id, status) +- _notifier.status_changed( ++ Notifier.status_changed( + context, 'vpnservice', vpnservice_id, status) + + def handle_pptp_ports( +@@ -73,7 +71,7 @@ def eayun_notify(service, obj_model=None): + port_status = n_constants.PORT_STATUS_DOWN + if status: + port_status = n_constants.PORT_STATUS_ACTIVE +- _notifier.status_changed( ++ Notifier.status_changed( + context, 'pptp_port', port_id, port_status) + + if service == constants.FIREWALL: +-- +2.13.3 + diff --git a/packaging/openstack-neutron/0122-Porting-neutron-lbaas-certificates-manager-to-neutro.patch b/packaging/openstack-neutron/0122-Porting-neutron-lbaas-certificates-manager-to-neutro.patch new file mode 100644 index 0000000..09029ad --- /dev/null +++ b/packaging/openstack-neutron/0122-Porting-neutron-lbaas-certificates-manager-to-neutro.patch @@ -0,0 +1,1136 @@ +From af7f1d1df2190351e188c3b248e6356e3fad46f6 Mon Sep 17 00:00:00 2001 +From: "cheng.tang" 
+Date: Tue, 27 Jun 2017 15:47:42 +0800 +Subject: [PATCH] Porting neutron-lbaas certificates manager to neutron + +The cert_manager depend on barbicanclient packages. + +Fixes: redmine #10330 + +Signed-off-by: cheng.tang +Signed-off-by: Hunt Xu +--- + etc/neutron.conf | 41 ++++ + neutron/certificates/__init__.py | 0 + neutron/certificates/cert_manager/__init__.py | 43 +++++ + .../cert_manager/barbican_auth/__init__.py | 0 + .../cert_manager/barbican_auth/barbican_acl.py | 47 +++++ + .../cert_manager/barbican_auth/common.py | 28 +++ + .../cert_manager/barbican_cert_manager.py | 211 +++++++++++++++++++++ + neutron/certificates/cert_manager/cert_manager.py | 100 ++++++++++ + .../cert_manager/local_cert_manager.py | 200 +++++++++++++++++++ + neutron/certificates/exceptions.py | 37 ++++ + neutron/certificates/keystone.py | 122 ++++++++++++ + neutron/certificates/tls_utils/__init__.py | 0 + neutron/certificates/tls_utils/cert_parser.py | 177 +++++++++++++++++ + setup.cfg | 5 + + 14 files changed, 1011 insertions(+) + create mode 100644 neutron/certificates/__init__.py + create mode 100644 neutron/certificates/cert_manager/__init__.py + create mode 100644 neutron/certificates/cert_manager/barbican_auth/__init__.py + create mode 100644 neutron/certificates/cert_manager/barbican_auth/barbican_acl.py + create mode 100644 neutron/certificates/cert_manager/barbican_auth/common.py + create mode 100644 neutron/certificates/cert_manager/barbican_cert_manager.py + create mode 100644 neutron/certificates/cert_manager/cert_manager.py + create mode 100644 neutron/certificates/cert_manager/local_cert_manager.py + create mode 100644 neutron/certificates/exceptions.py + create mode 100644 neutron/certificates/keystone.py + create mode 100644 neutron/certificates/tls_utils/__init__.py + create mode 100644 neutron/certificates/tls_utils/cert_parser.py + +diff --git a/etc/neutron.conf b/etc/neutron.conf +index 40b91079e..8d22738b5 100644 +--- a/etc/neutron.conf ++++ b/etc/neutron.conf +@@ -641,3 +641,44 @@ service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVP + #service_provider = LOADBALANCER:A10Networks:neutron.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default + # Uncomment the following line to test the LBaaS v2 API _WITHOUT_ a real backend + # service_provider = LOADBALANCER:LoggingNoop:neutron.services.loadbalancer.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default ++ ++[certificates] ++# Certificate manager plugin, default to barbican ++# cert_manager_type = barbican ++ ++# Name of the barbican authentication method to use ++# barbican_auth = barbican_acl_auth ++ ++[service_auth] ++# Authentication endpoint ++# auth_url = ++ ++# The service auth username ++# admin_user = admin ++ ++# The service admin tenant name ++# admin_tenant_name = admin ++ ++# The service admin password ++# admin_password = passsword ++ ++# The admin user domain name ++# admin_user_domain = admin ++ ++# The admin project domain name ++# admin_project_domain = admin ++ ++# The deployment region ++# region = RegionOne ++ ++# The name of the service ++# service_name = lbaas ++ ++# The auth version used to authenticate ++# auth_version = 3 ++ ++# The endpoint_type to be used ++# endpoint_type = public ++ ++# Disable server certificate verification ++# insecure = False +diff --git a/neutron/certificates/__init__.py b/neutron/certificates/__init__.py +new file mode 100644 +index 000000000..e69de29bb +diff --git a/neutron/certificates/cert_manager/__init__.py 
b/neutron/certificates/cert_manager/__init__.py +new file mode 100644 +index 000000000..0a5ee039d +--- /dev/null ++++ b/neutron/certificates/cert_manager/__init__.py +@@ -0,0 +1,43 @@ ++# Copyright 2015 Rackspace US, Inc. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++ ++from oslo.config import cfg ++from stevedore import driver ++ ++CONF = cfg.CONF ++ ++CERT_MANAGER_DEFAULT = 'barbican' ++ ++cert_manager_opts = [ ++ cfg.StrOpt('cert_manager_type', ++ default=CERT_MANAGER_DEFAULT, ++ help='Certificate Manager plugin. ' ++ 'Defaults to {0}.'.format(CERT_MANAGER_DEFAULT)), ++ cfg.StrOpt('barbican_auth', ++ default='barbican_acl_auth', ++ help='Name of the Barbican authentication method to use') ++] ++ ++CONF.register_opts(cert_manager_opts, group='certificates') ++ ++_CERT_MANAGER_PLUGIN = None ++ ++ ++def get_backend(): ++ global _CERT_MANAGER_PLUGIN ++ if not _CERT_MANAGER_PLUGIN: ++ _CERT_MANAGER_PLUGIN = driver.DriverManager( ++ "neutron.cert_manager.backend", ++ cfg.CONF.certificates.cert_manager_type).driver ++ return _CERT_MANAGER_PLUGIN +diff --git a/neutron/certificates/cert_manager/barbican_auth/__init__.py b/neutron/certificates/cert_manager/barbican_auth/__init__.py +new file mode 100644 +index 000000000..e69de29bb +diff --git a/neutron/certificates/cert_manager/barbican_auth/barbican_acl.py b/neutron/certificates/cert_manager/barbican_auth/barbican_acl.py +new file mode 100644 +index 000000000..51ca310e5 +--- /dev/null ++++ b/neutron/certificates/cert_manager/barbican_auth/barbican_acl.py +@@ -0,0 +1,47 @@ ++# Copyright (c) 2014-2016 Rackspace US, Inc ++# All Rights Reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. 
++ ++""" ++Barbican ACL auth class for Barbican certificate handling ++""" ++from barbicanclient import client as barbican_client ++from oslo.config import cfg ++from oslo.utils import excutils ++ ++from neutron.certificates.cert_manager.barbican_auth import common ++from neutron.certificates import keystone ++from neutron.openstack.common import log as logging ++ ++LOG = logging.getLogger(__name__) ++ ++CONF = cfg.CONF ++ ++ ++class BarbicanACLAuth(common.BarbicanAuth): ++ _barbican_client = None ++ ++ @classmethod ++ def get_barbican_client(cls, project_id=None): ++ if not cls._barbican_client: ++ try: ++ cls._barbican_client = barbican_client.Client( ++ session=keystone.get_session(), ++ region_name=CONF.service_auth.region, ++ interface=CONF.service_auth.endpoint_type ++ ) ++ except Exception: ++ with excutils.save_and_reraise_exception(): ++ LOG.exception("Error creating Barbican client") ++ return cls._barbican_client +diff --git a/neutron/certificates/cert_manager/barbican_auth/common.py b/neutron/certificates/cert_manager/barbican_auth/common.py +new file mode 100644 +index 000000000..45c687e35 +--- /dev/null ++++ b/neutron/certificates/cert_manager/barbican_auth/common.py +@@ -0,0 +1,28 @@ ++# Copyright 2014-2016 Rackspace US, Inc. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++import abc ++ ++import six ++ ++ ++@six.add_metaclass(abc.ABCMeta) ++class BarbicanAuth(object): ++ @abc.abstractmethod ++ def get_barbican_client(self, project_id): ++ """Creates a Barbican client object. ++ ++ :param project_id: Project ID that the request will be used for ++ :return: a Barbican Client object ++ :raises Exception: if the client cannot be created ++ """ +diff --git a/neutron/certificates/cert_manager/barbican_cert_manager.py b/neutron/certificates/cert_manager/barbican_cert_manager.py +new file mode 100644 +index 000000000..2dff3a78e +--- /dev/null ++++ b/neutron/certificates/cert_manager/barbican_cert_manager.py +@@ -0,0 +1,211 @@ ++# Copyright 2014, 2015 Rackspace US, Inc. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. 
++ ++from barbicanclient import client as barbican_client ++from oslo.config import cfg ++from oslo.utils import excutils ++from stevedore import driver as stevedore_driver ++ ++from neutron.certificates.cert_manager import cert_manager ++from neutron.openstack.common import log as logging ++ ++LOG = logging.getLogger(__name__) ++ ++CONF = cfg.CONF ++ ++ ++class Cert(cert_manager.Cert): ++ """Representation of a Cert based on the Barbican CertificateContainer.""" ++ ++ def __init__(self, cert_container): ++ if not isinstance(cert_container, ++ barbican_client.containers.CertificateContainer): ++ raise TypeError(_( ++ "Retrieved Barbican Container is not of the correct type " ++ "(certificate).")) ++ self._cert_container = cert_container ++ ++ # Container secrets are accessed upon query and can return as None, ++ # don't return the payload if the secret is not available. ++ ++ def get_certificate(self): ++ if self._cert_container.certificate: ++ return self._cert_container.certificate.payload ++ ++ def get_intermediates(self): ++ if self._cert_container.intermediates: ++ return self._cert_container.intermediates.payload ++ ++ def get_private_key(self): ++ if self._cert_container.private_key: ++ return self._cert_container.private_key.payload ++ ++ def get_private_key_passphrase(self): ++ if self._cert_container.private_key_passphrase: ++ return self._cert_container.private_key_passphrase.payload ++ ++ ++class CertManager(cert_manager.CertManager): ++ """Certificate Manager that wraps the Barbican client API.""" ++ ++ def __init__(self): ++ super(CertManager, self).__init__() ++ self.auth = stevedore_driver.DriverManager( ++ namespace='neutron.cert_manager.barbican_auth', ++ name=cfg.CONF.certificates.barbican_auth, ++ invoke_on_load=True, ++ ).driver ++ ++ def store_cert(self, project_id, certificate, private_key, ++ intermediates=None, private_key_passphrase=None, ++ expiration=None, name='LBaaS TLS Cert'): ++ """Stores a certificate in the certificate manager. ++ ++ :param certificate: PEM encoded TLS certificate ++ :param private_key: private key for the supplied certificate ++ :param intermediates: ordered and concatenated intermediate certs ++ :param private_key_passphrase: optional passphrase for the supplied key ++ :param expiration: the expiration time of the cert in ISO 8601 format ++ :param name: a friendly name for the cert ++ ++ :returns: the container_ref of the stored cert ++ :raises Exception: if certificate storage fails ++ """ ++ ++ connection = self.auth.get_barbican_client(project_id) ++ ++ LOG.info(( ++ "Storing certificate container '{0}' in Barbican." 
++ ).format(name)) ++ ++ certificate_secret = None ++ private_key_secret = None ++ intermediates_secret = None ++ pkp_secret = None ++ ++ try: ++ certificate_secret = connection.secrets.create( ++ payload=certificate, ++ expiration=expiration, ++ name="Certificate" ++ ) ++ private_key_secret = connection.secrets.create( ++ payload=private_key, ++ expiration=expiration, ++ name="Private Key" ++ ) ++ certificate_container = connection.containers.create_certificate( ++ name=name, ++ certificate=certificate_secret, ++ private_key=private_key_secret ++ ) ++ if intermediates: ++ intermediates_secret = connection.secrets.create( ++ payload=intermediates, ++ expiration=expiration, ++ name="Intermediates" ++ ) ++ certificate_container.intermediates = intermediates_secret ++ if private_key_passphrase: ++ pkp_secret = connection.secrets.create( ++ payload=private_key_passphrase, ++ expiration=expiration, ++ name="Private Key Passphrase" ++ ) ++ certificate_container.private_key_passphrase = pkp_secret ++ ++ certificate_container.store() ++ return certificate_container.container_ref ++ # Barbican (because of Keystone-middleware) sometimes masks ++ # exceptions strangely -- this will catch anything that it raises and ++ # reraise the original exception, while also providing useful ++ # feedback in the logs for debugging ++ except Exception: ++ for secret in [certificate_secret, private_key_secret, ++ intermediates_secret, pkp_secret]: ++ if secret and secret.secret_ref: ++ old_ref = secret.secret_ref ++ try: ++ secret.delete() ++ LOG.info(( ++ "Deleted secret {0} ({1}) during rollback." ++ ).format(secret.name, old_ref)) ++ except Exception: ++ LOG.warning(( ++ "Failed to delete {0} ({1}) during rollback. This " ++ "is probably not a problem." ++ ).format(secret.name, old_ref)) ++ with excutils.save_and_reraise_exception(): ++ LOG.exception("Error storing certificate data") ++ ++ def get_cert(self, project_id, cert_ref, resource_ref, ++ check_only=False, service_name='lbaas'): ++ """Retrieves the specified cert and registers as a consumer. ++ ++ :param cert_ref: the UUID of the cert to retrieve ++ :param resource_ref: Full HATEOAS reference to the consuming resource ++ :param check_only: Read Certificate data without registering ++ :param service_name: Friendly name for the consuming service ++ ++ :returns: octavia.certificates.common.Cert representation of the ++ certificate data ++ :raises Exception: if certificate retrieval fails ++ """ ++ connection = self.auth.get_barbican_client(project_id) ++ ++ LOG.info(( ++ "Loading certificate container {0} from Barbican." ++ ).format(cert_ref)) ++ try: ++ if check_only: ++ cert_container = connection.containers.get( ++ container_ref=cert_ref ++ ) ++ else: ++ cert_container = connection.containers.register_consumer( ++ container_ref=cert_ref, ++ name=service_name, ++ url=resource_ref ++ ) ++ return Cert(cert_container) ++ except Exception: ++ with excutils.save_and_reraise_exception(): ++ LOG.exception("Error getting {0}".format(cert_ref)) ++ ++ def delete_cert(self, project_id, cert_ref, resource_ref, ++ service_name='lbaas'): ++ """Deregister as a consumer for the specified cert. ++ ++ :param cert_ref: the UUID of the cert to retrieve ++ :param service_name: Friendly name for the consuming service ++ :param lb_id: Loadbalancer id for building resource consumer URL ++ ++ :raises Exception: if deregistration fails ++ """ ++ connection = self.auth.get_barbican_client(project_id) ++ ++ LOG.info(( ++ "Deregistering as a consumer of {0} in Barbican." 
++ ).format(cert_ref)) ++ try: ++ connection.containers.remove_consumer( ++ container_ref=cert_ref, ++ name=service_name, ++ url=resource_ref ++ ) ++ except Exception: ++ with excutils.save_and_reraise_exception(): ++ LOG.exception(( ++ "Error deregistering as a consumer of {0}" ++ ).format(cert_ref)) +diff --git a/neutron/certificates/cert_manager/cert_manager.py b/neutron/certificates/cert_manager/cert_manager.py +new file mode 100644 +index 000000000..0323378ac +--- /dev/null ++++ b/neutron/certificates/cert_manager/cert_manager.py +@@ -0,0 +1,100 @@ ++# Copyright 2014, 2015 Rackspace US, Inc. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++ ++""" ++Certificate manager API ++""" ++import abc ++ ++from oslo.config import cfg ++import six ++ ++cfg.CONF.import_group('service_auth', 'neutron.certificates.keystone') ++ ++ ++@six.add_metaclass(abc.ABCMeta) ++class Cert(object): ++ """Base class to represent all certificates.""" ++ ++ @abc.abstractmethod ++ def get_certificate(self): ++ """Returns the certificate.""" ++ pass ++ ++ @abc.abstractmethod ++ def get_intermediates(self): ++ """Returns the intermediate certificates.""" ++ pass ++ ++ @abc.abstractmethod ++ def get_private_key(self): ++ """Returns the private key for the certificate.""" ++ pass ++ ++ @abc.abstractmethod ++ def get_private_key_passphrase(self): ++ """Returns the passphrase for the private key.""" ++ pass ++ ++ ++@six.add_metaclass(abc.ABCMeta) ++class CertManager(object): ++ """Base Cert Manager Interface ++ ++ A Cert Manager is responsible for managing certificates for TLS. ++ """ ++ ++ @abc.abstractmethod ++ def store_cert(self, project_id, certificate, private_key, ++ intermediates=None, private_key_passphrase=None, ++ expiration=None, name=None): ++ """Stores (i.e., registers) a cert with the cert manager. ++ ++ This method stores the specified cert and returns its UUID that ++ identifies it within the cert manager. ++ If storage of the certificate data fails, a CertificateStorageException ++ should be raised. ++ """ ++ pass ++ ++ @abc.abstractmethod ++ def get_cert(self, project_id, cert_ref, resource_ref, ++ check_only=False, service_name=None): ++ """Retrieves the specified cert. ++ ++ If check_only is True, don't perform any sort of registration. ++ If the specified cert does not exist, a CertificateStorageException ++ should be raised. ++ """ ++ pass ++ ++ @abc.abstractmethod ++ def delete_cert(self, project_id, cert_ref, resource_ref, ++ service_name=None): ++ """Deletes the specified cert. ++ ++ If the specified cert does not exist, a CertificateStorageException ++ should be raised. 
++ """ ++ pass ++ ++ @classmethod ++ def get_service_url(cls, loadbalancer_id): ++ # Format: ://// ++ return "{0}://{1}/{2}/{3}".format( ++ cfg.CONF.service_auth.service_name, ++ cfg.CONF.service_auth.region, ++ "loadbalancer", ++ loadbalancer_id ++ ) +diff --git a/neutron/certificates/cert_manager/local_cert_manager.py b/neutron/certificates/cert_manager/local_cert_manager.py +new file mode 100644 +index 000000000..1b1db2621 +--- /dev/null ++++ b/neutron/certificates/cert_manager/local_cert_manager.py +@@ -0,0 +1,200 @@ ++# Copyright 2014, 2015 Rackspace US, Inc. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++ ++import os ++ ++from oslo.config import cfg ++ ++from neutron.openstack.common import log as logging ++from neutron.openstack.common import utils as uuidutils ++from neutron.certificates.cert_manager import cert_manager ++from neutron.certificates import exceptions ++ ++LOG = logging.getLogger(__name__) ++ ++CONF = cfg.CONF ++ ++TLS_STORAGE_DEFAULT = os.environ.get( ++ 'OS_LBAAS_TLS_STORAGE', '/var/lib/neutron-lbaas/certificates/' ++) ++ ++local_cert_manager_opts = [ ++ cfg.StrOpt('storage_path', ++ default=TLS_STORAGE_DEFAULT, ++ help='Absolute path to the certificate storage directory. ' ++ 'Defaults to env[OS_LBAAS_TLS_STORAGE].') ++] ++ ++CONF.register_opts(local_cert_manager_opts, group='certificates') ++ ++ ++class Cert(cert_manager.Cert): ++ """Representation of a Cert for local storage.""" ++ ++ def __init__(self, certificate, private_key, intermediates=None, ++ private_key_passphrase=None): ++ self.certificate = certificate ++ self.intermediates = intermediates ++ self.private_key = private_key ++ self.private_key_passphrase = private_key_passphrase ++ ++ def get_certificate(self): ++ return self.certificate ++ ++ def get_intermediates(self): ++ return self.intermediates ++ ++ def get_private_key(self): ++ return self.private_key ++ ++ def get_private_key_passphrase(self): ++ return self.private_key_passphrase ++ ++ ++class CertManager(cert_manager.CertManager): ++ """Cert Manager Interface that stores data locally.""" ++ ++ def store_cert(self, project_id, certificate, private_key, ++ intermediates=None, private_key_passphrase=None, **kwargs): ++ """Stores (i.e., registers) a cert with the cert manager. ++ ++ This method stores the specified cert to the filesystem and returns ++ a UUID that can be used to retrieve it. 
++ ++ :param project_id: Project ID for the owner of the certificate ++ :param certificate: PEM encoded TLS certificate ++ :param private_key: private key for the supplied certificate ++ :param intermediates: ordered and concatenated intermediate certs ++ :param private_key_passphrase: optional passphrase for the supplied key ++ ++ :returns: the UUID of the stored cert ++ :raises CertificateStorageException: if certificate storage fails ++ """ ++ cert_ref = uuidutils.generate_uuid() ++ filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) ++ ++ LOG.info("Storing certificate data on the local filesystem.") ++ try: ++ filename_certificate = "{0}.crt".format(filename_base) ++ with open(filename_certificate, 'w') as cert_file: ++ cert_file.write(certificate) ++ ++ filename_private_key = "{0}.key".format(filename_base) ++ with open(filename_private_key, 'w') as key_file: ++ key_file.write(private_key) ++ ++ if intermediates: ++ filename_intermediates = "{0}.int".format(filename_base) ++ with open(filename_intermediates, 'w') as int_file: ++ int_file.write(intermediates) ++ ++ if private_key_passphrase: ++ filename_pkp = "{0}.pass".format(filename_base) ++ with open(filename_pkp, 'w') as pass_file: ++ pass_file.write(private_key_passphrase) ++ except IOError as ioe: ++ LOG.error("Failed to store certificate.") ++ raise exceptions.CertificateStorageException(message=ioe.message) ++ ++ return cert_ref ++ ++ def get_cert(self, project_id, cert_ref, resource_ref, **kwargs): ++ """Retrieves the specified cert. ++ ++ :param project_id: Project ID for the owner of the certificate ++ :param cert_ref: the UUID of the cert to retrieve ++ :param resource_ref: Full HATEOAS reference to the consuming resource ++ ++ :returns: neutron_lbaas.common.cert_manager.cert_manager.Cert ++ representation of the certificate data ++ :raises CertificateStorageException: if certificate retrieval fails ++ """ ++ LOG.info(( ++ "Loading certificate {0} from the local filesystem." ++ ).format(cert_ref)) ++ ++ filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) ++ ++ filename_certificate = "{0}.crt".format(filename_base) ++ filename_private_key = "{0}.key".format(filename_base) ++ filename_intermediates = "{0}.int".format(filename_base) ++ filename_pkp = "{0}.pass".format(filename_base) ++ ++ cert_data = dict() ++ ++ try: ++ with open(filename_certificate, 'r') as cert_file: ++ cert_data['certificate'] = cert_file.read() ++ except IOError: ++ LOG.error(( ++ "Failed to read certificate for {0}." ++ ).format(cert_ref)) ++ raise exceptions.CertificateStorageException( ++ msg="Certificate could not be read." ++ ) ++ try: ++ with open(filename_private_key, 'r') as key_file: ++ cert_data['private_key'] = key_file.read() ++ except IOError: ++ LOG.error(( ++ "Failed to read private key for {0}." ++ ).format(cert_ref)) ++ raise exceptions.CertificateStorageException( ++ msg="Private Key could not be read." ++ ) ++ ++ try: ++ with open(filename_intermediates, 'r') as int_file: ++ cert_data['intermediates'] = int_file.read() ++ except IOError: ++ pass ++ ++ try: ++ with open(filename_pkp, 'r') as pass_file: ++ cert_data['private_key_passphrase'] = pass_file.read() ++ except IOError: ++ pass ++ ++ return Cert(**cert_data) ++ ++ def delete_cert(self, project_id, cert_ref, resource_ref, **kwargs): ++ """Deletes the specified cert. 
++ ++ :param project_id: Project ID for the owner of the certificate ++ :param cert_ref: the UUID of the cert to delete ++ :param resource_ref: Full HATEOAS reference to the consuming resource ++ ++ :raises CertificateStorageException: if certificate deletion fails ++ """ ++ LOG.info(( ++ "Deleting certificate {0} from the local filesystem." ++ ).format(cert_ref)) ++ ++ filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) ++ ++ filename_certificate = "{0}.crt".format(filename_base) ++ filename_private_key = "{0}.key".format(filename_base) ++ filename_intermediates = "{0}.int".format(filename_base) ++ filename_pkp = "{0}.pass".format(filename_base) ++ ++ try: ++ os.remove(filename_certificate) ++ os.remove(filename_private_key) ++ os.remove(filename_intermediates) ++ os.remove(filename_pkp) ++ except IOError as ioe: ++ LOG.error(( ++ "Failed to delete certificate {0}." ++ ).format(cert_ref)) ++ raise exceptions.CertificateStorageException(message=ioe.message) +diff --git a/neutron/certificates/exceptions.py b/neutron/certificates/exceptions.py +new file mode 100644 +index 000000000..c919f7373 +--- /dev/null ++++ b/neutron/certificates/exceptions.py +@@ -0,0 +1,37 @@ ++# Copyright 2013 OpenStack Foundation. All rights reserved ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++ ++from neutron.common import exceptions ++ ++ ++class TLSException(exceptions.NeutronException): ++ pass ++ ++ ++class NeedsPassphrase(TLSException): ++ message = _("Passphrase needed to decrypt key but client " ++ "did not provide one.") ++ ++ ++class UnreadableCert(TLSException): ++ message = _("Could not read X509 from PEM") ++ ++ ++class MisMatchedKey(TLSException): ++ message = _("Key and x509 certificate do not match") ++ ++ ++class CertificateStorageException(TLSException): ++ message = _('Could not store certificate: %(msg)s') +diff --git a/neutron/certificates/keystone.py b/neutron/certificates/keystone.py +new file mode 100644 +index 000000000..7afe91eeb +--- /dev/null ++++ b/neutron/certificates/keystone.py +@@ -0,0 +1,122 @@ ++# Copyright 2015 Rackspace ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. 
++ ++from keystoneclient.auth.identity import v2 as v2_client ++from keystoneclient.auth.identity import v3 as v3_client ++from keystoneclient import session ++from oslo.config import cfg ++from oslo.utils import excutils ++ ++from neutron.openstack.common import log as logging ++ ++LOG = logging.getLogger(__name__) ++ ++_SESSION = None ++OPTS = [ ++ cfg.StrOpt( ++ 'auth_url', ++ help=_('Authentication endpoint'), ++ ), ++ cfg.StrOpt( ++ 'admin_user', ++ default='admin', ++ help=_('The service admin user name'), ++ ), ++ cfg.StrOpt( ++ 'admin_tenant_name', ++ default='admin', ++ help=_('The service admin tenant name'), ++ ), ++ cfg.StrOpt( ++ 'admin_password', ++ secret=True, ++ default='password', ++ help=_('The service admin password'), ++ ), ++ cfg.StrOpt( ++ 'admin_user_domain', ++ default='admin', ++ help=_('The admin user domain name'), ++ ), ++ cfg.StrOpt( ++ 'admin_project_domain', ++ default='admin', ++ help=_('The admin project domain name'), ++ ), ++ cfg.StrOpt( ++ 'region', ++ default='RegionOne', ++ help=_('The deployment region'), ++ ), ++ cfg.StrOpt( ++ 'service_name', ++ default='lbaas', ++ help=_('The name of the service'), ++ ), ++ cfg.StrOpt( ++ 'auth_version', ++ default='3', ++ help=_('The auth version used to authenticate'), ++ ), ++ cfg.StrOpt( ++ 'endpoint_type', ++ default='public', ++ help=_('The endpoint_type to be used') ++ ), ++ cfg.BoolOpt( ++ 'insecure', ++ default=False, ++ help=_('Disable server certificate verification') ++ ) ++] ++ ++cfg.CONF.register_opts(OPTS, 'service_auth') ++ ++ ++def get_session(): ++ """Initializes a Keystone session. ++ ++ :returns: a Keystone Session object ++ :raises Exception: if the session cannot be established ++ """ ++ global _SESSION ++ if not _SESSION: ++ ++ auth_url = cfg.CONF.service_auth.auth_url ++ insecure = cfg.CONF.service_auth.insecure ++ kwargs = {'auth_url': auth_url, ++ 'username': cfg.CONF.service_auth.admin_user, ++ 'password': cfg.CONF.service_auth.admin_password} ++ ++ if cfg.CONF.service_auth.auth_version == '2': ++ client = v2_client ++ kwargs['tenant_name'] = cfg.CONF.service_auth.admin_tenant_name ++ elif cfg.CONF.service_auth.auth_version == '3': ++ client = v3_client ++ kwargs['project_name'] = cfg.CONF.service_auth.admin_tenant_name ++ kwargs['user_domain_name'] = (cfg.CONF.service_auth. ++ admin_user_domain) ++ kwargs['project_domain_name'] = (cfg.CONF.service_auth. ++ admin_project_domain) ++ else: ++ raise Exception(_('Unknown keystone version!')) ++ ++ try: ++ kc = client.Password(**kwargs) ++ _SESSION = session.Session(auth=kc, verify=not insecure) ++ except Exception: ++ with excutils.save_and_reraise_exception(): ++ LOG.exception("Error creating Keystone session.") ++ ++ return _SESSION +diff --git a/neutron/certificates/tls_utils/__init__.py b/neutron/certificates/tls_utils/__init__.py +new file mode 100644 +index 000000000..e69de29bb +diff --git a/neutron/certificates/tls_utils/cert_parser.py b/neutron/certificates/tls_utils/cert_parser.py +new file mode 100644 +index 000000000..6c0b2d0ae +--- /dev/null ++++ b/neutron/certificates/tls_utils/cert_parser.py +@@ -0,0 +1,177 @@ ++# ++# Copyright 2014 OpenStack Foundation. All rights reserved ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. 
You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++ ++import six ++ ++from cryptography.hazmat import backends ++from cryptography.hazmat.primitives import serialization ++from cryptography import x509 ++ ++from neutron.openstack.common import log as logging ++import neutron.certificates.exceptions as exceptions ++ ++X509_BEG = "-----BEGIN CERTIFICATE-----" ++X509_END = "-----END CERTIFICATE-----" ++ ++LOG = logging.getLogger(__name__) ++ ++ ++def validate_cert(certificate, private_key=None, ++ private_key_passphrase=None, intermediates=None): ++ """ ++ Validate that the certificate is a valid PEM encoded X509 object ++ ++ Optionally verify that the private key matches the certificate. ++ Optionally verify that the intermediates are valid X509 objects. ++ ++ :param certificate: A PEM encoded certificate ++ :param private_key: The private key for the certificate ++ :param private_key_passphrase: Passphrase for accessing the private key ++ :param intermediates: PEM encoded intermediate certificates ++ :returns: boolean ++ """ ++ ++ cert = _get_x509_from_pem_bytes(certificate) ++ if intermediates: ++ for x509Pem in _split_x509s(intermediates): ++ _get_x509_from_pem_bytes(x509Pem) ++ if private_key: ++ pkey = _read_privatekey(private_key, passphrase=private_key_passphrase) ++ pknum = pkey.public_key().public_numbers() ++ certnum = cert.public_key().public_numbers() ++ if pknum != certnum: ++ raise exceptions.MisMatchedKey ++ return True ++ ++ ++def _read_privatekey(privatekey_pem, passphrase=None): ++ if passphrase is not None: ++ passphrase = passphrase.encode('utf-8') ++ privatekey_pem = privatekey_pem.encode('ascii') ++ ++ try: ++ return serialization.load_pem_private_key(privatekey_pem, passphrase, ++ backends.default_backend()) ++ except Exception: ++ raise exceptions.NeedsPassphrase ++ ++ ++def _split_x509s(x509Str): ++ """ ++ Split the input string into individb(ual x509 text blocks ++ ++ :param x509Str: A large multi x509 certificate blcok ++ :returns: A list of strings where each string represents an ++ X509 pem block surrounded by BEGIN CERTIFICATE, ++ END CERTIFICATE block tags ++ """ ++ curr_pem_block = [] ++ inside_x509 = False ++ for line in x509Str.replace("\r", "").split("\n"): ++ if inside_x509: ++ curr_pem_block.append(line) ++ if line == X509_END: ++ yield "\n".join(curr_pem_block) ++ curr_pem_block = [] ++ inside_x509 = False ++ continue ++ else: ++ if line == X509_BEG: ++ curr_pem_block.append(line) ++ inside_x509 = True ++ ++ ++def _read_pyca_private_key(private_key, private_key_passphrase=None): ++ kw = {"password": None, ++ "backend": backends.default_backend()} ++ if private_key_passphrase is not None: ++ kw["password"] = private_key_passphrase.encode("utf-8") ++ else: ++ kw["password"] = None ++ ++ if type(private_key) == six.text_type: ++ private_key = private_key.encode('utf-8') ++ try: ++ pk = serialization.load_pem_private_key(private_key, **kw) ++ return pk ++ except TypeError as ex: ++ if len(ex.args) > 0 and ex.args[0].startswith("Password"): ++ raise exceptions.NeedsPassphrase ++ ++ ++def dump_private_key(private_key, private_key_passphrase=None): ++ """ ++ Parses encrypted key to provide an 
unencrypted version in PKCS8 ++ ++ :param private_key: private key ++ :param private_key_passphrase: private key passphrase ++ :returns: Unencrypted private key in PKCS8 ++ """ ++ ++ # re encode the key as unencrypted PKCS8 ++ pk = _read_pyca_private_key(private_key, ++ private_key_passphrase=private_key_passphrase) ++ key = pk.private_bytes(encoding=serialization.Encoding.PEM, ++ format=serialization.PrivateFormat.PKCS8, ++ encryption_algorithm=serialization.NoEncryption()) ++ return key ++ ++ ++def get_host_names(certificate): ++ """Extract the host names from the Pem encoded X509 certificate ++ ++ :param certificate: A PEM encoded certificate ++ :returns: A dictionary containing the following keys: ++ ['cn', 'dns_names'] ++ where 'cn' is the CN from the SubjectName of the certificate, and ++ 'dns_names' is a list of dNSNames (possibly empty) from ++ the SubjectAltNames of the certificate. ++ """ ++ try: ++ cert = _get_x509_from_pem_bytes(certificate) ++ cn = cert.subject.get_attributes_for_oid(x509.OID_COMMON_NAME)[0] ++ host_names = { ++ 'cn': cn.value.lower(), ++ 'dns_names': [] ++ } ++ try: ++ ext = cert.extensions.get_extension_for_oid( ++ x509.OID_SUBJECT_ALTERNATIVE_NAME ++ ) ++ host_names['dns_names'] = ext.value.get_values_for_type( ++ x509.DNSName) ++ except x509.ExtensionNotFound: ++ LOG.debug("%s extension not found", ++ x509.OID_SUBJECT_ALTERNATIVE_NAME) ++ ++ return host_names ++ except Exception: ++ LOG.exception("Unreadable certificate.") ++ raise exceptions.UnreadableCert ++ ++ ++def _get_x509_from_pem_bytes(certificate_pem): ++ """ ++ Parse X509 data from a PEM encoded certificate ++ ++ :param certificate_pem: Certificate in PEM format ++ :returns: crypto high-level x509 data from the PEM string ++ """ ++ try: ++ x509cert = x509.load_pem_x509_certificate(certificate_pem, ++ backends.default_backend()) ++ except Exception: ++ raise exceptions.UnreadableCert ++ return x509cert +diff --git a/setup.cfg b/setup.cfg +index 9cc699546..e6431b35a 100644 +--- a/setup.cfg ++++ b/setup.cfg +@@ -186,6 +186,11 @@ neutron.ml2.extension_drivers = + test = neutron.tests.unit.ml2.test_extension_driver_api:TestExtensionDriver + neutron.openstack.common.cache.backends = + memory = neutron.openstack.common.cache._backends.memory:MemoryBackend ++neutron.cert_manager.barbican_auth = ++ barbican_acl_auth = neutron.certificates.cert_manager.barbican_auth.barbican_acl:BarbicanACLAuth ++neutron.cert_manager.backend = ++ barbican = neutron.certificates.cert_manager.barbican_cert_manager ++ local = neutron.certificates.cert_manager.local_cert_manager + oslo.messaging.notify.drivers = + neutron.openstack.common.notifier.log_notifier = oslo.messaging.notify._impl_log:LogDriver + neutron.openstack.common.notifier.no_op_notifier = oslo.messaging.notify._impl_noop:NoOpDriver +-- +2.11.0 (Apple Git-81) + diff --git a/packaging/openstack-neutron/0123-Add-query-and-fragment-valid-to-url_path.patch b/packaging/openstack-neutron/0123-Add-query-and-fragment-valid-to-url_path.patch new file mode 100644 index 0000000..8787608 --- /dev/null +++ b/packaging/openstack-neutron/0123-Add-query-and-fragment-valid-to-url_path.patch @@ -0,0 +1,36 @@ +From 55837a3129c60c3830f31daa05254567c62885ba Mon Sep 17 00:00:00 2001 +From: "cheng.tang" +Date: Tue, 4 Jul 2017 17:31:33 +0800 +Subject: [PATCH] Add query and fragment valid to url_path + +Fixes: redmine #10435 + +Signed-off-by: cheng.tang +Signed-off-by: Hunt Xu +--- + neutron/services/loadbalancer/constants.py | 8 +++++++- + 1 file changed, 7 insertions(+), 1 
deletion(-) + +diff --git a/neutron/services/loadbalancer/constants.py b/neutron/services/loadbalancer/constants.py +index 5e414fd43..b5649ece1 100644 +--- a/neutron/services/loadbalancer/constants.py ++++ b/neutron/services/loadbalancer/constants.py +@@ -49,8 +49,14 @@ SUPPORTED_HTTP_METHODS = (HTTP_METHOD_GET, HTTP_METHOD_HEAD, HTTP_METHOD_POST, + # pct-encoded = "%" HEXDIG HEXDIG + # sub-delims = "!" / "$" / "&" / "'" / "(" / ")" + # / "*" / "+" / "," / ";" / "=" ++# query = *( pchar / "/" / "?" ) ++# fragment = *( pchar / "/" / "?" ) ++# ++PCHAR = "[a-zA-Z0-9-._~!$&\'()*+,;=:@]|(%[a-fA-F0-9]{2})" + SUPPORTED_URL_PATH = ( +- "^(/([a-zA-Z0-9-._~!$&\'()*+,;=:@]|(%[a-fA-F0-9]{2}))*)+$") ++ "^(/(%s)*)+(\?((%s)|[/\?])*)?(#((%s)|[/\?])*)?$" % ( ++ PCHAR, PCHAR, PCHAR ++ )) + + SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP' + SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE' +-- +2.11.0 (Apple Git-81) + diff --git a/packaging/openstack-neutron/0124-agent_sync-filter-out-not-ready-fip-port-targets.patch b/packaging/openstack-neutron/0124-agent_sync-filter-out-not-ready-fip-port-targets.patch new file mode 100644 index 0000000..fceb132 --- /dev/null +++ b/packaging/openstack-neutron/0124-agent_sync-filter-out-not-ready-fip-port-targets.patch @@ -0,0 +1,32 @@ +From 6dd7c12cff5d7879aa6aa75aa95d2ec83c373bb4 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Thu, 31 Aug 2017 13:28:05 +0800 +Subject: [PATCH] agent_sync: filter out not-ready fip port targets + +When a FIP is not associated with a fixed IP, its port is not configured +on the hosts, and its router_id is None. So no QoS should be applied on +the target port. + +Fixes: redmine #10738 + +Signed-off-by: Hunt Xu +--- + neutron/db/qos/qos_db.py | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/neutron/db/qos/qos_db.py b/neutron/db/qos/qos_db.py +index b3a465ccb..f3eadfcb7 100644 +--- a/neutron/db/qos/qos_db.py ++++ b/neutron/db/qos/qos_db.py +@@ -827,6 +827,8 @@ class QosPluginRpcDbMixin(object): + context, qos.port.device_id) + except ext_l3.FloatingIPNotFound: + continue ++ if fip['router_id'] is None: ++ continue + namespace = 'qrouter-' + fip['router_id'] + else: + namespace = '_root' +-- +2.11.0 (Apple Git-81) + diff --git a/packaging/openstack-neutron/0125-PPTP-allow-the-same-username-used-by-different-tenan.patch b/packaging/openstack-neutron/0125-PPTP-allow-the-same-username-used-by-different-tenan.patch new file mode 100644 index 0000000..8415c63 --- /dev/null +++ b/packaging/openstack-neutron/0125-PPTP-allow-the-same-username-used-by-different-tenan.patch @@ -0,0 +1,42 @@ +From 77b18f890e34fac82d64d9ec868ac7344c67b2bd Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Tue, 5 Sep 2017 16:19:48 +0800 +Subject: [PATCH] PPTP: allow the same username used by different tenants + +Fixes: redmine #10809 + +Signed-off-by: Hunt Xu +--- + neutron/db/vpn/vpn_db.py | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +diff --git a/neutron/db/vpn/vpn_db.py b/neutron/db/vpn/vpn_db.py +index 6034bbc3a..7cd7b6b9f 100644 +--- a/neutron/db/vpn/vpn_db.py ++++ b/neutron/db/vpn/vpn_db.py +@@ -660,9 +660,10 @@ class VPNPluginDb(vpnaas.VPNPluginBase, base_db.CommonDbMixin): + pptp_credential['associations']]} + return self._fields(res, fields) + +- def _username_already_exists(self, context, username): +- query = self._model_query(context, PPTPCredential) +- return len(query.filter_by(username=username).all()) > 0 ++ def _username_already_exists(self, context, tenant_id, username): ++ credentials = self._model_query(context, 
PPTPCredential).filter_by( ++ tenant_id=tenant_id, username=username).all() ++ return len(credentials) > 0 + + def _create_port_for_vpnservice(self, context, + vpnservice_id, pptp_credential_id): +@@ -696,7 +697,7 @@ class VPNPluginDb(vpnaas.VPNPluginBase, base_db.CommonDbMixin): + pptp_credential_id = uuidutils.generate_uuid() + with context.session.begin(subtransactions=True): + username = pptp_credential['username'] +- if self._username_already_exists(context, username): ++ if self._username_already_exists(context, tenant_id, username): + raise vpnaas.PPTPUsernameAlreadyExists(username=username) + pptp_credential_db = PPTPCredential( + id=pptp_credential_id, +-- +2.11.0 (Apple Git-81) + diff --git a/packaging/openstack-neutron/0126-PPTP-fix-vpnservices-checking-for-PPTP-credentials.patch b/packaging/openstack-neutron/0126-PPTP-fix-vpnservices-checking-for-PPTP-credentials.patch new file mode 100644 index 0000000..b3b4a6a --- /dev/null +++ b/packaging/openstack-neutron/0126-PPTP-fix-vpnservices-checking-for-PPTP-credentials.patch @@ -0,0 +1,38 @@ +From ddd72d4f1addc72ffb2582862fb1eeb07c6a2395 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Tue, 5 Sep 2017 17:30:45 +0800 +Subject: [PATCH] PPTP: fix vpnservices checking for PPTP credentials + +Fixes: redmine #10813 + +Signed-off-by: Hunt Xu +--- + neutron/services/vpn/plugin.py | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/neutron/services/vpn/plugin.py b/neutron/services/vpn/plugin.py +index 3a4cb5642..c32554d64 100644 +--- a/neutron/services/vpn/plugin.py ++++ b/neutron/services/vpn/plugin.py +@@ -209,14 +209,16 @@ class VPNDriverPlugin(VPNPlugin, vpn_db.VPNPluginRpcDbMixin): + return ipsec_site_connection + + def create_pptp_credential(self, context, pptp_credential): +- self._check_drivers_for_pptp_credential(context, pptp_credential) ++ self._check_drivers_for_pptp_credential( ++ context, pptp_credential['pptp_credential']) + return super( + VPNDriverPlugin, self + ).create_pptp_credential(context, pptp_credential) + + def update_pptp_credential(self, context, pptp_credential_id, + pptp_credential): +- self._check_drivers_for_pptp_credential(context, pptp_credential) ++ self._check_drivers_for_pptp_credential( ++ context, pptp_credential['pptp_credential']) + return super( + VPNDriverPlugin, self + ).update_pptp_credential(context, pptp_credential_id, pptp_credential) +-- +2.11.0 (Apple Git-81) + diff --git a/packaging/openstack-neutron/0127-Metering-use-nfacct-to-get-metering-counters.patch b/packaging/openstack-neutron/0127-Metering-use-nfacct-to-get-metering-counters.patch new file mode 100644 index 0000000..e41d44d --- /dev/null +++ b/packaging/openstack-neutron/0127-Metering-use-nfacct-to-get-metering-counters.patch @@ -0,0 +1,470 @@ +From 4625b61f7bd5e2a8b15f4071c8223acdc505f1fd Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Thu, 7 Sep 2017 15:41:13 +0800 +Subject: [PATCH] Metering: use nfacct to get metering counters + +Using nfacct can speed up the process of getting traffic counters by +getting counters in the same router at once. + +Use eventlet to parallelize the procedures of getting traffic counters +among lots of routers. 
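+
+The parallel part is a plain GreenPool fan-out: each router's counters are
+fetched in its own green thread and collected as they complete. A small,
+self-contained sketch of that shape follows; fetch_counters() is a
+hypothetical stand-in for the per-router nfacct query (the real driver
+returns a (successful, router_id, counters) tuple and schedules failed
+routers for reconfiguration):
+
+    import eventlet
+    from eventlet.greenpool import GreenPool
+
+    def fetch_counters(router_id):
+        # Pretend to run "ip netns exec qrouter-<id> nfacct list reset json".
+        eventlet.sleep(0.1)
+        return router_id, {'pkts': 10, 'bytes': 1500}
+
+    def get_traffic_counters(router_ids):
+        accs = {}
+        pool = GreenPool()
+        # imap() runs fetch_counters concurrently, yielding results in input order.
+        for router_id, counters in pool.imap(fetch_counters, router_ids):
+            accs[router_id] = counters
+        return accs
+
+    if __name__ == '__main__':
+        print(get_traffic_counters(['router-1', 'router-2', 'router-3']))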
+ +Fixes: redmine #10878 + +Signed-off-by: Hunt Xu +--- + etc/neutron/rootwrap.d/metering.filters | 12 ++ + neutron/agent/linux/nfacct.py | 122 +++++++++++++++++++++ + .../drivers/iptables/es_iptables_driver.py | 34 +++--- + .../metering/drivers/iptables/iptables_driver.py | 99 +++++++---------- + setup.cfg | 1 + + 5 files changed, 190 insertions(+), 78 deletions(-) + create mode 100644 etc/neutron/rootwrap.d/metering.filters + create mode 100644 neutron/agent/linux/nfacct.py + +diff --git a/etc/neutron/rootwrap.d/metering.filters b/etc/neutron/rootwrap.d/metering.filters +new file mode 100644 +index 000000000..1e720eb32 +--- /dev/null ++++ b/etc/neutron/rootwrap.d/metering.filters +@@ -0,0 +1,12 @@ ++# neutron-rootwrap command filters for nodes on which neutron is ++# expected to control network ++# ++# This file should be owned by (and only-writeable by) the root user ++ ++# format seems to be ++# cmd-name: filter-name, raw-command, user, args ++ ++[Filters] ++ ++# metering-agent ++nfacct: CommandFilter, nfacct, root +diff --git a/neutron/agent/linux/nfacct.py b/neutron/agent/linux/nfacct.py +new file mode 100644 +index 000000000..2595f7724 +--- /dev/null ++++ b/neutron/agent/linux/nfacct.py +@@ -0,0 +1,122 @@ ++# Copyright (c) 2017 Eayun, Inc. ++# All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. 
++ ++import json ++ ++from neutron.agent.linux import iptables_manager ++from neutron.openstack.common import log as logging ++ ++LOG = logging.getLogger(__name__) ++ ++ ++class NfacctMixin(object): ++ """ ++ The following attributes/methods are not defined in this class: ++ * methods ++ - self.execute ++ * attributes ++ - self.root_helper ++ - self.namespace ++ """ ++ NFACCT_OBJECT_NAME_LEN = 31 ++ ++ @staticmethod ++ def _get_nfacct_object_name(nfacct_object): ++ return nfacct_object[:NfacctMixin.NFACCT_OBJECT_NAME_LEN] ++ ++ @staticmethod ++ def get_nfacct_rule_part(nfacct_object): ++ nfacct_object_name = NfacctMixin._get_nfacct_object_name(nfacct_object) ++ return "-m nfacct --nfacct-name %s" % nfacct_object_name ++ ++ def _ns_wrap_cmd(self, cmd): ++ if self.namespace: ++ cmd = ['ip', 'netns', 'exec', self.namespace] + cmd ++ return cmd ++ ++ def get_objects(self): ++ args = self._ns_wrap_cmd(['nfacct', 'list', 'json']) ++ try: ++ nfacct_out = self.execute(args, root_helper=self.root_helper) ++ except RuntimeError: ++ return set() ++ ++ if not nfacct_out: ++ return set() ++ ++ return set( ++ counter['name'] ++ for counter in json.loads(nfacct_out)['nfacct_counters'] ++ ) ++ ++ def add_nfacct_objects(self, nfacct_objects): ++ args_prefix = self._ns_wrap_cmd(['nfacct', 'add']) ++ existing_objests = self.get_objects() ++ for nfacct_object in nfacct_objects: ++ nfacct_object_name = self._get_nfacct_object_name(nfacct_object) ++ if nfacct_object_name in existing_objests: ++ continue ++ args = args_prefix + [nfacct_object_name] ++ self.execute(args, root_helper=self.root_helper, ++ check_exit_code=False) ++ ++ def nfacct_flush(self): ++ args = self._ns_wrap_cmd(['nfacct', 'flush']) ++ self.execute(args, root_helper=self.root_helper, check_exit_code=False) ++ ++ def parse_nfacct_output(self, nfacct_out): ++ accs = {} ++ for counter in json.loads(nfacct_out)['nfacct_counters']: ++ name = counter.pop('name') ++ accs[name] = counter ++ return accs ++ ++ def get_result(self, nfacct_objects): ++ args = self._ns_wrap_cmd(['nfacct', 'list', 'reset', 'json']) ++ try: ++ nfacct_out = self.execute(args, root_helper=self.root_helper) ++ except RuntimeError: ++ return None ++ ++ if not nfacct_out: ++ return None ++ ++ parsed_accs = self.parse_nfacct_output(nfacct_out) ++ ret_accs = {} ++ for nfacct_object in nfacct_objects: ++ nfacct_object_name = self._get_nfacct_object_name(nfacct_object) ++ acc = parsed_accs.get(nfacct_object_name, None) ++ if acc: ++ ret_accs[nfacct_object] = acc ++ return ret_accs ++ ++ ++class NfacctIptablesManager(iptables_manager.IptablesManager, ++ NfacctMixin): ++ ++ def __init__(self, *args, **kwargs): ++ super(NfacctIptablesManager, self).__init__(*args, **kwargs) ++ self.nfacct_objects = set() ++ ++ def add_nfacct_object(self, object_name): ++ self.nfacct_objects.add(object_name) ++ ++ def apply(self): ++ if self.iptables_apply_deferred: ++ return ++ self.add_nfacct_objects(self.nfacct_objects) ++ self.nfacct_objects = set() ++ super(NfacctIptablesManager, self).apply() ++ self.nfacct_flush() +diff --git a/neutron/services/metering/drivers/iptables/es_iptables_driver.py b/neutron/services/metering/drivers/iptables/es_iptables_driver.py +index 9c2040053..9d5c0fa34 100644 +--- a/neutron/services/metering/drivers/iptables/es_iptables_driver.py ++++ b/neutron/services/metering/drivers/iptables/es_iptables_driver.py +@@ -16,6 +16,7 @@ + import six + + from neutron.agent.linux import iptables_manager ++from neutron.agent.linux.nfacct import NfacctMixin + from neutron.common import 
constants as constants + from neutron.common import log + from neutron.openstack.common import log as logging +@@ -46,8 +47,8 @@ class EsRouterWithMetering(iptables_driver.RouterWithMetering): + iptables_driver.EXTERNAL_DEV_PREFIX, ES_METERING_MARK) + im.ipv4['mangle'].add_rule('PREROUTING', mark_rule) + +- def iter_metering_labels(self): +- return self.metering_labels.items() + self.es_metering_labels.items() ++ def get_metering_labels(self): ++ return self.metering_labels.keys() + self.es_metering_labels.keys() + + + class EsIptablesMeteringDriver(iptables_driver.IptablesMeteringDriver): +@@ -104,7 +105,7 @@ class EsIptablesMeteringDriver(iptables_driver.IptablesMeteringDriver): + self._process_associate_es_metering_label(router) + + @staticmethod +- def _get_es_meter_rule(label, label_chain): ++ def _get_es_meter_rule(label): + rule_parts = [] + if label['direction'] == 'ingress': + rule_parts += ['-m mark --mark %s' % ES_METERING_MARK] +@@ -121,38 +122,31 @@ class EsIptablesMeteringDriver(iptables_driver.IptablesMeteringDriver): + if label['tcp_port'] is not None: + rule_parts += ['-p tcp %s %s' % (port_selector, label['tcp_port'])] + +- rule_parts += ['-j %s' % label_chain] ++ rule_parts += [NfacctMixin.get_nfacct_rule_part(label['id'])] + + return ' '.join(rule_parts) + +- @staticmethod +- def _get_label_chain_name(label_id): +- return iptables_manager.get_chain_name( +- iptables_driver.WRAP_NAME + iptables_driver.LABEL + label_id, +- wrap=False) +- + def _add_es_metering_label(self, rm, label): + table = rm.iptables_manager.ipv4['mangle'] +- label_id = label['id'] +- label_chain = self._get_label_chain_name(label_id) +- table.add_chain(label_chain, wrap=False) +- es_meter_rule = self._get_es_meter_rule(label, label_chain) ++ rm.iptables_manager.add_nfacct_object(label['id']) ++ es_meter_rule = self._get_es_meter_rule(label) + table.add_rule('POSTROUTING', es_meter_rule) + if label['internal_ip'] is None and label['direction'] == 'ingress': + # If internal IP is unspecified, we should also count traffic + # directed to the router itself. 
+ table.add_rule('INPUT', es_meter_rule) +- table.add_rule(label_chain, '', wrap=False) +- rm.es_metering_labels[label_id] = label ++ rm.es_metering_labels[label['id']] = label + + def _remove_es_metering_label(self, rm, label_id): + table = rm.iptables_manager.ipv4['mangle'] +- if label_id not in rm.es_metering_labels: ++ label = rm.es_metering_labels.pop(label_id, None) ++ if label is None: + return +- label_chain = self._get_label_chain_name(label_id) +- table.remove_chain(label_chain, wrap=False) + +- del rm.es_metering_labels[label_id] ++ es_meter_rule = self._get_es_meter_rule(label) ++ table.remove_rule('POSTROUTING', es_meter_rule) ++ if label['internal_ip'] is None and label['direction'] == 'ingress': ++ table.remove_rule('INPUT', es_meter_rule) + + def _process_associate_es_metering_label(self, router): + self._update_router(router) +diff --git a/neutron/services/metering/drivers/iptables/iptables_driver.py b/neutron/services/metering/drivers/iptables/iptables_driver.py +index 1610bbe71..963c16c31 100644 +--- a/neutron/services/metering/drivers/iptables/iptables_driver.py ++++ b/neutron/services/metering/drivers/iptables/iptables_driver.py +@@ -15,9 +15,12 @@ + from oslo.config import cfg + import six + ++import eventlet ++ + from neutron.agent.common import config +-from neutron.agent.linux import interface + from neutron.agent.linux import iptables_manager ++from neutron.agent.linux import interface ++from neutron.agent.linux.nfacct import NfacctMixin, NfacctIptablesManager + from neutron.common import constants as constants + from neutron.common import ipv6_utils + from neutron.common import log +@@ -32,7 +35,6 @@ WRAP_NAME = 'neutron-meter' + EXTERNAL_DEV_PREFIX = 'qg-' + TOP_CHAIN = WRAP_NAME + "-local" + RULE = '-r-' +-LABEL = '-l-' + + config.register_interface_driver_opts_helper(cfg.CONF) + config.register_use_namespaces_opts_helper(cfg.CONF) +@@ -71,7 +73,7 @@ class RouterWithMetering(object): + self.router = router + self.root_helper = config.get_root_helper(self.conf) + self.ns_name = NS_PREFIX + self.id if conf.use_namespaces else None +- self.iptables_manager = iptables_manager.IptablesManager( ++ self.iptables_manager = NfacctIptablesManager( + root_helper=self.root_helper, + namespace=self.ns_name, + binary_name=WRAP_NAME, +@@ -79,8 +81,8 @@ class RouterWithMetering(object): + use_ipv6=ipv6_utils.is_enabled()) + self.metering_labels = {} + +- def iter_metering_labels(self): +- return self.metering_labels.items() ++ def get_metering_labels(self): ++ return self.metering_labels.keys() + + + class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): +@@ -133,8 +135,7 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + if router_id in self.routers: + del self.routers[router_id] + +- def _process_metering_label_rules(self, rm, rules, label_chain, +- rules_chain): ++ def _process_metering_label_rules(self, rm, rules, label_id, rules_chain): + im = rm.iptables_manager + if not rm.router['gw_port_id']: + return +@@ -153,7 +154,9 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + im.ipv4['filter'].add_rule(rules_chain, ipt_rule, + wrap=False, top=True) + else: +- ipt_rule = '%s -j %s' % (dir_opt, label_chain) ++ ipt_rule = '%s %s' % ( ++ dir_opt, NfacctMixin.get_nfacct_rule_part(label_id)) ++ im.add_nfacct_object(label_id) + im.ipv4['filter'].add_rule(rules_chain, ipt_rule, + wrap=False, top=False) + +@@ -166,12 +169,6 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + for label in labels: + 
label_id = label['id'] + +- label_chain = iptables_manager.get_chain_name(WRAP_NAME + +- LABEL + label_id, +- wrap=False) +- rm.iptables_manager.ipv4['filter'].add_chain(label_chain, +- wrap=False) +- + rules_chain = iptables_manager.get_chain_name(WRAP_NAME + + RULE + label_id, + wrap=False) +@@ -181,14 +178,10 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + rules_chain, + wrap=False) + +- rm.iptables_manager.ipv4['filter'].add_rule(label_chain, +- '', +- wrap=False) +- + rules = label.get('rules') + if rules: + self._process_metering_label_rules(rm, rules, +- label_chain, ++ label_id, + rules_chain) + + rm.metering_labels[label_id] = label +@@ -205,15 +198,10 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + if label_id not in rm.metering_labels: + continue + +- label_chain = iptables_manager.get_chain_name(WRAP_NAME + +- LABEL + label_id, +- wrap=False) + rules_chain = iptables_manager.get_chain_name(WRAP_NAME + + RULE + label_id, + wrap=False) + +- rm.iptables_manager.ipv4['filter'].remove_chain(label_chain, +- wrap=False) + rm.iptables_manager.ipv4['filter'].remove_chain(rules_chain, + wrap=False) + +@@ -239,9 +227,6 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + for label in labels: + label_id = label['id'] + +- label_chain = iptables_manager.get_chain_name(WRAP_NAME + +- LABEL + label_id, +- wrap=False) + rules_chain = iptables_manager.get_chain_name(WRAP_NAME + + RULE + label_id, + wrap=False) +@@ -251,7 +236,7 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + rules = label.get('rules') + if rules: + self._process_metering_label_rules(rm, rules, +- label_chain, ++ label_id, + rules_chain) + + @log.log +@@ -259,44 +244,42 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + for router in routers: + self._process_disassociate_metering_label(router) + ++ def get_traffic_counter(self, router): ++ router_id = router['id'] ++ rm = self.routers.get(router_id) ++ if not rm: ++ return (True, router_id, {}) ++ ++ label_ids = rm.get_metering_labels() ++ accs = rm.iptables_manager.get_result(label_ids) ++ if accs is None: ++ return (False, router_id, {}) ++ ++ missing_labels = set(label_ids) - set(accs.keys()) ++ successful = False if missing_labels else True ++ for label_id in missing_labels: ++ LOG.warn("Missing counter for label %s.", label_id) ++ return (successful, router_id, accs) ++ + @log.log + def get_traffic_counters(self, context, routers): + accs = {} + routers_to_reconfigure = [] +- for router in routers: +- rm = self.routers.get(router['id']) +- if not rm: +- continue +- +- router_to_reconfigure = False +- for label_id, label in rm.iter_metering_labels(): +- try: +- chain = iptables_manager.get_chain_name(WRAP_NAME + +- LABEL + +- label_id, +- wrap=False) +- +- chain_acc = rm.iptables_manager.get_traffic_counters( +- chain, wrap=False, zero=True) +- except RuntimeError: +- LOG.exception(_('Failed to get traffic counters, ' +- 'router: %s'), router) +- router_to_reconfigure = True +- continue +- +- if not chain_acc: +- continue + ++ pool = eventlet.greenpool.GreenPool() ++ for successful, router_id, acc in pool.imap( ++ self.get_traffic_counter, routers ++ ): ++ if not successful: ++ routers_to_reconfigure.append(router_id) ++ LOG.exception(_('Failed to get traffic counters, ' ++ 'router: %s'), router_id) ++ for label_id, label_acc in acc.items(): + acc = accs.get(label_id, {'pkts': 0, 'bytes': 0}) +- +- acc['pkts'] += chain_acc['pkts'] +- acc['bytes'] += 
chain_acc['bytes'] +- ++ acc['pkts'] += label_acc['pkts'] ++ acc['bytes'] += label_acc['bytes'] + accs[label_id] = acc + +- if router_to_reconfigure: +- routers_to_reconfigure.append(router['id']) +- + for router_id in routers_to_reconfigure: + self.routers.pop(router_id, None) + +diff --git a/setup.cfg b/setup.cfg +index e6431b35a..6ed791989 100644 +--- a/setup.cfg ++++ b/setup.cfg +@@ -42,6 +42,7 @@ data_files = + etc/neutron/rootwrap.d/l3.filters + etc/neutron/rootwrap.d/lbaas-haproxy.filters + etc/neutron/rootwrap.d/linuxbridge-plugin.filters ++ etc/neutron/rootwrap.d/metering.filters + etc/neutron/rootwrap.d/nec-plugin.filters + etc/neutron/rootwrap.d/openvswitch-plugin.filters + etc/neutron/rootwrap.d/qos.filters +-- +2.11.0 (Apple Git-81) + diff --git a/packaging/openstack-neutron/0128-metering-get-all-metering-label-at-once-on-a-host.patch b/packaging/openstack-neutron/0128-metering-get-all-metering-label-at-once-on-a-host.patch new file mode 100644 index 0000000..b01022e --- /dev/null +++ b/packaging/openstack-neutron/0128-metering-get-all-metering-label-at-once-on-a-host.patch @@ -0,0 +1,93 @@ +From 9371e8a66fb40460d961e16dee567e152457613e Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Thu, 14 Sep 2017 14:34:50 +0800 +Subject: [PATCH 128/129] metering: get all metering label at once on a host + +This is a hack for kernel which doesn't support nfacct per net +namespace. Upstream commit 3499abb249bb5ed9d21031944bc3059ec4aa2909. + +Fixes: redmine #10957 + +Signed-off-by: Hunt Xu +--- + neutron/services/metering/agents/metering_agent.py | 2 +- + .../metering/drivers/iptables/iptables_driver.py | 37 ++++++++++++++++------ + 2 files changed, 28 insertions(+), 11 deletions(-) + +diff --git a/neutron/services/metering/agents/metering_agent.py b/neutron/services/metering/agents/metering_agent.py +index 4ba07526c..270ec781a 100644 +--- a/neutron/services/metering/agents/metering_agent.py ++++ b/neutron/services/metering/agents/metering_agent.py +@@ -74,8 +74,8 @@ class MeteringAgent(MeteringPluginRpc, manager.Manager): + + def __init__(self, host, conf=None): + self.conf = conf or cfg.CONF +- self._load_drivers() + self.root_helper = config.get_root_helper(self.conf) ++ self._load_drivers() + self.context = context.get_admin_context_without_session() + self.metering_loop = loopingcall.FixedIntervalLoopingCall( + self._metering_loop +diff --git a/neutron/services/metering/drivers/iptables/iptables_driver.py b/neutron/services/metering/drivers/iptables/iptables_driver.py +index 963c16c31..ec73e6f42 100644 +--- a/neutron/services/metering/drivers/iptables/iptables_driver.py ++++ b/neutron/services/metering/drivers/iptables/iptables_driver.py +@@ -97,6 +97,8 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + LOG.info(_("Loading interface driver %s"), self.conf.interface_driver) + self.driver = importutils.import_object(self.conf.interface_driver, + self.conf) ++ self.dummy_iptables_manager = NfacctIptablesManager( ++ root_helper=plugin.root_helper) + + def _update_router(self, router): + r = self.routers.get(router['id'], +@@ -264,21 +266,36 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + @log.log + def get_traffic_counters(self, context, routers): + accs = {} ++ router_label_map = {} + routers_to_reconfigure = [] + +- pool = eventlet.greenpool.GreenPool() +- for successful, router_id, acc in pool.imap( +- self.get_traffic_counter, routers +- ): +- if not successful: ++ # Hack for kernel without nfacct per netns support. 
++ # Kernel commit 3499abb249bb5ed9d21031944bc3059ec4aa2909 ++ # Count metering label only once. ++ label_ids = set() ++ for router in routers: ++ router_id = router['id'] ++ rm = self.routers.get(router_id) ++ if not rm: ++ continue ++ router_label_ids = set(rm.get_metering_labels()) ++ router_label_map[router_id] = router_label_ids ++ label_ids.update(router_label_ids) ++ ++ accs = self.dummy_iptables_manager.get_result(label_ids) ++ if accs is None: ++ accs = {} ++ ++ for router_id, label_ids in router_label_map.items(): ++ missing_labels = label_ids - set(accs.keys()) ++ if missing_labels: + routers_to_reconfigure.append(router_id) + LOG.exception(_('Failed to get traffic counters, ' + 'router: %s'), router_id) +- for label_id, label_acc in acc.items(): +- acc = accs.get(label_id, {'pkts': 0, 'bytes': 0}) +- acc['pkts'] += label_acc['pkts'] +- acc['bytes'] += label_acc['bytes'] +- accs[label_id] = acc ++ for label_id in missing_labels: ++ LOG.warn("Missing counter for label %(label_id)s, " ++ "router %(router_id)s.", ++ {'label_id': label_id, 'router_id': router_id}) + + for router_id in routers_to_reconfigure: + self.routers.pop(router_id, None) +-- +2.11.0 (Apple Git-81) + diff --git a/packaging/openstack-neutron/0129-metering-parallelize-workloads-in-iptables-drivers.patch b/packaging/openstack-neutron/0129-metering-parallelize-workloads-in-iptables-drivers.patch new file mode 100644 index 0000000..1d15b4a --- /dev/null +++ b/packaging/openstack-neutron/0129-metering-parallelize-workloads-in-iptables-drivers.patch @@ -0,0 +1,254 @@ +From 4e7380a4ff3bba06d36aa34da429829c0fad0f4e Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Thu, 14 Sep 2017 09:49:22 +0800 +Subject: [PATCH 129/129] metering: parallelize workloads in *iptables drivers + +Fixes: redmine #10956 + +Signed-off-by: Hunt Xu +--- + neutron/agent/linux/nfacct.py | 8 ++- + neutron/services/metering/agents/metering_agent.py | 9 +++- + .../drivers/iptables/es_iptables_driver.py | 58 +++++++++++++--------- + .../metering/drivers/iptables/iptables_driver.py | 49 +++++++++++------- + 4 files changed, 80 insertions(+), 44 deletions(-) + +diff --git a/neutron/agent/linux/nfacct.py b/neutron/agent/linux/nfacct.py +index 2595f7724..786fc1272 100644 +--- a/neutron/agent/linux/nfacct.py ++++ b/neutron/agent/linux/nfacct.py +@@ -119,4 +119,10 @@ class NfacctIptablesManager(iptables_manager.IptablesManager, + self.add_nfacct_objects(self.nfacct_objects) + self.nfacct_objects = set() + super(NfacctIptablesManager, self).apply() +- self.nfacct_flush() ++ # Hack for kernel doesn't support nfacct per net namespace. ++ # We cannot flush nfacct now because iptables-save processes ++ # in other net namespaces may not finish yet. Flushing now would ++ # delete all newly added nfacct objects and make iptables-save in ++ # other net namespaces fail. ++ # The following line is keep here for reference. 
++ # self.nfacct_flush() +diff --git a/neutron/services/metering/agents/metering_agent.py b/neutron/services/metering/agents/metering_agent.py +index 270ec781a..5044d4861 100644 +--- a/neutron/services/metering/agents/metering_agent.py ++++ b/neutron/services/metering/agents/metering_agent.py +@@ -21,6 +21,7 @@ eventlet.monkey_patch() + from oslo.config import cfg + + from neutron.agent.common import config ++from neutron.agent.linux.nfacct import NfacctIptablesManager + from neutron.agent import rpc as agent_rpc + from neutron.common import config as common_config + from neutron.common import constants as constants +@@ -88,6 +89,9 @@ class MeteringAgent(MeteringPluginRpc, manager.Manager): + self.label_tenant_id = {} + self.routers = {} + self.metering_infos = {} ++ # Hack for kernel doesn't support nfacct per net namespace ++ self.dummy_iptables_manager = \ ++ self.metering_driver.dummy_iptables_manager + super(MeteringAgent, self).__init__(host=host) + + def _load_drivers(self): +@@ -173,7 +177,10 @@ class MeteringAgent(MeteringPluginRpc, manager.Manager): + @utils.synchronized('metering-agent') + def _invoke_driver(self, context, meterings, func_name): + try: +- return getattr(self.metering_driver, func_name)(context, meterings) ++ ret = getattr(self.metering_driver, func_name)(context, meterings) ++ # Hack for kernel doesn't support nfacct per net namespace ++ self.dummy_iptables_manager.nfacct_flush() ++ return ret + except AttributeError: + LOG.exception(_("Driver %(driver)s does not implement %(func)s"), + {'driver': self.conf.driver, +diff --git a/neutron/services/metering/drivers/iptables/es_iptables_driver.py b/neutron/services/metering/drivers/iptables/es_iptables_driver.py +index 9d5c0fa34..116452814 100644 +--- a/neutron/services/metering/drivers/iptables/es_iptables_driver.py ++++ b/neutron/services/metering/drivers/iptables/es_iptables_driver.py +@@ -15,6 +15,8 @@ + + import six + ++import eventlet ++ + from neutron.agent.linux import iptables_manager + from neutron.agent.linux.nfacct import NfacctMixin + from neutron.common import constants as constants +@@ -67,6 +69,28 @@ class EsIptablesMeteringDriver(iptables_driver.IptablesMeteringDriver): + self.routers[r.id] = r + return r + ++ def _green_update_es_router(self, router): ++ old_rm = self.routers.get(router['id']) ++ if old_rm: ++ old_es_metering_labels = set(old_rm.es_metering_labels.keys()) ++ persist_labels = set() ++ with iptables_driver.IptablesManagerTransaction( ++ old_rm.iptables_manager ++ ): ++ labels = router.get(constants.ES_METERING_LABEL_KEY, []) ++ for label in labels: ++ label_id = label['id'] ++ if label_id in old_es_metering_labels: ++ persist_labels.add(label_id) ++ else: ++ self._add_es_metering_label(old_rm, label) ++ ++ for label_id in old_es_metering_labels - persist_labels: ++ self._remove_es_metering_label(old_rm, label_id) ++ ++ else: ++ self._process_associate_es_metering_label(router) ++ + @log.log + def update_routers(self, context, routers): + """Deal with the EayunStack metering extension.""" +@@ -79,30 +103,14 @@ class EsIptablesMeteringDriver(iptables_driver.IptablesMeteringDriver): + router_ids = set(router['id'] for router in routers) + for router_id, rm in six.iteritems(self.routers): + if router_id not in router_ids: +- self._process_disassociate_es_metering_label(rm.router) ++ self.green_pool.spawn_n( ++ self._process_disassociate_es_metering_label, rm.router) ++ self.green_pool.waitall() + + # Added or updated routers + for router in routers: +- old_rm = 
self.routers.get(router['id']) +- if old_rm: +- old_es_metering_labels = set(old_rm.es_metering_labels.keys()) +- persist_labels = set() +- with iptables_driver.IptablesManagerTransaction( +- old_rm.iptables_manager +- ): +- labels = router.get(constants.ES_METERING_LABEL_KEY, []) +- for label in labels: +- label_id = label['id'] +- if label_id in old_es_metering_labels: +- persist_labels.add(label_id) +- else: +- self._add_es_metering_label(old_rm, label) +- +- for label_id in old_es_metering_labels - persist_labels: +- self._remove_es_metering_label(old_rm, label_id) +- +- else: +- self._process_associate_es_metering_label(router) ++ self.green_pool.spawn_n(self._green_update_es_router, router) ++ self.green_pool.waitall() + + @staticmethod + def _get_es_meter_rule(label): +@@ -169,9 +177,13 @@ class EsIptablesMeteringDriver(iptables_driver.IptablesMeteringDriver): + @log.log + def add_es_metering_label(self, _context, routers): + for router in routers: +- self._process_associate_es_metering_label(router) ++ self.green_pool.spawn_n( ++ self._process_associate_es_metering_label, router) ++ self.green_pool.waitall() + + @log.log + def remove_es_metering_label(self, _context, routers): + for router in routers: +- self._process_disassociate_es_metering_label(router) ++ self.green_pool.spawn_n( ++ self._process_disassociate_es_metering_label, router) ++ self.green_pool.waitall() +diff --git a/neutron/services/metering/drivers/iptables/iptables_driver.py b/neutron/services/metering/drivers/iptables/iptables_driver.py +index ec73e6f42..fc7da34cd 100644 +--- a/neutron/services/metering/drivers/iptables/iptables_driver.py ++++ b/neutron/services/metering/drivers/iptables/iptables_driver.py +@@ -99,6 +99,7 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + self.conf) + self.dummy_iptables_manager = NfacctIptablesManager( + root_helper=plugin.root_helper) ++ self.green_pool = eventlet.greenpool.GreenPool() + + def _update_router(self, router): + r = self.routers.get(router['id'], +@@ -108,29 +109,34 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + + return r + ++ def _green_update_router(self, router): ++ old_gw_port_id = None ++ old_rm = self.routers.get(router['id']) ++ if old_rm: ++ old_gw_port_id = old_rm.router['gw_port_id'] ++ gw_port_id = router['gw_port_id'] ++ ++ if gw_port_id != old_gw_port_id: ++ if old_rm: ++ with IptablesManagerTransaction(old_rm.iptables_manager): ++ self._process_disassociate_metering_label(router) ++ if gw_port_id: ++ self._process_associate_metering_label(router) ++ elif gw_port_id: ++ self._process_associate_metering_label(router) ++ + @log.log + def update_routers(self, context, routers): + # disassociate removed routers + router_ids = set(router['id'] for router in routers) + for router_id, rm in six.iteritems(self.routers): + if router_id not in router_ids: +- self._process_disassociate_metering_label(rm.router) +- ++ self.green_pool.spawn_n( ++ self._process_disassociate_metering_label, rm.router) ++ self.green_pool.waitall() + for router in routers: +- old_gw_port_id = None +- old_rm = self.routers.get(router['id']) +- if old_rm: +- old_gw_port_id = old_rm.router['gw_port_id'] +- gw_port_id = router['gw_port_id'] +- +- if gw_port_id != old_gw_port_id: +- if old_rm: +- with IptablesManagerTransaction(old_rm.iptables_manager): +- self._process_disassociate_metering_label(router) +- if gw_port_id: +- self._process_associate_metering_label(router) +- elif gw_port_id: +- 
self._process_associate_metering_label(router) ++ self.green_pool.spawn_n(self._green_update_router, router) ++ self.green_pool.waitall() + + @log.log + def remove_router(self, context, router_id): +@@ -212,12 +218,15 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + @log.log + def add_metering_label(self, context, routers): + for router in routers: +- self._process_associate_metering_label(router) ++ self.green_pool.spawn_n( ++ self._process_associate_metering_label, router) ++ self.green_pool.waitall() + + @log.log + def update_metering_label_rules(self, context, routers): + for router in routers: +- self._update_metering_label_rules(router) ++ self.green_pool.spawn_n(self._update_metering_label_rules, router) ++ self.green_pool.waitall() + + def _update_metering_label_rules(self, router): + rm = self.routers.get(router['id']) +@@ -244,7 +253,9 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + @log.log + def remove_metering_label(self, context, routers): + for router in routers: +- self._process_disassociate_metering_label(router) ++ self.green_pool.spawn_n( ++ self._process_disassociate_metering_label, router) ++ self.green_pool.waitall() + + def get_traffic_counter(self, router): + router_id = router['id'] +-- +2.11.0 (Apple Git-81) + diff --git a/packaging/openstack-neutron/0130-Get-rid-of-ml2-port-model-hook-join.patch b/packaging/openstack-neutron/0130-Get-rid-of-ml2-port-model-hook-join.patch new file mode 100644 index 0000000..89c7ffc --- /dev/null +++ b/packaging/openstack-neutron/0130-Get-rid-of-ml2-port-model-hook-join.patch @@ -0,0 +1,50 @@ +From b3bd06eee45e7e664aaad26339a15ab681fcfa65 Mon Sep 17 00:00:00 2001 +From: Kevin Benton +Date: Tue, 13 Dec 2016 18:07:42 -0800 +Subject: [PATCH 130/133] Get rid of ml2 port model hook join + +The binding is already joined to the port via a backref relationship +so we can just utilize that rather than join to the table an additional +time. + +Partial-Bug: #1649317 +Change-Id: I267a808b411f44b2128955dc93bd8da34d1fac91 +(cherry picked from commit 3ea5f7ce5627599b7e1f0f1c1d583dd5466b7d31) +Signed-off-by: Hunt Xu +--- + neutron/plugins/ml2/plugin.py | 11 +++-------- + 1 file changed, 3 insertions(+), 8 deletions(-) + +diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py +index 9d37c871e..e54c4d9ed 100644 +--- a/neutron/plugins/ml2/plugin.py ++++ b/neutron/plugins/ml2/plugin.py +@@ -446,22 +446,17 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, + # registration of hooks in portbindings_db.py used by other + # plugins. 
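The hunk that follows applies the reasoning from patch 0130's commit message: PortBinding is already reachable from Port through a backref, so the query can filter through that relationship instead of joining the binding table a second time. A minimal sketch of the idea, using simplified stand-in models rather than the real neutron definitions:

import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Port(Base):
    # Simplified stand-in for neutron's Port model.
    __tablename__ = 'ports'
    id = sa.Column(sa.String(36), primary_key=True)


class PortBinding(Base):
    # Simplified stand-in for the ml2 PortBinding model.
    __tablename__ = 'ml2_port_bindings'
    port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'),
                        primary_key=True)
    host = sa.Column(sa.String(255))
    port = orm.relationship(
        Port, backref=orm.backref('port_binding', uselist=False))


def ports_bound_to_hosts(session, hosts):
    # Old approach: add an extra outerjoin onto the binding table, i.e.
    #   session.query(Port).outerjoin(PortBinding).filter(
    #       PortBinding.host.in_(hosts))
    # New approach: filter through the existing scalar relationship;
    # SQLAlchemy renders this as an EXISTS subquery, so no extra join
    # is added to the main statement.
    return session.query(Port).filter(
        Port.port_binding.has(PortBinding.host.in_(hosts)))

This mirrors what the hunk does with models_v2.Port.port_binding.has(bind_criteria) in the result filter hook.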
+ +- def _ml2_port_model_hook(self, context, original_model, query): +- query = query.outerjoin(models.PortBinding, +- (original_model.id == +- models.PortBinding.port_id)) +- return query +- + def _ml2_port_result_filter_hook(self, query, filters): + values = filters and filters.get(portbindings.HOST_ID, []) + if not values: + return query +- return query.filter(models.PortBinding.host.in_(values)) ++ bind_criteria = models.PortBinding.host.in_(values) ++ return query.filter(models_v2.Port.port_binding.has(bind_criteria)) + + db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( + models_v2.Port, + "ml2_port_bindings", +- '_ml2_port_model_hook', ++ None, + None, + '_ml2_port_result_filter_hook') + +-- +2.13.5 (Apple Git-94) + diff --git a/packaging/openstack-neutron/0131-Get-rid-of-_network_model_hook-for-external_net.patch b/packaging/openstack-neutron/0131-Get-rid-of-_network_model_hook-for-external_net.patch new file mode 100644 index 0000000..6e2aea7 --- /dev/null +++ b/packaging/openstack-neutron/0131-Get-rid-of-_network_model_hook-for-external_net.patch @@ -0,0 +1,61 @@ +From 8f698ccdd021989db9d20367ba1e7a34e14eec09 Mon Sep 17 00:00:00 2001 +From: Kevin Benton +Date: Mon, 9 Jan 2017 05:30:56 -0800 +Subject: [PATCH 131/133] Get rid of _network_model_hook for external_net + +The network already has a joined relationship to the external +network table so we can leverage that instead of causing an +additional join for the filtering criteria. + +Partial-Bug: #1649317 +Change-Id: Idfee69b124f4ab8e2998da8492c5fa627f705bb9 +(cherry picked from commit 495b7863a0c9c1f4ab319bb114ff0bec442376df) +Signed-off-by: Hunt Xu + +Conflicts: + neutron/db/external_net_db.py +Signed-off-by: Hunt Xu +--- + neutron/db/external_net_db.py | 12 +++--------- + 1 file changed, 3 insertions(+), 9 deletions(-) + +diff --git a/neutron/db/external_net_db.py b/neutron/db/external_net_db.py +index 53f389536..b38cc07f7 100644 +--- a/neutron/db/external_net_db.py ++++ b/neutron/db/external_net_db.py +@@ -48,12 +48,6 @@ class ExternalNetwork(model_base.BASEV2): + class External_net_db_mixin(object): + """Mixin class to add external network methods to db_base_plugin_v2.""" + +- def _network_model_hook(self, context, original_model, query): +- query = query.outerjoin(ExternalNetwork, +- (original_model.id == +- ExternalNetwork.network_id)) +- return query +- + def _network_filter_hook(self, context, original_model, conditions): + if conditions is not None and not hasattr(conditions, '__iter__'): + conditions = (conditions, ) +@@ -68,8 +62,8 @@ class External_net_db_mixin(object): + if not vals: + return query + if vals[0]: +- return query.filter((ExternalNetwork.network_id != expr.null())) +- return query.filter((ExternalNetwork.network_id == expr.null())) ++ return query.filter(models_v2.Network.external.has()) ++ return query.filter(~models_v2.Network.external.has()) + + # TODO(salvatore-orlando): Perform this operation without explicitly + # referring to db_base_plugin_v2, as plugins that do not extend from it +@@ -77,7 +71,7 @@ class External_net_db_mixin(object): + db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( + models_v2.Network, + "external_net", +- '_network_model_hook', ++ None, + '_network_filter_hook', + '_network_result_filter_hook') + +-- +2.13.5 (Apple Git-94) + diff --git a/packaging/openstack-neutron/0132-Get-rid-of-additional-fixed_ip-filter-join.patch b/packaging/openstack-neutron/0132-Get-rid-of-additional-fixed_ip-filter-join.patch new file mode 100644 index 0000000..0d3e0c6 
--- /dev/null +++ b/packaging/openstack-neutron/0132-Get-rid-of-additional-fixed_ip-filter-join.patch @@ -0,0 +1,40 @@ +From 9d8e00f60edf2b765ac8ccbf35eb46ea383302ee Mon Sep 17 00:00:00 2001 +From: Kevin Benton +Date: Mon, 9 Jan 2017 14:14:57 -0800 +Subject: [PATCH 132/133] Get rid of additional fixed_ip filter join + +Partial-Bug: #1649317 +Change-Id: I692b4b85d539af3465a48eed83e40f2ad5b87e51 +(cherry picked from commit f204728e37d4f18e741dfa295d6b3da5529efd6c) +Signed-off-by: Hunt Xu +--- + neutron/db/db_base_plugin_v2.py | 11 ++++++++--- + 1 file changed, 8 insertions(+), 3 deletions(-) + +diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py +index f213438cc..e8b83929f 100644 +--- a/neutron/db/db_base_plugin_v2.py ++++ b/neutron/db/db_base_plugin_v2.py +@@ -1452,11 +1452,16 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, + ip_addresses = fixed_ips.get('ip_address') + subnet_ids = fixed_ips.get('subnet_id') + if ip_addresses or subnet_ids: +- query = query.join(Port.fixed_ips) + if ip_addresses: +- query = query.filter(IPAllocation.ip_address.in_(ip_addresses)) ++ query = query.filter( ++ Port.fixed_ips.any( ++ IPAllocation.ip_address.in_(ip_addresses) ++ ) ++ ) + if subnet_ids: +- query = query.filter(IPAllocation.subnet_id.in_(subnet_ids)) ++ query = query.filter( ++ Port.fixed_ips.any(IPAllocation.subnet_id.in_(subnet_ids)) ++ ) + + query = self._apply_filters_to_query(query, Port, filters) + if limit and page_reverse and sorts: +-- +2.13.5 (Apple Git-94) + diff --git a/packaging/openstack-neutron/0133-Switch-to-subquery-for-1-M-relationships.patch b/packaging/openstack-neutron/0133-Switch-to-subquery-for-1-M-relationships.patch new file mode 100644 index 0000000..6115771 --- /dev/null +++ b/packaging/openstack-neutron/0133-Switch-to-subquery-for-1-M-relationships.patch @@ -0,0 +1,144 @@ +From b0bca831a4c4189bfb9ffd7bbdb92df16be3e373 Mon Sep 17 00:00:00 2001 +From: Kevin Benton +Date: Mon, 9 Jan 2017 05:02:42 -0800 +Subject: [PATCH 133/133] Switch to 'subquery' for 1-M relationships + +This switches to the use of subqueries for 1-m relationships +which will result in a higher constant query factor but will +eliminate the potential for cross-product explosions. 
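As the commit message above explains, lazy='joined' collections are all folded into the parent SELECT, so two eagerly joined collections on one parent multiply into a cross product of rows, while lazy='subquery' issues one extra SELECT per collection and keeps row counts linear. A rough, self-contained sketch of the trade-off (simplified models, not the exact neutron definitions):

import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Subnet(Base):
    __tablename__ = 'subnets'
    id = sa.Column(sa.String(36), primary_key=True)
    # With lazy='joined', both collections would be LEFT OUTER JOINed
    # into the same statement, yielding len(dns) * len(routes) rows per
    # subnet.  With lazy='subquery', each collection is loaded by one
    # additional SELECT and the main query stays flat.
    dns_nameservers = orm.relationship('DNSNameServer', lazy='subquery')
    routes = orm.relationship('SubnetRoute', lazy='subquery')


class DNSNameServer(Base):
    __tablename__ = 'dnsnameservers'
    address = sa.Column(sa.String(128), primary_key=True)
    subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'),
                          primary_key=True)


class SubnetRoute(Base):
    __tablename__ = 'subnetroutes'
    destination = sa.Column(sa.String(64), primary_key=True)
    subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'),
                          primary_key=True)


engine = sa.create_engine('sqlite://', echo=True)
Base.metadata.create_all(engine)
session = orm.sessionmaker(bind=engine)()
session.add(Subnet(id='subnet-1'))
session.commit()
# The echoed SQL shows one SELECT for subnets plus one per collection,
# instead of a single three-way join.
session.query(Subnet).all()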
+ +Closes-Bug: #1649317 +Change-Id: I6952c48236153a8e2f2f155375b70573ddc2cf0f +(cherry picked from commit 3ffe006743b33f48bca6fce541e0a8f64f844fb7) +Signed-off-by: Hunt Xu + +Conflicts: + neutron/db/extra_dhcp_opt/models.py + neutron/db/models/allowed_address_pair.py + neutron/db/models/l3.py + neutron/db/models/metering.py + neutron/db/models/securitygroup.py + neutron/db/models/segment.py + neutron/db/models/subnet_service_type.py + neutron/db/models/tag.py + neutron/db/models_v2.py + neutron/db/qos/models.py + neutron/plugins/ml2/models.py + neutron/services/trunk/models.py + neutron/tests/unit/db/test_l3_hamode_db.py + +Signed-off-by: Hunt Xu +--- + neutron/db/allowedaddresspairs_db.py | 2 +- + neutron/db/extradhcpopt_db.py | 2 +- + neutron/db/metering/metering_db.py | 3 ++- + neutron/db/models_v2.py | 10 ++++++---- + neutron/db/securitygroups_db.py | 2 +- + neutron/plugins/ml2/models.py | 2 +- + 6 files changed, 12 insertions(+), 9 deletions(-) + +diff --git a/neutron/db/allowedaddresspairs_db.py b/neutron/db/allowedaddresspairs_db.py +index b648c8c47..fe4295c0f 100644 +--- a/neutron/db/allowedaddresspairs_db.py ++++ b/neutron/db/allowedaddresspairs_db.py +@@ -33,7 +33,7 @@ class AllowedAddressPair(model_base.BASEV2): + port = orm.relationship( + models_v2.Port, + backref=orm.backref("allowed_address_pairs", +- lazy="joined", cascade="delete")) ++ lazy="subquery", cascade="delete")) + + + class AllowedAddressPairsMixin(object): +diff --git a/neutron/db/extradhcpopt_db.py b/neutron/db/extradhcpopt_db.py +index 6e7b23f43..ae5abaea2 100644 +--- a/neutron/db/extradhcpopt_db.py ++++ b/neutron/db/extradhcpopt_db.py +@@ -48,7 +48,7 @@ class ExtraDhcpOpt(model_base.BASEV2, models_v2.HasId): + # eagerly load extra_dhcp_opts bindings + ports = orm.relationship( + models_v2.Port, +- backref=orm.backref("dhcp_opts", lazy='joined', cascade='delete')) ++ backref=orm.backref("dhcp_opts", lazy='subquery', cascade='delete')) + + + class ExtraDhcpOptMixin(object): +diff --git a/neutron/db/metering/metering_db.py b/neutron/db/metering/metering_db.py +index 94d5d3ddc..99f7a9f1e 100644 +--- a/neutron/db/metering/metering_db.py ++++ b/neutron/db/metering/metering_db.py +@@ -46,11 +46,12 @@ class MeteringLabel(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(1024)) + rules = orm.relationship(MeteringLabelRule, backref="label", +- cascade="delete", lazy="joined") ++ cascade="delete", lazy="subquery") + routers = orm.relationship( + l3_db.Router, + primaryjoin="MeteringLabel.tenant_id==Router.tenant_id", + foreign_keys='MeteringLabel.tenant_id', ++ lazy='subquery', + uselist=True) + shared = sa.Column(sa.Boolean, default=False, server_default=sql.false()) + +diff --git a/neutron/db/models_v2.py b/neutron/db/models_v2.py +index 53efc6692..94157dd31 100644 +--- a/neutron/db/models_v2.py ++++ b/neutron/db/models_v2.py +@@ -122,7 +122,7 @@ class Port(model_base.BASEV2, HasId, HasTenant): + name = sa.Column(sa.String(255)) + network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"), + nullable=False) +- fixed_ips = orm.relationship(IPAllocation, backref='ports', lazy='joined') ++ fixed_ips = orm.relationship(IPAllocation, backref='ports', lazy='subquery') + mac_address = sa.Column(sa.String(32), nullable=False) + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + status = sa.Column(sa.String(16), nullable=False) +@@ -172,15 +172,17 @@ class Subnet(model_base.BASEV2, HasId, HasTenant): + gateway_ip = 
sa.Column(sa.String(64)) + allocation_pools = orm.relationship(IPAllocationPool, + backref='subnet', +- lazy="joined", ++ lazy="subquery", + cascade='delete') + enable_dhcp = sa.Column(sa.Boolean()) + dns_nameservers = orm.relationship(DNSNameServer, + backref='subnet', +- cascade='all, delete, delete-orphan') ++ cascade='all, delete, delete-orphan', ++ lazy='subquery') + routes = orm.relationship(SubnetRoute, + backref='subnet', +- cascade='all, delete, delete-orphan') ++ cascade='all, delete, delete-orphan', ++ lazy='subquery') + shared = sa.Column(sa.Boolean) + ipv6_ra_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC, + constants.DHCPV6_STATEFUL, +diff --git a/neutron/db/securitygroups_db.py b/neutron/db/securitygroups_db.py +index 23b5c80cb..14a7ed23a 100644 +--- a/neutron/db/securitygroups_db.py ++++ b/neutron/db/securitygroups_db.py +@@ -82,7 +82,7 @@ class SecurityGroupRule(model_base.BASEV2, models_v2.HasId, + remote_ip_prefix = sa.Column(sa.String(255)) + security_group = orm.relationship( + SecurityGroup, +- backref=orm.backref('rules', cascade='all,delete'), ++ backref=orm.backref('rules', cascade='all,delete', lazy='subquery'), + primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id") + source_group = orm.relationship( + SecurityGroup, +diff --git a/neutron/plugins/ml2/models.py b/neutron/plugins/ml2/models.py +index 80ae1bbb6..8ce472ff3 100644 +--- a/neutron/plugins/ml2/models.py ++++ b/neutron/plugins/ml2/models.py +@@ -114,5 +114,5 @@ class DVRPortBinding(model_base.BASEV2): + port = orm.relationship( + models_v2.Port, + backref=orm.backref("dvr_port_binding", +- lazy='joined', uselist=False, ++ lazy='subquery', uselist=False, + cascade='delete')) +-- +2.13.5 (Apple Git-94) + diff --git a/packaging/openstack-neutron/0134-ES-ACL-add-EayunStack-subnet-ACL-extension.patch b/packaging/openstack-neutron/0134-ES-ACL-add-EayunStack-subnet-ACL-extension.patch new file mode 100644 index 0000000..0f2c0ca --- /dev/null +++ b/packaging/openstack-neutron/0134-ES-ACL-add-EayunStack-subnet-ACL-extension.patch @@ -0,0 +1,302 @@ +From 102d22d255d2d2387b39f628f48e7187dbee6fdd Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Tue, 29 Aug 2017 12:23:09 +0800 +Subject: [PATCH 134/136] ES ACL: add EayunStack subnet ACL extension + +Fixes(1/3): redmine #11027 + +Signed-off-by: Hunt Xu +--- + neutron/extensions/es_acl.py | 259 ++++++++++++++++++++++++++++++++++++ + neutron/plugins/common/constants.py | 2 + + 2 files changed, 261 insertions(+) + create mode 100644 neutron/extensions/es_acl.py + +diff --git a/neutron/extensions/es_acl.py b/neutron/extensions/es_acl.py +new file mode 100644 +index 000000000..6ec6bcc88 +--- /dev/null ++++ b/neutron/extensions/es_acl.py +@@ -0,0 +1,259 @@ ++# Copyright (c) 2017 Eayun, Inc. ++# All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. 
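The extension module added below registers custom attribute validators, including one for port ranges of the form '80' or '80:443'. As a quick illustration of the intended accept/reject behaviour, here is a standalone restatement of that validator's logic with a few example inputs (demonstration only; the authoritative version is the _validate_acl_port_range function further below):

def validate_port_range(data):
    # Mirrors the semantics of _validate_acl_port_range: accept a single
    # port or a 'min:max' range with 1 <= port <= 65535; return an error
    # message on failure, None on success.
    if data is None:
        return None
    ports = str(data).split(':')
    for port in ports:
        try:
            if not 0 < int(port) <= 65535:
                raise ValueError
        except (ValueError, TypeError):
            return "'%s' is not a valid port number." % port
    if len(ports) > 2 or int(ports[0]) > int(ports[-1]):
        return "'%s' is not a valid port range." % data
    return None


assert validate_port_range('80') is None          # single port
assert validate_port_range('80:443') is None      # ordered range
assert validate_port_range('443:80') is not None  # reversed range
assert validate_port_range('0') is not None       # out of range
assert validate_port_range('1:2:3') is not None   # too many fields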
++ ++import abc ++import six ++ ++from neutron.api import extensions ++from neutron.api.v2 import attributes as attr ++from neutron.api.v2 import resource_helper ++from neutron.openstack.common import log as logging ++from neutron.plugins.common import constants ++from neutron.services import service_base ++ ++LOG = logging.getLogger(__name__) ++ ++ACL_VALID_ACTION_VALUES = [constants.FWAAS_ALLOW, constants.FWAAS_DENY] ++ ++ ++def _validate_acl_ipaddr(data, valid_values=None): ++ if data is None: ++ return ++ msg_ip = attr._validate_ip_address(data, valid_values) ++ if not msg_ip: ++ return ++ msg_subnet = attr._validate_subnet(data, valid_values) ++ if not msg_subnet: ++ return ++ return "%(msg_ip)s and %(msg_subnet)s" % {'msg_ip': msg_ip, ++ 'msg_subnet': msg_subnet} ++ ++ ++def _validate_acl_port_range(data, valid_values=None): ++ if data is None: ++ return ++ data = str(data) ++ ports = data.split(':') ++ for port in ports: ++ try: ++ val = int(port) ++ if val <= 0 or val > 65535: ++ raise ValueError ++ except (ValueError, TypeError): ++ return "'%s' is not a valid port number." % port ++ ++ if len(ports) > 2 or int(ports[0]) > int(ports[-1]): ++ return "'%s' is not a valid port range." % data ++ ++ ++def _convert_to_string(value): ++ return str(value) if value is not None else None ++ ++ ++def _convert_to_lower_string(value): ++ return value.lower() ++ ++ ++attr.validators['type:acl_ipaddr'] = _validate_acl_ipaddr ++attr.validators['type:acl_port_range'] = _validate_acl_port_range ++ ++RESOURCE_ATTRIBUTE_MAP = { ++ 'es_acls': { ++ 'id': {'allow_post': False, 'allow_put': False, ++ 'is_visible': True, 'primary_key': True}, ++ 'name': {'allow_post': True, 'allow_put': True, ++ 'default': '', 'is_visible': True}, ++ 'tenant_id': {'allow_post': True, 'allow_put': False, ++ 'is_visible': True, 'required_by_policy': True}, ++ 'subnets': {'allow_post': False, 'allow_put': False, ++ 'is_visible': True}, ++ 'ingress_rules': {'allow_post': False, 'allow_put': False, ++ 'is_visible': True}, ++ 'egress_rules': {'allow_post': False, 'allow_put': False, ++ 'is_visible': True}, ++ }, ++ 'es_acl_rules': { ++ 'id': {'allow_post': False, 'allow_put': False, ++ 'is_visible': True, 'primary_key': True}, ++ 'name': {'allow_post': True, 'allow_put': True, ++ 'default': '', 'is_visible': True}, ++ 'tenant_id': {'allow_post': True, 'allow_put': False, ++ 'is_visible': True, 'required_by_policy': True}, ++ 'acl_id': {'allow_post': True, 'allow_put': True, ++ 'default': None, 'validate': {'type:uuid_or_none': None}, ++ 'is_visible': True}, ++ 'position': {'allow_post': True, 'allow_put': True, ++ 'default': None, ++ 'validate': {'type:range_or_none': [0, 255]}, ++ 'convert_to': attr.convert_to_int_if_not_none, ++ 'is_visible': True}, ++ 'direction': {'allow_post': True, 'allow_put': True, ++ 'validate': {'type:values': ['ingress', 'egress']}, ++ 'is_visible': True}, ++ 'protocol': {'allow_post': True, 'allow_put': True, ++ 'default': None, ++ 'validate': {'type:range_or_none': [0, 255]}, ++ 'convert_to': attr.convert_to_int_if_not_none, ++ 'is_visible': True}, ++ 'source_ip_address': {'allow_post': True, 'allow_put': True, ++ 'default': None, ++ 'validate': {'type:acl_ipaddr': None}, ++ 'is_visible': True}, ++ 'destination_ip_address': {'allow_post': True, 'allow_put': True, ++ 'default': None, ++ 'validate': {'type:acl_ipaddr': None}, ++ 'is_visible': True}, ++ 'source_port': {'allow_post': True, 'allow_put': True, ++ 'default': None, ++ 'validate': {'type:acl_port_range': None}, ++ 'convert_to': 
_convert_to_string, ++ 'is_visible': True}, ++ 'destination_port': {'allow_post': True, 'allow_put': True, ++ 'default': None, ++ 'validate': {'type:acl_port_range': None}, ++ 'convert_to': _convert_to_string, ++ 'is_visible': True}, ++ 'action': {'allow_post': True, 'allow_put': True, ++ 'validate': {'type:values': ACL_VALID_ACTION_VALUES}, ++ 'convert_to': _convert_to_lower_string, ++ 'is_visible': True}, ++ } ++} ++ ++ ++class Es_acl(extensions.ExtensionDescriptor): ++ ++ @classmethod ++ def get_name(cls): ++ return "EayunStack Neutron Subnet ACL" ++ ++ @classmethod ++ def get_alias(cls): ++ return "es-acl" ++ ++ @classmethod ++ def get_description(cls): ++ return "Eayunstack Neutron Subnet ACL extension." ++ ++ @classmethod ++ def get_namespace(cls): ++ return "https://github.com/eayunstack" ++ ++ @classmethod ++ def get_updated(cls): ++ return "2017-08-24:00:00-00:00" ++ ++ @classmethod ++ def get_plugin_interface(cls): ++ return EsAclPluginBase ++ ++ @classmethod ++ def get_resources(cls): ++ """Returns Ext Resources.""" ++ plural_mappings = resource_helper.build_plural_mappings( ++ {}, RESOURCE_ATTRIBUTE_MAP) ++ attr.PLURALS.update(plural_mappings) ++ action_map = {'es_acl': {'bind_subnets': 'PUT', ++ 'unbind_subnets': 'PUT'}} ++ return resource_helper.build_resource_info(plural_mappings, ++ RESOURCE_ATTRIBUTE_MAP, ++ constants.ES_ACL, ++ action_map=action_map, ++ register_quota=True) ++ ++ def update_attributes_map(self, extended_attributes, ++ extension_attrs_map=None): ++ super(Es_acl, self).update_attributes_map( ++ extended_attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) ++ ++ def get_extended_resources(self, version): ++ return RESOURCE_ATTRIBUTE_MAP if version == "2.0" else {} ++ ++ ++@six.add_metaclass(abc.ABCMeta) ++class EsAclPluginBase(service_base.ServicePluginBase): ++ ++ def get_plugin_name(self): ++ return constants.ES_ACL ++ ++ def get_plugin_description(self): ++ return constants.ES_ACL ++ ++ def get_plugin_type(self): ++ return constants.ES_ACL ++ ++ @abc.abstractmethod ++ def create_es_acl(self, context, es_acl): ++ """Create an EayunStack subnet ACL.""" ++ pass ++ ++ @abc.abstractmethod ++ def update_es_acl(self, context, es_acl_id, es_acl): ++ """Update an EayunStack subnet ACL.""" ++ pass ++ ++ @abc.abstractmethod ++ def delete_es_acl(self, context, es_acl_id): ++ """Delete an EayunStack subnet ACL.""" ++ pass ++ ++ @abc.abstractmethod ++ def get_es_acl(self, context, es_acl_id, fields=None): ++ """Get an EayunStack subnet ACL.""" ++ pass ++ ++ @abc.abstractmethod ++ def get_es_acls(self, context, filters=None, fields=None, ++ sorts=None, limit=None, marker=None, ++ page_reverse=False): ++ """List EayunStack subnet ACLs.""" ++ pass ++ ++ @abc.abstractmethod ++ def bind_subnets(self, context, es_acl_id, subnet_ids): ++ """Bind subnets to ACL.""" ++ pass ++ ++ @abc.abstractmethod ++ def unbind_subnets(self, context, es_acl_id, subnet_ids): ++ """Unbind subnets from ACL.""" ++ pass ++ ++ @abc.abstractmethod ++ def create_es_acl_rule(self, context, es_acl_rule): ++ """Create an EayunStack subnet ACL rule.""" ++ pass ++ ++ @abc.abstractmethod ++ def update_es_acl_rule(self, context, es_acl_rule_id, es_acl_rule): ++ """Update an EayunStack subnet ACL rule.""" ++ pass ++ ++ @abc.abstractmethod ++ def delete_es_acl_rule(self, context, es_acl_rule_id): ++ """Delete an EayunStack subnet ACL rule.""" ++ pass ++ ++ @abc.abstractmethod ++ def get_es_acl_rule(self, context, es_acl_rule_id, fields=None): ++ """Get an EayunStack subnet ACL rule.""" ++ pass ++ ++ 
@abc.abstractmethod ++ def get_es_acl_rules(self, context, filters=None, fields=None, ++ sorts=None, limit=None, marker=None, ++ page_reverse=False): ++ """List EayunStack subnet ACL rules.""" ++ pass +diff --git a/neutron/plugins/common/constants.py b/neutron/plugins/common/constants.py +index 110addb4d..7802d60e3 100644 +--- a/neutron/plugins/common/constants.py ++++ b/neutron/plugins/common/constants.py +@@ -22,6 +22,7 @@ VPN = "VPN" + METERING = "METERING" + L3_ROUTER_NAT = "L3_ROUTER_NAT" + ES_METERING = "ES_METERING" ++ES_ACL = "ES_ACL" + + + #maps extension alias to service type +@@ -41,6 +42,7 @@ ALLOWED_SERVICES = [CORE, DUMMY, LOADBALANCER, FIREWALL, VPN, METERING, + COMMON_PREFIXES = { + CORE: "", + DUMMY: "/dummy_svc", ++ ES_ACL: "/fw", + LOADBALANCER: "/lb", + FIREWALL: "/fw", + VPN: "/vpn", +-- +2.13.5 (Apple Git-94) + diff --git a/packaging/openstack-neutron/0135-ES-ACL-add-database-operations-related-to-subnet-ACL.patch b/packaging/openstack-neutron/0135-ES-ACL-add-database-operations-related-to-subnet-ACL.patch new file mode 100644 index 0000000..7ae0650 --- /dev/null +++ b/packaging/openstack-neutron/0135-ES-ACL-add-database-operations-related-to-subnet-ACL.patch @@ -0,0 +1,584 @@ +From f6ced6a6782466268d868dd2e6af113bba0711e0 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Tue, 5 Sep 2017 11:42:50 +0800 +Subject: [PATCH 135/136] ES ACL: add database operations related to subnet ACL + +Fixes(2/3): redmine #11027 + +Signed-off-by: Hunt Xu +--- + neutron/db/es_acl_db.py | 418 +++++++++++++++++++++ + .../1ec373736f8b_add_eayunstack_acl_support.py | 96 +++++ + .../db/migration/alembic_migrations/versions/HEAD | 2 +- + neutron/extensions/es_acl.py | 13 + + 4 files changed, 528 insertions(+), 1 deletion(-) + create mode 100644 neutron/db/es_acl_db.py + create mode 100644 neutron/db/migration/alembic_migrations/versions/1ec373736f8b_add_eayunstack_acl_support.py + +diff --git a/neutron/db/es_acl_db.py b/neutron/db/es_acl_db.py +new file mode 100644 +index 000000000..af2ffe175 +--- /dev/null ++++ b/neutron/db/es_acl_db.py +@@ -0,0 +1,418 @@ ++# Copyright (c) 2017 Eayun, Inc. ++# All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. 
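The database layer introduced below keeps ACL rules ordered per direction through SQLAlchemy's ordering_list collection, so each rule's position column is maintained as rules are appended, moved, or removed, and reorder() renumbers the whole list after a change. A small self-contained sketch of that behaviour, with deliberately simplified stand-in models (not the EsAcl/EsAclRule definitions that follow):

import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.orderinglist import ordering_list

Base = declarative_base()


class Rule(Base):
    __tablename__ = 'rules'
    id = sa.Column(sa.Integer, primary_key=True)
    acl_id = sa.Column(sa.Integer, sa.ForeignKey('acls.id'))
    position = sa.Column(sa.Integer)


class Acl(Base):
    __tablename__ = 'acls'
    id = sa.Column(sa.Integer, primary_key=True)
    rules = orm.relationship(
        Rule, order_by=Rule.position,
        collection_class=ordering_list('position', count_from=1))


engine = sa.create_engine('sqlite://')
Base.metadata.create_all(engine)
session = orm.sessionmaker(bind=engine)()

acl = Acl(id=1)
acl.rules.append(Rule(id=1))      # assigned position 1
acl.rules.append(Rule(id=2))      # assigned position 2
acl.rules.insert(0, Rule(id=3))   # placed at the head of the list
acl.rules.pop(1)                  # drop what is now the second entry
acl.rules.reorder()               # renumber 1..N explicitly
session.add(acl)
session.commit()
assert [r.position for r in acl.rules] == [1, 2]

The patch relies on the same pattern: _process_rule_for_acl calls rules.reorder() after every insert, move, or removal.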
++ ++import sqlalchemy as sa ++from sqlalchemy import orm ++from sqlalchemy.ext.orderinglist import ordering_list ++from sqlalchemy.orm import exc ++ ++from neutron.common import constants ++from neutron.common import exceptions as n_exc ++from neutron.db import common_db_mixin as base_db ++from neutron.db import model_base ++from neutron.db import models_v2 ++from neutron.extensions import es_acl as es_acl ++from neutron.openstack.common import uuidutils ++from neutron.openstack.common import log as logging ++ ++from neutron import manager ++ ++ ++LOG = logging.getLogger(__name__) ++ ++ ++class EsAclRule(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): ++ """Represents an EayunStack ACL rule.""" ++ __tablename__ = 'es_acl_rules' ++ name = sa.Column(sa.String(255)) ++ acl_id = sa.Column(sa.String(36), sa.ForeignKey('es_acls.id')) ++ position = sa.Column(sa.Integer) ++ direction = sa.Column( ++ sa.Enum('ingress', 'egress', name='es_acl_rule_direction'), ++ nullable=False) ++ protocol = sa.Column(sa.Integer) ++ source_ip_address = sa.Column(sa.String(64)) ++ destination_ip_address = sa.Column(sa.String(64)) ++ source_port_min = sa.Column(sa.Integer) ++ source_port_max = sa.Column(sa.Integer) ++ destination_port_min = sa.Column(sa.Integer) ++ destination_port_max = sa.Column(sa.Integer) ++ action = sa.Column( ++ sa.Enum('allow', 'deny', name='es_acl_rule_action'), ++ nullable=False) ++ ++ ++class EsAcl(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): ++ """Represents an EayunStack ACL.""" ++ __tablename__ = 'es_acls' ++ name = sa.Column(sa.String(255)) ++ ingress_rules = orm.relationship( ++ EsAclRule, order_by='EsAclRule.position', lazy='subquery', ++ primaryjoin="and_(EsAclRule.acl_id==EsAcl.id, " ++ "EsAclRule.direction=='ingress')", ++ collection_class=ordering_list('position', count_from=1)) ++ egress_rules = orm.relationship( ++ EsAclRule, order_by='EsAclRule.position', lazy='subquery', ++ primaryjoin="and_(EsAclRule.acl_id==EsAcl.id, " ++ "EsAclRule.direction=='egress')", ++ collection_class=ordering_list('position', count_from=1)) ++ ++ ++class EsAclSubnetBinding(model_base.BASEV2): ++ """Represents a binding between a neutron subnet and an Eayunstack ACL.""" ++ __tablename__ = 'es_acl_subnet_bindings' ++ subnet_id = sa.Column( ++ sa.String(36), sa.ForeignKey('subnets.id', ondelete='CASCADE'), ++ primary_key=True) ++ acl_id = sa.Column( ++ sa.String(36), sa.ForeignKey('es_acls.id'), nullable=False) ++ router_id = sa.Column( ++ sa.String(36), sa.ForeignKey('routers.id', ondelete='SET NULL')) ++ router_port_id = sa.Column( ++ sa.String(36), sa.ForeignKey('ports.id', ondelete='SET NULL')) ++ ++ acl = orm.relationship( ++ EsAcl, backref=orm.backref('bindings', lazy='subquery', uselist=True)) ++ ++ ++class EsAclDbMixin(es_acl.EsAclPluginBase, base_db.CommonDbMixin): ++ ++ @property ++ def _core_plugin(self): ++ return manager.NeutronManager.get_plugin() ++ ++ def _get_es_acl(self, context, es_acl_id): ++ try: ++ return self._get_by_id(context, EsAcl, es_acl_id) ++ except exc.NoResultFound: ++ raise es_acl.AclNotFound(acl_id=es_acl_id) ++ ++ def _make_es_acl_dict(self, acl_db, fields=None): ++ res = {'id': acl_db.id, ++ 'name': acl_db.name, ++ 'tenant_id': acl_db.tenant_id, ++ 'subnets': [binding.subnet_id for binding in acl_db.bindings], ++ 'ingress_rules': [rule.id for rule in acl_db.ingress_rules], ++ 'egress_rules': [rule.id for rule in acl_db.egress_rules]} ++ return self._fields(res, fields) ++ ++ def create_es_acl(self, context, es_acl): ++ """Create an EayunStack 
subnet ACL.""" ++ acl = es_acl['es_acl'] ++ tenant_id = self._get_tenant_id_for_create(context, acl) ++ with context.session.begin(subtransactions=True): ++ acl_db = EsAcl(id=uuidutils.generate_uuid(), ++ tenant_id=tenant_id, ++ name=acl['name']) ++ context.session.add(acl_db) ++ return self._make_es_acl_dict(acl_db) ++ ++ def update_es_acl(self, context, es_acl_id, es_acl): ++ """Update an EayunStack subnet ACL.""" ++ acl = es_acl['es_acl'] ++ acl_db = self._get_es_acl(context, es_acl_id) ++ with context.session.begin(subtransactions=True): ++ acl_db.update(acl) ++ return self._make_es_acl_dict(acl_db) ++ ++ def delete_es_acl(self, context, es_acl_id): ++ """Delete an EayunStack subnet ACL.""" ++ acl_db = self._get_es_acl(context, es_acl_id) ++ if acl_db.bindings: ++ raise es_acl.AclInUse( ++ acl_id=es_acl_id, ++ subnets=[binding.subnet_id for binding in acl_db.bindings]) ++ with context.session.begin(subtransactions=True): ++ context.session.delete(acl_db) ++ ++ def get_es_acl(self, context, es_acl_id, fields=None): ++ """Get an EayunStack subnet ACL.""" ++ acl_db = self._get_es_acl(context, es_acl_id) ++ return self._make_es_acl_dict(acl_db, fields) ++ ++ def get_es_acls(self, context, filters=None, fields=None, ++ sorts=None, limit=None, marker=None, ++ page_reverse=False): ++ """List EayunStack subnet ACLs.""" ++ marker_object = self._get_marker_obj(context, 'es_acl', limit, marker) ++ return self._get_collection( ++ context, EsAcl, self._make_es_acl_dict, ++ filters=filters, fields=fields, sorts=sorts, ++ limit=limit, marker_obj=marker_object, page_reverse=page_reverse) ++ ++ def bind_subnets(self, context, es_acl_id, subnet_ids): ++ """Bind subnets to ACL.""" ++ subnet_ids = subnet_ids['subnet_ids'] ++ bound_subnets = [] ++ with context.session.begin(subtransactions=True): ++ acl_db = self._get_es_acl(context, es_acl_id) ++ already_bound = set( ++ binding.subnet_id for binding in acl_db.bindings) ++ for subnet_id in subnet_ids: ++ failed_msg = ('ACL %(acl_id)s failed to bind to subnet ' ++ 'subnet %(subnet_id)s: %(reason)s.') ++ failed_reason = None ++ ++ if subnet_id in already_bound: ++ failed_reason = 'already bound' ++ ++ if failed_reason is None: ++ try: ++ subnet = self._core_plugin._get_subnet( ++ context, subnet_id) ++ except n_exc.SubnetNotFound: ++ failed_reason = 'subnet not found' ++ ++ if failed_reason is None: ++ if acl_db.tenant_id != subnet.tenant_id: ++ failed_reason = 'not the same tenant' ++ ++ if failed_reason: ++ LOG.warn(failed_msg, {'acl_id': es_acl_id, ++ 'subnet_id': subnet_id, ++ 'reason': failed_reason}) ++ continue ++ ++ router_port = self._core_plugin._get_ports_query( ++ context, ++ filters={ ++ 'fixed_ips': {'subnet_id': [subnet_id], ++ 'ip_address': [subnet.gateway_ip]}, ++ 'device_owner': [constants.DEVICE_OWNER_ROUTER_INTF] ++ } ++ ).first() ++ router_id = None ++ router_port_id = None ++ if router_port: ++ router_id = router_port.routerport.router_id ++ router_port_id = router_port.id ++ binding_db = EsAclSubnetBinding( ++ subnet_id=subnet_id, ++ acl_id=es_acl_id, ++ router_id=router_id, ++ router_port_id=router_port_id) ++ context.session.add(binding_db) ++ bound_subnets.append(subnet_id) ++ return {'bound_subnets': bound_subnets} ++ ++ def unbind_subnets(self, context, es_acl_id, subnet_ids): ++ """Unbind subnets from ACL.""" ++ subnet_ids = subnet_ids['subnet_ids'] ++ unbound_subnets = [] ++ with context.session.begin(subtransactions=True): ++ acl_db = self._get_es_acl(context, es_acl_id) ++ subnet_ids = set(subnet_ids) ++ for binding in 
acl_db.bindings: ++ subnet_id = binding.subnet_id ++ if subnet_id in subnet_ids: ++ context.session.delete(binding) ++ subnet_ids.remove(subnet_id) ++ unbound_subnets.append(subnet_id) ++ if subnet_ids: ++ LOG.warn('ACL %(acl_id)s is not bound to ' ++ 'subnet(s) %(subnet_ids)s.', ++ {'acl_id': es_acl_id, 'subnet_ids': subnet_ids}) ++ return {'unbound_subnets': unbound_subnets} ++ ++ def _get_es_acl_rule(self, context, es_acl_rule_id): ++ try: ++ return self._get_by_id(context, EsAclRule, es_acl_rule_id) ++ except exc.NoResultFound: ++ raise es_acl.AclRuleNotFound(acl_rule_id=es_acl_rule_id) ++ ++ @staticmethod ++ def _ports_to_range(port_min, port_max): ++ if port_min is None: ++ return ++ elif port_min == port_max: ++ return '%d' % port_min ++ else: ++ return '%d:%d' % (port_min, port_max) ++ ++ @staticmethod ++ def _range_to_ports(port_range): ++ if port_range is not None: ++ ports = [int(port) for port in port_range.split(':')] ++ return ports[0], ports[-1] ++ else: ++ return None, None ++ ++ def _make_es_acl_rule_dict(self, acl_rule_db, fields=None): ++ source_port_range = self._ports_to_range( ++ acl_rule_db.source_port_min, acl_rule_db.source_port_max) ++ destination_port_range = self._ports_to_range( ++ acl_rule_db.destination_port_min, acl_rule_db.destination_port_max) ++ # Don't show position if acl_id is None. Case that position is not None ++ # when acl_id is, can happen when acl is deleted, acl_id would be set ++ # to None while position would not be changed. ++ acl_id = acl_rule_db.acl_id ++ position = acl_rule_db.position if acl_id is not None else None ++ res = {'id': acl_rule_db.id, ++ 'name': acl_rule_db.name, ++ 'tenant_id': acl_rule_db.tenant_id, ++ 'acl_id': acl_id, ++ 'position': position, ++ 'direction': acl_rule_db.direction, ++ 'protocol': acl_rule_db.protocol, ++ 'source_ip_address': acl_rule_db.source_ip_address, ++ 'destination_ip_address': acl_rule_db.destination_ip_address, ++ 'source_port': source_port_range, ++ 'destination_port': destination_port_range, ++ 'action': acl_rule_db.action} ++ return self._fields(res, fields) ++ ++ def _validate_acl(self, context, acl_id, tenant_id): ++ if acl_id is not None: ++ acl = self._get_es_acl(context, acl_id) ++ if acl.tenant_id != tenant_id: ++ raise es_acl.AclNotFound(acl_id=acl_id) ++ ++ def _process_rule_for_acl(self, context, acl_id, rule_db, position, ++ pos_changed=False, rule_removed=False): ++ if not acl_id: ++ return ++ with context.session.begin(subtransactions=True): ++ acl_query = context.session.query(EsAcl).with_lockmode('update') ++ acl_db = acl_query.filter_by(id=acl_id).one() ++ rules = getattr(acl_db, '%s_rules' % rule_db.direction) ++ last_pos = len(rules) ++ if rule_removed: ++ # Remove a rule from acl ++ position = rule_db.position ++ rules.pop(position - 1) ++ rule_db.update({'position': None}) ++ elif pos_changed: ++ orig_pos = rule_db.position ++ new_pos = min(position or last_pos, last_pos) ++ if orig_pos != new_pos: ++ rules.pop(orig_pos - 1) ++ if new_pos == last_pos: ++ rules.append(rule_db) ++ else: ++ rules.insert(new_pos - 1, rule_db) ++ else: ++ # Add a rule to acl ++ if position is None or position > last_pos: ++ rules.append(rule_db) ++ else: ++ rules.insert(position - 1, rule_db) ++ rules.reorder() ++ ++ def create_es_acl_rule(self, context, es_acl_rule): ++ """Create an EayunStack subnet ACL rule.""" ++ acl_rule = es_acl_rule['es_acl_rule'] ++ acl_id = acl_rule['acl_id'] ++ tenant_id = self._get_tenant_id_for_create(context, acl_rule) ++ position = acl_rule['position'] ++ direction = 
acl_rule['direction'] ++ ++ self._validate_acl(context, acl_id, tenant_id) ++ if acl_id is None and position is not None: ++ LOG.warn('Setting position without specifying acl_id is ' ++ 'meaningless, ignored.') ++ ++ source_port_min, source_port_max = self._range_to_ports( ++ acl_rule['source_port']) ++ destination_port_min, destination_port_max = self._range_to_ports( ++ acl_rule['destination_port']) ++ with context.session.begin(subtransactions=True): ++ acl_rule_db = EsAclRule( ++ id=uuidutils.generate_uuid(), ++ tenant_id=tenant_id, ++ name=acl_rule['name'], ++ direction=direction, ++ protocol=acl_rule['protocol'], ++ source_ip_address=acl_rule['source_ip_address'], ++ destination_ip_address=acl_rule['destination_ip_address'], ++ source_port_min=source_port_min, ++ source_port_max=source_port_max, ++ destination_port_min=destination_port_min, ++ destination_port_max=destination_port_max, ++ action=acl_rule['action']) ++ context.session.add(acl_rule_db) ++ self._process_rule_for_acl(context, acl_id, acl_rule_db, position) ++ return self._make_es_acl_rule_dict(acl_rule_db) ++ ++ def update_es_acl_rule(self, context, es_acl_rule_id, es_acl_rule): ++ """Update an EayunStack subnet ACL rule.""" ++ acl_rule = es_acl_rule['es_acl_rule'] ++ ++ acl_may_change = 'acl_id' in acl_rule ++ acl_id = acl_rule.pop('acl_id', None) ++ position = acl_rule.pop('position', None) ++ ++ if 'source_port' in acl_rule: ++ source_port_min, source_port_max = self._range_to_ports( ++ acl_rule.pop['source_port']) ++ acl_rule['source_port_min'] = source_port_min ++ acl_rule['source_port_max'] = source_port_max ++ if 'destination_port' in acl_rule: ++ destination_port_min, destination_port_max = self._range_to_ports( ++ acl_rule.pop['destination_port']) ++ acl_rule['destination_port_min'] = destination_port_min ++ acl_rule['destination_port_max'] = destination_port_max ++ ++ with context.session.begin(subtransactions=True): ++ acl_rule_db = self._get_es_acl_rule(context, es_acl_rule_id) ++ self._validate_acl(context, acl_id, acl_rule_db.tenant_id) ++ new_direction = acl_rule.get('direction', acl_rule_db.direction) ++ ++ _add_rule = False ++ if acl_may_change and acl_id != acl_rule_db.acl_id: ++ self._process_rule_for_acl( ++ context, acl_rule_db.acl_id, acl_rule_db, None, ++ rule_removed=True) ++ _add_rule = True ++ elif new_direction != acl_rule_db.direction: ++ acl_id = acl_rule_db.acl_id ++ self._process_rule_for_acl( ++ context, acl_rule_db.acl_id, acl_rule_db, None, ++ rule_removed=True) ++ _add_rule = True ++ elif position is not None and position != acl_rule_db.position: ++ self._process_rule_for_acl( ++ context, acl_rule_db.acl_id, acl_rule_db, ++ position, pos_changed=True) ++ ++ acl_rule_db.update(acl_rule) ++ if _add_rule: ++ self._process_rule_for_acl( ++ context, acl_id, acl_rule_db, position) ++ return self._make_es_acl_rule_dict(acl_rule_db) ++ ++ def delete_es_acl_rule(self, context, es_acl_rule_id): ++ """Delete an EayunStack subnet ACL rule.""" ++ acl_rule_db = self._get_es_acl_rule(context, es_acl_rule_id) ++ with context.session.begin(subtransactions=True): ++ self._process_rule_for_acl( ++ context, acl_rule_db.acl_id, acl_rule_db, None, ++ rule_removed=True) ++ context.session.delete(acl_rule_db) ++ ++ def get_es_acl_rule(self, context, es_acl_rule_id, fields=None): ++ """Get an EayunStack subnet ACL rule.""" ++ acl_rule_db = self._get_es_acl_rule(context, es_acl_rule_id) ++ return self._make_es_acl_rule_dict(acl_rule_db, fields) ++ ++ def get_es_acl_rules(self, context, filters=None, fields=None, 
++ sorts=None, limit=None, marker=None, ++ page_reverse=False): ++ """List EayunStack subnet ACL rules.""" ++ marker_object = self._get_marker_obj( ++ context, 'es_acl_rule', limit, marker) ++ return self._get_collection( ++ context, EsAclRule, self._make_es_acl_rule_dict, ++ filters=filters, fields=fields, sorts=sorts, ++ limit=limit, marker_obj=marker_object, page_reverse=page_reverse) +diff --git a/neutron/db/migration/alembic_migrations/versions/1ec373736f8b_add_eayunstack_acl_support.py b/neutron/db/migration/alembic_migrations/versions/1ec373736f8b_add_eayunstack_acl_support.py +new file mode 100644 +index 000000000..bbdff7bb4 +--- /dev/null ++++ b/neutron/db/migration/alembic_migrations/versions/1ec373736f8b_add_eayunstack_acl_support.py +@@ -0,0 +1,96 @@ ++# Copyright (c) 2017 Eayun, Inc. ++# All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++# ++ ++"""add EayunStack ACL support ++ ++Revision ID: 1ec373736f8b ++Revises: 0ffcc7f9a449 ++Create Date: 2017-09-21 14:17:22.862068 ++ ++""" ++ ++# revision identifiers, used by Alembic. ++revision = '1ec373736f8b' ++down_revision = '0ffcc7f9a449' ++ ++from alembic import op ++import sqlalchemy as sa ++ ++ ++direction = sa.Enum('ingress', 'egress', name='es_acl_rule_direction') ++action = sa.Enum('allow', 'deny', name='es_acl_rule_action') ++ ++ ++def upgrade(): ++ op.create_table( ++ 'es_acls', ++ sa.Column('id', sa.String(length=36), nullable=False), ++ sa.Column('tenant_id', sa.String(length=255), nullable=False), ++ sa.Column('name', sa.String(length=255), nullable=True), ++ sa.PrimaryKeyConstraint('id') ++ ) ++ op.create_table( ++ 'es_acl_subnet_bindings', ++ sa.Column('subnet_id', sa.String(length=36), nullable=False), ++ sa.Column('acl_id', sa.String(length=36), nullable=False), ++ sa.Column('router_id', sa.String(length=36), nullable=True), ++ sa.Column('router_port_id', sa.String(length=36), nullable=True), ++ sa.ForeignKeyConstraint( ++ ['subnet_id'], ['subnets.id'], ++ name='fk-eayun_acl_subnet_bindings-subnet_id-subents', ++ ondelete='CASCADE'), ++ sa.ForeignKeyConstraint( ++ ['acl_id'], ['es_acls.id'], ++ name='fk-eayun_acl_subnet_bindings-acl_id-es_acls'), ++ sa.ForeignKeyConstraint( ++ ['router_id'], ['routers.id'], ++ name='fk-eayun_acl_subnet_bindings-router_id-routers', ++ ondelete='SET NULL'), ++ sa.ForeignKeyConstraint( ++ ['router_port_id'], ['ports.id'], ++ name='fk-eayun_acl_subnet_bindings-router_port_id-ports', ++ ondelete='SET NULL'), ++ sa.PrimaryKeyConstraint('subnet_id') ++ ) ++ op.create_table( ++ 'es_acl_rules', ++ sa.Column('id', sa.String(length=36), nullable=False), ++ sa.Column('tenant_id', sa.String(length=255), nullable=False), ++ sa.Column('name', sa.String(length=255), nullable=True), ++ sa.Column('acl_id', sa.String(length=36), nullable=True), ++ sa.Column('position', sa.Integer(), nullable=True), ++ sa.Column('direction', direction, nullable=False), ++ sa.Column('protocol', sa.Integer(), nullable=True), ++ sa.Column('source_ip_address', sa.String(length=64), nullable=True), 
++ sa.Column('destination_ip_address', sa.String(length=64), ++ nullable=True), ++ sa.Column('source_port_min', sa.Integer(), nullable=True), ++ sa.Column('source_port_max', sa.Integer(), nullable=True), ++ sa.Column('destination_port_min', sa.Integer(), nullable=True), ++ sa.Column('destination_port_max', sa.Integer(), nullable=True), ++ sa.Column('action', action, nullable=False), ++ sa.ForeignKeyConstraint( ++ ['acl_id'], ['es_acls.id'], ++ name='fk-eayun_acl_rules-acl_id-es_acls', ++ ondelete='SET NULL'), ++ sa.PrimaryKeyConstraint('id') ++ ) ++ ++ ++def downgrade(): ++ op.drop_table('es_acl_subnet_bindings') ++ op.drop_table('es_acl_rules') ++ op.drop_table('es_acls') +diff --git a/neutron/db/migration/alembic_migrations/versions/HEAD b/neutron/db/migration/alembic_migrations/versions/HEAD +index a2117f3c1..3c0ec3d13 100644 +--- a/neutron/db/migration/alembic_migrations/versions/HEAD ++++ b/neutron/db/migration/alembic_migrations/versions/HEAD +@@ -1 +1 @@ +-0ffcc7f9a449 ++1ec373736f8b +diff --git a/neutron/extensions/es_acl.py b/neutron/extensions/es_acl.py +index 6ec6bcc88..da5c91758 100644 +--- a/neutron/extensions/es_acl.py ++++ b/neutron/extensions/es_acl.py +@@ -19,6 +19,7 @@ import six + from neutron.api import extensions + from neutron.api.v2 import attributes as attr + from neutron.api.v2 import resource_helper ++from neutron.common import exceptions + from neutron.openstack.common import log as logging + from neutron.plugins.common import constants + from neutron.services import service_base +@@ -28,6 +29,18 @@ LOG = logging.getLogger(__name__) + ACL_VALID_ACTION_VALUES = [constants.FWAAS_ALLOW, constants.FWAAS_DENY] + + ++class AclNotFound(exceptions.NotFound): ++ message = _("ACL %(acl_id)s could not be found.") ++ ++ ++class AclInUse(exceptions.InUse): ++ message = _("ACL %(acl_id)s is used by subnets %(subnets)s.") ++ ++ ++class AclRuleNotFound(exceptions.NotFound): ++ message = _("ACL %(acl_rule_id)s could not be found.") ++ ++ + def _validate_acl_ipaddr(data, valid_values=None): + if data is None: + return +-- +2.13.5 (Apple Git-94) + diff --git a/packaging/openstack-neutron/0136-ES-ACL-ACL-plugin-and-L3-agent-enhancement.patch b/packaging/openstack-neutron/0136-ES-ACL-ACL-plugin-and-L3-agent-enhancement.patch new file mode 100644 index 0000000..1bd1f70 --- /dev/null +++ b/packaging/openstack-neutron/0136-ES-ACL-ACL-plugin-and-L3-agent-enhancement.patch @@ -0,0 +1,659 @@ +From a745a36326274d26fec001a177b816086a206653 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Wed, 20 Sep 2017 20:05:09 +0800 +Subject: [PATCH 136/136] ES ACL: ACL plugin and L3 agent enhancement + +Use L3 agent to setup subnet ACL rules. 
+ +Fixes(3/3): redmine #11027 + +Signed-off-by: Hunt Xu +--- + neutron/agent/l3_agent.py | 9 + + neutron/db/es_acl_db.py | 76 ++++++- + neutron/services/es_acl/__init__.py | 0 + neutron/services/es_acl/agents/__init__.py | 0 + neutron/services/es_acl/agents/es_acl_l3_agent.py | 237 ++++++++++++++++++++++ + neutron/services/es_acl/common/__init__.py | 0 + neutron/services/es_acl/common/topics.py | 16 ++ + neutron/services/es_acl/es_acl_l3_plugin.py | 157 ++++++++++++++ + 8 files changed, 493 insertions(+), 2 deletions(-) + create mode 100644 neutron/services/es_acl/__init__.py + create mode 100644 neutron/services/es_acl/agents/__init__.py + create mode 100644 neutron/services/es_acl/agents/es_acl_l3_agent.py + create mode 100644 neutron/services/es_acl/common/__init__.py + create mode 100644 neutron/services/es_acl/common/topics.py + create mode 100644 neutron/services/es_acl/es_acl_l3_plugin.py + +diff --git a/neutron/agent/l3_agent.py b/neutron/agent/l3_agent.py +index 86d87735e..80b0316a0 100644 +--- a/neutron/agent/l3_agent.py ++++ b/neutron/agent/l3_agent.py +@@ -51,6 +51,7 @@ from neutron.openstack.common import processutils + from neutron.openstack.common import service + from neutron.openstack.common import timeutils + from neutron import service as neutron_service ++from neutron.services.es_acl.agents import es_acl_l3_agent + from neutron.services.firewall.agents.l3reference import firewall_l3_agent + + LOG = logging.getLogger(__name__) +@@ -449,6 +450,7 @@ class RouterProcessingQueue(object): + + class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, + l3_ha_agent.AgentMixin, ++ es_acl_l3_agent.EsAclL3AgentMixin, + manager.Manager): + """Manager for L3NatAgent + +@@ -585,6 +587,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, + + self.target_ex_net_id = None + self.use_ipv6 = ipv6_utils.is_enabled() ++ self.init_es_acl(self.conf) + + def _check_config_params(self): + """Check items in configuration files. 
+@@ -904,6 +907,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, + for p in new_ports: + self._set_subnet_info(p) + self.internal_network_added(ri, p) ++ self.es_acl_internal_network_added(ri, p) + ri.internal_ports.append(p) + self._set_subnet_arp_info(ri, p) + if (not new_ipv6_port and +@@ -1007,6 +1011,9 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, + self.plugin_rpc.update_portmapping_statuses( + self.context, pm_statuses) + ++ # Process EayunStack ACL rules ++ self.es_acl_process_router(ri) ++ + # Process SNAT/DNAT rules for floating IPs + fip_statuses = {} + try: +@@ -1994,6 +2001,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, + update.timestamp = timeutils.utcnow() + routers = self.plugin_rpc.get_routers(self.context, + [update.id]) ++ self.es_acl_update_router_info(routers) + except Exception: + msg = _("Failed to fetch router information for '%s'") + LOG.exception(msg, update.id) +@@ -2045,6 +2053,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, + timestamp = timeutils.utcnow() + routers = self.plugin_rpc.get_routers( + context, router_ids) ++ self.es_acl_update_router_info(routers) + + LOG.debug(_('Processing :%r'), routers) + for r in routers: +diff --git a/neutron/db/es_acl_db.py b/neutron/db/es_acl_db.py +index af2ffe175..edb0a9e75 100644 +--- a/neutron/db/es_acl_db.py ++++ b/neutron/db/es_acl_db.py +@@ -28,6 +28,7 @@ from neutron.openstack.common import uuidutils + from neutron.openstack.common import log as logging + + from neutron import manager ++from neutron.plugins.common import constants as p_constants + + + LOG = logging.getLogger(__name__) +@@ -93,6 +94,11 @@ class EsAclDbMixin(es_acl.EsAclPluginBase, base_db.CommonDbMixin): + def _core_plugin(self): + return manager.NeutronManager.get_plugin() + ++ @property ++ def _l3_plugin(self): ++ return manager.NeutronManager.get_service_plugins().get( ++ p_constants.L3_ROUTER_NAT) ++ + def _get_es_acl(self, context, es_acl_id): + try: + return self._get_by_id(context, EsAcl, es_acl_id) +@@ -156,6 +162,7 @@ class EsAclDbMixin(es_acl.EsAclPluginBase, base_db.CommonDbMixin): + """Bind subnets to ACL.""" + subnet_ids = subnet_ids['subnet_ids'] + bound_subnets = [] ++ affected_routers = set() + with context.session.begin(subtransactions=True): + acl_db = self._get_es_acl(context, es_acl_id) + already_bound = set( +@@ -205,18 +212,23 @@ class EsAclDbMixin(es_acl.EsAclPluginBase, base_db.CommonDbMixin): + router_port_id=router_port_id) + context.session.add(binding_db) + bound_subnets.append(subnet_id) +- return {'bound_subnets': bound_subnets} ++ if router_id: ++ affected_routers.add(router_id) ++ return {'bound_subnets': bound_subnets}, affected_routers + + def unbind_subnets(self, context, es_acl_id, subnet_ids): + """Unbind subnets from ACL.""" + subnet_ids = subnet_ids['subnet_ids'] + unbound_subnets = [] ++ affected_routers = set() + with context.session.begin(subtransactions=True): + acl_db = self._get_es_acl(context, es_acl_id) + subnet_ids = set(subnet_ids) + for binding in acl_db.bindings: + subnet_id = binding.subnet_id + if subnet_id in subnet_ids: ++ if binding.router_id: ++ affected_routers.add(binding.router_id) + context.session.delete(binding) + subnet_ids.remove(subnet_id) + unbound_subnets.append(subnet_id) +@@ -224,7 +236,7 @@ class EsAclDbMixin(es_acl.EsAclPluginBase, base_db.CommonDbMixin): + LOG.warn('ACL %(acl_id)s is not bound to ' + 'subnet(s) %(subnet_ids)s.', + {'acl_id': es_acl_id, 'subnet_ids': subnet_ids}) +- return {'unbound_subnets': 
unbound_subnets} ++ return {'unbound_subnets': unbound_subnets}, affected_routers + + def _get_es_acl_rule(self, context, es_acl_rule_id): + try: +@@ -416,3 +428,63 @@ class EsAclDbMixin(es_acl.EsAclPluginBase, base_db.CommonDbMixin): + context, EsAclRule, self._make_es_acl_rule_dict, + filters=filters, fields=fields, sorts=sorts, + limit=limit, marker_obj=marker_object, page_reverse=page_reverse) ++ ++ # Helper functions for plugin ++ def _make_es_acl_rule_dict_for_agent(self, acl_rule_db): ++ fields_for_agent = set( ++ ['protocol', 'source_ip_address', 'destination_ip_address', ++ 'source_port', 'destination_port', 'action']) ++ return self._make_es_acl_rule_dict(acl_rule_db, fields_for_agent) ++ ++ def _make_es_acl_dict_for_agent(self, acl_db): ++ res = { ++ 'ingress': [ ++ self._make_es_acl_rule_dict_for_agent(rule) ++ for rule in acl_db.ingress_rules], ++ 'egress': [ ++ self._make_es_acl_rule_dict_for_agent(rule) ++ for rule in acl_db.egress_rules]} ++ return res ++ ++ def get_related_routers(self, context, acl_id): ++ routers = set() ++ if acl_id is not None: ++ acl_db = self._get_es_acl(context, acl_id) ++ routers = set(binding.router_id for binding in acl_db.bindings) ++ return routers ++ ++ def get_es_acl_by_routers(self, context, router_ids): ++ routers = {} ++ acls = {} ++ binding_query = context.session.query(EsAclSubnetBinding).filter( ++ EsAclSubnetBinding.router_id.in_(router_ids)) ++ acl_ids = set() ++ for binding in binding_query: ++ acl_id = binding.acl_id ++ router_id = binding.router_id ++ acl_ids.add(acl_id) ++ router_ports = routers.get(router_id, {}) ++ if acl_id not in router_ports: ++ router_ports[acl_id] = set() ++ router_ports[acl_id].add(binding.router_port_id) ++ routers[router_id] = router_ports ++ ++ acl_query = context.session.query(EsAcl).filter(EsAcl.id.in_(acl_ids)) ++ for acl in acl_query: ++ acls[acl.id] = self._make_es_acl_dict_for_agent(acl) ++ ++ return {'acls': acls, 'routers': routers} ++ ++ def internal_port_added_to_router(self, context, ++ router_id, subnet_id, port_id): ++ with context.session.begin(subtransactions=True): ++ binding = context.session.query( ++ EsAclSubnetBinding ++ ).filter_by(subnet_id=subnet_id).with_lockmode('update').first() ++ if not binding: ++ return {} ++ ++ binding.update({'router_id': router_id, 'router_port_id': port_id}) ++ acl_id = binding.acl_id ++ acl_db = self._get_es_acl(context, acl_id) ++ return {acl_id: self._make_es_acl_dict_for_agent(acl_db)} +diff --git a/neutron/services/es_acl/__init__.py b/neutron/services/es_acl/__init__.py +new file mode 100644 +index 000000000..e69de29bb +diff --git a/neutron/services/es_acl/agents/__init__.py b/neutron/services/es_acl/agents/__init__.py +new file mode 100644 +index 000000000..e69de29bb +diff --git a/neutron/services/es_acl/agents/es_acl_l3_agent.py b/neutron/services/es_acl/agents/es_acl_l3_agent.py +new file mode 100644 +index 000000000..39b40cc61 +--- /dev/null ++++ b/neutron/services/es_acl/agents/es_acl_l3_agent.py +@@ -0,0 +1,237 @@ ++# Copyright (c) 2017 Eayun, Inc. ++# All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the ++# License for the specific language governing permissions and limitations ++# under the License. ++ ++from oslo.config import cfg ++ ++from neutron.common import rpc as n_rpc ++from neutron.openstack.common import log as logging ++from neutron.plugins.common import constants ++ ++from neutron.services.es_acl.common import topics as es_acl_topics ++ ++LOG = logging.getLogger(__name__) ++ ++EsAclOpts = [ ++ cfg.BoolOpt('enabled', default=False, help="Enable EayunStack ACL"), ++] ++cfg.CONF.register_opts(EsAclOpts, 'es_acl') ++ ++ES_ACL_INFO_KEY = 'ES_ACL' ++ES_ACL_CHAIN_PREFIX = 'acl-' ++ ++INGRESS_DIRECTION = 'ingress' ++EGRESS_DIRECTION = 'egress' ++ ++CHAIN_NAME_PREFIX = {INGRESS_DIRECTION: 'i', ++ EGRESS_DIRECTION: 'o'} ++ ++# ACL applied on internal ports. ++IPTABLES_DIR = {INGRESS_DIRECTION: '-o', ++ EGRESS_DIRECTION: '-i'} ++PORT_VALID_PROTOCOLS = ( ++ 6, # TCP ++ 17, # UDP ++ 33, # DCCP ++ 132, # SCTP ++ # 136, # UDPLite, not yet supported by iptables v1.4.21 ++) ++ACTIONS = {'allow': 'ACCEPT', ++ 'deny': 'DROP'} ++ ++ ++class EsAclPluginApi(n_rpc.RpcProxy): ++ API_VERSION = '1.0' ++ ++ def __init__(self): ++ super(EsAclPluginApi, self).__init__( ++ es_acl_topics.ES_ACL_PLUGIN, self.API_VERSION) ++ ++ def get_es_acl_by_routers(self, context, router_ids): ++ return self.call( ++ context, ++ self.make_msg('get_es_acl_by_routers', router_ids=router_ids)) ++ ++ def internal_port_added_to_router(self, context, ++ router_id, subnet_id, port_id): ++ return self.call( ++ context, ++ self.make_msg( ++ 'internal_port_added_to_router', ++ router_id=router_id, subnet_id=subnet_id, port_id=port_id)) ++ ++ ++def _run_if_enabled(switch_name, default_ret=None): ++ def _decorator(func): ++ def _func(*args, **kwargs): ++ if getattr(args[0], switch_name, False): ++ return func(*args, **kwargs) ++ else: ++ return default_ret ++ return _func ++ return _decorator ++ ++ ++class EsAclL3AgentMixin(object): ++ ++ def init_es_acl(self, conf): ++ self.es_acl_enabled = cfg.CONF.es_acl.enabled ++ if self.neutron_service_plugins is not None: ++ plugin_configured = ( ++ constants.ES_ACL in self.neutron_service_plugins) ++ if plugin_configured and not self.es_acl_enabled: ++ LOG.error('EayunStack ACL plugin is configured in the server ' ++ 'side, but EayunStack ACL is disabled in L3 agent.') ++ self.es_acl_enabled = self.es_acl_enabled and plugin_configured ++ if self.es_acl_enabled: ++ self.es_acl_plugin_api = EsAclPluginApi() ++ ++ @property ++ def empty_acl_info(self): ++ return {'rules': {}, 'ports': {}} ++ ++ @_run_if_enabled('es_acl_enabled') ++ def es_acl_update_router_info(self, routers): ++ router_ids = [router['id'] for router in routers] ++ LOG.debug( ++ 'Getting EayunStack ACL information for routers %s.', router_ids) ++ acl_by_routers = self.es_acl_plugin_api.get_es_acl_by_routers( ++ self.context, router_ids) ++ LOG.debug('Get EayunStack ACL information: %r.' 
% acl_by_routers) ++ acls = acl_by_routers['acls'] ++ acl_routers = acl_by_routers['routers'] ++ ++ for router in routers: ++ router_id = router['id'] ++ acl_info = self.empty_acl_info ++ for acl_id, ports in acl_routers.get(router_id, {}).items(): ++ acl_info['ports'][acl_id] = set(ports) ++ related_acl_ids = set(acl_info['ports'].keys()) ++ acl_info['rules'] = { ++ acl_id: acls.get(acl_id) ++ for acl_id in related_acl_ids ++ } ++ LOG.debug('EayunStack ACL information for router %(router_id)s: ' ++ '%(info)r', {'router_id': router_id, 'info': acl_info}) ++ router[ES_ACL_INFO_KEY] = acl_info ++ ++ @staticmethod ++ def _get_es_acl_chain_name(direction, acl_id): ++ return "%s%s%s" % ( ++ ES_ACL_CHAIN_PREFIX, CHAIN_NAME_PREFIX[direction], acl_id) ++ ++ @staticmethod ++ def _drop_invalid_packets_rule(): ++ return '-m state --state INVALID -j DROP' ++ ++ @staticmethod ++ def _allow_established_rule(): ++ return '-m state --state ESTABLISHED,RELATED -j ACCEPT' ++ ++ @staticmethod ++ def _default_drop_all_rule(): ++ return '-j DROP' ++ ++ @staticmethod ++ def _translate_acl_rule(rule): ++ parts = [] ++ if rule['protocol']: ++ parts.append('-p %s' % rule['protocol']) ++ if rule['source_ip_address']: ++ parts.append('-s %s' % rule['source_ip_address']) ++ if rule['destination_ip_address']: ++ parts.append('-d %s' % rule['destination_ip_address']) ++ if rule['protocol'] in PORT_VALID_PROTOCOLS: ++ if rule['source_port']: ++ parts.append('--sport %s' % rule['source_port']) ++ if rule['destination_port']: ++ parts.append('--dport %s' % rule['destination_port']) ++ parts.append('-j %s' % ACTIONS[rule['action']]) ++ return ' '.join(parts) ++ ++ @_run_if_enabled('es_acl_enabled') ++ def es_acl_process_router(self, ri): ++ # Called with ri.iptables_manager.iptables_apply_deferred = True ++ im = ri.iptables_manager ++ table = im.ipv4['filter'] ++ acl_info = ri.router.get(ES_ACL_INFO_KEY, self.empty_acl_info) ++ ++ LOG.debug('Processing EayunStack ACL information for router ' ++ '%(router_id)s: %(info)r.', ++ {'router_id': ri.router_id, 'info': acl_info}) ++ # Clear all the chains ++ acl_chains = set( ++ chain for chain in table.chains ++ if chain.startswith(ES_ACL_CHAIN_PREFIX)) ++ for chain in acl_chains: ++ table.ensure_remove_chain(chain) ++ ++ # Build acl rules ++ for acl_id, acl_rules in acl_info['rules'].items(): ++ ports = acl_info['ports'].get(acl_id, set()) ++ for direction in (INGRESS_DIRECTION, EGRESS_DIRECTION): ++ chain = self._get_es_acl_chain_name(direction, acl_id) ++ table.add_chain(chain) ++ ++ rules = [self._drop_invalid_packets_rule(), ++ self._allow_established_rule()] ++ rules += [ ++ self._translate_acl_rule(rule) ++ for rule in acl_rules.get(direction, []) ++ ] ++ rules.append(self._default_drop_all_rule()) ++ for rule in rules: ++ table.add_rule(chain, rule) ++ ++ for port_id in ports: ++ rule = '%s %s -j $%s' % ( ++ IPTABLES_DIR[direction], ++ self.get_internal_device_name(port_id), ++ chain ++ ) ++ table.add_rule('FORWARD', rule) ++ ++ im.apply() ++ ++ @_run_if_enabled('es_acl_enabled') ++ def es_acl_remove_from_router_info(self, ri): ++ ri.router[ES_ACL_INFO_KEY] = self.empty_acl_info ++ ++ @_run_if_enabled('es_acl_enabled') ++ def es_acl_internal_network_added(self, ri, port): ++ acl_info = ri.router.get(ES_ACL_INFO_KEY, self.empty_acl_info) ++ ++ router_id = ri.router_id ++ subnet_id = port['subnet']['id'] ++ port_id = port['id'] ++ try: ++ acls = self.es_acl_plugin_api.internal_port_added_to_router( ++ self.context, router_id, subnet_id, port_id) ++ LOG.debug( ++ 'Get 
EayunStack ACL information for subnet %(subnet_id)s ' ++ 'newly added to router %(router_id)s: %(info)r.', ++ {'subnet_id': subnet_id, 'router_id': router_id, 'info': acls}) ++ except Exception: ++ LOG.exception( ++ 'Failed to fetch EayunStack ACL information for ' ++ 'subnet %(subnet_id)s newly added to router %(router_id)s.', ++ {subnet_id: subnet_id, router_id: router_id}) ++ return ++ ++ for acl_id, rules in acls.items(): ++ if acl_id not in acl_info['rules']: ++ acl_info['rules']['acl_id'] = rules ++ if acl_id not in acl_info['ports']: ++ acl_info['ports'][acl_id] = set() ++ acl_info['ports'][acl_id].add(port_id) +diff --git a/neutron/services/es_acl/common/__init__.py b/neutron/services/es_acl/common/__init__.py +new file mode 100644 +index 000000000..e69de29bb +diff --git a/neutron/services/es_acl/common/topics.py b/neutron/services/es_acl/common/topics.py +new file mode 100644 +index 000000000..0adc2bf5f +--- /dev/null ++++ b/neutron/services/es_acl/common/topics.py +@@ -0,0 +1,16 @@ ++# Copyright (c) 2017 Eayun, Inc. ++# All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++ ++ES_ACL_PLUGIN = 'es_acl_plugin' +diff --git a/neutron/services/es_acl/es_acl_l3_plugin.py b/neutron/services/es_acl/es_acl_l3_plugin.py +new file mode 100644 +index 000000000..39d018a8c +--- /dev/null ++++ b/neutron/services/es_acl/es_acl_l3_plugin.py +@@ -0,0 +1,157 @@ ++# Copyright (c) 2017 Eayun, Inc. ++# All rights reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. 
++ ++from oslo.config import cfg ++ ++from neutron.common import constants ++from neutron.common import rpc as n_rpc ++from neutron.common import topics ++from neutron.common import utils ++from neutron.db import es_acl_db ++from neutron.openstack.common import log as logging ++from neutron.services.es_acl.common import topics as es_acl_topics ++ ++LOG = logging.getLogger(__name__) ++ ++ ++class EsAclL3Callbacks(n_rpc.RpcCallback): ++ RPC_API_VERSION = '1.0' ++ ++ def __init__(self, plugin): ++ super(EsAclL3Callbacks, self).__init__() ++ self.plugin = plugin ++ ++ def get_es_acl_by_routers(self, context, router_ids): ++ return self.plugin.get_es_acl_by_routers(context, router_ids) ++ ++ def internal_port_added_to_router(self, context, ++ router_id, subnet_id, port_id): ++ return self.plugin.internal_port_added_to_router( ++ context, router_id, subnet_id, port_id) ++ ++ ++class EsAclL3AgentApi(n_rpc.RpcProxy): ++ API_VERSION = '1.0' ++ ++ def __init__(self, plugin, host): ++ super(EsAclL3AgentApi, self).__init__( ++ topics.L3_AGENT, self.API_VERSION) ++ self.plugin = plugin ++ self.host = host ++ ++ def _agent_notify_routers_update(self, context, routers): ++ adminContext = context.is_admin and context or context.elevated() ++ ++ routers_by_host = {} ++ for router_id in routers: ++ l3_agents = self.plugin._l3_plugin.get_l3_agents_hosting_routers( ++ adminContext, [router_id], admin_state_up=True, active=True) ++ for l3_agent in l3_agents: ++ host = l3_agent['host'] ++ host_routers = routers_by_host.get(host, []) ++ host_routers.append(router_id) ++ routers_by_host[host] = host_routers ++ ++ for host, host_routers in routers_by_host.items(): ++ self.cast(context, ++ self.make_msg('routers_updated', routers=host_routers), ++ topic='%s.%s' % (self.topic, host)) ++ ++ def _fanout_notify_routers_update(self, context, routers): ++ self.fanout_cast( ++ context, self.make_msg('routers_updated', routers=routers)) ++ ++ def notify_routers_update(self, context, routers): ++ if utils.is_extension_supported( ++ self.plugin._l3_plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS ++ ): ++ self._agent_notify_routers_update(context, routers) ++ else: ++ self._fanout_notify_routers_update(context, routers) ++ ++ ++class EsAclL3Plugin(es_acl_db.EsAclDbMixin): ++ ++ supported_extension_aliases = ['es-acl'] ++ ++ def __init__(self): ++ self.endpoints = [EsAclL3Callbacks(self)] ++ self.conn = n_rpc.create_connection(new=True) ++ self.conn.create_consumer( ++ es_acl_topics.ES_ACL_PLUGIN, self.endpoints, fanout=False) ++ self.conn.consume_in_threads() ++ ++ self.agent_rpc = EsAclL3AgentApi(self, cfg.CONF.host) ++ ++ def bind_subnets(self, context, es_acl_id, subnet_ids): ++ bound_subnets, affected_routers = super( ++ EsAclL3Plugin, self ++ ).bind_subnets(context, es_acl_id, subnet_ids) ++ ++ self.agent_rpc.notify_routers_update(context, affected_routers) ++ ++ return bound_subnets ++ ++ def unbind_subnets(self, context, es_acl_id, subnet_ids): ++ unbound_subnets, affected_routers = super( ++ EsAclL3Plugin, self ++ ).unbind_subnets(context, es_acl_id, subnet_ids) ++ ++ self.agent_rpc.notify_routers_update(context, affected_routers) ++ ++ return unbound_subnets ++ ++ def create_es_acl_rule(self, context, es_acl_rule): ++ rule = super( ++ EsAclL3Plugin, self ++ ).create_es_acl_rule(context, es_acl_rule) ++ ++ routers = self.get_related_routers(context, rule['acl_id']) ++ self.agent_rpc.notify_routers_update(context, routers) ++ ++ return rule ++ ++ @staticmethod ++ def _test_rule_changed(old_rule, rule): ++ 
changed_columns = set( ++ key for key in rule.keys() if old_rule[key] != rule[key]) ++ changed_columns.discard('name') ++ return len(changed_columns) > 0 ++ ++ def update_es_acl_rule(self, context, es_acl_rule_id, es_acl_rule): ++ old_rule = self.get_es_acl_rule(context, es_acl_rule_id) ++ rule = super( ++ EsAclL3Plugin, self ++ ).update_es_acl_rule(context, es_acl_rule_id, es_acl_rule) ++ ++ rule_changed = self._test_rule_changed(old_rule, rule) ++ if rule_changed: ++ routers = self.get_related_routers(context, rule['acl_id']) ++ if old_rule['acl_id'] != rule['acl_id']: ++ routers = routers.union( ++ self.get_related_routers(context, old_rule['acl_id'])) ++ self.agent_rpc.notify_routers_update(context, routers) ++ else: ++ LOG.warn('Nothing changed for ACL rule %(acl_rule_id)s.', ++ {'acl_rule_id': es_acl_rule_id}) ++ ++ return rule ++ ++ def delete_es_acl_rule(self, context, es_acl_rule_id): ++ rule = self.get_es_acl_rule(context, es_acl_rule_id) ++ super(EsAclL3Plugin, self).delete_es_acl_rule(context, es_acl_rule_id) ++ ++ routers = self.get_related_routers(context, rule['acl_id']) ++ self.agent_rpc.notify_routers_update(context, routers) +-- +2.13.5 (Apple Git-94) + diff --git a/packaging/openstack-neutron/0137-metering-exclude-label-without-rules-when-getting-co.patch b/packaging/openstack-neutron/0137-metering-exclude-label-without-rules-when-getting-co.patch new file mode 100644 index 0000000..866a01f --- /dev/null +++ b/packaging/openstack-neutron/0137-metering-exclude-label-without-rules-when-getting-co.patch @@ -0,0 +1,59 @@ +From d553659a9396e20e59876194cd435fc5e00ffdd0 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Wed, 27 Sep 2017 15:17:52 +0800 +Subject: [PATCH] metering: exclude label without rules when getting counters + +With nfacct we use "nfacct flush" to clean up unused objects. Thus a +label without rules will lead to unused objects which will be purged +later. And this leads to errors when getting counters for this label +because the related object is missing. 
+ +Fixes: redmine #10988 + +Signed-off-by: Hunt Xu +--- + neutron/services/metering/drivers/iptables/es_iptables_driver.py | 4 +++- + neutron/services/metering/drivers/iptables/iptables_driver.py | 5 ++++- + 2 files changed, 7 insertions(+), 2 deletions(-) + +diff --git a/neutron/services/metering/drivers/iptables/es_iptables_driver.py b/neutron/services/metering/drivers/iptables/es_iptables_driver.py +index 116452814..8c361a4c6 100644 +--- a/neutron/services/metering/drivers/iptables/es_iptables_driver.py ++++ b/neutron/services/metering/drivers/iptables/es_iptables_driver.py +@@ -50,7 +50,9 @@ class EsRouterWithMetering(iptables_driver.RouterWithMetering): + im.ipv4['mangle'].add_rule('PREROUTING', mark_rule) + + def get_metering_labels(self): +- return self.metering_labels.keys() + self.es_metering_labels.keys() ++ return super( ++ EsRouterWithMetering, self ++ ).get_metering_labels() + self.es_metering_labels.keys() + + + class EsIptablesMeteringDriver(iptables_driver.IptablesMeteringDriver): +diff --git a/neutron/services/metering/drivers/iptables/iptables_driver.py b/neutron/services/metering/drivers/iptables/iptables_driver.py +index fc7da34cd..453f6b10d 100644 +--- a/neutron/services/metering/drivers/iptables/iptables_driver.py ++++ b/neutron/services/metering/drivers/iptables/iptables_driver.py +@@ -82,7 +82,9 @@ class RouterWithMetering(object): + self.metering_labels = {} + + def get_metering_labels(self): +- return self.metering_labels.keys() ++ return [ ++ label_id for label_id in self.metering_labels.keys() ++ if self.metering_labels[label_id]['rules']] + + + class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): +@@ -249,6 +251,7 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + self._process_metering_label_rules(rm, rules, + label_id, + rules_chain) ++ rm.metering_labels[label_id] = label + + @log.log + def remove_metering_label(self, context, routers): +-- +2.13.5 (Apple Git-94) + diff --git a/packaging/openstack-neutron/0138-db-fix-backref-cascade-between-router-and-es_meter_l.patch b/packaging/openstack-neutron/0138-db-fix-backref-cascade-between-router-and-es_meter_l.patch new file mode 100644 index 0000000..5b62d47 --- /dev/null +++ b/packaging/openstack-neutron/0138-db-fix-backref-cascade-between-router-and-es_meter_l.patch @@ -0,0 +1,29 @@ +From f7cceedc098150ed05a25c34f964bb4d74e93323 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Wed, 27 Sep 2017 15:25:39 +0800 +Subject: [PATCH] db: fix backref cascade between router and es_meter_label + +Fixes: redmine #11042 + +Signed-off-by: Hunt Xu +--- + neutron/db/metering/es_metering_db.py | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/neutron/db/metering/es_metering_db.py b/neutron/db/metering/es_metering_db.py +index 1876ef28b..0a4fcd8d3 100644 +--- a/neutron/db/metering/es_metering_db.py ++++ b/neutron/db/metering/es_metering_db.py +@@ -45,7 +45,8 @@ class EsMeteringLabel(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + tcp_port = sa.Column(sa.Integer) + router = orm.relationship( + Router, +- backref=orm.backref("es_metering_labels", lazy='joined', uselist=True) ++ backref=orm.backref("es_metering_labels", lazy='joined', uselist=True, ++ cascade='all, delete-orphan') + ) + + +-- +2.13.5 (Apple Git-94) + diff --git a/packaging/openstack-neutron/0139-metering-use-eventlet-when-sending-metering-reports.patch b/packaging/openstack-neutron/0139-metering-use-eventlet-when-sending-metering-reports.patch new file mode 100644 index 0000000..c581bce --- 
/dev/null +++ b/packaging/openstack-neutron/0139-metering-use-eventlet-when-sending-metering-reports.patch @@ -0,0 +1,45 @@ +From a74fc69bbf7c95a9e050eb0af16773063aa55c53 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Wed, 27 Sep 2017 15:39:24 +0800 +Subject: [PATCH] metering: use eventlet when sending metering reports + +This leads to a better performance when sending reports. + +Fixes: redmine #11060 + +Signed-off-by: Hunt Xu +--- + neutron/services/metering/agents/metering_agent.py | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/neutron/services/metering/agents/metering_agent.py b/neutron/services/metering/agents/metering_agent.py +index 5044d4861..4133e4e8a 100644 +--- a/neutron/services/metering/agents/metering_agent.py ++++ b/neutron/services/metering/agents/metering_agent.py +@@ -78,6 +78,8 @@ class MeteringAgent(MeteringPluginRpc, manager.Manager): + self.root_helper = config.get_root_helper(self.conf) + self._load_drivers() + self.context = context.get_admin_context_without_session() ++ self.report_pool = eventlet.greenpool.GreenPool() ++ self.notifier = n_rpc.get_notifier('metering') + self.metering_loop = loopingcall.FixedIntervalLoopingCall( + self._metering_loop + ) +@@ -114,11 +116,12 @@ class MeteringAgent(MeteringPluginRpc, manager.Manager): + 'host': self.host} + + LOG.debug(_("Send metering report: %s"), data) +- notifier = n_rpc.get_notifier('metering') +- notifier.info(self.context, 'l3.meter', data) ++ self.report_pool.spawn_n( ++ self.notifier.info, self.context, 'l3.meter', data) + info['pkts'] = 0 + info['bytes'] = 0 + info['time'] = 0 ++ self.report_pool.waitall() + + def _purge_metering_info(self): + deadline_timestamp = int(time.time()) - self.conf.report_interval +-- +2.13.5 (Apple Git-94) + diff --git a/packaging/openstack-neutron/0140-metering-always-setup-metering-iptables-rules-for-ro.patch b/packaging/openstack-neutron/0140-metering-always-setup-metering-iptables-rules-for-ro.patch new file mode 100644 index 0000000..20a8581 --- /dev/null +++ b/packaging/openstack-neutron/0140-metering-always-setup-metering-iptables-rules-for-ro.patch @@ -0,0 +1,57 @@ +From 9e548394d34baba83dc82de063358f69241f9a97 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Wed, 11 Oct 2017 16:46:31 +0800 +Subject: [PATCH] metering: always setup metering iptables rules for routers + +The deployment of the metering iptables rules depends on routers +connecting to external networks with router ports. As we have introduced +EayunStack floatingip mechanism, this dependency should be removed. And +we can now do metering for a floating IP even though its associated +router is not connected to the external network with a router port. 
+ +Fixes: redmine #9992 + +Signed-off-by: Hunt Xu +--- + .../metering/drivers/iptables/iptables_driver.py | 17 ++--------------- + 1 file changed, 2 insertions(+), 15 deletions(-) + +diff --git a/neutron/services/metering/drivers/iptables/iptables_driver.py b/neutron/services/metering/drivers/iptables/iptables_driver.py +index 453f6b10d..746bbe423 100644 +--- a/neutron/services/metering/drivers/iptables/iptables_driver.py ++++ b/neutron/services/metering/drivers/iptables/iptables_driver.py +@@ -112,20 +112,9 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + return r + + def _green_update_router(self, router): +- old_gw_port_id = None + old_rm = self.routers.get(router['id']) +- if old_rm: +- old_gw_port_id = old_rm.router['gw_port_id'] +- gw_port_id = router['gw_port_id'] +- +- if gw_port_id != old_gw_port_id: +- if old_rm: +- with IptablesManagerTransaction(old_rm.iptables_manager): +- self._process_disassociate_metering_label(router) +- if gw_port_id: +- self._process_associate_metering_label(router) +- elif gw_port_id: +- self._process_associate_metering_label(router) ++ if not old_rm: ++ self._process_associate_metering_label(router) + + @log.log + def update_routers(self, context, routers): +@@ -147,8 +136,6 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): + + def _process_metering_label_rules(self, rm, rules, label_id, rules_chain): + im = rm.iptables_manager +- if not rm.router['gw_port_id']: +- return + ext_dev = "%s+" % EXTERNAL_DEV_PREFIX + + for rule in rules: +-- +2.13.5 (Apple Git-94) + diff --git a/packaging/openstack-neutron/0141-Use-specific-priority-for-ES-fip-ip-rules.patch b/packaging/openstack-neutron/0141-Use-specific-priority-for-ES-fip-ip-rules.patch new file mode 100644 index 0000000..5a97685 --- /dev/null +++ b/packaging/openstack-neutron/0141-Use-specific-priority-for-ES-fip-ip-rules.patch @@ -0,0 +1,43 @@ +From 271b1f07db0de17546f0630772b4e29659a789e9 Mon Sep 17 00:00:00 2001 +From: Hunt Xu +Date: Mon, 23 Oct 2017 14:50:43 +0800 +Subject: [PATCH] Use specific priority for ES-fip ip rules + +Ip-rule will automatically add a rule before all the existing rules. To +prevent this, we need to specify the priority of the ip rules when using +EayunStack fip mechanism. 
+ +Fixes: redmine #11086 + +Signed-off-by: Hunt Xu +--- + neutron/agent/l3_agent.py | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/neutron/agent/l3_agent.py b/neutron/agent/l3_agent.py +index 80b0316a0..aee4950a0 100644 +--- a/neutron/agent/l3_agent.py ++++ b/neutron/agent/l3_agent.py +@@ -78,6 +78,7 @@ PRIORITY_SYNC_ROUTERS_TASK = 1 + DELETE_ROUTER = 1 + # For EayunStack floatingip mechanism + IPSET_CHAIN_LEN = 20 ++ES_FIP_IP_RULE_PRIO = 32765 + + + class L3PluginApi(n_rpc.RpcProxy): +@@ -1254,8 +1255,9 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, + + for ip in set(fixed_ips) - existing_ips: + table = netaddr.IPNetwork(ip).value +- ns_ipr.add_rule_from(ip, table) +- ns_ipr.add_rule_from(fip_map[ip], table) ++ ns_ipr.add_rule_from(ip, table, rule_pr=ES_FIP_IP_RULE_PRIO) ++ ns_ipr.add_rule_from(fip_map[ip], table, ++ rule_pr=ES_FIP_IP_RULE_PRIO) + + def _es_add_floating_ip(self, ri, fip): + addr_added = False +-- +2.13.5 (Apple Git-94) + diff --git a/packaging/openstack-neutron/openstack-neutron.spec b/packaging/openstack-neutron/openstack-neutron.spec index 2d6bceb..4c56406 100644 --- a/packaging/openstack-neutron/openstack-neutron.spec +++ b/packaging/openstack-neutron/openstack-neutron.spec @@ -4,7 +4,7 @@ Name: openstack-neutron Version: 2014.2 -Release: 28%{?dist_eayunstack} +Release: 42%{?dist_eayunstack} Provides: openstack-quantum = %{version}-%{release} Obsoletes: openstack-quantum < 2013.2-0.4.b3 Summary: OpenStack Networking Service @@ -131,6 +131,59 @@ Patch0085: 0085-metering-extend-neutron-metering.patch Patch0086: 0086-l3_agent-implement-EayunStack-floating-ip-mechanism.patch Patch0087: 0087-Implement-lbaas-L7-policy-rule-model.patch Patch0088: 0088-Implement-l7policy-for-lbaas-agent.patch +Patch0089: 0089-Fix-errors-in-lbaas-L7-policy-implemetation.patch +Patch0090: 0090-iptables_firewall-use-wrap-chains-and-rules-for-mete.patch +Patch0091: 0091-firewall_l3_agent-only-get-hosted-routers-info.patch +Patch0092: 0092-metering-update-external-device-of-metering-iptables.patch +Patch0093: 0093-Fix-lbaas-l7-implement-many-errors.patch +Patch0094: 0094-Enable-egress-qos-to-be-set-on-floatingip-ports.patch +Patch0095: 0095-Add-extra-action-for-lb-session-persistence.patch +Patch0096: 0096-iptables_firewall-minor-fix-for-_setup_metering_chai.patch +Patch0097: 0097-Validate-http_method-and-url_path-for-lbaas-health-m.patch +Patch0098: 0098-Enable-ES-port-metering-on-all-sg-enabled-ports.patch +Patch0099: 0099-Configuration-option-for-whether-to-use-ES-port-mete.patch +Patch0100: 0100-Fix-enable-update-l7policy-value-attribute.patch +Patch0101: 0101-l3_db-update-GatewayInUseByFloatingIp-check.patch +Patch0102: 0102-Fix-fip-port-qos-namespace-selection-in-sync_qos.patch +Patch0103: 0103-ES-fip-setup-ip-rule-for-floatingip-itself.patch +Patch0104: 0104-Fix-error-when-update-l7policy-with-pool_id-None.patch +Patch0105: 0105-es-metering-fix-port-selection-when-tcp_port-is-spec.patch +Patch0106: 0106-Optimize-haproxy-driver-port_to_pool_id-dict.patch +Patch0107: 0107-OpenFlow-EW-DVR-be-more-torelant-when-syncing-dvr-po.patch +Patch0108: 0108-Add-check-if-extra-actions-params-is-correct.patch +Patch0109: 0109-Add-status_changed-notification-for-some-components.patch +Patch0110: 0110-FWaaS-apply-firewall-rules-to-router-ingress-traffic.patch +Patch0111: 0111-FWaaS-support-some-more-protocols-in-FW-rules.patch +Patch0112: 0112-Fix-firewall-port-range-compare-error.patch +Patch0113: 0113-metering-properly-do-data-filtering-within-some-APIs.patch 
+Patch0114: 0114-Loadbalancer-enable-a-created-free-port-to-be-used-b.patch +Patch0115: 0115-Fix-exception-message-format-error.patch +Patch0116: 0116-Add-monitor-address-and-port-for-lbaas-member.patch +Patch0117: 0117-Fix-exception-error-when-l7rule-delete.patch +Patch0118: 0118-Port-don-t-check-max-fixed_ips-quota-for-dhcp-agent-.patch +Patch0119: 0119-EW-DVR-fix-issues-related-to-hosted-ports.patch +Patch0120: 0120-Fix-syntax-error.patch +Patch0121: 0121-Switch-to-use-classmethod-in-eayun-notifier.patch +Patch0122: 0122-Porting-neutron-lbaas-certificates-manager-to-neutro.patch +Patch0123: 0123-Add-query-and-fragment-valid-to-url_path.patch +Patch0124: 0124-agent_sync-filter-out-not-ready-fip-port-targets.patch +Patch0125: 0125-PPTP-allow-the-same-username-used-by-different-tenan.patch +Patch0126: 0126-PPTP-fix-vpnservices-checking-for-PPTP-credentials.patch +Patch0127: 0127-Metering-use-nfacct-to-get-metering-counters.patch +Patch0128: 0128-metering-get-all-metering-label-at-once-on-a-host.patch +Patch0129: 0129-metering-parallelize-workloads-in-iptables-drivers.patch +Patch0130: 0130-Get-rid-of-ml2-port-model-hook-join.patch +Patch0131: 0131-Get-rid-of-_network_model_hook-for-external_net.patch +Patch0132: 0132-Get-rid-of-additional-fixed_ip-filter-join.patch +Patch0133: 0133-Switch-to-subquery-for-1-M-relationships.patch +Patch0134: 0134-ES-ACL-add-EayunStack-subnet-ACL-extension.patch +Patch0135: 0135-ES-ACL-add-database-operations-related-to-subnet-ACL.patch +Patch0136: 0136-ES-ACL-ACL-plugin-and-L3-agent-enhancement.patch +Patch0137: 0137-metering-exclude-label-without-rules-when-getting-co.patch +Patch0138: 0138-db-fix-backref-cascade-between-router-and-es_meter_l.patch +Patch0139: 0139-metering-use-eventlet-when-sending-metering-reports.patch +Patch0140: 0140-metering-always-setup-metering-iptables-rules-for-ro.patch +Patch0141: 0141-Use-specific-priority-for-ES-fip-ip-rules.patch BuildArch: noarch @@ -221,7 +274,6 @@ Requires: python-stevedore >= 1.0.0 Requires: python-six >= 1.7.0 Requires: python-webob >= 1.2.3 Requires: sudo -Requires: libreswan @@ -571,6 +623,7 @@ Summary: Neutron bandwidth metering agent Group: Applications/System Requires: openstack-neutron = %{version}-%{release} +Requires: nfacct >= 1.0.2 %description metering-agent Neutron provides an API to measure bandwidth utilization @@ -598,6 +651,7 @@ Summary: Neutron VPNaaS agent Group: Applications/System Requires: openstack-neutron = %{version}-%{release} +Requires: libreswan %description vpn-agent Neutron provides an API to implement VPN as a service @@ -697,6 +751,59 @@ IPSec. 
%patch0086 -p1 %patch0087 -p1 %patch0088 -p1 +%patch0089 -p1 +%patch0090 -p1 +%patch0091 -p1 +%patch0092 -p1 +%patch0093 -p1 +%patch0094 -p1 +%patch0095 -p1 +%patch0096 -p1 +%patch0097 -p1 +%patch0098 -p1 +%patch0099 -p1 +%patch0100 -p1 +%patch0101 -p1 +%patch0102 -p1 +%patch0103 -p1 +%patch0104 -p1 +%patch0105 -p1 +%patch0106 -p1 +%patch0107 -p1 +%patch0108 -p1 +%patch0109 -p1 +%patch0110 -p1 +%patch0111 -p1 +%patch0112 -p1 +%patch0113 -p1 +%patch0114 -p1 +%patch0115 -p1 +%patch0116 -p1 +%patch0117 -p1 +%patch0118 -p1 +%patch0119 -p1 +%patch0120 -p1 +%patch0121 -p1 +%patch0122 -p1 +%patch0123 -p1 +%patch0124 -p1 +%patch0125 -p1 +%patch0126 -p1 +%patch0127 -p1 +%patch0128 -p1 +%patch0129 -p1 +%patch0130 -p1 +%patch0131 -p1 +%patch0132 -p1 +%patch0133 -p1 +%patch0134 -p1 +%patch0135 -p1 +%patch0136 -p1 +%patch0137 -p1 +%patch0138 -p1 +%patch0139 -p1 +%patch0140 -p1 +%patch0141 -p1 find neutron -name \*.py -exec sed -i '/\/usr\/bin\/env python/{d;q}' {} + @@ -965,6 +1072,7 @@ exit 0 %{_datarootdir}/neutron/rootwrap/iptables-firewall.filters %{_datarootdir}/neutron/rootwrap/l3.filters %{_datarootdir}/neutron/rootwrap/lbaas-haproxy.filters +%{_datarootdir}/neutron/rootwrap/metering.filters %{_datarootdir}/neutron/rootwrap/qos.filters @@ -1158,6 +1266,85 @@ exit 0 %changelog +* Wed Oct 25 2017 Xu Meihong 2014.2-42.eayunstack.dev +- add patch 0141 from github pull request #136 (redmine#11086) + +* Fri Oct 13 2017 Xu Meihong 2014.2-41.eayunstack.dev +- fix dependency on libreswan (only vpn-agent needs it) +- add patch 0130-0133 from github pull request #116 (redmine#10685) +- add patch 0134-0136 from github pull request #130 (redmine#11027) +- add patch 0137 from github pull request #131 (redmine#10988) +- add patch 0138 from github pull request #132 (redmine#11042) +- add patch 0139 from github pull request #133 (redmine#11060) +- add patch 0140 from github pull request #135 (redmine#9992) + +* Thu Sep 14 2017 Xu Meihong 2014.2-40.eayunstack.dev +- add patch 0128 from github pull request #127, #124(1), #128 (redmine#10957) +- add patch 0129 from github pull request #124(2) (redmine#10956) + +* Wed Sep 13 2017 Xu Meihong 2014.2-39.eayunstack.dev +- update patch 0127 from github pull request #123 (redmine#10878) + +* Wed Sep 13 2017 Xu Meihong 2014.2-38.eayunstack.dev +- update patch 0127 from github pull request #122 (redmine#10878) + +* Wed Sep 13 2017 Xu Meihong 2014.2-37.eayunstack.dev +- add patch 0125 from github pull request #118 (redmine#10809) +- add patch 0126 from github pull request #119 (redmine#10813) +- add patch 0127 from github pull request #120 (redmine#10878) + +* Tue Sep 05 2017 Xu Meihong 2014.2-36.eayunstack.dev +- add patch 0122 from github pull request #108 (redmine#10330) +- add patch 0123 from github pull request #111 (redmine#10435) +- add patch 0124 from neutron-qos github pull request #26 (redmine#10738) + +* Wed Jul 19 2017 Xu Meihong 2014.2-35.eayunstack.dev +- add patch 0109 from github pull request #100 (redmine#10220) +- add patch 0110 from github pull request #101 (redmine#10238) +- add patch 0111 from github pull request #102 (redmine#10240) +- add patch 0112 from github pull request #103 (redmine#10246) +- add patch 0113 from github pull request #105 (redmine#10261) +- add patch 0114 from github pull request #106 (redmine#10286) +- add patch 0115 from github pull request #107 (redmine#10312) +- add patch 0116 from github pull request #95 (redmine#9977) +- add patch 0117 from github pull request #109 (redmine#10380) +- add patch 0118 from github pull request 
#110 (redmine#10437) +- add patch 0119 from github pull request #113 (redmine#10558) +- add patch 0120 from github pull request #114 (redmine#10240) +- add patch 0121 from github pull request #115 (redmine#10220) + +* Thu Jun 01 2017 Xu Meihong 2014.2-34.eayunstack.dev +- add patch 0108 from github pull request #99 (redmine#10217) + +* Thu May 18 2017 Xu Meihong 2014.2-33.eayunstack.dev +- add patch 0105 from github pull request #93 (redmine#10055) +- add patch 0106 from github pull request #94 (redmine#10056) +- add patch 0107 from github pull request #96 (redmine#9118) + +* Mon May 08 2017 Xu Meihong 2014.2-32.eayunstack.dev +- add patch 0098 from github pull request #86 (redmine#9968) +- add patch 0099 from github pull request #87 (redmine#9970) +- add patch 0100 from github pull request #90 (redmine#9989) +- add patch 0101 from github pull request #89 (redmine#9990) +- add patch 0102 from neutron-qos github pull request #25 (redmine#10008) +- add patch 0103 from github pull request #88 (redmine#9982) +- add patch 0104 from github pull request #91 (redmine#9998) + +* Wed Apr 26 2017 Xu Meihong 2014.2-31.eayunstack.dev +- add patch 0095 from github pull request #82 (redmine#9667) +- add patch 0096 from github pull request #83 (redmine#9154) +- add patch 0097 from github pull request #85 (redmine#9861) + +* Thu Mar 23 2017 Xu Meihong 2014.2-30.eayunstack.dev +- add patch 0090 from github pull request #76 (redmine#9154) +- add patch 0091 from github pull request #77 (redmine#9588) +- add patch 0092 from github pull request #79 (redmine#9641) +- add patch 0093 from github pull request #78 (redmine#9518) +- add patch 0094 from neutron-qos github pull request #24 (redmine#9642) + +* Tue Mar 07 2017 Xu Meihong 2014.2-29.eayunstack.dev +- add patch 0089 from github pull request #75 + * Mon Mar 06 2017 Xu Meihong 2014.2-28.eayunstack.1.1 - add patch 0077-0078 from github pull request #65 (redmine#9118) - add patch 0079 from github pull request #69 (redmine#9249)