From 6fdadc6ff39cd6f48d40a578726f9084d7c01a41 Mon Sep 17 00:00:00 2001 From: Fedor Partanskiy Date: Mon, 2 Sep 2024 17:31:54 +0300 Subject: [PATCH] add v2 with fabric-protos-go-apiv2 and google.golang.org/protobuf Signed-off-by: Fedor Partanskiy --- .gitignore | 27 + v2/configtx/application.go | 553 ++ v2/configtx/application_test.go | 2800 +++++++ v2/configtx/capabilities.go | 93 + v2/configtx/channel.go | 170 + v2/configtx/channel_test.go | 595 ++ v2/configtx/config.go | 638 ++ v2/configtx/config_test.go | 2251 ++++++ v2/configtx/consortiums.go | 430 ++ v2/configtx/consortiums_test.go | 2322 ++++++ v2/configtx/constants.go | 90 + v2/configtx/example_test.go | 1303 ++++ .../internal/policydsl/policyparser.go | 384 + .../internal/policydsl/policyparser_test.go | 487 ++ v2/configtx/membership/membership.go | 86 + v2/configtx/msp.go | 947 +++ v2/configtx/msp_test.go | 1693 +++++ v2/configtx/orderer.go | 1063 +++ v2/configtx/orderer/orderer.go | 100 + v2/configtx/orderer_test.go | 6619 +++++++++++++++++ v2/configtx/organization.go | 136 + v2/configtx/organization_test.go | 204 + v2/configtx/policies.go | 277 + v2/configtx/policies_test.go | 135 + v2/configtx/signer.go | 190 + v2/configtx/signer_test.go | 425 ++ v2/configtx/update.go | 242 + v2/configtx/update_test.go | 594 ++ v2/go.mod | 21 + v2/go.sum | 52 + v2/protolator/api.go | 147 + v2/protolator/dynamic.go | 149 + v2/protolator/dynamic_test.go | 140 + v2/protolator/integration/integration_test.go | 327 + v2/protolator/integration/testdata/block.json | 591 ++ v2/protolator/integration/testdata/block.pb | Bin 0 -> 9940 bytes v2/protolator/json.go | 479 ++ v2/protolator/json_test.go | 311 + v2/protolator/nested.go | 111 + v2/protolator/nested_test.go | 141 + v2/protolator/protoext/commonext/common.go | 153 + .../protoext/commonext/common_test.go | 124 + .../protoext/commonext/commonext_test.go | 50 + v2/protolator/protoext/commonext/configtx.go | 115 + .../protoext/commonext/configuration.go | 242 + v2/protolator/protoext/commonext/policies.go | 38 + v2/protolator/protoext/decorate.go | 77 + v2/protolator/protoext/decorate_test.go | 311 + .../protoext/ledger/rwsetext/rwset.go | 125 + .../protoext/ledger/rwsetext/rwsetext_test.go | 22 + v2/protolator/protoext/mspext/msp_config.go | 38 + .../protoext/mspext/msp_principal.go | 40 + v2/protolator/protoext/mspext/mspext_test.go | 21 + .../protoext/ordererext/configuration.go | 184 + .../protoext/ordererext/ordererext_test.go | 26 + .../protoext/peerext/configuration.go | 138 + .../protoext/peerext/peerext_test.go | 39 + v2/protolator/protoext/peerext/proposal.go | 57 + .../protoext/peerext/proposal_response.go | 33 + v2/protolator/protoext/peerext/transaction.go | 76 + v2/protolator/statically_opaque.go | 152 + v2/protolator/statically_opaque_test.go | 181 + v2/protolator/testprotos/sample.go | 203 + v2/protolator/testprotos/sample.pb.go | 779 ++ v2/protolator/testprotos/sample.proto | 78 + v2/protolator/variably_opaque.go | 124 + v2/protolator/variably_opaque_test.go | 150 + 67 files changed, 30899 insertions(+) create mode 100644 .gitignore create mode 100644 v2/configtx/application.go create mode 100644 v2/configtx/application_test.go create mode 100644 v2/configtx/capabilities.go create mode 100644 v2/configtx/channel.go create mode 100644 v2/configtx/channel_test.go create mode 100644 v2/configtx/config.go create mode 100644 v2/configtx/config_test.go create mode 100644 v2/configtx/consortiums.go create mode 100644 v2/configtx/consortiums_test.go create mode 100644 v2/configtx/constants.go 
create mode 100644 v2/configtx/example_test.go create mode 100644 v2/configtx/internal/policydsl/policyparser.go create mode 100644 v2/configtx/internal/policydsl/policyparser_test.go create mode 100644 v2/configtx/membership/membership.go create mode 100644 v2/configtx/msp.go create mode 100644 v2/configtx/msp_test.go create mode 100644 v2/configtx/orderer.go create mode 100644 v2/configtx/orderer/orderer.go create mode 100644 v2/configtx/orderer_test.go create mode 100644 v2/configtx/organization.go create mode 100644 v2/configtx/organization_test.go create mode 100644 v2/configtx/policies.go create mode 100644 v2/configtx/policies_test.go create mode 100644 v2/configtx/signer.go create mode 100644 v2/configtx/signer_test.go create mode 100644 v2/configtx/update.go create mode 100644 v2/configtx/update_test.go create mode 100644 v2/go.mod create mode 100644 v2/go.sum create mode 100644 v2/protolator/api.go create mode 100644 v2/protolator/dynamic.go create mode 100644 v2/protolator/dynamic_test.go create mode 100644 v2/protolator/integration/integration_test.go create mode 100644 v2/protolator/integration/testdata/block.json create mode 100644 v2/protolator/integration/testdata/block.pb create mode 100644 v2/protolator/json.go create mode 100644 v2/protolator/json_test.go create mode 100644 v2/protolator/nested.go create mode 100644 v2/protolator/nested_test.go create mode 100644 v2/protolator/protoext/commonext/common.go create mode 100644 v2/protolator/protoext/commonext/common_test.go create mode 100644 v2/protolator/protoext/commonext/commonext_test.go create mode 100644 v2/protolator/protoext/commonext/configtx.go create mode 100644 v2/protolator/protoext/commonext/configuration.go create mode 100644 v2/protolator/protoext/commonext/policies.go create mode 100644 v2/protolator/protoext/decorate.go create mode 100644 v2/protolator/protoext/decorate_test.go create mode 100644 v2/protolator/protoext/ledger/rwsetext/rwset.go create mode 100644 v2/protolator/protoext/ledger/rwsetext/rwsetext_test.go create mode 100644 v2/protolator/protoext/mspext/msp_config.go create mode 100644 v2/protolator/protoext/mspext/msp_principal.go create mode 100644 v2/protolator/protoext/mspext/mspext_test.go create mode 100644 v2/protolator/protoext/ordererext/configuration.go create mode 100644 v2/protolator/protoext/ordererext/ordererext_test.go create mode 100644 v2/protolator/protoext/peerext/configuration.go create mode 100644 v2/protolator/protoext/peerext/peerext_test.go create mode 100644 v2/protolator/protoext/peerext/proposal.go create mode 100644 v2/protolator/protoext/peerext/proposal_response.go create mode 100644 v2/protolator/protoext/peerext/transaction.go create mode 100644 v2/protolator/statically_opaque.go create mode 100644 v2/protolator/statically_opaque_test.go create mode 100644 v2/protolator/testprotos/sample.go create mode 100644 v2/protolator/testprotos/sample.pb.go create mode 100644 v2/protolator/testprotos/sample.proto create mode 100644 v2/protolator/variably_opaque.go create mode 100644 v2/protolator/variably_opaque_test.go diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..168f240 --- /dev/null +++ b/.gitignore @@ -0,0 +1,27 @@ +#SPDX-License-Identifier: Apache-2.0 +.#* +*~ +*# +/bin +/build +/.build +*.cov +/docs/build/* +.DS_Store +.*-dummy +.gradle +.idea +*.iml +*.log +.project +/release +report.xml +results.xml +.settings +.*.sw* +tags +.tags +TESTS*.xml +.tox/ +.vagrant/ +.vscode diff --git a/v2/configtx/application.go b/v2/configtx/application.go 
new file mode 100644 index 0000000..87f4b6a --- /dev/null +++ b/v2/configtx/application.go @@ -0,0 +1,553 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "errors" + "fmt" + + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + pb "github.com/hyperledger/fabric-protos-go-apiv2/peer" + "google.golang.org/protobuf/proto" +) + +// Application is a copy of the orderer configuration with the addition of an anchor peers +// list in the organization definition. +type Application struct { + Organizations []Organization + Capabilities []string + Policies map[string]Policy + ACLs map[string]string + ModPolicy string +} + +// ApplicationGroup encapsulates the part of the config that controls +// application channels. +type ApplicationGroup struct { + applicationGroup *cb.ConfigGroup +} + +// ApplicationOrg encapsulates the parts of the config that control +// an application organization's configuration. +type ApplicationOrg struct { + orgGroup *cb.ConfigGroup + name string +} + +// MSP returns an OrganizationMSP object that can be used to configure the organization's MSP. +func (a *ApplicationOrg) MSP() *OrganizationMSP { + return &OrganizationMSP{ + configGroup: a.orgGroup, + } +} + +// Application returns the application group the updated config. +func (c *ConfigTx) Application() *ApplicationGroup { + applicationGroup := c.updated.ChannelGroup.Groups[ApplicationGroupKey] + return &ApplicationGroup{applicationGroup: applicationGroup} +} + +// Organization returns the application org from the updated config. +func (a *ApplicationGroup) Organization(name string) *ApplicationOrg { + organizationGroup, ok := a.applicationGroup.Groups[name] + if !ok { + return nil + } + return &ApplicationOrg{name: name, orgGroup: organizationGroup} +} + +// SetOrganization sets the organization config group for the given application +// org key in an existing Application configuration's Groups map. +// If the application org already exists in the current configuration, its value will be overwritten. +func (a *ApplicationGroup) SetOrganization(org Organization) error { + orgGroup, err := newApplicationOrgConfigGroup(org) + if err != nil { + return fmt.Errorf("failed to create application org %s: %v", org.Name, err) + } + + a.applicationGroup.Groups[org.Name] = orgGroup + + return nil +} + +// RemoveOrganization removes an org from the Application group. +// Removal will panic if the application group does not exist. +func (a *ApplicationGroup) RemoveOrganization(orgName string) { + delete(a.applicationGroup.Groups, orgName) +} + +// Configuration returns the existing application configuration values from a config +// transaction as an Application type. This can be used to retrieve existing values for the application +// prior to updating the application configuration. 
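+//
+// For example, a minimal sketch of reading the current values back (assuming
+// config is a *cb.Config unmarshaled from an existing channel configuration):
+//
+//	c := New(config)
+//	app, err := c.Application().Configuration()
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, org := range app.Organizations {
+//		// inspect org.Name, org.AnchorPeers, org.Policies, ...
+//	}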
+func (a *ApplicationGroup) Configuration() (Application, error) { + var applicationOrgs []Organization + for orgName := range a.applicationGroup.Groups { + orgConfig, err := a.Organization(orgName).Configuration() + if err != nil { + return Application{}, fmt.Errorf("retrieving application org %s: %v", orgName, err) + } + + applicationOrgs = append(applicationOrgs, orgConfig) + } + + capabilities, err := a.Capabilities() + if err != nil { + return Application{}, fmt.Errorf("retrieving application capabilities: %v", err) + } + + policies, err := a.Policies() + if err != nil { + return Application{}, fmt.Errorf("retrieving application policies: %v", err) + } + + acls, err := a.ACLs() + if err != nil { + return Application{}, fmt.Errorf("retrieving application acls: %v", err) + } + + return Application{ + Organizations: applicationOrgs, + Capabilities: capabilities, + Policies: policies, + ACLs: acls, + }, nil +} + +// Configuration returns the existing application org configuration values +// from the updated config. +func (a *ApplicationOrg) Configuration() (Organization, error) { + org, err := getOrganization(a.orgGroup, a.name) + if err != nil { + return Organization{}, err + } + return org, nil +} + +// Capabilities returns a map of enabled application capabilities +// from the updated config. +func (a *ApplicationGroup) Capabilities() ([]string, error) { + capabilities, err := getCapabilities(a.applicationGroup) + if err != nil { + return nil, fmt.Errorf("retrieving application capabilities: %v", err) + } + + return capabilities, nil +} + +// AddCapability sets capability to the provided channel config. +// If the provided capability already exists in current configuration, this action +// will be a no-op. +func (a *ApplicationGroup) AddCapability(capability string) error { + capabilities, err := a.Capabilities() + if err != nil { + return err + } + + err = addCapability(a.applicationGroup, capabilities, AdminsPolicyKey, capability) + if err != nil { + return err + } + + return nil +} + +// RemoveCapability removes capability to the provided channel config. +func (a *ApplicationGroup) RemoveCapability(capability string) error { + capabilities, err := a.Capabilities() + if err != nil { + return err + } + + err = removeCapability(a.applicationGroup, capabilities, AdminsPolicyKey, capability) + if err != nil { + return err + } + + return nil +} + +// Policies returns a map of policies for the application config group in +// the updatedconfig. +func (a *ApplicationGroup) Policies() (map[string]Policy, error) { + return getPolicies(a.applicationGroup.Policies) +} + +// SetModPolicy sets the specified modification policy for the application group. +func (a *ApplicationGroup) SetModPolicy(modPolicy string) error { + if modPolicy == "" { + return errors.New("non empty mod policy is required") + } + + a.applicationGroup.ModPolicy = modPolicy + + return nil +} + +// SetPolicy sets the specified policy in the application group's config policy map. +// If the policy already exists in current configuration, its value will be overwritten. +func (a *ApplicationGroup) SetPolicy(policyName string, policy Policy) error { + err := setPolicy(a.applicationGroup, policyName, policy) + if err != nil { + return fmt.Errorf("failed to set policy '%s': %v", policyName, err) + } + + return nil +} + +// SetPolicies sets the specified policies in the application group's config policy map. +// If the policies already exist in current configuration, the values will be replaced with new policies. 
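+//
+// A minimal sketch (assuming c is a *ConfigTx built with New from an existing
+// channel config; the rules shown are only illustrative):
+//
+//	err := c.Application().SetPolicies(map[string]Policy{
+//		ReadersPolicyKey: {Type: ImplicitMetaPolicyType, Rule: "ANY Readers"},
+//		WritersPolicyKey: {Type: ImplicitMetaPolicyType, Rule: "ANY Writers"},
+//		AdminsPolicyKey:  {Type: ImplicitMetaPolicyType, Rule: "MAJORITY Admins"},
+//	})
+//	if err != nil {
+//		// handle error
+//	}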
+func (a *ApplicationGroup) SetPolicies(policies map[string]Policy) error { + err := setPolicies(a.applicationGroup, policies) + if err != nil { + return fmt.Errorf("failed to set policies: %v", err) + } + + return nil +} + +// RemovePolicy removes an existing policy from an application's configuration. +// Removal will panic if the application group does not exist. +func (a *ApplicationGroup) RemovePolicy(policyName string) error { + policies, err := a.Policies() + if err != nil { + return err + } + + removePolicy(a.applicationGroup, policyName, policies) + return nil +} + +// Policies returns the map of policies for a specific application org in +// the updated config. +func (a *ApplicationOrg) Policies() (map[string]Policy, error) { + return getPolicies(a.orgGroup.Policies) +} + +// SetModPolicy sets the specified modification policy for the application organization group. +func (a *ApplicationOrg) SetModPolicy(modPolicy string) error { + if modPolicy == "" { + return errors.New("non empty mod policy is required") + } + + a.orgGroup.ModPolicy = modPolicy + + return nil +} + +// SetPolicy sets the specified policy in the application org group's config policy map. +// If an Organization policy already exists in current configuration, its value will be overwritten. +func (a *ApplicationOrg) SetPolicy(policyName string, policy Policy) error { + err := setPolicy(a.orgGroup, policyName, policy) + if err != nil { + return fmt.Errorf("failed to set policy '%s': %v", policyName, err) + } + + return nil +} + +// SetPolicies sets the specified policies in the application org group's config policy map. +// If the policies already exist in current configuration, the values will be replaced with new policies. +func (a *ApplicationOrg) SetPolicies(policies map[string]Policy) error { + err := setPolicies(a.orgGroup, policies) + if err != nil { + return fmt.Errorf("failed to set policies: %v", err) + } + + return nil +} + +// RemovePolicy removes an existing policy from an application organization. +func (a *ApplicationOrg) RemovePolicy(policyName string) error { + policies, err := a.Policies() + if err != nil { + return err + } + + removePolicy(a.orgGroup, policyName, policies) + return nil +} + +// AnchorPeers returns the list of anchor peers for an application org +// in the updated config. +func (a *ApplicationOrg) AnchorPeers() ([]Address, error) { + anchorPeerConfigValue, ok := a.orgGroup.Values[AnchorPeersKey] + if !ok { + return nil, nil + } + + anchorPeersProto := &pb.AnchorPeers{} + + err := proto.Unmarshal(anchorPeerConfigValue.Value, anchorPeersProto) + if err != nil { + return nil, fmt.Errorf("failed unmarshaling %s's anchor peer endpoints: %v", a.name, err) + } + + if len(anchorPeersProto.AnchorPeers) == 0 { + return nil, nil + } + + anchorPeers := []Address{} + for _, ap := range anchorPeersProto.AnchorPeers { + anchorPeers = append(anchorPeers, Address{ + Host: ap.Host, + Port: int(ap.Port), + }) + } + + return anchorPeers, nil +} + +// AddAnchorPeer adds an anchor peer to an application org's configuration +// in the updated config. 
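+//
+// For example, a minimal sketch (assuming the organization already exists in
+// the config; the endpoint shown is illustrative):
+//
+//	err := c.Application().Organization("Org1").AddAnchorPeer(Address{
+//		Host: "peer0.org1.example.com",
+//		Port: 7051,
+//	})
+//	if err != nil {
+//		// handle error
+//	}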
+func (a *ApplicationOrg) AddAnchorPeer(newAnchorPeer Address) error { + anchorPeersProto := &pb.AnchorPeers{} + + if anchorPeerConfigValue, ok := a.orgGroup.Values[AnchorPeersKey]; ok { + // Unmarshal existing anchor peers if the config value exists + err := proto.Unmarshal(anchorPeerConfigValue.Value, anchorPeersProto) + if err != nil { + return fmt.Errorf("failed unmarshaling anchor peer endpoints: %v", err) + } + } + + // Persist existing anchor peers if found + anchorProtos := anchorPeersProto.AnchorPeers + + for _, anchorPeer := range anchorProtos { + if anchorPeer.Host == newAnchorPeer.Host && anchorPeer.Port == int32(newAnchorPeer.Port) { + return nil + } + } + + // Append new anchor peer to anchorProtos + anchorProtos = append(anchorProtos, &pb.AnchorPeer{ + Host: newAnchorPeer.Host, + Port: int32(newAnchorPeer.Port), + }) + + // Add anchor peers config value back to application org + err := setValue(a.orgGroup, anchorPeersValue(anchorProtos), AdminsPolicyKey) + if err != nil { + return err + } + return nil +} + +// RemoveAnchorPeer removes an anchor peer from an application org's configuration +// in the updated config. +func (a *ApplicationOrg) RemoveAnchorPeer(anchorPeerToRemove Address) error { + anchorPeersProto := &pb.AnchorPeers{} + + if anchorPeerConfigValue, ok := a.orgGroup.Values[AnchorPeersKey]; ok { + // Unmarshal existing anchor peers if the config value exists + err := proto.Unmarshal(anchorPeerConfigValue.Value, anchorPeersProto) + if err != nil { + return fmt.Errorf("failed unmarshaling anchor peer endpoints for application org %s: %v", a.name, err) + } + } + + existingAnchorPeers := anchorPeersProto.AnchorPeers[:0] + for _, anchorPeer := range anchorPeersProto.AnchorPeers { + if anchorPeer.Host != anchorPeerToRemove.Host || anchorPeer.Port != int32(anchorPeerToRemove.Port) { + existingAnchorPeers = append(existingAnchorPeers, anchorPeer) + + // Add anchor peers config value back to application org + err := setValue(a.orgGroup, anchorPeersValue(existingAnchorPeers), AdminsPolicyKey) + if err != nil { + return fmt.Errorf("failed to remove anchor peer %v from org %s: %v", anchorPeerToRemove, a.name, err) + } + } + } + + if len(existingAnchorPeers) == len(anchorPeersProto.AnchorPeers) { + return fmt.Errorf("could not find anchor peer %s:%d in application org %s", anchorPeerToRemove.Host, anchorPeerToRemove.Port, a.name) + } + + // Add anchor peers config value back to application org + err := setValue(a.orgGroup, anchorPeersValue(existingAnchorPeers), AdminsPolicyKey) + if err != nil { + return fmt.Errorf("failed to remove anchor peer %v from org %s: %v", anchorPeerToRemove, a.name, err) + } + + return nil +} + +// ACLs returns a map of ACLS for given config application. +func (a *ApplicationGroup) ACLs() (map[string]string, error) { + aclConfigValue, ok := a.applicationGroup.Values[ACLsKey] + if !ok { + return nil, nil + } + + aclProtos := &pb.ACLs{} + + err := proto.Unmarshal(aclConfigValue.Value, aclProtos) + if err != nil { + return nil, fmt.Errorf("unmarshaling %s: %v", ACLsKey, err) + } + + retACLs := map[string]string{} + for apiResource, policyRef := range aclProtos.Acls { + retACLs[apiResource] = policyRef.PolicyRef + } + + return retACLs, nil +} + +// SetACLs sets ACLS to an existing channel config application. +// If an ACL already exists in current configuration, it will be replaced with new ACL. 
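+//
+// A minimal sketch (the ACL resource name and policy path below are
+// illustrative, not values defined by this package):
+//
+//	err := c.Application().SetACLs(map[string]string{
+//		"peer/Propose": "/Channel/Application/Writers",
+//	})
+//	if err != nil {
+//		// handle error
+//	}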
+func (a *ApplicationGroup) SetACLs(acls map[string]string) error { + err := setValue(a.applicationGroup, aclValues(acls), AdminsPolicyKey) + if err != nil { + return err + } + + return nil +} + +// RemoveACLs a list of ACLs from given channel config application. +// Specifying acls that do not exist in the application ConfigGroup of the channel config will not return a error. +// Removal will panic if application group does not exist. +func (a *ApplicationGroup) RemoveACLs(acls []string) error { + configACLs, err := a.ACLs() + if err != nil { + return err + } + + for _, acl := range acls { + delete(configACLs, acl) + } + + err = setValue(a.applicationGroup, aclValues(configACLs), AdminsPolicyKey) + if err != nil { + return err + } + + return nil +} + +// SetMSP updates the MSP config for the specified application +// org group. +func (a *ApplicationOrg) SetMSP(updatedMSP MSP) error { + currentMSP, err := a.MSP().Configuration() + if err != nil { + return fmt.Errorf("retrieving msp: %v", err) + } + + if currentMSP.Name != updatedMSP.Name { + return errors.New("MSP name cannot be changed") + } + + err = updatedMSP.validateCACerts() + if err != nil { + return err + } + + err = a.setMSPConfig(updatedMSP) + if err != nil { + return err + } + + return nil +} + +func (a *ApplicationOrg) setMSPConfig(updatedMSP MSP) error { + mspConfig, err := newMSPConfig(updatedMSP) + if err != nil { + return fmt.Errorf("new msp config: %v", err) + } + + err = setValue(a.orgGroup, mspValue(mspConfig), AdminsPolicyKey) + if err != nil { + return err + } + + return nil +} + +// newApplicationGroupTemplate returns the application component of the channel +// configuration with only the names of the application organizations. +// By default, it sets the mod_policy of all elements to "Admins". +func newApplicationGroupTemplate(application Application) (*cb.ConfigGroup, error) { + var err error + + applicationGroup := newConfigGroup() + applicationGroup.ModPolicy = AdminsPolicyKey + + if application.ModPolicy != "" { + applicationGroup.ModPolicy = application.ModPolicy + } + + if err = setPolicies(applicationGroup, application.Policies); err != nil { + return nil, err + } + + if len(application.ACLs) > 0 { + err = setValue(applicationGroup, aclValues(application.ACLs), AdminsPolicyKey) + if err != nil { + return nil, err + } + } + + if len(application.Capabilities) > 0 { + err = setValue(applicationGroup, capabilitiesValue(application.Capabilities), AdminsPolicyKey) + if err != nil { + return nil, err + } + } + + for _, org := range application.Organizations { + applicationGroup.Groups[org.Name] = newConfigGroup() + } + + return applicationGroup, nil +} + +// newApplicationGroup returns the application component of the channel +// configuration with the entire configuration for application organizations. +// By default, it sets the mod_policy of all elements to "Admins". +func newApplicationGroup(application Application) (*cb.ConfigGroup, error) { + applicationGroup, err := newApplicationGroupTemplate(application) + if err != nil { + return nil, err + } + + for _, org := range application.Organizations { + applicationGroup.Groups[org.Name], err = newOrgConfigGroup(org) + if err != nil { + return nil, fmt.Errorf("org group '%s': %v", org.Name, err) + } + } + + return applicationGroup, nil +} + +// aclValues returns the config definition for an application's resources based ACL definitions. +// It is a value for the /Channel/Application/. 
+func aclValues(acls map[string]string) *standardConfigValue { + a := &pb.ACLs{ + Acls: make(map[string]*pb.APIResource), + } + + for apiResource, policyRef := range acls { + a.Acls[apiResource] = &pb.APIResource{PolicyRef: policyRef} + } + + return &standardConfigValue{ + key: ACLsKey, + value: a, + } +} + +// anchorPeersValue returns the config definition for an org's anchor peers. +// It is a value for the /Channel/Application/*. +func anchorPeersValue(anchorPeers []*pb.AnchorPeer) *standardConfigValue { + return &standardConfigValue{ + key: AnchorPeersKey, + value: &pb.AnchorPeers{AnchorPeers: anchorPeers}, + } +} diff --git a/v2/configtx/application_test.go b/v2/configtx/application_test.go new file mode 100644 index 0000000..adfa4cc --- /dev/null +++ b/v2/configtx/application_test.go @@ -0,0 +1,2800 @@ +/* +Copyright IBM Corp All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "bytes" + "crypto/ecdsa" + "crypto/x509" + "encoding/base64" + "fmt" + "math/big" + "testing" + + "github.com/hyperledger/fabric-config/v2/configtx/internal/policydsl" + "github.com/hyperledger/fabric-config/v2/protolator" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/peerext" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + . "github.com/onsi/gomega" + "google.golang.org/protobuf/proto" +) + +func TestNewApplicationGroup(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + application, _ := baseApplication(t) + + expectedApplicationGroup := ` +{ + "groups": { + "Org1": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "ACLs": { + "mod_policy": "Admins", + "value": "CgwKBGFjbDESBAoCaGk=", + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": "CggKBFYxXzMSAA==", + "version": "0" + } + }, + "version": "0" +} +` + + applicationGroup, err := newApplicationGroupTemplate(application) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedApplication := &cb.ConfigGroup{} + err = protolator.DeepUnmarshalJSON(bytes.NewBufferString(expectedApplicationGroup), expectedApplication) + gt.Expect(err).ToNot(HaveOccurred()) + gt.Expect(proto.Equal(applicationGroup, expectedApplication)).To(BeTrue()) +} + +func TestNewApplicationGroupFailure(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + applicationMod func(*Application) + expectedErr string + }{ + { + testName: "When application group policy is empty", + applicationMod: func(a *Application) { + a.Policies = nil + }, + expectedErr: "no policies defined", + }, + { + testName: "When no Admins policies are defined", + applicationMod: func(application *Application) { + delete(application.Policies, AdminsPolicyKey) + }, + expectedErr: "no Admins policy defined", + }, + { + testName: "When no Readers policies are defined", + applicationMod: func(application *Application) { + 
delete(application.Policies, ReadersPolicyKey) + }, + expectedErr: "no Readers policy defined", + }, + { + testName: "When no Writers policies are defined", + applicationMod: func(application *Application) { + delete(application.Policies, WritersPolicyKey) + }, + expectedErr: "no Writers policy defined", + }, + { + testName: "When ImplicitMetaPolicy rules' subpolicy is missing", + applicationMod: func(application *Application) { + application.Policies[ReadersPolicyKey] = Policy{ + Rule: "ALL", + Type: ImplicitMetaPolicyType, + } + }, + expectedErr: "invalid implicit meta policy rule: 'ALL': expected two space separated " + + "tokens, but got 1", + }, + { + testName: "When ImplicitMetaPolicy rule is invalid", + applicationMod: func(application *Application) { + application.Policies[ReadersPolicyKey] = Policy{ + Rule: "ANYY Readers", + Type: ImplicitMetaPolicyType, + } + }, + expectedErr: "invalid implicit meta policy rule: 'ANYY Readers': unknown rule type " + + "'ANYY', expected ALL, ANY, or MAJORITY", + }, + { + testName: "When SignatureTypePolicy rule is invalid", + applicationMod: func(application *Application) { + application.Policies[ReadersPolicyKey] = Policy{ + Rule: "ANYY Readers", + Type: SignaturePolicyType, + } + }, + expectedErr: "invalid signature policy rule: 'ANYY Readers': Cannot transition " + + "token types from VARIABLE [ANYY] to VARIABLE [Readers]", + }, + { + testName: "When ImplicitMetaPolicy type is unknown policy type", + applicationMod: func(application *Application) { + application.Policies[ReadersPolicyKey] = Policy{ + Type: "GreenPolicy", + } + }, + expectedErr: "unknown policy type: GreenPolicy", + }, + } + + for _, tt := range tests { + tt := tt // capture range variable + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + application, _ := baseApplication(t) + tt.applicationMod(&application) + + configGrp, err := newApplicationGroupTemplate(application) + gt.Expect(err).To(MatchError(tt.expectedErr)) + gt.Expect(configGrp).To(BeNil()) + }) + } +} + +func TestAppOrgAddAnchorPeer(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseApplicationConf, _ := baseApplication(t) + + applicationGroup, err := newApplicationGroupTemplate(baseApplicationConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ApplicationGroupKey: applicationGroup, + }, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{}, + }, + } + + c := New(config) + + newOrg1AnchorPeer := Address{ + Host: "host3", + Port: 123, + } + + newOrg2AnchorPeer := Address{ + Host: "host4", + Port: 123, + } + + expectedUpdatedConfigJSON := ` +{ + "channel_group": { + "groups": { + "Application": { + "groups": { + "Org1": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": { + "AnchorPeers": { + "mod_policy": "Admins", + "value": { + "anchor_peers": [ + { + "host": "host3", + "port": 123 + } + ] + }, + "version": "0" + } + }, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": { + "AnchorPeers": { + "mod_policy": "Admins", + "value": { + "anchor_peers": [ + { + "host": "host4", + "port": 123 + } + ] + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": 
"Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "ACLs": { + "mod_policy": "Admins", + "value": { + "acls": { + "acl1": { + "policy_ref": "hi" + } + } + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + }, + "sequence": "0" +} +` + + expectedUpdatedConfig := &cb.Config{} + + err = protolator.DeepUnmarshalJSON(bytes.NewBufferString(expectedUpdatedConfigJSON), expectedUpdatedConfig) + gt.Expect(err).ToNot(HaveOccurred()) + + err = c.Application().Organization("Org1").AddAnchorPeer(newOrg1AnchorPeer) + gt.Expect(err).NotTo(HaveOccurred()) + + err = c.Application().Organization("Org2").AddAnchorPeer(newOrg2AnchorPeer) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(proto.Equal(c.updated, expectedUpdatedConfig)).To(BeTrue()) +} + +func TestAppOrgRemoveAnchorPeer(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseApplicationConf, _ := baseApplication(t) + + applicationGroup, err := newApplicationGroupTemplate(baseApplicationConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Application": applicationGroup, + }, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{}, + }, + } + + c := New(config) + + expectedUpdatedConfigJSON := ` +{ + "channel_group": { + "groups": { + "Application": { + "groups": { + "Org1": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": { + "AnchorPeers": { + "mod_policy": "Admins", + "value": {}, + "version": "0" + } + }, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "ACLs": { + "mod_policy": "Admins", + "value": { + "acls": { + "acl1": { + "policy_ref": "hi" + } + } + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + }, + "sequence": "0" +} +` + + anchorPeer1 := Address{Host: "host1", Port: 123} + applicationOrg1 := c.Application().Organization("Org1") + err = applicationOrg1.AddAnchorPeer(anchorPeer1) + gt.Expect(err).NotTo(HaveOccurred()) + expectedUpdatedConfig := &cb.Config{} + + err = protolator.DeepUnmarshalJSON(bytes.NewBufferString(expectedUpdatedConfigJSON), expectedUpdatedConfig) + gt.Expect(err).NotTo(HaveOccurred()) + + err = applicationOrg1.RemoveAnchorPeer(anchorPeer1) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(proto.Equal(c.updated, 
expectedUpdatedConfig)).To(BeTrue()) +} + +func TestAppOrgRemoveAnchorPeerFailure(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + orgName string + anchorPeerToRemove Address + configValues map[string]*cb.ConfigValue + expectedErr string + }{ + { + testName: "When the unmarshaling existing anchor peer proto fails", + orgName: "Org1", + anchorPeerToRemove: Address{Host: "host1", Port: 123}, + configValues: map[string]*cb.ConfigValue{AnchorPeersKey: {Value: []byte("a little fire")}}, + expectedErr: "failed unmarshaling anchor peer endpoints for application org Org1: proto", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseApplicationConf, _ := baseApplication(t) + + applicationGroup, err := newApplicationGroupTemplate(baseApplicationConf) + gt.Expect(err).NotTo(HaveOccurred()) + + applicationGroup.Groups["Org1"].Values = tt.configValues + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Application": applicationGroup, + }, + }, + } + + c := New(config) + + err = c.Application().Organization(tt.orgName).RemoveAnchorPeer(tt.anchorPeerToRemove) + gt.Expect(err.Error()).To(ContainSubstring(tt.expectedErr)) + }) + } +} + +func TestAnchorPeers(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + + application, _ := baseApplication(t) + applicationGroup, err := newApplicationGroupTemplate(application) + gt.Expect(err).NotTo(HaveOccurred()) + + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + anchorPeers, err := c.Application().Organization("Org1").AnchorPeers() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(anchorPeers).To(BeNil()) + gt.Expect(anchorPeers).To(HaveLen(0)) + + expectedAnchorPeer := Address{Host: "host1", Port: 123} + err = c.Application().Organization("Org1").AddAnchorPeer(expectedAnchorPeer) + gt.Expect(err).NotTo(HaveOccurred()) + + anchorPeers, err = c.Application().Organization("Org1").AnchorPeers() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(anchorPeers).To(HaveLen(1)) + gt.Expect(anchorPeers[0]).To(Equal(expectedAnchorPeer)) + + err = c.Application().Organization("Org1").RemoveAnchorPeer(expectedAnchorPeer) + gt.Expect(err).NotTo(HaveOccurred()) + + anchorPeers, err = c.Application().Organization("Org1").AnchorPeers() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(anchorPeers).To(BeNil()) + gt.Expect(anchorPeers).To(HaveLen(0)) +} + +func TestSetACL(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + configMod func(*cb.Config) + newACL map[string]string + expectedACL map[string]string + expectedErr string + }{ + { + testName: "success", + newACL: map[string]string{"acl2": "newACL"}, + expectedACL: map[string]string{ + "acl2": "newACL", + }, + expectedErr: "", + }, + { + testName: "ACL overwrite", + newACL: map[string]string{"acl1": "overwrite acl"}, + expectedACL: map[string]string{ + "acl1": "overwrite acl", + }, + expectedErr: "", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + baseApplication, _ := baseApplication(t) + applicationGroup, err := newApplicationGroupTemplate(baseApplication) + + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + if 
tt.configMod != nil { + tt.configMod(config) + } + c := New(config) + + err = c.Application().SetACLs(tt.newACL) + if tt.expectedErr != "" { + gt.Expect(err).To(MatchError(tt.expectedErr)) + } else { + gt.Expect(err).NotTo(HaveOccurred()) + acls, err := c.Application().ACLs() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(acls).To(Equal(tt.expectedACL)) + } + }) + } +} + +func TestAppOrgRemoveACL(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + configMod func(*cb.Config) + removeACL []string + expectedACL map[string]string + expectedErr string + }{ + { + testName: "success", + removeACL: []string{"acl1", "acl2"}, + expectedACL: map[string]string{ + "acl3": "acl3Value", + }, + expectedErr: "", + }, + { + testName: "remove non-existing acls", + removeACL: []string{"bad-acl1", "bad-acl2"}, + expectedACL: map[string]string{ + "acl1": "hi", + "acl2": "acl2Value", + "acl3": "acl3Value", + }, + expectedErr: "", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + baseApplication, _ := baseApplication(t) + baseApplication.ACLs["acl2"] = "acl2Value" + baseApplication.ACLs["acl3"] = "acl3Value" + applicationGroup, err := newApplicationGroupTemplate(baseApplication) + + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + if tt.configMod != nil { + tt.configMod(config) + } + + c := New(config) + + err = c.Application().RemoveACLs(tt.removeACL) + if tt.expectedErr != "" { + gt.Expect(err).To(MatchError(tt.expectedErr)) + } else { + gt.Expect(err).NotTo(HaveOccurred()) + acls, err := c.Application().ACLs() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(acls).To(Equal(tt.expectedACL)) + } + }) + } +} + +func TestSetApplicationOrg(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + application, _ := baseApplication(t) + appGroup, err := newApplicationGroup(application) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Application": appGroup, + }, + }, + } + + c := New(config) + + baseMSP, _ := baseMSP(t) + org := Organization{ + Name: "Org3", + Policies: applicationOrgStandardPolicies(), + MSP: baseMSP, + AnchorPeers: []Address{ + { + Host: "127.0.0.1", + Port: 7051, + }, + }, + } + + certBase64, crlBase64 := certCRLBase64(t, org.MSP) + expectedConfigJSON := fmt.Sprintf(` +{ + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "LifecycleEndorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "AnchorPeers": { + "mod_policy": "Admins", + "value": { + "anchor_peers": [ + { + "host": "127.0.0.1", + "port": 7051 + } + ] + }, + "version": 
"0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" +} +`, certBase64, crlBase64) + + err = c.Application().SetOrganization(org) + gt.Expect(err).NotTo(HaveOccurred()) + + actualApplicationConfigGroup := c.Application().Organization("Org3").orgGroup + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &peerext.DynamicApplicationOrgGroup{ConfigGroup: actualApplicationConfigGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(buf.String()).To(MatchJSON(expectedConfigJSON)) +} + +func TestSetApplicationOrgFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + application, _ := baseApplication(t) + appGroup, err := newApplicationGroupTemplate(application) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Application": appGroup, + }, + }, + } + + c := New(config) + + org := Organization{ + Name: "Org3", + } + + err = c.Application().SetOrganization(org) + gt.Expect(err).To(MatchError("failed to create application org Org3: no policies defined")) +} + +func TestApplicationConfiguration(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + baseApplicationConf, _ := baseApplication(t) + applicationGroup, err := newApplicationGroupTemplate(baseApplicationConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ApplicationGroupKey: applicationGroup, + }, + }, + } + + c := New(config) + + for _, org := range baseApplicationConf.Organizations { + err = c.Application().SetOrganization(org) + gt.Expect(err).NotTo(HaveOccurred()) + } + + applicationConfig, err := c.Application().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(applicationConfig.ACLs).To(Equal(baseApplicationConf.ACLs)) + gt.Expect(applicationConfig.Capabilities).To(Equal(baseApplicationConf.Capabilities)) + gt.Expect(applicationConfig.Policies).To(Equal(baseApplicationConf.Policies)) + gt.Expect(applicationConfig.Organizations).To(ContainElements(baseApplicationConf.Organizations)) +} + +func TestApplicationConfigurationFailure(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + configMod func(ConfigTx, Application, *GomegaWithT) + expectedErr string + }{ + { + testName: "Retrieving application org failed", + configMod: func(c ConfigTx, appOrg Application, gt *GomegaWithT) { + for _, org := range appOrg.Organizations { + if org.Name == "Org2" { + err := 
c.Application().SetOrganization(org) + gt.Expect(err).NotTo(HaveOccurred()) + } + } + }, + expectedErr: "retrieving application org Org1: config does not contain value for MSP", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseApplicationConf, _ := baseApplication(t) + applicationGroup, err := newApplicationGroupTemplate(baseApplicationConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ApplicationGroupKey: applicationGroup, + }, + }, + } + + c := New(config) + if tt.configMod != nil { + tt.configMod(c, baseApplicationConf, gt) + } + + c = New(c.updated) + + _, err = c.Application().Configuration() + gt.Expect(err).To(MatchError(tt.expectedErr)) + }) + } +} + +func TestApplicationACLs(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseApplicationConf, _ := baseApplication(t) + applicationGroup, err := newApplicationGroupTemplate(baseApplicationConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ApplicationGroupKey: applicationGroup, + }, + }, + } + + c := New(config) + + applicationACLs, err := c.Application().ACLs() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(applicationACLs).To(Equal(baseApplicationConf.ACLs)) +} + +func TestEmptyApplicationACLs(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseApplicationConf, _ := baseApplication(t) + baseApplicationConf.ACLs = nil + applicationGroup, err := newApplicationGroupTemplate(baseApplicationConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ApplicationGroupKey: applicationGroup, + }, + }, + } + + c := New(config) + + applicationACLs, err := c.Application().ACLs() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(applicationACLs).To(BeNil()) +} + +func TestApplicationACLsFailure(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseApplicationConf, _ := baseApplication(t) + applicationGroup, err := newApplicationGroupTemplate(baseApplicationConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ApplicationGroupKey: applicationGroup, + }, + }, + } + + config.ChannelGroup.Groups[ApplicationGroupKey].Values[ACLsKey] = &cb.ConfigValue{ + Value: []byte("another little fire"), + } + + c := New(config) + + applicationACLs, err := c.Application().ACLs() + gt.Expect(err.Error()).To(ContainSubstring("unmarshaling ACLs: proto:")) + gt.Expect(err.Error()).To(ContainSubstring("cannot parse invalid wire-format data")) + gt.Expect(applicationACLs).To(BeNil()) +} + +func TestApplicationCapabilities(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseApplicationConf, _ := baseApplication(t) + applicationGroup, err := newApplicationGroupTemplate(baseApplicationConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ApplicationGroupKey: applicationGroup, + }, + }, + } + + c := New(config) + + applicationCapabilities, err := c.Application().Capabilities() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(applicationCapabilities).To(Equal(baseApplicationConf.Capabilities)) + + // Delete the capabilities key and assert retrieval to return nil + 
delete(c.Application().applicationGroup.Values, CapabilitiesKey) + applicationCapabilities, err = c.Application().Capabilities() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(applicationCapabilities).To(BeNil()) +} + +func TestAppOrgAddApplicationCapability(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + capability string + equalToOriginal bool + expectedConfigGroupJSON string + }{ + { + testName: "success -- adding new capability", + capability: "new_capability", + equalToOriginal: false, + expectedConfigGroupJSON: `{ + "groups": { + "Org1": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "ACLs": { + "mod_policy": "Admins", + "value": { + "acls": { + "acl1": { + "policy_ref": "hi" + } + } + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {}, + "new_capability": {} + } + }, + "version": "0" + } + }, + "version": "0" +} +`, + }, + { + testName: "success -- when capability already exists", + capability: "V1_3", + equalToOriginal: true, + expectedConfigGroupJSON: `{ + "groups": { + "Org1": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "ACLs": { + "mod_policy": "Admins", + "value": { + "acls": { + "acl1": { + "policy_ref": "hi" + } + } + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + } + }, + "version": "0" +} +`, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + baseApp, _ := baseApplication(t) + appGroup, err := newApplicationGroupTemplate(baseApp) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ApplicationGroupKey: appGroup, + }, + }, + } + + c := New(config) + + err = c.Application().AddCapability(tt.capability) + gt.Expect(err).NotTo(HaveOccurred()) + + updatedApplicationGroupJSON := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&updatedApplicationGroupJSON, &peerext.DynamicApplicationGroup{ConfigGroup: c.Application().applicationGroup}) + 
gt.Expect(err).NotTo(HaveOccurred()) + originalApplicationGroupJSON := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&originalApplicationGroupJSON, &peerext.DynamicApplicationGroup{ConfigGroup: c.original.ChannelGroup.Groups[ApplicationGroupKey]}) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(updatedApplicationGroupJSON.String()).To(Equal(tt.expectedConfigGroupJSON)) + if !tt.equalToOriginal { + gt.Expect(updatedApplicationGroupJSON).NotTo(Equal(originalApplicationGroupJSON)) + } else { + gt.Expect(updatedApplicationGroupJSON).To(Equal(originalApplicationGroupJSON)) + } + }) + } +} + +func TestAppOrgAddApplicationCapabilityFailures(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + capability string + applicationGroup func(ag *cb.ConfigGroup) + expectedErr string + }{ + { + testName: "when retrieving existing capabilities", + capability: "V1_3", + applicationGroup: func(ag *cb.ConfigGroup) { + ag.Values = map[string]*cb.ConfigValue{ + CapabilitiesKey: { + Value: []byte("foobar"), + }, + } + }, + expectedErr: "retrieving application capabilities: unmarshaling capabilities: proto", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseApp, _ := baseApplication(t) + appGroup, err := newApplicationGroupTemplate(baseApp) + gt.Expect(err).NotTo(HaveOccurred()) + tt.applicationGroup(appGroup) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ApplicationGroupKey: appGroup, + }, + }, + } + + c := New(config) + + err = c.Application().AddCapability(tt.capability) + gt.Expect(err.Error()).To(ContainSubstring(tt.expectedErr)) + }) + } +} + +func TestAppOrgRemoveApplicationCapability(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseApp, _ := baseApplication(t) + appGroup, err := newApplicationGroupTemplate(baseApp) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ApplicationGroupKey: appGroup, + }, + }, + } + + c := New(config) + + expectedConfigGroupJSON := `{ + "groups": { + "Org1": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "ACLs": { + "mod_policy": "Admins", + "value": { + "acls": { + "acl1": { + "policy_ref": "hi" + } + } + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": {} + }, + "version": "0" + } + }, + "version": "0" +} +` + capability := "V1_3" + err = c.Application().RemoveCapability(capability) + gt.Expect(err).NotTo(HaveOccurred()) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &peerext.DynamicApplicationGroup{ConfigGroup: c.updated.ChannelGroup.Groups[ApplicationGroupKey]}) + gt.Expect(err).NotTo(HaveOccurred()) + + 
gt.Expect(buf.String()).To(Equal(expectedConfigGroupJSON)) +} + +func TestAppOrgRemoveApplicationCapabilityFailures(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + capability string + applicationGroup func(ag *cb.ConfigGroup) + expectedErr string + }{ + { + testName: "when capability does not exist", + capability: "V2_0", + applicationGroup: func(ag *cb.ConfigGroup) { + }, + expectedErr: "capability not set", + }, + { + testName: "when retrieving existing capabilities", + capability: "V1_3", + applicationGroup: func(ag *cb.ConfigGroup) { + ag.Values = map[string]*cb.ConfigValue{ + CapabilitiesKey: { + Value: []byte("foobar"), + }, + } + }, + expectedErr: "retrieving application capabilities: unmarshaling capabilities: proto", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseApp, _ := baseApplication(t) + appGroup, err := newApplicationGroupTemplate(baseApp) + gt.Expect(err).NotTo(HaveOccurred()) + tt.applicationGroup(appGroup) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ApplicationGroupKey: appGroup, + }, + }, + } + + c := New(config) + + err = c.Application().RemoveCapability(tt.capability) + gt.Expect(err.Error()).To(ContainSubstring(tt.expectedErr)) + }) + } +} + +func TestApplicationOrg(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channel := Channel{ + Consortium: "SampleConsortium", + Application: Application{ + Policies: standardPolicies(), + Organizations: []Organization{baseApplicationOrg(t)}, + }, + ModPolicy: AdminsPolicyKey, + } + channelGroup, err := newChannelGroup(channel) + gt.Expect(err).NotTo(HaveOccurred()) + orgGroup, err := newApplicationOrgConfigGroup(channel.Application.Organizations[0]) + gt.Expect(err).NotTo(HaveOccurred()) + channelGroup.Groups[ApplicationGroupKey].Groups["Org1"] = orgGroup + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + expectedOrg := channel.Application.Organizations[0] + + tests := []struct { + name string + orgName string + }{ + { + name: "success", + orgName: "Org1", + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + org, err := c.Application().Organization(tc.orgName).Configuration() + gt.Expect(err).ToNot(HaveOccurred()) + gt.Expect(expectedOrg).To(Equal(org)) + }) + } +} + +func TestAppOrgRemoveApplicationOrg(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channel := Channel{ + Consortium: "SampleConsortium", + Application: Application{ + Policies: standardPolicies(), + Organizations: []Organization{baseApplicationOrg(t)}, + }, + } + channelGroup, err := newChannelGroup(channel) + gt.Expect(err).NotTo(HaveOccurred()) + orgGroup, err := newOrgConfigGroup(channel.Application.Organizations[0]) + gt.Expect(err).NotTo(HaveOccurred()) + channelGroup.Groups[ApplicationGroupKey].Groups["Org1"] = orgGroup + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + c.Application().RemoveOrganization("Org1") + gt.Expect(c.updated.ChannelGroup.Groups[ApplicationGroupKey].Groups["Org1"]).To(BeNil()) +} + +func TestAppOrgRemoveApplicationOrgPolicy(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + applicationGroup := newConfigGroup() + + application, _ := baseApplication(t) + + for _, org := range application.Organizations { + org.Policies = 
applicationOrgStandardPolicies() + org.Policies["TestPolicy"] = Policy{ + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + } + org.ModPolicy = AdminsPolicyKey + + orgGroup, err := newOrgConfigGroup(org) + gt.Expect(err).NotTo(HaveOccurred()) + + applicationGroup.Groups[org.Name] = orgGroup + } + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + application.Organizations[0].Policies = applicationOrgStandardPolicies() + expectedOrgConfigGroup, _ := newOrgConfigGroup(application.Organizations[0]) + expectedPolicies := expectedOrgConfigGroup.Policies + + applicationOrg1 := c.Application().Organization("Org1") + err := applicationOrg1.RemovePolicy("TestPolicy") + gt.Expect(err).NotTo(HaveOccurred()) + + actualOrg1Policies := applicationOrg1.orgGroup.Policies + gt.Expect(actualOrg1Policies).To(Equal(expectedPolicies)) +} + +func TestAppOrgRemoveApplicationOrgPolicyFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + applicationGroup := newConfigGroup() + + application, _ := baseApplication(t) + for _, org := range application.Organizations { + org.Policies = applicationOrgStandardPolicies() + orgGroup, err := newOrgConfigGroup(org) + gt.Expect(err).NotTo(HaveOccurred()) + applicationGroup.Groups[org.Name] = orgGroup + } + + applicationGroup.Groups["Org1"].Policies["TestPolicy"] = &cb.ConfigPolicy{ + Policy: &cb.Policy{ + Type: 15, + }, + } + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + err := c.Application().Organization("Org1").RemovePolicy("TestPolicy") + gt.Expect(err).To(MatchError("unknown policy type: 15")) +} + +func TestSetApplicationOrgModPolicy(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + applicationGroup := newConfigGroup() + application, _ := baseApplication(t) + + for _, org := range application.Organizations { + orgGroup, err := newOrgConfigGroup(org) + gt.Expect(err).NotTo(HaveOccurred()) + + applicationGroup.Groups[org.Name] = orgGroup + } + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + applicationOrg1 := c.Application().Organization("Org1") + err := applicationOrg1.SetModPolicy("TestModPolicy") + gt.Expect(err).NotTo(HaveOccurred()) + + actualModPolicy := applicationOrg1.orgGroup.GetModPolicy() + gt.Expect(actualModPolicy).To(Equal("TestModPolicy")) +} + +func TestSetApplicationOrgModPolicyFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + applicationGroup := newConfigGroup() + application, _ := baseApplication(t) + + for _, org := range application.Organizations { + orgGroup, err := newOrgConfigGroup(org) + gt.Expect(err).NotTo(HaveOccurred()) + + applicationGroup.Groups[org.Name] = orgGroup + } + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + err := c.Application().Organization("Org1").SetModPolicy("") + gt.Expect(err).To(MatchError("non empty mod policy is required")) +} + +func TestSetApplicationOrgPolicy(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + applicationGroup := newConfigGroup() + + application, _ := baseApplication(t) + + for _, org := range application.Organizations { + 
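+		// Give each base org the standard application-org policies before its
+		// config group is built and added to the Application group under test.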
org.Policies = applicationOrgStandardPolicies() + + orgGroup, err := newOrgConfigGroup(org) + gt.Expect(err).NotTo(HaveOccurred()) + + applicationGroup.Groups[org.Name] = orgGroup + } + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + application.Organizations[0].Policies = applicationOrgStandardPolicies() + expectedOrgConfigGroup, _ := newOrgConfigGroup(application.Organizations[0]) + expectedPolicies := expectedOrgConfigGroup.Policies + + sp, err := policydsl.FromString("OR('Org1MSP.admin', 'Org1MSP.peer','Org1MSP.client')") + gt.Expect(err).NotTo(HaveOccurred()) + + signaturePolicy, err := proto.Marshal(sp) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedPolicies["TestPolicy"] = &cb.ConfigPolicy{ + ModPolicy: AdminsPolicyKey, + Policy: &cb.Policy{ + Type: int32(cb.Policy_SIGNATURE), + Value: signaturePolicy, + }, + } + + applicationOrg1 := c.Application().Organization("Org1") + err = applicationOrg1.SetPolicy("TestPolicy", Policy{ + Type: SignaturePolicyType, + Rule: "OR('Org1MSP.admin', 'Org1MSP.peer','Org1MSP.client')", + }) + gt.Expect(err).NotTo(HaveOccurred()) + + actualOrg1Policies := applicationOrg1.orgGroup.Policies + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(actualOrg1Policies).To(Equal(expectedPolicies)) +} + +func TestSetApplicationOrgPolicyFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + applicationGroup := newConfigGroup() + + application, _ := baseApplication(t) + for _, org := range application.Organizations { + org.Policies = applicationOrgStandardPolicies() + + orgGroup, err := newOrgConfigGroup(org) + gt.Expect(err).NotTo(HaveOccurred()) + + applicationGroup.Groups[org.Name] = orgGroup + } + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + err := c.Application().Organization("Org1").SetPolicy("TestPolicy", Policy{}) + gt.Expect(err).To(MatchError("failed to set policy 'TestPolicy': unknown policy type: ")) +} + +func TestSetApplicationOrgPolicies(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + applicationGroup := newConfigGroup() + + application, _ := baseApplication(t) + + for _, org := range application.Organizations { + org.Policies = applicationOrgStandardPolicies() + org.Policies["TestPolicy_Remove"] = org.Policies[EndorsementPolicyKey] + + orgGroup, err := newOrgConfigGroup(org) + gt.Expect(err).NotTo(HaveOccurred()) + + applicationGroup.Groups[org.Name] = orgGroup + } + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + applicationOrg1 := c.Application().Organization("Org1") + policies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + EndorsementPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + LifecycleEndorsementPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy_Add1": { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY 
Endorsement", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy_Add2": { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + } + err := applicationOrg1.SetPolicies(policies) + gt.Expect(err).NotTo(HaveOccurred()) + + updatedPolicies, err := applicationOrg1.Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedPolicies).To(Equal(policies)) + + originalPolicies := c.original.ChannelGroup.Groups[ApplicationGroupKey].Groups["Org1"].Policies + gt.Expect(originalPolicies).To(Equal(applicationGroup.Groups["Org1"].Policies)) +} + +func TestSetApplicationOrgPoliciesFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + applicationGroup := newConfigGroup() + + application, _ := baseApplication(t) + for _, org := range application.Organizations { + org.Policies = applicationOrgStandardPolicies() + + orgGroup, err := newOrgConfigGroup(org) + gt.Expect(err).NotTo(HaveOccurred()) + + applicationGroup.Groups[org.Name] = orgGroup + } + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + policies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + EndorsementPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + }, + LifecycleEndorsementPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + }, + "TestPolicy": {}, + } + + err := c.Application().Organization("Org1").SetPolicies(policies) + gt.Expect(err).To(MatchError("failed to set policies: unknown policy type: ")) +} + +func TestSetApplicationModPolicy(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + application, _ := baseApplication(t) + + applicationGroup, err := newApplicationGroupTemplate(application) + gt.Expect(err).NotTo(HaveOccurred()) + + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + a := c.Application() + err = a.SetModPolicy("TestModPolicy") + gt.Expect(err).NotTo(HaveOccurred()) + + updatedModPolicy := a.applicationGroup.GetModPolicy() + gt.Expect(updatedModPolicy).To(Equal("TestModPolicy")) +} + +func TestSetApplicationModPolicyFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + application, _ := baseApplication(t) + + applicationGroup, err := newApplicationGroupTemplate(application) + gt.Expect(err).NotTo(HaveOccurred()) + + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + err = c.Application().SetModPolicy("") + gt.Expect(err).To(MatchError("non empty mod policy is required")) +} + +func TestSetApplicationPolicy(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + application, _ := baseApplication(t) + + applicationGroup, err := newApplicationGroupTemplate(application) + gt.Expect(err).NotTo(HaveOccurred()) + + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + expectedPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + 
Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy": { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + } + + a := c.Application() + err = a.SetPolicy("TestPolicy", Policy{Type: ImplicitMetaPolicyType, Rule: "MAJORITY Endorsement"}) + gt.Expect(err).NotTo(HaveOccurred()) + + updatedPolicies, err := a.Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedPolicies).To(Equal(expectedPolicies)) +} + +func TestSetApplicationPolicyFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + application, _ := baseApplication(t) + + applicationGroup, err := newApplicationGroupTemplate(application) + gt.Expect(err).NotTo(HaveOccurred()) + + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + expectedPolicies := application.Policies + expectedPolicies["TestPolicy"] = expectedPolicies[EndorsementPolicyKey] + + err = c.Application().SetPolicy("TestPolicy", Policy{}) + gt.Expect(err).To(MatchError("failed to set policy 'TestPolicy': unknown policy type: ")) +} + +func TestSetApplicationPolicies(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + application, _ := baseApplication(t) + application.Policies["TestPolicy_Remove"] = application.Policies[ReadersPolicyKey] + + applicationGroup, err := newApplicationGroupTemplate(application) + gt.Expect(err).NotTo(HaveOccurred()) + + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + newPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy_Add1": { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy_Add2": { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + } + + a := c.Application() + err = a.SetPolicies(newPolicies) + gt.Expect(err).NotTo(HaveOccurred()) + + updatedPolicies, err := a.Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedPolicies).To(Equal(newPolicies)) + + originalPolicies := c.original.ChannelGroup.Groups[ApplicationGroupKey].Policies + gt.Expect(originalPolicies).To(Equal(applicationGroup.Policies)) +} + +func TestSetApplicationPoliciesFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + application, _ := baseApplication(t) + + applicationGroup, err := newApplicationGroupTemplate(application) + gt.Expect(err).NotTo(HaveOccurred()) + + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + newPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + 
}, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + "TestPolicy": {}, + } + + err = c.Application().SetPolicies(newPolicies) + gt.Expect(err).To(MatchError("failed to set policies: unknown policy type: ")) +} + +func TestAppOrgRemoveApplicationPolicy(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + application, _ := baseApplication(t) + + applicationGroup, err := newApplicationGroupTemplate(application) + gt.Expect(err).NotTo(HaveOccurred()) + applicationGroup.Policies["TestPolicy"] = applicationGroup.Policies[AdminsPolicyKey] + + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + expectedPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + } + + a := c.Application() + err = a.RemovePolicy("TestPolicy") + gt.Expect(err).NotTo(HaveOccurred()) + + updatedPolicies, err := a.Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedPolicies).To(Equal(expectedPolicies)) +} + +func TestAppOrgRemoveApplicationPolicyFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup := newConfigGroup() + application, _ := baseApplication(t) + + applicationGroup, err := newApplicationGroupTemplate(application) + gt.Expect(err).NotTo(HaveOccurred()) + + applicationGroup.Policies[EndorsementPolicyKey] = &cb.ConfigPolicy{ + Policy: &cb.Policy{ + Type: 15, + }, + } + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + err = c.Application().RemovePolicy("TestPolicy") + gt.Expect(err).To(MatchError("unknown policy type: 15")) +} + +func TestApplicationMSP(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + application, _ := baseApplication(t) + applicationGroup, err := newApplicationGroup(application) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ApplicationGroupKey: applicationGroup, + }, + }, + } + + c := New(config) + + msp, err := c.Application().Organization("Org1").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp).To(Equal(application.Organizations[0].MSP)) +} + +func TestSetApplicationMSPFailure(t *testing.T) { + t.Parallel() + + tests := []struct { + spec string + mspMod func(MSP) MSP + orgName string + expectedErr string + }{ + { + spec: "updating msp name", + mspMod: func(msp MSP) MSP { + msp.Name = "thiscantbegood" + return msp + }, + orgName: "Org1", + expectedErr: "MSP name cannot be changed", + }, + { + spec: "invalid root ca cert keyusage", + mspMod: func(msp MSP) MSP { + msp.RootCerts = []*x509.Certificate{ + { + SerialNumber: big.NewInt(7), + KeyUsage: x509.KeyUsageKeyAgreement, + }, + } + return msp + }, + orgName: "Org1", + expectedErr: "invalid root cert: KeyUsage must be x509.KeyUsageCertSign. 
serial number: 7", + }, + { + spec: "root ca cert is not a ca", + mspMod: func(msp MSP) MSP { + msp.RootCerts = []*x509.Certificate{ + { + SerialNumber: big.NewInt(7), + KeyUsage: x509.KeyUsageCertSign, + IsCA: false, + }, + } + return msp + }, + orgName: "Org1", + expectedErr: "invalid root cert: must be a CA certificate. serial number: 7", + }, + { + spec: "invalid intermediate ca keyusage", + mspMod: func(msp MSP) MSP { + msp.IntermediateCerts = []*x509.Certificate{ + { + SerialNumber: big.NewInt(7), + KeyUsage: x509.KeyUsageKeyAgreement, + }, + } + return msp + }, + orgName: "Org1", + expectedErr: "invalid intermediate cert: KeyUsage must be x509.KeyUsageCertSign. serial number: 7", + }, + { + spec: "invalid intermediate cert -- not signed by root cert", + mspMod: func(msp MSP) MSP { + cert, _ := generateCACertAndPrivateKey(t, "org1.example.com") + cert.SerialNumber = big.NewInt(7) + msp.IntermediateCerts = []*x509.Certificate{cert} + return msp + }, + orgName: "Org1", + expectedErr: "intermediate cert not signed by any root certs of this MSP. serial number: 7", + }, + { + spec: "tls root ca cert is not a ca", + mspMod: func(msp MSP) MSP { + msp.TLSRootCerts = []*x509.Certificate{ + { + SerialNumber: big.NewInt(7), + KeyUsage: x509.KeyUsageCertSign, + IsCA: false, + }, + } + return msp + }, + orgName: "Org1", + expectedErr: "invalid tls root cert: must be a CA certificate. serial number: 7", + }, + { + spec: "tls intemediate ca cert is not a ca", + mspMod: func(msp MSP) MSP { + msp.TLSIntermediateCerts = []*x509.Certificate{ + { + SerialNumber: big.NewInt(7), + KeyUsage: x509.KeyUsageCertSign, + IsCA: false, + }, + } + return msp + }, + orgName: "Org1", + expectedErr: "invalid tls intermediate cert: must be a CA certificate. serial number: 7", + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.spec, func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + channelGroup, _, err := baseApplicationChannelGroup(t) + gt.Expect(err).ToNot(HaveOccurred()) + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + org1MSP, err := c.Application().Organization("Org1").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + + org1MSP = tc.mspMod(org1MSP) + err = c.Application().Organization(tc.orgName).SetMSP(org1MSP) + gt.Expect(err).To(MatchError(tc.expectedErr)) + }) + } +} + +func TestSetApplicationMSP(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channelGroup, privateKeys, err := baseApplicationChannelGroup(t) + gt.Expect(err).ToNot(HaveOccurred()) + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + org1MSP, err := c.Application().Organization("Org1").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + org2MSP, err := c.Application().Organization("Org2").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + org1CertBase64, org1CRLBase64 := certCRLBase64(t, org1MSP) + org2CertBase64, org2CRLBase64 := certCRLBase64(t, org2MSP) + + newRootCert, newRootPrivKey := generateCACertAndPrivateKey(t, "anotherca-org1.example.com") + newRootCertBase64 := base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(newRootCert)) + org1MSP.RootCerts = append(org1MSP.RootCerts, newRootCert) + + newIntermediateCert, _ := generateIntermediateCACertAndPrivateKey(t, "anotherca-org1.example.com", newRootCert, newRootPrivKey) + newIntermediateCertBase64 := base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(newIntermediateCert)) + org1MSP.IntermediateCerts = append(org1MSP.IntermediateCerts, 
newIntermediateCert) + + cert := org1MSP.RootCerts[0] + privKey := privateKeys[0] + certToRevoke, _ := generateCertAndPrivateKeyFromCACert(t, "org1.example.com", cert, privKey) + signingIdentity := &SigningIdentity{ + Certificate: cert, + PrivateKey: privKey, + MSPID: "MSPID", + } + newCRL, err := org1MSP.CreateMSPCRL(signingIdentity, certToRevoke) + gt.Expect(err).NotTo(HaveOccurred()) + pemNewCRL, err := pemEncodeCRL(newCRL) + gt.Expect(err).NotTo(HaveOccurred()) + newCRLBase64 := base64.StdEncoding.EncodeToString(pemNewCRL) + org1MSP.RevocationList = append(org1MSP.RevocationList, newCRL) + + err = c.Application().Organization("Org1").SetMSP(org1MSP) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedConfigJSON := fmt.Sprintf(` +{ + "channel_group": { + "groups": { + "Application": { + "groups": { + "Org1": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "LifecycleEndorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s", + "%[2]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[3]s", + "%[4]s" + ], + "root_certs": [ + "%[1]s", + "%[5]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "LifecycleEndorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" 
+ }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[6]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[6]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[6]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[6]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[6]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[6]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[6]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[7]s" + ], + "root_certs": [ + "%[6]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[6]s" + ], + "tls_root_certs": [ + "%[6]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "ACLs": { + "mod_policy": "Admins", + "value": { + "acls": { + "acl1": { + "policy_ref": "hi" + } + } + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + }, + "sequence": "0" +} +`, org1CertBase64, newIntermediateCertBase64, org1CRLBase64, newCRLBase64, newRootCertBase64, org2CertBase64, org2CRLBase64) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, c.updated) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(MatchJSON(expectedConfigJSON)) +} + +func baseApplication(t *testing.T) (Application, []*ecdsa.PrivateKey) { + org1BaseMSP, org1PrivKey := baseMSP(t) + org2BaseMSP, org2PrivKey := baseMSP(t) + return Application{ + Policies: standardPolicies(), + Organizations: []Organization{ + { + Name: "Org1", + Policies: applicationOrgStandardPolicies(), + MSP: org1BaseMSP, + }, + { + Name: "Org2", + Policies: applicationOrgStandardPolicies(), + MSP: org2BaseMSP, + }, + }, + Capabilities: []string{ + "V1_3", + }, + ACLs: map[string]string{ + "acl1": "hi", + }, + ModPolicy: AdminsPolicyKey, + }, []*ecdsa.PrivateKey{org1PrivKey, org2PrivKey} +} diff --git a/v2/configtx/capabilities.go b/v2/configtx/capabilities.go new file mode 100644 index 0000000..c3fbe3d --- /dev/null +++ b/v2/configtx/capabilities.go @@ -0,0 +1,93 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "errors" + "fmt" + + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + "google.golang.org/protobuf/proto" +) + +// capabilitiesValue returns the config definition for a set of capabilities. +// It is a value for the /Channel/Orderer, Channel/Application/, and /Channel groups. +func capabilitiesValue(capabilities []string) *standardConfigValue { + c := &cb.Capabilities{ + Capabilities: make(map[string]*cb.Capability), + } + + for _, capability := range capabilities { + c.Capabilities[capability] = &cb.Capability{} + } + + return &standardConfigValue{ + key: CapabilitiesKey, + value: c, + } +} + +func addCapability(configGroup *cb.ConfigGroup, capabilities []string, modPolicy string, capability string) error { + for _, c := range capabilities { + if c == capability { + // if capability already exist, do nothing. + return nil + } + } + capabilities = append(capabilities, capability) + + err := setValue(configGroup, capabilitiesValue(capabilities), modPolicy) + if err != nil { + return fmt.Errorf("adding capability: %v", err) + } + + return nil +} + +func removeCapability(configGroup *cb.ConfigGroup, capabilities []string, modPolicy string, capability string) error { + var updatedCapabilities []string + + for _, c := range capabilities { + if c != capability { + updatedCapabilities = append(updatedCapabilities, c) + } + } + + if len(updatedCapabilities) == len(capabilities) { + return errors.New("capability not set") + } + + err := setValue(configGroup, capabilitiesValue(updatedCapabilities), modPolicy) + if err != nil { + return fmt.Errorf("removing capability: %v", err) + } + + return nil +} + +func getCapabilities(configGroup *cb.ConfigGroup) ([]string, error) { + capabilitiesValue, ok := configGroup.Values[CapabilitiesKey] + if !ok { + // no capabilities defined/enabled + return nil, nil + } + + capabilitiesProto := &cb.Capabilities{} + + err := proto.Unmarshal(capabilitiesValue.Value, capabilitiesProto) + if err != nil { + return nil, fmt.Errorf("unmarshaling capabilities: %v", err) + } + + capabilities := []string{} + + for capability := range capabilitiesProto.Capabilities { + capabilities = append(capabilities, capability) + } + + return capabilities, nil +} diff --git a/v2/configtx/channel.go b/v2/configtx/channel.go new file mode 100644 index 0000000..924aa54 --- /dev/null +++ b/v2/configtx/channel.go @@ -0,0 +1,170 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "errors" + "fmt" + + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" +) + +// ChannelGroup encapsulates the parts of the config that control channels. +// This type implements retrieval of the various channel config values. +type ChannelGroup struct { + channelGroup *cb.ConfigGroup +} + +// Channel returns the channel group from the updated config. +func (c *ConfigTx) Channel() *ChannelGroup { + return &ChannelGroup{channelGroup: c.updated.ChannelGroup} +} + +// Configuration returns a channel configuration value from a config transaction. 
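+//
+// A minimal usage sketch (illustrative only; baseConfig is assumed to be a
+// *cb.Config obtained elsewhere, e.g. decoded from a channel config block):
+//
+//	c := configtx.New(baseConfig)
+//	channelConfig, err := c.Channel().Configuration()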
+func (c *ChannelGroup) Configuration() (Channel, error) { + var ( + config Channel + err error + ) + + if _, ok := c.channelGroup.Values[ConsortiumKey]; ok { + consortiumProto := &cb.Consortium{} + err := unmarshalConfigValueAtKey(c.channelGroup, ConsortiumKey, consortiumProto) + if err != nil { + return Channel{}, err + } + config.Consortium = consortiumProto.Name + } + + if applicationGroup, ok := c.channelGroup.Groups[ApplicationGroupKey]; ok { + a := &ApplicationGroup{applicationGroup: applicationGroup} + config.Application, err = a.Configuration() + if err != nil { + return Channel{}, err + } + } + + if ordererGroup, ok := c.channelGroup.Groups[OrdererGroupKey]; ok { + o := &OrdererGroup{ordererGroup: ordererGroup, channelGroup: c.channelGroup} + config.Orderer, err = o.Configuration() + if err != nil { + return Channel{}, err + } + } + + if consortiumsGroup, ok := c.channelGroup.Groups[ConsortiumsGroupKey]; ok { + c := &ConsortiumsGroup{consortiumsGroup: consortiumsGroup} + config.Consortiums, err = c.Configuration() + if err != nil { + return Channel{}, err + } + } + + if _, ok := c.channelGroup.Values[CapabilitiesKey]; ok { + config.Capabilities, err = c.Capabilities() + if err != nil { + return Channel{}, err + } + } + + config.Policies, err = c.Policies() + if err != nil { + return Channel{}, err + } + + return config, nil +} + +// Policies returns a map of policies for channel configuration. +func (c *ChannelGroup) Policies() (map[string]Policy, error) { + return getPolicies(c.channelGroup.Policies) +} + +// SetModPolicy sets the specified modification policy for the channel group. +func (c *ChannelGroup) SetModPolicy(modPolicy string) error { + if modPolicy == "" { + return errors.New("non empty mod policy is required") + } + + c.channelGroup.ModPolicy = modPolicy + + return nil +} + +// SetPolicy sets the specified policy in the channel group's config policy map. +// If the policy already exists in current configuration, its value will be overwritten. +func (c *ChannelGroup) SetPolicy(policyName string, policy Policy) error { + return setPolicy(c.channelGroup, policyName, policy) +} + +// SetPolicies sets the specified policies in the channel group's config policy map. +// If the policies already exist in current configuration, the values will be replaced with new policies. +func (c *ChannelGroup) SetPolicies(policies map[string]Policy) error { + return setPolicies(c.channelGroup, policies) +} + +// RemovePolicy removes an existing channel level policy. +func (c *ChannelGroup) RemovePolicy(policyName string) error { + policies, err := c.Policies() + if err != nil { + return err + } + + removePolicy(c.channelGroup, policyName, policies) + return nil +} + +// Capabilities returns a map of enabled channel capabilities +// from a config transaction's updated config. +func (c *ChannelGroup) Capabilities() ([]string, error) { + capabilities, err := getCapabilities(c.channelGroup) + if err != nil { + return nil, fmt.Errorf("retrieving channel capabilities: %v", err) + } + + return capabilities, nil +} + +// AddCapability adds capability to the provided channel config. +// If the provided capability already exists in current configuration, this action +// will be a no-op. 
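+//
+// Sketch (assumes c is a ConfigTx created with New; "V2_0" is an illustrative
+// capability name):
+//
+//	err := c.Channel().AddCapability("V2_0")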
+func (c *ChannelGroup) AddCapability(capability string) error { + capabilities, err := c.Capabilities() + if err != nil { + return err + } + + err = addCapability(c.channelGroup, capabilities, AdminsPolicyKey, capability) + if err != nil { + return err + } + + return nil +} + +// RemoveCapability removes capability to the provided channel config. +func (c *ChannelGroup) RemoveCapability(capability string) error { + capabilities, err := c.Capabilities() + if err != nil { + return err + } + + err = removeCapability(c.channelGroup, capabilities, AdminsPolicyKey, capability) + if err != nil { + return err + } + + return nil +} + +// RemoveLegacyOrdererAddresses removes the deprecated top level orderer addresses config key and value +// from the channel config. +// In fabric 1.4, top level orderer addresses were migrated to the org level orderer endpoints +// While top-level orderer addresses are still supported, the organization value is preferred. +func (c *ChannelGroup) RemoveLegacyOrdererAddresses() { + delete(c.channelGroup.Values, OrdererAddressesKey) +} diff --git a/v2/configtx/channel_test.go b/v2/configtx/channel_test.go new file mode 100644 index 0000000..e90f7af --- /dev/null +++ b/v2/configtx/channel_test.go @@ -0,0 +1,595 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "bytes" + "testing" + + "github.com/hyperledger/fabric-config/v2/protolator" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/commonext" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + . "github.com/onsi/gomega" +) + +func TestChannelCapabilities(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + expectedCapabilities := []string{"V1_3"} + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Values: map[string]*cb.ConfigValue{}, + }, + } + + err := setValue(config.ChannelGroup, capabilitiesValue(expectedCapabilities), AdminsPolicyKey) + gt.Expect(err).NotTo(HaveOccurred()) + + c := New(config) + + channelCapabilities, err := c.Channel().Capabilities() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(channelCapabilities).To(Equal(expectedCapabilities)) + + // Delete the capabilities key and assert retrieval to return nil + delete(c.Channel().channelGroup.Values, CapabilitiesKey) + channelCapabilities, err = c.Channel().Capabilities() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(channelCapabilities).To(BeNil()) +} + +func TestSetChannelCapability(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Values: map[string]*cb.ConfigValue{ + CapabilitiesKey: {}, + }, + }, + } + + c := New(config) + + expectedConfigGroupJSON := `{ + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": { + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V3_0": {} + } + }, + "version": "0" + } + }, + "version": "0" +} +` + + err := c.Channel().AddCapability("V3_0") + gt.Expect(err).NotTo(HaveOccurred()) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &commonext.DynamicChannelGroup{ConfigGroup: c.Channel().channelGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(Equal(expectedConfigGroupJSON)) +} + +func TestSetChannelCapabilityFailures(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + capability string + config *cb.Config + expectedErr string + }{ + { + testName: "when retrieving existing capabilities", + capability: "V2_0", + config: 
&cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Values: map[string]*cb.ConfigValue{ + CapabilitiesKey: { + Value: []byte("foobar"), + }, + }, + }, + }, + expectedErr: "retrieving channel capabilities: unmarshaling capabilities: proto", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + c := New(tt.config) + + err := c.Channel().AddCapability(tt.capability) + gt.Expect(err.Error()).To(ContainSubstring(tt.expectedErr)) + }) + } +} + +func TestRemoveChannelCapability(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Values: map[string]*cb.ConfigValue{ + CapabilitiesKey: { + Value: marshalOrPanic(&cb.Capabilities{Capabilities: map[string]*cb.Capability{ + "V3_0": {}, + }}), + ModPolicy: AdminsPolicyKey, + }, + }, + }, + } + + c := New(config) + + expectedConfigGroupJSON := `{ + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": { + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": {} + }, + "version": "0" + } + }, + "version": "0" +} +` + + err := c.Channel().RemoveCapability("V3_0") + gt.Expect(err).NotTo(HaveOccurred()) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &commonext.DynamicChannelGroup{ConfigGroup: c.Channel().channelGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(Equal(expectedConfigGroupJSON)) +} + +func TestRemoveChannelCapabilityFailures(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + capability string + config *cb.Config + expectedErr string + }{ + { + testName: "when capability does not exist", + capability: "V2_0", + config: &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Values: map[string]*cb.ConfigValue{ + CapabilitiesKey: { + ModPolicy: AdminsPolicyKey, + }, + }, + }, + }, + expectedErr: "capability not set", + }, + { + testName: "when retrieving existing capabilities", + capability: "V2_0", + config: &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Values: map[string]*cb.ConfigValue{ + CapabilitiesKey: { + Value: []byte("foobar"), + }, + }, + }, + }, + expectedErr: "retrieving channel capabilities: unmarshaling capabilities: proto", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + c := New(tt.config) + + err := c.Channel().RemoveCapability(tt.capability) + gt.Expect(err.Error()).To(ContainSubstring(tt.expectedErr)) + }) + } +} + +func TestSetChannelModPolicy(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channel, _, err := baseApplicationChannelGroup(t) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: channel, + } + c := New(config) + + err = c.Channel().SetModPolicy("TestModPolicy") + gt.Expect(err).NotTo(HaveOccurred()) + + updatedChannelModPolicy := c.Channel().channelGroup.GetModPolicy() + + gt.Expect(updatedChannelModPolicy).To(Equal("TestModPolicy")) +} + +func TestSetChannelModPolicFailure(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channel, _, err := baseApplicationChannelGroup(t) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: channel, + } + c := New(config) + + err = c.Channel().SetModPolicy("") + gt.Expect(err).To(MatchError("non empty mod policy is required")) +} + +func TestSetChannelPolicy(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channel, _, err := baseApplicationChannelGroup(t) + 
gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: channel, + } + c := New(config) + + expectedPolicies := map[string]Policy{ + "TestPolicy": {Type: ImplicitMetaPolicyType, Rule: "ANY Readers", ModPolicy: AdminsPolicyKey}, + } + + err = c.Channel().SetPolicy("TestPolicy", Policy{Type: ImplicitMetaPolicyType, Rule: "ANY Readers"}) + gt.Expect(err).NotTo(HaveOccurred()) + + updatedChannelPolicy, err := getPolicies(c.updated.ChannelGroup.Policies) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedChannelPolicy).To(Equal(expectedPolicies)) + + baseChannel := c.original.ChannelGroup + gt.Expect(baseChannel.Policies).To(HaveLen(0)) + gt.Expect(baseChannel.Policies["TestPolicy"]).To(BeNil()) +} + +func TestSetChannelPolicies(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channel, _, err := baseApplicationChannelGroup(t) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: channel, + } + basePolicies := standardPolicies() + basePolicies["TestPolicy_Remove"] = Policy{Type: ImplicitMetaPolicyType, Rule: "ANY Readers"} + err = setPolicies(channel, basePolicies) + gt.Expect(err).NotTo(HaveOccurred()) + + c := New(config) + + newPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Admins", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy_Add1": { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy_Add2": { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + } + + err = c.Channel().SetPolicies(newPolicies) + gt.Expect(err).NotTo(HaveOccurred()) + + updatedChannelPolicies, err := c.Channel().Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedChannelPolicies).To(Equal(newPolicies)) + + originalChannel := c.original.ChannelGroup + gt.Expect(originalChannel.Policies).To(Equal(config.ChannelGroup.Policies)) +} + +func TestSetChannelPoliciesFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channel, _, err := baseApplicationChannelGroup(t) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: channel, + } + c := New(config) + + newPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + "TestPolicy": {}, + } + + err = c.Channel().SetPolicies(newPolicies) + gt.Expect(err).To(MatchError("unknown policy type: ")) +} + +func TestRemoveChannelPolicy(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channel, _, err := baseApplicationChannelGroup(t) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: channel, + } + policies := standardPolicies() + err = setPolicies(channel, policies) + gt.Expect(err).NotTo(HaveOccurred()) + c := New(config) + + expectedPolicies := map[string]Policy{ + "Admins": { + Type: "ImplicitMeta", + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + "Writers": { + Type: "ImplicitMeta", + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + } + + err = c.Channel().RemovePolicy(ReadersPolicyKey) + 
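+	// RemovePolicy should drop Readers from the updated config only; the
+	// original config, asserted below, still carries all three standard policies.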
gt.Expect(err).NotTo(HaveOccurred()) + + updatedChannelPolicy, err := c.Channel().Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedChannelPolicy).To(Equal(expectedPolicies)) + + originalChannel := c.original.ChannelGroup + gt.Expect(originalChannel.Policies).To(HaveLen(3)) + gt.Expect(originalChannel.Policies[ReadersPolicyKey]).ToNot(BeNil()) +} + +func TestRemoveChannelPolicyFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channel, _, err := baseApplicationChannelGroup(t) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: channel, + } + policies := standardPolicies() + err = setPolicies(channel, policies) + gt.Expect(err).NotTo(HaveOccurred()) + channel.Policies[ReadersPolicyKey] = &cb.ConfigPolicy{ + Policy: &cb.Policy{ + Type: 15, + }, + } + c := New(config) + + err = c.Channel().RemovePolicy(ReadersPolicyKey) + gt.Expect(err).To(MatchError("unknown policy type: 15")) +} + +func TestRemoveLegacyOrdererAddresses(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Values: map[string]*cb.ConfigValue{ + OrdererAddressesKey: { + ModPolicy: AdminsPolicyKey, + Value: marshalOrPanic(&cb.OrdererAddresses{ + Addresses: []string{"127.0.0.1:8050"}, + }), + }, + }, + }, + } + + c := New(config) + + c.Channel().RemoveLegacyOrdererAddresses() + + _, exists := c.Channel().channelGroup.Values[OrdererAddressesKey] + gt.Expect(exists).To(BeFalse()) +} + +func TestConfigurationFailures(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + config *cb.Config + expectedErr string + }{ + { + testName: "when retrieving existing Consortium", + config: &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Values: map[string]*cb.ConfigValue{ + ConsortiumKey: { + Value: []byte("foobar"), + }, + }, + }, + }, + expectedErr: "unmarshaling Consortium: proto:", + }, + { + testName: "when retrieving existing orderer group", + config: &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: {}, + }, + }, + }, + expectedErr: "cannot determine consensus type of orderer", + }, + { + testName: "when retrieving existing application group", + config: &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ApplicationGroupKey: { + Groups: map[string]*cb.ConfigGroup{ + "Org1": { + Values: map[string]*cb.ConfigValue{ + "foobar": { + Value: []byte("foobar"), + }, + }, + }, + }, + }, + }, + }, + }, + expectedErr: "retrieving application org Org1: config does not contain value for MSP", + }, + { + testName: "when retrieving existing consortiums group", + config: &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ConsortiumsGroupKey: { + Groups: map[string]*cb.ConfigGroup{ + "Consortium1": { + Groups: map[string]*cb.ConfigGroup{ + "Org1": { + Values: map[string]*cb.ConfigValue{ + "foobar": { + Value: []byte("foobar"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + expectedErr: "failed to retrieve organization Org1 from consortium Consortium1: ", + }, + { + testName: "when retrieving existing policies", + config: &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Policies: map[string]*cb.ConfigPolicy{ + AdminsPolicyKey: { + Policy: &cb.Policy{ + Value: []byte("foobar"), + }, + }, + }, + }, + }, + expectedErr: "unknown policy type: 0", + }, + { + testName: "when retrieving existing capabilities", + config: &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Values: 
map[string]*cb.ConfigValue{ + CapabilitiesKey: { + Value: []byte("foobar"), + }, + }, + }, + }, + expectedErr: "retrieving channel capabilities: unmarshaling capabilities: proto", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + c := New(tt.config) + + _, err := c.Channel().Configuration() + gt.Expect(err.Error()).To(ContainSubstring(tt.expectedErr)) + }) + } +} diff --git a/v2/configtx/config.go b/v2/configtx/config.go new file mode 100644 index 0000000..3eea6f0 --- /dev/null +++ b/v2/configtx/config.go @@ -0,0 +1,638 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package configtx provides utilities to create and modify a channel configuration transaction. +// Channel transactions contain the configuration data defining members and policies for a +// system or application channel and can be used to either create or modify existing channels. +// Both the creation of a new channel or modification of an existing channel outputs an unsigned +// transaction represented in a protobuf binary format that must be signed by the requisite number +// of members such that the transaction fulfills the channel's modification policy. +// +// See https://hyperledger-fabric.readthedocs.io/en/master/configtx.html#anatomy-of-a-configuration +// for an in-depth description of channel configuration's anatomy. +package configtx + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "strconv" + "strings" + + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + mb "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// Channel is a channel configuration. +type Channel struct { + Consortium string + Application Application + Orderer Orderer + Consortiums []Consortium + Capabilities []string + Policies map[string]Policy + ModPolicy string +} + +// Policy is an expression used to define rules for access to channels, chaincodes, etc. +type Policy struct { + Type string + Rule string + ModPolicy string +} + +// Organization is an organization in the channel configuration. +type Organization struct { + Name string + Policies map[string]Policy + MSP MSP + + // AnchorPeers contains the endpoints of anchor peers for each + // application organization. + AnchorPeers []Address + OrdererEndpoints []string + ModPolicy string +} + +// Address contains the hostname and port for an endpoint. +type Address struct { + Host string + Port int +} + +type standardConfigValue struct { + key string + value proto.Message +} + +type standardConfigPolicy struct { + key string + value *cb.Policy +} + +// ConfigTx wraps a config transaction. +type ConfigTx struct { + // original state of the config + original *cb.Config + // modified state of the config + updated *cb.Config +} + +// New creates a new ConfigTx from a Config protobuf. +// New will panic if given an empty config. +func New(config *cb.Config) ConfigTx { + return ConfigTx{ + original: config, + // Clone the base config for processing updates + updated: proto.Clone(config).(*cb.Config), + } +} + +// OriginalConfig returns the original unedited config. +func (c *ConfigTx) OriginalConfig() *cb.Config { + return c.original +} + +// UpdatedConfig returns the modified config. 
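+//
+// A sketch of the intended edit flow (illustrative; baseConfig and the channel
+// name "mychannel" are assumptions, error handling elided):
+//
+//	c := configtx.New(baseConfig)
+//	_ = c.Channel().AddCapability("V2_0") // modifies the updated config only
+//	marshaledUpdate, err := c.ComputeMarshaledUpdate("mychannel")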
+func (c *ConfigTx) UpdatedConfig() *cb.Config { + return c.updated +} + +// ComputeMarshaledUpdate computes the ConfigUpdate from a base and modified +// config transaction and returns the marshaled bytes. +func (c *ConfigTx) ComputeMarshaledUpdate(channelID string) ([]byte, error) { + if channelID == "" { + return nil, errors.New("channel ID is required") + } + + update, err := computeConfigUpdate(c.original, c.updated) + if err != nil { + return nil, fmt.Errorf("failed to compute update: %v", err) + } + + update.ChannelId = channelID + + marshaledUpdate, err := proto.Marshal(update) + if err != nil { + return nil, fmt.Errorf("marshaling config update: %v", err) + } + + return marshaledUpdate, nil +} + +// NewEnvelope creates an envelope with the provided marshaled config update +// and config signatures. +func NewEnvelope(marshaledUpdate []byte, signatures ...*cb.ConfigSignature) (*cb.Envelope, error) { + configUpdateEnvelope := &cb.ConfigUpdateEnvelope{ + ConfigUpdate: marshaledUpdate, + Signatures: signatures, + } + + c := &cb.ConfigUpdate{} + err := proto.Unmarshal(marshaledUpdate, c) + if err != nil { + return nil, fmt.Errorf("unmarshaling config update: %v", err) + } + + envelope, err := newEnvelope(cb.HeaderType_CONFIG_UPDATE, c.ChannelId, configUpdateEnvelope) + if err != nil { + return nil, err + } + + return envelope, nil +} + +// NewMarshaledCreateChannelTx creates a create channel config update +// transaction using the provided application channel configuration and returns +// the marshaled bytes. +func NewMarshaledCreateChannelTx(channelConfig Channel, channelID string) ([]byte, error) { + if channelID == "" { + return nil, errors.New("profile's channel ID is required") + } + + ct, err := defaultConfigTemplate(channelConfig) + if err != nil { + return nil, fmt.Errorf("creating default config template: %v", err) + } + + update, err := newChannelCreateConfigUpdate(channelID, channelConfig, ct) + if err != nil { + return nil, fmt.Errorf("creating channel create config update: %v", err) + } + + marshaledUpdate, err := proto.Marshal(update) + if err != nil { + return nil, fmt.Errorf("marshaling config update: %v", err) + } + return marshaledUpdate, nil +} + +// NewSystemChannelGenesisBlock creates a genesis block using the provided +// consortiums and orderer configuration and returns a block. +func NewSystemChannelGenesisBlock(channelConfig Channel, channelID string) (*cb.Block, error) { + if channelID == "" { + return nil, errors.New("system channel ID is required") + } + + systemChannelGroup, err := newSystemChannelGroup(channelConfig) + if err != nil { + return nil, fmt.Errorf("creating system channel group: %v", err) + } + + block, err := newGenesisBlock(systemChannelGroup, channelID) + if err != nil { + return nil, fmt.Errorf("creating system channel genesis block: %v", err) + } + + return block, nil +} + +// NewApplicationChannelGenesisBlock creates a genesis block using the provided +// application and orderer configuration and returns a block. 
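+//
+// Sketch (channelConfig is assumed to be a fully populated Channel with
+// Application, Orderer, Capabilities, and Policies set; the channel name is
+// illustrative):
+//
+//	block, err := configtx.NewApplicationChannelGenesisBlock(channelConfig, "mychannel")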
+func NewApplicationChannelGenesisBlock(channelConfig Channel, channelID string) (*cb.Block, error) { + if channelID == "" { + return nil, errors.New("application channel ID is required") + } + + applicationChannelGroup, err := newApplicationChannelGroup(channelConfig) + if err != nil { + return nil, fmt.Errorf("creating application channel group: %v", err) + } + + block, err := newGenesisBlock(applicationChannelGroup, channelID) + if err != nil { + return nil, fmt.Errorf("creating application channel genesis block: %v", err) + } + + return block, nil +} + +// newSystemChannelGroup defines the root of the system channel configuration. +func newSystemChannelGroup(channelConfig Channel) (*cb.ConfigGroup, error) { + channelGroup, err := newChannelGroupWithOrderer(channelConfig) + if err != nil { + return nil, err + } + + consortiumsGroup, err := newConsortiumsGroup(channelConfig.Consortiums) + if err != nil { + return nil, err + } + channelGroup.Groups[ConsortiumsGroupKey] = consortiumsGroup + + channelGroup.ModPolicy = AdminsPolicyKey + + if channelConfig.ModPolicy != "" { + channelGroup.ModPolicy = channelConfig.ModPolicy + } + + return channelGroup, nil +} + +// newApplicationChannelGroup defines the root of the application +// channel configuration. +func newApplicationChannelGroup(channelConfig Channel) (*cb.ConfigGroup, error) { + channelGroup, err := newChannelGroupWithOrderer(channelConfig) + if err != nil { + return nil, err + } + + applicationGroup, err := newApplicationGroup(channelConfig.Application) + if err != nil { + return nil, err + } + + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + + channelGroup.ModPolicy = AdminsPolicyKey + + if channelConfig.ModPolicy != "" { + channelGroup.ModPolicy = channelConfig.ModPolicy + } + + return channelGroup, nil +} + +func newChannelGroupWithOrderer(channelConfig Channel) (*cb.ConfigGroup, error) { + channelGroup := newConfigGroup() + + err := setPolicies(channelGroup, channelConfig.Policies) + if err != nil { + return nil, fmt.Errorf("setting channel policies: %v", err) + } + + err = setValue(channelGroup, hashingAlgorithmValue(), AdminsPolicyKey) + if err != nil { + return nil, err + } + + err = setValue(channelGroup, blockDataHashingStructureValue(), AdminsPolicyKey) + if err != nil { + return nil, err + } + + if len(channelConfig.Capabilities) == 0 { + return nil, errors.New("capabilities is not defined in channel config") + } + + err = setValue(channelGroup, capabilitiesValue(channelConfig.Capabilities), AdminsPolicyKey) + if err != nil { + return nil, err + } + + ordererGroup, err := newOrdererGroup(channelConfig.Orderer) + if err != nil { + return nil, err + } + channelGroup.Groups[OrdererGroupKey] = ordererGroup + + return channelGroup, nil +} + +// newGenesisBlock generates a genesis block from the config group and +// channel ID. The block number is always zero. 
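+// The config group is wrapped as ConfigEnvelope -> Payload -> Envelope and placed
+// in the block data; the LAST_CONFIG and SIGNATURES block metadata both point at
+// config index 0.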
+func newGenesisBlock(cg *cb.ConfigGroup, channelID string) (*cb.Block, error) { + payloadChannelHeader := channelHeader(cb.HeaderType_CONFIG, msgVersion, channelID, epoch) + nonce, err := newNonce() + if err != nil { + return nil, fmt.Errorf("creating nonce: %v", err) + } + payloadSignatureHeader := &cb.SignatureHeader{Creator: nil, Nonce: nonce} + payloadChannelHeader.TxId = computeTxID(payloadSignatureHeader.Nonce, payloadSignatureHeader.Creator) + payloadHeader, err := payloadHeader(payloadChannelHeader, payloadSignatureHeader) + if err != nil { + return nil, fmt.Errorf("construct payload header: %v", err) + } + payloadData, err := proto.Marshal(&cb.ConfigEnvelope{Config: &cb.Config{ChannelGroup: cg}}) + if err != nil { + return nil, fmt.Errorf("marshaling payload data: %v", err) + } + payload := &cb.Payload{Header: payloadHeader, Data: payloadData} + envelopePayload, err := proto.Marshal(payload) + if err != nil { + return nil, fmt.Errorf("marshaling envelope payload: %v", err) + } + envelope := &cb.Envelope{Payload: envelopePayload, Signature: nil} + blockData, err := proto.Marshal(envelope) + if err != nil { + return nil, fmt.Errorf("marshaling envelope: %v", err) + } + + block := newBlock(0, nil) + block.Data = &cb.BlockData{Data: [][]byte{blockData}} + block.Header.DataHash = blockDataHash(block.Data) + + lastConfigValue, err := proto.Marshal(&cb.LastConfig{Index: 0}) + if err != nil { + return nil, fmt.Errorf("marshaling metadata last config value: %v", err) + } + lastConfigMetadata, err := proto.Marshal(&cb.Metadata{Value: lastConfigValue}) + if err != nil { + return nil, fmt.Errorf("marshaling metadata last config: %v", err) + } + block.Metadata.Metadata[cb.BlockMetadataIndex_LAST_CONFIG] = lastConfigMetadata + + signatureValue, err := proto.Marshal(&cb.OrdererBlockMetadata{ + LastConfig: &cb.LastConfig{Index: 0}, + }) + if err != nil { + return nil, fmt.Errorf("marshaling metadata signature value: %v", err) + } + signatureMetadata, err := proto.Marshal(&cb.Metadata{Value: signatureValue}) + if err != nil { + return nil, fmt.Errorf("marshaling metadata signature: %v", err) + } + block.Metadata.Metadata[cb.BlockMetadataIndex_SIGNATURES] = signatureMetadata + + return block, nil +} + +// setValue sets the value as ConfigValue in the ConfigGroup. +func setValue(cg *cb.ConfigGroup, value *standardConfigValue, modPolicy string) error { + v, err := proto.Marshal(value.value) + if err != nil { + return fmt.Errorf("marshaling standard config value '%s': %v", value.key, err) + } + + if cg.Values == nil { + cg.Values = map[string]*cb.ConfigValue{} + } + + cg.Values[value.key] = &cb.ConfigValue{ + Value: v, + ModPolicy: modPolicy, + } + + return nil +} + +// implicitMetaFromString parses a *cb.ImplicitMetaPolicy from an input string. +func implicitMetaFromString(input string) (*cb.ImplicitMetaPolicy, error) { + args := strings.Split(input, " ") + if len(args) != 2 { + return nil, fmt.Errorf("expected two space separated tokens, but got %d", len(args)) + } + + res := &cb.ImplicitMetaPolicy{ + SubPolicy: args[1], + } + + switch args[0] { + case cb.ImplicitMetaPolicy_ANY.String(): + res.Rule = cb.ImplicitMetaPolicy_ANY + case cb.ImplicitMetaPolicy_ALL.String(): + res.Rule = cb.ImplicitMetaPolicy_ALL + case cb.ImplicitMetaPolicy_MAJORITY.String(): + res.Rule = cb.ImplicitMetaPolicy_MAJORITY + default: + return nil, fmt.Errorf("unknown rule type '%s', expected ALL, ANY, or MAJORITY", args[0]) + } + + return res, nil +} + +// mspValue returns the config definition for an MSP. 
+// It is a value for the /Channel/Orderer/*, /Channel/Application/*, and /Channel/Consortiums/*/*/* groups. +func mspValue(mspDef *mb.MSPConfig) *standardConfigValue { + return &standardConfigValue{ + key: MSPKey, + value: mspDef, + } +} + +// defaultConfigTemplate generates a config template based on the assumption that +// the input profile is a channel creation template and no system channel context +// is available. +func defaultConfigTemplate(channelConfig Channel) (*cb.ConfigGroup, error) { + channelGroup, err := newChannelGroup(channelConfig) + if err != nil { + return nil, err + } + + if _, ok := channelGroup.Groups[ApplicationGroupKey]; !ok { + return nil, errors.New("channel template config must contain an application section") + } + + channelGroup.Groups[ApplicationGroupKey].Values = nil + channelGroup.Groups[ApplicationGroupKey].Policies = nil + + return channelGroup, nil +} + +// newChannelGroup defines the root of the channel configuration. +func newChannelGroup(channelConfig Channel) (*cb.ConfigGroup, error) { + channelGroup := newConfigGroup() + + if channelConfig.Consortium == "" { + return nil, errors.New("consortium is not defined in channel config") + } + + err := setValue(channelGroup, consortiumValue(channelConfig.Consortium), "") + if err != nil { + return nil, err + } + + channelGroup.Groups[ApplicationGroupKey], err = newApplicationGroupTemplate(channelConfig.Application) + if err != nil { + return nil, fmt.Errorf("failed to create application group: %v", err) + } + + channelGroup.ModPolicy = AdminsPolicyKey + + if channelConfig.ModPolicy != "" { + channelGroup.ModPolicy = channelConfig.ModPolicy + } + + return channelGroup, nil +} + +// newChannelCreateConfigUpdate generates a ConfigUpdate which can be sent to the orderer to create a new channel. +// Optionally, the channel group of the ordering system channel may be passed in, and the resulting ConfigUpdate +// will extract the appropriate versions from this file. +func newChannelCreateConfigUpdate(channelID string, channelConfig Channel, templateConfig *cb.ConfigGroup) (*cb.ConfigUpdate, error) { + newChannelGroup, err := newChannelGroup(channelConfig) + if err != nil { + return nil, err + } + + updt, err := computeConfigUpdate(&cb.Config{ChannelGroup: templateConfig}, &cb.Config{ChannelGroup: newChannelGroup}) + if err != nil { + return nil, fmt.Errorf("computing update: %v", err) + } + + wsValue, err := proto.Marshal(&cb.Consortium{ + Name: channelConfig.Consortium, + }) + if err != nil { + return nil, fmt.Errorf("marshaling consortium: %v", err) + } + + // Add the consortium name to create the channel for into the write set as required + updt.ChannelId = channelID + updt.ReadSet.Values[ConsortiumKey] = &cb.ConfigValue{Version: 0} + updt.WriteSet.Values[ConsortiumKey] = &cb.ConfigValue{ + Version: 0, + Value: wsValue, + } + + return updt, nil +} + +// newConfigGroup creates an empty *cb.ConfigGroup. +func newConfigGroup() *cb.ConfigGroup { + return &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{}, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{}, + } +} + +// newEnvelope creates an unsigned envelope of the desired type containing +// a payload Header and the marshaled proto message as the payload Data. 
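+//
+// For example, NewEnvelope above calls this helper with the channel ID recovered
+// from the marshaled update (a sketch of that call site, not new behavior):
+//
+//	envelope, err := newEnvelope(cb.HeaderType_CONFIG_UPDATE, configUpdate.ChannelId, configUpdateEnvelope)
+//	if err != nil {
+//		return nil, err
+//	}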
+func newEnvelope( + txType cb.HeaderType, + channelID string, + dataMsg proto.Message, +) (*cb.Envelope, error) { + payloadChannelHeader := channelHeader(txType, msgVersion, channelID, epoch) + payloadSignatureHeader := &cb.SignatureHeader{} + + data, err := proto.Marshal(dataMsg) + if err != nil { + return nil, fmt.Errorf("marshaling envelope data: %v", err) + } + + payloadHeader, err := payloadHeader(payloadChannelHeader, payloadSignatureHeader) + if err != nil { + return nil, fmt.Errorf("making payload header: %v", err) + } + + paylBytes, err := proto.Marshal( + &cb.Payload{ + Header: payloadHeader, + Data: data, + }, + ) + if err != nil { + return nil, fmt.Errorf("marshaling payload: %v", err) + } + + env := &cb.Envelope{ + Payload: paylBytes, + } + + return env, nil +} + +// channelHeader creates a ChannelHeader. +func channelHeader(headerType cb.HeaderType, version int32, channelID string, epoch uint64) *cb.ChannelHeader { + return &cb.ChannelHeader{ + Type: int32(headerType), + Version: version, + Timestamp: timestamppb.Now(), + ChannelId: channelID, + Epoch: epoch, + } +} + +// payloadHeader creates a Payload Header. +func payloadHeader(ch *cb.ChannelHeader, sh *cb.SignatureHeader) (*cb.Header, error) { + channelHeader, err := proto.Marshal(ch) + if err != nil { + return nil, fmt.Errorf("marshaling channel header: %v", err) + } + + signatureHeader, err := proto.Marshal(sh) + if err != nil { + return nil, fmt.Errorf("marshaling signature header: %v", err) + } + + return &cb.Header{ + ChannelHeader: channelHeader, + SignatureHeader: signatureHeader, + }, nil +} + +// concatenateBytes combines multiple arrays of bytes, for signatures or digests +// over multiple fields. +func concatenateBytes(data ...[]byte) []byte { + res := []byte{} + for i := range data { + res = append(res, data[i]...) + } + + return res +} + +// unmarshalConfigValueAtKey unmarshals the value for the specified key in a config group +// into the designated proto message. +func unmarshalConfigValueAtKey(group *cb.ConfigGroup, key string, msg proto.Message) error { + valueAtKey, ok := group.Values[key] + if !ok { + return fmt.Errorf("config does not contain value for %s", key) + } + + err := proto.Unmarshal(valueAtKey.Value, msg) + if err != nil { + return fmt.Errorf("unmarshaling %s: %v", key, err) + } + + return nil +} + +func parseAddress(address string) (string, int, error) { + hostport := strings.Split(address, ":") + if len(hostport) != 2 { + return "", 0, fmt.Errorf("unable to parse host and port from %s", address) + } + + host := hostport[0] + port := hostport[1] + + portNum, err := strconv.Atoi(port) + if err != nil { + return "", 0, err + } + + return host, portNum, nil +} + +// newBlock constructs a block with no data and no metadata. +func newBlock(seqNum uint64, previousHash []byte) *cb.Block { + block := &cb.Block{} + block.Header = &cb.BlockHeader{} + block.Header.Number = seqNum + block.Header.PreviousHash = previousHash + block.Header.DataHash = []byte{} + block.Data = &cb.BlockData{} + + var metadataContents [][]byte + for i := 0; i < len(cb.BlockMetadataIndex_name); i++ { + metadataContents = append(metadataContents, []byte{}) + } + block.Metadata = &cb.BlockMetadata{Metadata: metadataContents} + + return block +} + +// computeTxID computes TxID as the Hash computed +// over the concatenation of nonce and creator. 
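+//
+// Equivalently (sketch using the standard library directly):
+//
+//	sum := sha256.Sum256(append(append([]byte{}, nonce...), creator...))
+//	txID := hex.EncodeToString(sum[:])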
+func computeTxID(nonce, creator []byte) string { + hasher := sha256.New() + hasher.Write(nonce) + hasher.Write(creator) + return hex.EncodeToString(hasher.Sum(nil)) +} + +// blockDataHash computes block data as the Hash +func blockDataHash(b *cb.BlockData) []byte { + sum := sha256.Sum256(bytes.Join(b.Data, nil)) + return sum[:] +} diff --git a/v2/configtx/config_test.go b/v2/configtx/config_test.go new file mode 100644 index 0000000..408b8ee --- /dev/null +++ b/v2/configtx/config_test.go @@ -0,0 +1,2251 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "bytes" + "crypto/ecdsa" + "errors" + "fmt" + "testing" + + "github.com/hyperledger/fabric-config/v2/configtx/orderer" + "github.com/hyperledger/fabric-config/v2/protolator" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + . "github.com/onsi/gomega" + "google.golang.org/protobuf/proto" +) + +func TestNewConfigTx(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + channel, _, err := baseApplicationChannelGroup(t) + gt.Expect(err).NotTo(HaveOccurred()) + + original := &cb.Config{ + ChannelGroup: channel, + } + + c := New(original) + gt.Expect(proto.Equal(c.OriginalConfig(), original)).To(BeTrue()) + gt.Expect(proto.Equal(c.UpdatedConfig(), original)).To(BeTrue()) + + err = c.Application().AddCapability("fake-capability") + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(proto.Equal(c.OriginalConfig(), original)).To(BeTrue()) + gt.Expect(proto.Equal(c.UpdatedConfig(), original)).To(BeFalse()) +} + +func TestNewCreateChannelTx(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + // The TwoOrgsChannel profile is defined in standard_networks.go under the BasicSolo configuration + // configtxgen -profile TwoOrgsChannel -channelID testChannel + expectedEnvelopeJSON := `{ + "payload": { + "data": { + "config_update": { + "channel_id": "testchannel", + "isolated_data": {}, + "read_set": { + "groups": { + "Application": { + "groups": { + "Org1": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + } + }, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + } + }, + "mod_policy": "", + "policies": {}, + "values": { + "Consortium": { + "mod_policy": "", + "value": null, + "version": "0" + } + }, + "version": "0" + }, + "write_set": { + "groups": { + "Application": { + "groups": { + "Org1": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ACLs": { + "mod_policy": "Admins", + "value": { + "acls": { + "acl1": { + "policy_ref": "hi" + } + } + }, + "version": "0" + } + }, + "version": "1" + } + }, 
+ "mod_policy": "", + "policies": {}, + "values": { + "Consortium": { + "mod_policy": "", + "value": { + "name": "SampleConsortium" + }, + "version": "0" + } + }, + "version": "0" + } + }, + "signatures": [] + }, + "header": { + "channel_header": { + "channel_id": "testchannel", + "epoch": "0", + "extension": null, + "timestamp": "2020-02-17T15:49:56Z", + "tls_cert_hash": null, + "tx_id": "", + "type": 2, + "version": 0 + }, + "signature_header": null + } + }, + "signature": null + }` + + profile := baseProfile(t) + + // creating a create channel transaction + marshaledCreateChannelTx, err := NewMarshaledCreateChannelTx(profile, "testchannel") + gt.Expect(err).NotTo(HaveOccurred()) + envelope, err := NewEnvelope(marshaledCreateChannelTx) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(envelope).ToNot(BeNil()) + + // Unmarshaling actual and expected envelope to set + // the expected timestamp to the actual timestamp + expectedEnvelope := &cb.Envelope{} + err = protolator.DeepUnmarshalJSON(bytes.NewBufferString(expectedEnvelopeJSON), expectedEnvelope) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedPayload := &cb.Payload{} + err = proto.Unmarshal(expectedEnvelope.Payload, expectedPayload) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedHeader := &cb.ChannelHeader{} + err = proto.Unmarshal(expectedPayload.Header.ChannelHeader, expectedHeader) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedData := &cb.ConfigUpdateEnvelope{} + err = proto.Unmarshal(expectedPayload.Data, expectedData) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedConfigUpdate := &cb.ConfigUpdate{} + err = proto.Unmarshal(expectedData.ConfigUpdate, expectedConfigUpdate) + gt.Expect(err).NotTo(HaveOccurred()) + + actualPayload := &cb.Payload{} + err = proto.Unmarshal(envelope.Payload, actualPayload) + gt.Expect(err).NotTo(HaveOccurred()) + + actualHeader := &cb.ChannelHeader{} + err = proto.Unmarshal(actualPayload.Header.ChannelHeader, actualHeader) + gt.Expect(err).NotTo(HaveOccurred()) + + actualData := &cb.ConfigUpdateEnvelope{} + err = proto.Unmarshal(actualPayload.Data, actualData) + gt.Expect(err).NotTo(HaveOccurred()) + + actualConfigUpdate := &cb.ConfigUpdate{} + err = proto.Unmarshal(actualData.ConfigUpdate, actualConfigUpdate) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(proto.Equal(actualConfigUpdate, expectedConfigUpdate)).To(BeTrue()) + + // setting timestamps to match in ConfigUpdate + actualTimestamp := actualHeader.Timestamp + + expectedHeader.Timestamp = actualTimestamp + + expectedData.ConfigUpdate = actualData.ConfigUpdate + + // Remarshaling envelopes with updated timestamps + expectedPayload.Data, err = proto.Marshal(expectedData) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedPayload.Header.ChannelHeader, err = proto.Marshal(expectedHeader) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedEnvelope.Payload, err = proto.Marshal(expectedPayload) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(proto.Equal(envelope, expectedEnvelope)).To(BeTrue()) +} + +func TestNewCreateChannelTxFailure(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + profileMod func() Channel + channelID string + err error + }{ + { + testName: "When creating the default config template with no Admins policies defined fails", + profileMod: func() Channel { + profile := baseProfile(t) + delete(profile.Application.Policies, AdminsPolicyKey) + return profile + }, + channelID: "testchannel", + err: errors.New("creating default config template: failed to create application group: " + + "no 
Admins policy defined"), + }, + { + testName: "When creating the default config template with no Readers policies defined fails", + profileMod: func() Channel { + profile := baseProfile(t) + delete(profile.Application.Policies, ReadersPolicyKey) + return profile + }, + channelID: "testchannel", + err: errors.New("creating default config template: failed to create application group: " + + "no Readers policy defined"), + }, + { + testName: "When creating the default config template with no Writers policies defined fails", + profileMod: func() Channel { + profile := baseProfile(t) + delete(profile.Application.Policies, WritersPolicyKey) + return profile + }, + channelID: "testchannel", + err: errors.New("creating default config template: failed to create application group: " + + "no Writers policy defined"), + }, + { + testName: "When creating the default config template with an invalid ImplicitMetaPolicy rule fails", + profileMod: func() Channel { + profile := baseProfile(t) + profile.Application.Policies[ReadersPolicyKey] = Policy{ + Rule: "ALL", + Type: ImplicitMetaPolicyType, + } + return profile + }, + channelID: "testchannel", + err: errors.New("creating default config template: failed to create application group: " + + "invalid implicit meta policy rule: 'ALL': expected two space separated " + + "tokens, but got 1"), + }, + { + testName: "When creating the default config template with an invalid ImplicitMetaPolicy rule fails", + profileMod: func() Channel { + profile := baseProfile(t) + profile.Application.Policies[ReadersPolicyKey] = Policy{ + Rule: "ANYY Readers", + Type: ImplicitMetaPolicyType, + } + return profile + }, + channelID: "testchannel", + err: errors.New("creating default config template: failed to create application group: " + + "invalid implicit meta policy rule: 'ANYY Readers': unknown rule type " + + "'ANYY', expected ALL, ANY, or MAJORITY"), + }, + { + testName: "When creating the default config template with SignatureTypePolicy and bad rule fails", + profileMod: func() Channel { + profile := baseProfile(t) + profile.Application.Policies[ReadersPolicyKey] = Policy{ + Rule: "ANYY Readers", + Type: SignaturePolicyType, + } + return profile + }, + channelID: "testchannel", + err: errors.New("creating default config template: failed to create application group: " + + "invalid signature policy rule: 'ANYY Readers': Cannot transition " + + "token types from VARIABLE [ANYY] to VARIABLE [Readers]"), + }, + { + testName: "When creating the default config template with an unknown policy type fails", + profileMod: func() Channel { + profile := baseProfile(t) + profile.Application.Policies[ReadersPolicyKey] = Policy{ + Rule: "ALL", + Type: "GreenPolicy", + } + return profile + }, + channelID: "testchannel", + err: errors.New("creating default config template: failed to create application group: " + + "unknown policy type: GreenPolicy"), + }, + { + testName: "When creating the default config template without consortium", + profileMod: func() Channel { + profile := baseProfile(t) + profile.Consortium = "" + return profile + }, + channelID: "testchannel", + err: errors.New("creating default config template: consortium is not defined in channel config"), + }, + { + testName: "When channel ID is not specified in config", + profileMod: func() Channel { + profile := baseProfile(t) + return profile + }, + channelID: "", + err: errors.New("profile's channel ID is required"), + }, + { + testName: "When creating the application group fails", + profileMod: func() Channel { + profile := 
baseProfile(t) + profile.Application.Policies = nil + return profile + }, + channelID: "testchannel", + err: errors.New("creating default config template: " + + "failed to create application group: no policies defined"), + }, + } + + for _, tt := range tests { + tt := tt // capture range variable + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + profile := tt.profileMod() + + marshaledCreateChannelTx, err := NewMarshaledCreateChannelTx(profile, tt.channelID) + gt.Expect(marshaledCreateChannelTx).To(BeNil()) + gt.Expect(err).To(MatchError(tt.err)) + }) + } +} + +func TestNewSystemChannelGenesisBlock(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + profile, _, _ := baseSystemChannelProfile(t) + + block, err := NewSystemChannelGenesisBlock(profile, "testsystemchannel") + if profile.Orderer.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).ToNot(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + gt.Expect(block).ToNot(BeNil()) + gt.Expect(block.Header.Number).To(Equal(uint64(0))) + + org1CertBase64, org1CrlBase64 := certCRLBase64(t, profile.Consortiums[0].Organizations[0].MSP) + org2CertBase64, org2CrlBase64 := certCRLBase64(t, profile.Consortiums[0].Organizations[1].MSP) + ordererOrgCertBase64, ordererOrgCrlBase64 := certCRLBase64(t, profile.Orderer.Organizations[0].MSP) + + expectBlockJSON := fmt.Sprintf(` +{ + "data": { + "data": [ + { + "payload": { + "data": { + "config": { + "channel_group": { + "groups": { + "Consortiums": { + "groups": { + "Consortium1": { + "groups": { + "Org1": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + 
"mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[3]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[3]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[4]s" + ], + "root_certs": [ + "%[3]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[3]s" + ], + "tls_root_certs": [ + "%[3]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "/Channel/Orderer/Admins", + "policies": {}, + "values": { + "ChannelCreationPolicy": { + "mod_policy": "/Channel/Orderer/Admins", + "value": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Admins" + } + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "/Channel/Orderer/Admins", + "policies": { + "Admins": { + "mod_policy": "/Channel/Orderer/Admins", + "policy": { + "type": 1, + "value": { + "identities": [], + "rule": { + "n_out_of": { + "n": 0, + "rules": [] + } + }, + "version": 0 + } + }, + "version": "0" + } + }, + "values": {}, + "version": "0" + }, + "Orderer": { + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[5]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[5]s", + "organizational_unit_identifier": "OUID" + }, + 
"client_ou_identifier": { + "certificate": "%[5]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[5]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[5]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[5]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[5]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[6]s" + ], + "root_certs": [ + "%[5]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[5]s" + ], + "tls_root_certs": [ + "%[5]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 100, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": null, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": null, + "state": "STATE_NORMAL", + "type": "solo" + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BlockDataHashingStructure": { + "mod_policy": "Admins", + "value": { + "width": 4294967295 + }, + "version": "0" + }, + "HashingAlgorithm": { + "mod_policy": "Admins", + "value": { + "name": "SHA256" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V2_0": {} + } + }, + "version": "0" + } + }, + "version": "0" + }, + "sequence": "0" + }, + "last_update": null + }, + "header": { + "channel_header": { + "channel_id": "testsystemchannel", + "epoch": "0", + "extension": null, + "timestamp": "2020-04-08T11:59:02Z", + "tls_cert_hash": null, + "tx_id": "1b9fd2206484ebbfc960c772c2638f83474b957c7a83f4607e94c44205a5fc9f", + "type": 1, + "version": 0 + }, + "signature_header": { + "creator": null, + "nonce": "9GHTm16kXuzFu8OwUG+Ds3re67UXVPaz" + } + } + }, + "signature": null + } + ] + }, + "header": { + "data_hash": 
"zYnpX4Xe0k/Wue2m6lEEJwqMzdApznVVUw7n5SLNWmo=", + "number": "0", + "previous_hash": null + }, + "metadata": { + "metadata": [ + "CgIKAA==", + "", + "", + "", + "" + ] + } +} +`, org1CertBase64, org1CrlBase64, org2CertBase64, org2CrlBase64, ordererOrgCertBase64, ordererOrgCrlBase64) + + expectedBlock := &cb.Block{} + err = protolator.DeepUnmarshalJSON(bytes.NewBufferString(expectBlockJSON), expectedBlock) + gt.Expect(err).ToNot(HaveOccurred()) + + expectedEnvelope := &cb.Envelope{} + err = proto.Unmarshal(expectedBlock.Data.Data[0], expectedEnvelope) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedPayload := &cb.Payload{} + err = proto.Unmarshal(expectedEnvelope.Payload, expectedPayload) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedData := &cb.ConfigEnvelope{} + err = proto.Unmarshal(expectedPayload.Data, expectedData) + gt.Expect(err).NotTo(HaveOccurred()) + + actualEnvelope := &cb.Envelope{} + err = proto.Unmarshal(block.Data.Data[0], actualEnvelope) + gt.Expect(err).NotTo(HaveOccurred()) + + actualPayload := &cb.Payload{} + err = proto.Unmarshal(actualEnvelope.Payload, actualPayload) + gt.Expect(err).NotTo(HaveOccurred()) + + actualData := &cb.ConfigEnvelope{} + err = proto.Unmarshal(actualPayload.Data, actualData) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(actualData).To(Equal(expectedData)) + + expectedChannelHeader := &cb.ChannelHeader{} + err = proto.Unmarshal(expectedPayload.Header.ChannelHeader, expectedChannelHeader) + gt.Expect(err).NotTo(HaveOccurred()) + + actualChannelHeader := &cb.ChannelHeader{} + err = proto.Unmarshal(actualPayload.Header.ChannelHeader, actualChannelHeader) + gt.Expect(err).NotTo(HaveOccurred()) + expectedChannelHeader.Timestamp = actualChannelHeader.Timestamp + expectedChannelHeader.TxId = actualChannelHeader.TxId + + gt.Expect(actualChannelHeader).To(Equal(expectedChannelHeader)) +} + +func TestNewSystemChannelGenesisBlockFailure(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + profileMod func() Channel + channelID string + err error + }{ + { + testName: "When channel ID is not specified in config", + profileMod: func() Channel { + profile, _, _ := baseSystemChannelProfile(t) + return profile + }, + channelID: "", + err: errors.New("system channel ID is required"), + }, + { + testName: "When creating the default system config template with empty orderer endpoints", + profileMod: func() Channel { + profile, _, _ := baseSystemChannelProfile(t) + profile.Orderer.Organizations[0].OrdererEndpoints = []string{} + return profile + }, + channelID: "testsystemchannel", + err: errors.New("creating system channel group: the solo consensus type is no longer supported"), + }, + { + testName: "When creating the default config template with empty capabilities", + profileMod: func() Channel { + profile, _, _ := baseSystemChannelProfile(t) + profile.Capabilities = []string{} + return profile + }, + channelID: "testsystemchannel", + err: errors.New("creating system channel group: capabilities is not defined in channel config"), + }, + { + testName: "When creating the default config template without orderer", + profileMod: func() Channel { + profile, _, _ := baseSystemChannelProfile(t) + profile.Orderer = Orderer{} + return profile + }, + channelID: "testsystemchannel", + err: errors.New("creating system channel group: no policies defined"), + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + profile := tt.profileMod() + + block, err := 
NewSystemChannelGenesisBlock(profile, tt.channelID) + gt.Expect(block).To(BeNil()) + gt.Expect(err).To(MatchError(tt.err)) + }) + } +} + +func TestNewApplicationChannelGenesisBlock(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + profile, _, _ := baseApplicationChannelProfile(t) + + block, err := NewApplicationChannelGenesisBlock(profile, "testapplicationchannel") + if profile.Orderer.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).ToNot(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + gt.Expect(block).ToNot(BeNil()) + gt.Expect(block.Header.Number).To(Equal(uint64(0))) + + org1CertBase64, org1CrlBase64 := certCRLBase64(t, profile.Application.Organizations[0].MSP) + org2CertBase64, org2CrlBase64 := certCRLBase64(t, profile.Application.Organizations[1].MSP) + ordererOrgCertBase64, ordererOrgCrlBase64 := certCRLBase64(t, profile.Orderer.Organizations[0].MSP) + + expectBlockJSON := fmt.Sprintf(` +{ + "data": { + "data": [ + { + "payload": { + "data": { + "config": { + "channel_group": { + "groups": { + "Application": { + "groups": { + "Org1": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "LifecycleEndorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + 
"LifecycleEndorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[3]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[3]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[4]s" + ], + "root_certs": [ + "%[3]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[3]s" + ], + "tls_root_certs": [ + "%[3]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "ACLs": { + "mod_policy": "Admins", + "value": { + "acls": { + "acl1": { + "policy_ref": "hi" + } + } + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + } + }, + "version": "0" + }, + "Orderer": { + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[5]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": 
"%[5]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[5]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[5]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[5]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[5]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[5]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[6]s" + ], + "root_certs": [ + "%[5]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[5]s" + ], + "tls_root_certs": [ + "%[5]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 100, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": null, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": null, + "state": "STATE_NORMAL", + "type": "solo" + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BlockDataHashingStructure": { + "mod_policy": "Admins", + "value": { + "width": 4294967295 + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V2_0": {} + } + }, + "version": "0" + }, + "HashingAlgorithm": { + "mod_policy": "Admins", + "value": { + "name": "SHA256" + }, + "version": "0" + } + }, + "version": "0" + }, + "sequence": "0" + }, + "last_update": null + }, + "header": { + "channel_header": { + "channel_id": "testapplicationchannel", + "epoch": "0", + "extension": null, + "timestamp": "2020-06-25T17:39:55Z", + "tls_cert_hash": null, + "tx_id": "93fcf9cd1e2524021f6ea592801a8b15d5262d54b350c7fe8b6b760a062b7390", + "type": 1, + "version": 0 + }, + "signature_header": { + "creator": null, + "nonce": "yXFTP7Wz7bAtIMpzFB+WaLe45fYIXjl8" + } + } + }, + "signature": null + } 
+ ] + }, + "header": { + "data_hash": "2FX2z5r8jRx6Jt5QKHt6Ch/eU0ay1bZPrncOL1Q7pIE=", + "number": "0", + "previous_hash": null + }, + "metadata": { + "metadata": [ + "CgIKAA==", + "", + "", + "", + "" + ] + } +} +`, org1CertBase64, org1CrlBase64, org2CertBase64, org2CrlBase64, ordererOrgCertBase64, ordererOrgCrlBase64) + + expectedBlock := &cb.Block{} + err = protolator.DeepUnmarshalJSON(bytes.NewBufferString(expectBlockJSON), expectedBlock) + gt.Expect(err).ToNot(HaveOccurred()) + + expectedEnvelope := &cb.Envelope{} + err = proto.Unmarshal(expectedBlock.Data.Data[0], expectedEnvelope) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedPayload := &cb.Payload{} + err = proto.Unmarshal(expectedEnvelope.Payload, expectedPayload) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedData := &cb.ConfigEnvelope{} + err = proto.Unmarshal(expectedPayload.Data, expectedData) + gt.Expect(err).NotTo(HaveOccurred()) + + actualEnvelope := &cb.Envelope{} + err = proto.Unmarshal(block.Data.Data[0], actualEnvelope) + gt.Expect(err).NotTo(HaveOccurred()) + + actualPayload := &cb.Payload{} + err = proto.Unmarshal(actualEnvelope.Payload, actualPayload) + gt.Expect(err).NotTo(HaveOccurred()) + + actualData := &cb.ConfigEnvelope{} + err = proto.Unmarshal(actualPayload.Data, actualData) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(actualData).To(Equal(expectedData)) + + expectedChannelHeader := &cb.ChannelHeader{} + err = proto.Unmarshal(expectedPayload.Header.ChannelHeader, expectedChannelHeader) + gt.Expect(err).NotTo(HaveOccurred()) + + actualChannelHeader := &cb.ChannelHeader{} + err = proto.Unmarshal(actualPayload.Header.ChannelHeader, actualChannelHeader) + gt.Expect(err).NotTo(HaveOccurred()) + expectedChannelHeader.Timestamp = actualChannelHeader.Timestamp + expectedChannelHeader.TxId = actualChannelHeader.TxId + + gt.Expect(actualChannelHeader).To(Equal(expectedChannelHeader)) +} + +func TestNewApplicationChannelGenesisBlockFailure(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + profileMod func() Channel + channelID string + err error + }{ + { + testName: "When channel ID is not specified in config", + profileMod: func() Channel { + profile, _, _ := baseApplicationChannelProfile(t) + return profile + }, + channelID: "", + err: errors.New("application channel ID is required"), + }, + { + testName: "When creating the default application config template with empty orderer endpoints", + profileMod: func() Channel { + profile, _, _ := baseApplicationChannelProfile(t) + profile.Orderer.Organizations[0].OrdererEndpoints = []string{} + return profile + }, + channelID: "testapplicationchannel", + err: errors.New("creating application channel group: the solo consensus type is no longer supported"), + }, + { + testName: "When creating the default config template with empty capabilities", + profileMod: func() Channel { + profile, _, _ := baseApplicationChannelProfile(t) + profile.Capabilities = []string{} + return profile + }, + channelID: "testapplicationchannel", + err: errors.New("creating application channel group: capabilities is not defined in channel config"), + }, + { + testName: "When creating the default config template without application", + profileMod: func() Channel { + profile, _, _ := baseApplicationChannelProfile(t) + profile.Application = Application{} + return profile + }, + channelID: "testapplicationchannel", + err: errors.New("creating application channel group: the solo consensus type is no longer supported"), + }, + } + + for _, tt := range tests { + tt := tt 
+ t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + profile := tt.profileMod() + + block, err := NewApplicationChannelGenesisBlock(profile, tt.channelID) + gt.Expect(block).To(BeNil()) + gt.Expect(err).To(MatchError(tt.err)) + }) + } +} + +func TestNewEnvelopeFailures(t *testing.T) { + t.Parallel() + + tests := []struct { + spec string + marshaledUpdate []byte + expectedErr string + }{ + { + spec: "when the marshaled config update isn't a config update", + marshaledUpdate: []byte("not-a-config-update"), + expectedErr: "unmarshaling config update: proto", + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.spec, func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + env, err := NewEnvelope(tc.marshaledUpdate) + gt.Expect(err.Error()).To(ContainSubstring(tc.expectedErr)) + gt.Expect(env).To(BeNil()) + }) + } +} + +func TestComputeMarshaledUpdate(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + value1Name := "foo" + value2Name := "bar" + original := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Version: 7, + Values: map[string]*cb.ConfigValue{ + value1Name: { + Version: 3, + Value: []byte("value1value"), + }, + value2Name: { + Version: 6, + Value: []byte("value2value"), + }, + }, + }, + } + updated := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Values: map[string]*cb.ConfigValue{ + value1Name: original.ChannelGroup.Values[value1Name], + value2Name: { + Value: []byte("updatedValued2Value"), + }, + }, + }, + } + + c := ConfigTx{ + original: original, + updated: updated, + } + + channelID := "testChannel" + + expectedReadSet := newConfigGroup() + expectedReadSet.Version = 7 + + expectedWriteSet := newConfigGroup() + expectedWriteSet.Version = 7 + expectedWriteSet.Values = map[string]*cb.ConfigValue{ + value2Name: { + Version: 7, + Value: []byte("updatedValued2Value"), + }, + } + + expectedConfig := cb.ConfigUpdate{ + ChannelId: channelID, + ReadSet: expectedReadSet, + WriteSet: expectedWriteSet, + } + + marshaledUpdate, err := c.ComputeMarshaledUpdate(channelID) + gt.Expect(err).NotTo(HaveOccurred()) + configUpdate := &cb.ConfigUpdate{} + err = proto.Unmarshal(marshaledUpdate, configUpdate) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(proto.Equal(configUpdate, &expectedConfig)).To(BeTrue()) +} + +func TestComputeUpdateFailures(t *testing.T) { + t.Parallel() + + original := &cb.Config{} + updated := &cb.Config{} + + c := ConfigTx{ + original: original, + updated: updated, + } + + for _, test := range []struct { + name string + channelID string + expectedErr string + }{ + { + name: "When channel ID is not specified", + channelID: "", + expectedErr: "channel ID is required", + }, + { + name: "When failing to compute update", + channelID: "testChannel", + expectedErr: "failed to compute update: no channel group included for original config", + }, + } { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + marshaledUpdate, err := c.ComputeMarshaledUpdate(test.channelID) + gt.Expect(err).To(MatchError(test.expectedErr)) + gt.Expect(marshaledUpdate).To(BeNil()) + }) + } +} + +func TestChannelConfiguration(t *testing.T) { + t.Parallel() + + baseApplication, _ := baseApplication(t) + baseConsortiums, _ := baseConsortiums(t) + baseOrderer, _ := baseSoloOrderer(t) + policies := standardPolicies() + + tests := []struct { + name string + configMod func(gt *GomegaWithT) *cb.Config + expectedChannel Channel + }{ + { + name: "retrieve application channel", + configMod: func(gt 
*GomegaWithT) *cb.Config { + channelGroup := newConfigGroup() + + applicationGroup, err := newApplicationGroup(baseApplication) + gt.Expect(err).NotTo(HaveOccurred()) + for _, org := range baseApplication.Organizations { + orgGroup, err := newOrgConfigGroup(org) + gt.Expect(err).NotTo(HaveOccurred()) + applicationGroup.Groups[org.Name] = orgGroup + } + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + err = setPolicies(channelGroup, standardPolicies()) + gt.Expect(err).NotTo(HaveOccurred()) + + return &cb.Config{ + ChannelGroup: channelGroup, + } + }, + expectedChannel: Channel{ + Application: baseApplication, + Policies: standardPolicies(), + }, + }, + { + name: "retrieve system channel", + configMod: func(gt *GomegaWithT) *cb.Config { + channel := Channel{ + Consortiums: baseConsortiums, + Orderer: baseOrderer, + Capabilities: []string{"V2_0"}, + Policies: policies, + Consortium: "testconsortium", + ModPolicy: AdminsPolicyKey, + } + channelGroup, err := newSystemChannelGroup(channel) + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + + return &cb.Config{ + ChannelGroup: channelGroup, + } + }, + expectedChannel: Channel{ + Consortiums: baseConsortiums, + Orderer: baseOrderer, + Capabilities: []string{"V2_0"}, + Policies: policies, + }, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + config := tt.configMod(gt) + c := New(config) + // when the consensus type is solo, the channelGroup is nil and an error is returned + if config.ChannelGroup == nil { + return + } + + channel, err := c.Channel().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(channel.Consortium).To(Equal(tt.expectedChannel.Consortium)) + gt.Expect(channel.Application.Organizations).To(ContainElements(tt.expectedChannel.Application.Organizations)) + gt.Expect(channel.Application.Capabilities).To(Equal(tt.expectedChannel.Application.Capabilities)) + gt.Expect(channel.Application.Policies).To(Equal(tt.expectedChannel.Application.Policies)) + gt.Expect(channel.Application.ACLs).To(Equal(tt.expectedChannel.Application.ACLs)) + gt.Expect(channel.Orderer).To(Equal(tt.expectedChannel.Orderer)) + gt.Expect(len(channel.Consortiums)).To(Equal(len(tt.expectedChannel.Consortiums))) + gt.Expect(channel.Capabilities).To(Equal(tt.expectedChannel.Capabilities)) + gt.Expect(channel.Policies).To(Equal(tt.expectedChannel.Policies)) + }) + } +} + +func baseProfile(t *testing.T) Channel { + application, _ := baseApplication(t) + return Channel{ + Consortium: "SampleConsortium", + Application: application, + Capabilities: []string{"V2_0"}, + } +} + +func baseSystemChannelProfile(t *testing.T) (Channel, []*ecdsa.PrivateKey, *ecdsa.PrivateKey) { + consortiums, consortiumsPrivKey := baseConsortiums(t) + orderer, ordererPrivKeys := baseSoloOrderer(t) + return Channel{ + Consortiums: consortiums, + Orderer: orderer, + Capabilities: []string{"V2_0"}, + Policies: standardPolicies(), + }, consortiumsPrivKey, ordererPrivKeys[0] +} + +func baseApplicationChannelProfile(t *testing.T) (Channel, []*ecdsa.PrivateKey, *ecdsa.PrivateKey) { + application, applicationPrivKey := baseApplication(t) + orderer, ordererPrivKeys := baseSoloOrderer(t) + return Channel{ + Application: application, + Orderer: orderer, + Capabilities: []string{"V2_0"}, + Policies: standardPolicies(), + ModPolicy: AdminsPolicyKey, + }, applicationPrivKey, ordererPrivKeys[0] +} + +func standardPolicies() map[string]Policy { + return 
map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + } +} + +func orgStandardPolicies() map[string]Policy { + policies := standardPolicies() + + policies[EndorsementPolicyKey] = Policy{ + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + } + + return policies +} + +func applicationOrgStandardPolicies() map[string]Policy { + policies := orgStandardPolicies() + + policies[LifecycleEndorsementPolicyKey] = Policy{ + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + } + + return policies +} + +func ordererStandardPolicies() map[string]Policy { + policies := standardPolicies() + + policies[BlockValidationPolicyKey] = Policy{ + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + } + + return policies +} + +// baseApplicationChannelGroup creates a channel config group +// that only contains an Application group. +func baseApplicationChannelGroup(t *testing.T) (*cb.ConfigGroup, []*ecdsa.PrivateKey, error) { + channelGroup := newConfigGroup() + + application, privKeys := baseApplication(t) + applicationGroup, err := newApplicationGroup(application) + if err != nil { + return nil, nil, err + } + + for _, org := range application.Organizations { + orgGroup, err := newOrgConfigGroup(org) + if err != nil { + return nil, nil, err + } + applicationGroup.Groups[org.Name] = orgGroup + } + + channelGroup.Groups[ApplicationGroupKey] = applicationGroup + + return channelGroup, privKeys, nil +} diff --git a/v2/configtx/consortiums.go b/v2/configtx/consortiums.go new file mode 100644 index 0000000..e6528a4 --- /dev/null +++ b/v2/configtx/consortiums.go @@ -0,0 +1,430 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "errors" + "fmt" + + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + mb "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "google.golang.org/protobuf/proto" +) + +// Consortium is a group of non-orderer organizations used in channel transactions. +type Consortium struct { + Name string + Organizations []Organization +} + +// ConsortiumsGroup encapsulates the parts of the config that control consortiums. +type ConsortiumsGroup struct { + consortiumsGroup *cb.ConfigGroup +} + +// ConsortiumGroup encapsulates the parts of the config that control +// a specific consortium. This type implements retrieval of the various +// consortium config values. +type ConsortiumGroup struct { + consortiumGroup *cb.ConfigGroup + name string +} + +// ConsortiumOrg encapsulates the parts of the config that control a +// consortium organization's configuration. +type ConsortiumOrg struct { + orgGroup *cb.ConfigGroup + name string +} + +// MSP returns an OrganizationMSP object that can be used to configure the organization's MSP. +func (c *ConsortiumOrg) MSP() *OrganizationMSP { + return &OrganizationMSP{ + configGroup: c.orgGroup, + } +} + +// Consortiums returns the consortiums group from the updated config. 
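+//
+// Typical read path (sketch; c is a *ConfigTx and the channel config is assumed
+// to actually contain a Consortiums group, i.e. an ordering system channel):
+//
+//	consortiums, err := c.Consortiums().Configuration()
+//	if err != nil {
+//		// handle the error
+//	}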
+func (c *ConfigTx) Consortiums() *ConsortiumsGroup { + consortiumsGroup := c.updated.ChannelGroup.Groups[ConsortiumsGroupKey] + return &ConsortiumsGroup{consortiumsGroup: consortiumsGroup} +} + +// Consortium returns a consortium group from the updated config. +func (c *ConfigTx) Consortium(name string) *ConsortiumGroup { + consortiumGroup, ok := c.updated.ChannelGroup.Groups[ConsortiumsGroupKey].Groups[name] + if !ok { + return nil + } + return &ConsortiumGroup{name: name, consortiumGroup: consortiumGroup} +} + +// SetConsortium sets the consortium in a channel configuration. +// If the consortium already exists in the current configuration, its value will be overwritten. +func (c *ConsortiumsGroup) SetConsortium(consortium Consortium) error { + c.consortiumsGroup.Groups[consortium.Name] = newConfigGroup() + + for _, org := range consortium.Organizations { + err := c.consortium(consortium.Name).SetOrganization(org) + if err != nil { + return err + } + } + + return nil +} + +func (c *ConsortiumsGroup) consortium(name string) *ConsortiumGroup { + consortiumGroup := c.consortiumsGroup.Groups[name] + return &ConsortiumGroup{name: name, consortiumGroup: consortiumGroup} +} + +// RemoveConsortium removes a consortium from a channel configuration. +// Removal will panic if the consortiums group does not exist. +func (c *ConsortiumsGroup) RemoveConsortium(name string) { + delete(c.consortiumsGroup.Groups, name) +} + +// Organization returns the consortium org from the original config. +func (c *ConsortiumGroup) Organization(name string) *ConsortiumOrg { + orgGroup, ok := c.consortiumGroup.Groups[name] + if !ok { + return nil + } + return &ConsortiumOrg{name: name, orgGroup: orgGroup} +} + +// SetOrganization sets the organization config group for the given org key in +// an existing Consortium configuration's Groups map. +// If the consortium org already exists in the current configuration, its +// value will be overwritten. +func (c *ConsortiumGroup) SetOrganization(org Organization) error { + orgGroup, err := newOrgConfigGroup(org) + if err != nil { + return fmt.Errorf("failed to create consortium org %s: %v", org.Name, err) + } + + c.consortiumGroup.Groups[org.Name] = orgGroup + + return nil +} + +// RemoveOrganization removes an org from a consortium group. +// Removal will panic if either the consortiums group or consortium group does not exist. +func (c *ConsortiumGroup) RemoveOrganization(name string) { + delete(c.consortiumGroup.Groups, name) +} + +// Configuration returns a list of consortium configurations from the updated +// config. Consortiums are only defined for the ordering system channel. +func (c *ConsortiumsGroup) Configuration() ([]Consortium, error) { + consortiums := []Consortium{} + for consortiumName := range c.consortiumsGroup.Groups { + consortium, err := c.consortium(consortiumName).Configuration() + if err != nil { + return nil, err + } + consortiums = append(consortiums, consortium) + } + + return consortiums, nil +} + +// Configuration returns the configuration for a consortium group. 
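Before the read-back helpers, a sketch of the add/remove flow defined above; it mirrors TestSetConsortiumOrg and assumes a *ConfigTx named c (as in the earlier sketch) plus an MSP value and a policy map prepared elsewhere.

    orgToAdd := Organization{
        Name:     "Org3",
        Policies: org3Policies, // map[string]Policy prepared elsewhere
        MSP:      org3MSP,      // MSP prepared elsewhere
    }
    if err := c.Consortium("Consortium1").SetOrganization(orgToAdd); err != nil {
        return err
    }

    // Removal mutates the consortium group in place and returns nothing.
    c.Consortium("Consortium1").RemoveOrganization("Org2")
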
+func (c *ConsortiumGroup) Configuration() (Consortium, error) { + orgs := []Organization{} + for orgName, orgGroup := range c.consortiumGroup.Groups { + org, err := getOrganization(orgGroup, orgName) + if err != nil { + return Consortium{}, fmt.Errorf("failed to retrieve organization %s from consortium %s: ", orgName, c.name) + } + orgs = append(orgs, org) + } + return Consortium{ + Name: c.name, + Organizations: orgs, + }, nil +} + +// Configuration retrieves an existing org's configuration from a consortium +// organization config group in the updated config. +func (c *ConsortiumOrg) Configuration() (Organization, error) { + org, err := getOrganization(c.orgGroup, c.name) + if err != nil { + return Organization{}, err + } + + // Remove AnchorPeers which are application org specific. + org.AnchorPeers = nil + + return org, err +} + +// SetMSP updates the MSP config for the specified consortium org group. +func (c *ConsortiumOrg) SetMSP(updatedMSP MSP) error { + currentMSP, err := c.MSP().Configuration() + if err != nil { + return fmt.Errorf("retrieving msp: %v", err) + } + + if currentMSP.Name != updatedMSP.Name { + return errors.New("MSP name cannot be changed") + } + + err = updatedMSP.validateCACerts() + if err != nil { + return err + } + + err = c.setMSPConfig(updatedMSP) + if err != nil { + return err + } + + return nil +} + +func (c *ConsortiumOrg) setMSPConfig(updatedMSP MSP) error { + mspConfig, err := newMSPConfig(updatedMSP) + if err != nil { + return fmt.Errorf("new msp config: %v", err) + } + + err = setValue(c.orgGroup, mspValue(mspConfig), AdminsPolicyKey) + if err != nil { + return err + } + + return nil +} + +// SetChannelCreationPolicy sets the ConsortiumChannelCreationPolicy for +// the given configuration Group. +// If the policy already exists in current configuration, its value will be overwritten. +func (c *ConsortiumGroup) SetChannelCreationPolicy(policy Policy) error { + imp, err := implicitMetaFromString(policy.Rule) + if err != nil { + return fmt.Errorf("invalid implicit meta policy rule '%s': %v", policy.Rule, err) + } + + implicitMetaPolicy, err := implicitMetaPolicy(imp.SubPolicy, imp.Rule) + if err != nil { + return fmt.Errorf("failed to make implicit meta policy: %v", err) + } + + // update channel creation policy value back to consortium + if err = setValue(c.consortiumGroup, channelCreationPolicyValue(implicitMetaPolicy), ordererAdminsPolicyName); err != nil { + return fmt.Errorf("failed to update channel creation policy to consortium %s: %v", c.name, err) + } + + return nil +} + +// Policies returns a map of policies for a specific consortium org. +func (c *ConsortiumOrg) Policies() (map[string]Policy, error) { + return getPolicies(c.orgGroup.Policies) +} + +// SetModPolicy sets the specified modification policy for the consortium org group. +func (c *ConsortiumOrg) SetModPolicy(modPolicy string) error { + if modPolicy == "" { + return errors.New("non empty mod policy is required") + } + + c.orgGroup.ModPolicy = modPolicy + + return nil +} + +// SetPolicy sets the specified policy in the consortium org group's config policy map. +// If the policy already exists in current configuration, its value will be overwritten. 
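The MSP update flow above (MSP().Configuration() followed by SetMSP) is typically used to append new CA material, as in TestSetConsortiumMSP. A sketch, where c is a *ConfigTx as in the earlier sketches and newRootCert stands in for an *x509.Certificate produced elsewhere:

    org1 := c.Consortium("Consortium1").Organization("Org1")
    mspConf, err := org1.MSP().Configuration()
    if err != nil {
        return err
    }
    mspConf.RootCerts = append(mspConf.RootCerts, newRootCert)
    if err := org1.SetMSP(mspConf); err != nil {
        return err
    }
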
+func (c *ConsortiumOrg) SetPolicy(name string, policy Policy) error { + err := setPolicy(c.orgGroup, name, policy) + if err != nil { + return fmt.Errorf("failed to set policy '%s' to consortium org '%s': %v", name, c.name, err) + } + + return nil +} + +// SetPolicies sets the specified policies in the consortium org group's config policy map. +// If the policies already exist in current configuration, the values will be replaced with new policies. +func (c *ConsortiumOrg) SetPolicies(policies map[string]Policy) error { + err := setPolicies(c.orgGroup, policies) + if err != nil { + return fmt.Errorf("failed to set policies to consortium org '%s': %v", c.name, err) + } + + return nil +} + +// RemovePolicy removes an existing policy from a consortium's organization. +// Removal will panic if either the consortiums group, consortium group, or consortium org group does not exist. +func (c *ConsortiumOrg) RemovePolicy(name string) { + delete(c.orgGroup.Policies, name) +} + +// newConsortiumsGroup returns the consortiums component of the channel configuration. This element is only defined for +// the ordering system channel. +// It sets the mod_policy for all elements to "/Channel/Orderer/Admins". +func newConsortiumsGroup(consortiums []Consortium) (*cb.ConfigGroup, error) { + var err error + + consortiumsGroup := newConfigGroup() + consortiumsGroup.ModPolicy = ordererAdminsPolicyName + + // acceptAllPolicy always evaluates to true + acceptAllPolicy := envelope(nOutOf(0, []*cb.SignaturePolicy{}), [][]byte{}) + + // This policy is not referenced anywhere, it is only used as part of the implicit meta policy rule at the + // channel level, so this setting effectively degrades control of the ordering system channel to the ordering admins + signaturePolicy, err := signaturePolicy(AdminsPolicyKey, acceptAllPolicy) + if err != nil { + return nil, err + } + + consortiumsGroup.Policies[signaturePolicy.key] = &cb.ConfigPolicy{ + Policy: signaturePolicy.value, + ModPolicy: ordererAdminsPolicyName, + } + + for _, consortium := range consortiums { + consortiumsGroup.Groups[consortium.Name], err = newConsortiumGroup(consortium) + if err != nil { + return nil, err + } + } + + return consortiumsGroup, nil +} + +// newConsortiumGroup returns a consortiums component of the channel configuration. +func newConsortiumGroup(consortium Consortium) (*cb.ConfigGroup, error) { + var err error + + consortiumGroup := newConfigGroup() + consortiumGroup.ModPolicy = ordererAdminsPolicyName + + for _, org := range consortium.Organizations { + consortiumGroup.Groups[org.Name], err = newOrgConfigGroup(org) + if err != nil { + return nil, fmt.Errorf("org group '%s': %v", org.Name, err) + } + } + + implicitMetaAnyPolicy, err := implicitMetaAnyPolicy(AdminsPolicyKey) + if err != nil { + return nil, err + } + + err = setValue(consortiumGroup, channelCreationPolicyValue(implicitMetaAnyPolicy.value), ordererAdminsPolicyName) + if err != nil { + return nil, err + } + + return consortiumGroup, nil +} + +// consortiumValue returns the config definition for the consortium name +// It is a value for the channel group. +func consortiumValue(name string) *standardConfigValue { + return &standardConfigValue{ + key: ConsortiumKey, + value: &cb.Consortium{ + Name: name, + }, + } +} + +// channelCreationPolicyValue returns the config definition for a consortium's channel creation policy +// It is a value for the /Channel/Consortiums/*/*. 
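Callers normally reach this value through SetChannelCreationPolicy above rather than constructing it directly. A sketch, assuming a *ConfigTx named c; the rule string follows the implicit meta form used throughout this package (e.g. "MAJORITY Admins"):

    err := c.Consortium("Consortium1").SetChannelCreationPolicy(Policy{
        Type: ImplicitMetaPolicyType,
        Rule: "MAJORITY Admins",
    })
    if err != nil {
        return err
    }
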
+func channelCreationPolicyValue(policy *cb.Policy) *standardConfigValue { + return &standardConfigValue{ + key: ChannelCreationPolicyKey, + value: policy, + } +} + +// envelope builds an envelope message embedding a SignaturePolicy. +func envelope(policy *cb.SignaturePolicy, identities [][]byte) *cb.SignaturePolicyEnvelope { + ids := make([]*mb.MSPPrincipal, len(identities)) + for i := range ids { + ids[i] = &mb.MSPPrincipal{PrincipalClassification: mb.MSPPrincipal_IDENTITY, Principal: identities[i]} + } + + return &cb.SignaturePolicyEnvelope{ + Version: 0, + Rule: policy, + Identities: ids, + } +} + +// nOutOf creates a policy which requires N out of the slice of policies to evaluate to true. +func nOutOf(n int32, policies []*cb.SignaturePolicy) *cb.SignaturePolicy { + return &cb.SignaturePolicy{ + Type: &cb.SignaturePolicy_NOutOf_{ + NOutOf: &cb.SignaturePolicy_NOutOf{ + N: n, + Rules: policies, + }, + }, + } +} + +// signaturePolicy defines a policy with key policyName and the given signature policy. +func signaturePolicy(policyName string, sigPolicy *cb.SignaturePolicyEnvelope) (*standardConfigPolicy, error) { + signaturePolicy, err := proto.Marshal(sigPolicy) + if err != nil { + return nil, fmt.Errorf("marshaling signature policy: %v", err) + } + + return &standardConfigPolicy{ + key: policyName, + value: &cb.Policy{ + Type: int32(cb.Policy_SIGNATURE), + Value: signaturePolicy, + }, + }, nil +} + +// implicitMetaPolicy creates a new *cb.Policy of cb.Policy_IMPLICIT_META type. +func implicitMetaPolicy(subPolicyName string, rule cb.ImplicitMetaPolicy_Rule) (*cb.Policy, error) { + implicitMetaPolicy, err := proto.Marshal(&cb.ImplicitMetaPolicy{ + Rule: rule, + SubPolicy: subPolicyName, + }) + if err != nil { + return nil, fmt.Errorf("failed to marshal implicit meta policy: %v", err) + } + + return &cb.Policy{ + Type: int32(cb.Policy_IMPLICIT_META), + Value: implicitMetaPolicy, + }, nil +} + +// implicitMetaAnyPolicy defines an implicit meta policy whose sub_policy and key is policyname with rule ANY. +func implicitMetaAnyPolicy(policyName string) (*standardConfigPolicy, error) { + implicitMetaPolicy, err := implicitMetaPolicy(policyName, cb.ImplicitMetaPolicy_ANY) + if err != nil { + return nil, fmt.Errorf("failed to make implicit meta ANY policy: %v", err) + } + + return &standardConfigPolicy{ + key: policyName, + value: implicitMetaPolicy, + }, nil +} + +// getConsortiumOrg returns the organization config group for a consortium org in the +// provided config. It will panic if the consortium doesn't exist, and it +// will return nil if the org doesn't exist in the config. +func getConsortiumOrg(config *cb.Config, consortiumName string, orgName string) *cb.ConfigGroup { + consortiumsGroup := config.ChannelGroup.Groups[ConsortiumsGroupKey].Groups + consortiumGroup := consortiumsGroup[consortiumName] + return consortiumGroup.Groups[orgName] +} diff --git a/v2/configtx/consortiums_test.go b/v2/configtx/consortiums_test.go new file mode 100644 index 0000000..e388aa9 --- /dev/null +++ b/v2/configtx/consortiums_test.go @@ -0,0 +1,2322 @@ +/* +Copyright IBM Corp All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "bytes" + "crypto/ecdsa" + "crypto/x509" + "encoding/base64" + "fmt" + "math/big" + "testing" + + "github.com/hyperledger/fabric-config/v2/configtx/orderer" + "github.com/hyperledger/fabric-config/v2/protolator" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/commonext" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + . "github.com/onsi/gomega" + "google.golang.org/protobuf/proto" +) + +func TestNewConsortiumsGroup(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + org1CertBase64, org1CRLBase64 := certCRLBase64(t, consortiums[0].Organizations[0].MSP) + org2CertBase64, org2CRLBase64 := certCRLBase64(t, consortiums[0].Organizations[1].MSP) + + expectedConsortiumsGroup := fmt.Sprintf(`{ + "groups": { + "Consortium1": { + "groups": { + "Org1": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + 
"mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[3]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[3]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[4]s" + ], + "root_certs": [ + "%[3]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[3]s" + ], + "tls_root_certs": [ + "%[3]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "/Channel/Orderer/Admins", + "policies": {}, + "values": { + "ChannelCreationPolicy": { + "mod_policy": "/Channel/Orderer/Admins", + "value": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Admins" + } + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "/Channel/Orderer/Admins", + "policies": { + "Admins": { + "mod_policy": "/Channel/Orderer/Admins", + "policy": { + "type": 1, + "value": { + "identities": [], + "rule": { + "n_out_of": { + "n": 0, + "rules": [] + } + }, + "version": 0 + } + }, + "version": "0" + } + }, + "values": {}, + "version": "0" +} +`, org1CertBase64, org1CRLBase64, org2CertBase64, org2CRLBase64) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &commonext.DynamicConsortiumsGroup{ConfigGroup: consortiumsGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(Equal(expectedConsortiumsGroup)) +} + +func TestNewConsortiumsGroupFailure(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + consortiums[0].Organizations[0].Policies = nil + + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).To(MatchError("org group 'Org1': no policies defined")) + gt.Expect(consortiumsGroup).To(BeNil()) +} + +func TestSetConsortiumOrg(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + org1CertBase64, org1CRLBase64 := certCRLBase64(t, consortiums[0].Organizations[0].MSP) + org2CertBase64, org2CRLBase64 := certCRLBase64(t, consortiums[0].Organizations[1].MSP) + + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Consortiums": consortiumsGroup, + }, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{}, + }, + } + + c := New(config) + + msp, _ := baseMSP(t) + orgToAdd := Organization{ + Name: "Org3", + Policies: orgStandardPolicies(), + MSP: msp, + } + org3CertBase64, org3CRLBase64 := certCRLBase64(t, orgToAdd.MSP) + + expectedConfigJSON := fmt.Sprintf(` +{ + "channel_group": { + "groups": { + "Consortiums": { + "groups": { + "Consortium1": { + "groups": { + "Org1": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + 
}, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[3]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[3]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[4]s" + ], + "root_certs": [ + "%[3]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[3]s" + ], + "tls_root_certs": [ + "%[3]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + }, + "Org3": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + 
"value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[5]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[5]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[5]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[5]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[5]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[5]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[5]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[6]s" + ], + "root_certs": [ + "%[5]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[5]s" + ], + "tls_root_certs": [ + "%[5]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "/Channel/Orderer/Admins", + "policies": {}, + "values": { + "ChannelCreationPolicy": { + "mod_policy": "/Channel/Orderer/Admins", + "value": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Admins" + } + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "/Channel/Orderer/Admins", + "policies": { + "Admins": { + "mod_policy": "/Channel/Orderer/Admins", + "policy": { + "type": 1, + "value": { + "identities": [], + "rule": { + "n_out_of": { + "n": 0, + "rules": [] + } + }, + "version": 0 + } + }, + "version": "0" + } + }, + "values": {}, + "version": "0" + } + }, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + }, + "sequence": "0" +} +`, org1CertBase64, org1CRLBase64, org2CertBase64, org2CRLBase64, org3CertBase64, org3CRLBase64) + + expectedConfigProto := &cb.Config{} + err = protolator.DeepUnmarshalJSON(bytes.NewBufferString(expectedConfigJSON), expectedConfigProto) + gt.Expect(err).NotTo(HaveOccurred()) + + err = c.Consortium("Consortium1").SetOrganization(orgToAdd) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(proto.Equal(c.updated, expectedConfigProto)).To(BeTrue()) +} + +func TestSetConsortiumOrgFailures(t *testing.T) { + t.Parallel() + + orgToAdd := Organization{ + Name: "test-org", + } + + for _, test := range []struct { + name string + org Organization + consortium string + config *cb.Config + expectedErr string + }{ + { + name: "When the organization doesn't have policies defined", + org: orgToAdd, + consortium: "", + expectedErr: "failed to create consortium org test-org: no policies defined", + }, + } { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + config := 
&cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Consortiums": consortiumsGroup, + }, + }, + } + + c := New(config) + + err = c.Consortium(test.consortium).SetOrganization(test.org) + gt.Expect(err).To(MatchError(test.expectedErr)) + }) + } +} + +func TestRemoveConsortium(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ConsortiumsGroupKey: consortiumsGroup, + }, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{}, + }, + } + + c := New(config) + + c.Consortiums().RemoveConsortium("Consortium1") + + gt.Expect(c.Consortium("Consortium1")).To(BeNil()) +} + +func TestGetConsortiums(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + baseConsortiums, _ := baseConsortiums(t) + baseOrderer, _ := baseSoloOrderer(t) + policies := standardPolicies() + + channel := Channel{ + Consortiums: baseConsortiums, + Orderer: baseOrderer, + Capabilities: []string{"V2_0"}, + Policies: policies, + Consortium: "testconsortium", + } + channelGroup, err := newSystemChannelGroup(channel) + if channel.Orderer.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ChannelGroup: channelGroup} + c := New(config) + + consortiums, err := c.Consortiums().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(len(baseConsortiums)).To(Equal(len(consortiums))) +} + +func TestGetConsortiumOrg(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + consortiumGroup, _, err := baseConsortiumChannelGroup(t) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: consortiumGroup, + } + + org1ConfigGroup := getConsortiumOrg(config, "Consortium1", "Org1") + gt.Expect(org1ConfigGroup).To(Equal(config.ChannelGroup.Groups[ConsortiumsGroupKey].Groups["Consortium1"].Groups["Org1"])) +} + +func TestSetConsortium(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Consortiums": consortiumsGroup, + }, + }, + } + + c := New(config) + + newConsortium := consortiums[0] + newConsortium.Name = "Consortium2" + + err = c.Consortiums().SetConsortium(newConsortium) + gt.Expect(err).NotTo(HaveOccurred()) + + org1CertBase64, org1CRLBase64 := certCRLBase64(t, consortiums[0].Organizations[0].MSP) + org2CertBase64, org2CRLBase64 := certCRLBase64(t, consortiums[0].Organizations[1].MSP) + + expectedConfigJSON := fmt.Sprintf(` +{ + "channel_group": { + "groups": { + "Consortiums": { + "groups": { + "Consortium1": { + "groups": { + "Org1": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + 
"value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[3]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[3]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[4]s" + ], + "root_certs": [ + "%[3]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[3]s" + ], + "tls_root_certs": [ + "%[3]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "/Channel/Orderer/Admins", + "policies": {}, + "values": { + "ChannelCreationPolicy": { + "mod_policy": "/Channel/Orderer/Admins", + "value": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Admins" + } + }, + "version": "0" + } + }, + "version": "0" + }, + "Consortium2": { + "groups": { + "Org1": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + 
"Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[3]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[3]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[3]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[4]s" + ], + "root_certs": [ + "%[3]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[3]s" + ], + "tls_root_certs": [ + "%[3]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + 
"mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + } + }, + "mod_policy": "/Channel/Orderer/Admins", + "policies": { + "Admins": { + "mod_policy": "/Channel/Orderer/Admins", + "policy": { + "type": 1, + "value": { + "identities": [], + "rule": { + "n_out_of": { + "n": 0, + "rules": [] + } + }, + "version": 0 + } + }, + "version": "0" + } + }, + "values": {}, + "version": "0" + } + }, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + }, + "sequence": "0" +} +`, org1CertBase64, org1CRLBase64, org2CertBase64, org2CRLBase64) + + expectedConfigProto := &cb.Config{} + err = protolator.DeepUnmarshalJSON(bytes.NewBufferString(expectedConfigJSON), expectedConfigProto) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(proto.Equal(c.updated, expectedConfigProto)).To(BeTrue()) +} + +func TestConsortiumOrg(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channel, _, _ := baseSystemChannelProfile(t) + channelGroup, err := newSystemChannelGroup(channel) + if channel.Orderer.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + expectedOrg := channel.Consortiums[0].Organizations[0] + + tests := []struct { + name string + consortiumName string + orgName string + expectedErr string + }{ + { + name: "success", + consortiumName: "Consortium1", + orgName: "Org1", + expectedErr: "", + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + org, err := c.Consortium(tc.consortiumName).Organization(tc.orgName).Configuration() + if tc.expectedErr != "" { + gt.Expect(Organization{}).To(Equal(org)) + gt.Expect(err).To(MatchError(tc.expectedErr)) + } else { + gt.Expect(err).ToNot(HaveOccurred()) + gt.Expect(expectedOrg).To(Equal(org)) + } + }) + } +} + +func TestRemoveConsortiumOrg(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channel, _, _ := baseSystemChannelProfile(t) + channelGroup, err := newSystemChannelGroup(channel) + if channel.Orderer.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + c.Consortium("Consortium1").RemoveOrganization("Org1") + gt.Expect(c.Consortium("Consortium1").Organization("Org1")).To(BeNil()) +} + +func TestSetConsortiumOrgModPolicy(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ConsortiumsGroupKey: consortiumsGroup, + }, + }, + } + + c := New(config) + + consortium1Org1 := c.Consortium("Consortium1").Organization("Org1") + err = consortium1Org1.SetModPolicy("TestModPolicy") + gt.Expect(err).NotTo(HaveOccurred()) + + updatedModPolicy := consortium1Org1.orgGroup.GetModPolicy() + gt.Expect(updatedModPolicy).To(Equal("TestModPolicy")) +} + +func TestSetConsortiumOrgModPolicyFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + consortiumsGroup, err := newConsortiumsGroup(consortiums) 
+ gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ConsortiumsGroupKey: consortiumsGroup, + }, + }, + } + + c := New(config) + + err = c.Consortium("Consortium1").Organization("Org1").SetModPolicy("") + gt.Expect(err).To(MatchError("non empty mod policy is required")) +} + +func TestSetConsortiumOrgPolicy(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ConsortiumsGroupKey: consortiumsGroup, + }, + }, + } + + c := New(config) + + expectedPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + EndorsementPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy": { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + } + + consortium1Org1 := c.Consortium("Consortium1").Organization("Org1") + err = consortium1Org1.SetPolicy("TestPolicy", Policy{Type: ImplicitMetaPolicyType, Rule: "MAJORITY Endorsement"}) + gt.Expect(err).NotTo(HaveOccurred()) + + updatedPolicies, err := consortium1Org1.Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedPolicies).To(Equal(expectedPolicies)) +} + +func TestSetConsortiumOrgPolicyFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ConsortiumsGroupKey: consortiumsGroup, + }, + }, + } + + c := New(config) + + for _, test := range []struct { + name string + consortium string + org string + policy Policy + expectedErr string + }{ + { + name: "When setting empty policy fails", + consortium: "Consortium1", + org: "Org1", + policy: Policy{}, + expectedErr: "failed to set policy 'TestPolicy' to consortium org 'Org1': unknown policy type: ", + }, + } { + err := c.Consortium(test.consortium).Organization(test.org).SetPolicy("TestPolicy", test.policy) + gt.Expect(err).To(MatchError(test.expectedErr)) + } +} + +func TestSetConsortiumOrgPolicies(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + consortiums[0].Organizations[0].Policies["TestPolicy_Remove"] = Policy{Type: ImplicitMetaPolicyType, Rule: "MAJORITY Endorsement"} + + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ConsortiumsGroupKey: consortiumsGroup, + }, + }, + } + + c := New(config) + + newPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: 
ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + EndorsementPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy_Add1": { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy_Add2": { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + } + + consortium1Org1 := c.Consortium("Consortium1").Organization("Org1") + err = consortium1Org1.SetPolicies(newPolicies) + gt.Expect(err).NotTo(HaveOccurred()) + + updatedPolicies, err := consortium1Org1.Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedPolicies).To(Equal(newPolicies)) +} + +func TestSetConsortiumOrgPoliciesFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ConsortiumsGroupKey: consortiumsGroup, + }, + }, + } + + c := New(config) + + newPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + EndorsementPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + }, + "TestPolicy": {}, + } + + consortium1Org1 := c.Consortium("Consortium1").Organization("Org1") + err = consortium1Org1.SetPolicies(newPolicies) + gt.Expect(err).To(MatchError("failed to set policies to consortium org 'Org1': unknown policy type: ")) +} + +func TestRemoveConsortiumOrgPolicy(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + + consortiums[0].Organizations[0].Policies["TestPolicy"] = Policy{Type: ImplicitMetaPolicyType, Rule: "MAJORITY Endorsement"} + + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ConsortiumsGroupKey: consortiumsGroup, + }, + }, + } + + c := New(config) + + expectedPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + EndorsementPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + } + + consortium1Org1 := c.Consortium("Consortium1").Organization("Org1") + consortium1Org1.RemovePolicy("TestPolicy") + + updatedPolicies, err := consortium1Org1.Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedPolicies).To(Equal(expectedPolicies)) +} + +func TestConsortiumOrgPolicies(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ConsortiumsGroupKey: consortiumsGroup, + }, + }, + } + + c := New(config) + 
+ expectedPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + EndorsementPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + } + + policies, err := c.Consortium("Consortium1").Organization("Org1").Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(policies).To(Equal(expectedPolicies)) +} + +func TestConsortiumMSP(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + expectedMSP := consortiums[0].Organizations[0].MSP + + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ConsortiumsGroupKey: consortiumsGroup, + }, + }, + } + + c := New(config) + + msp, err := c.Consortium("Consortium1").Organization("Org1").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp).To(Equal(expectedMSP)) +} + +func TestSetConsortiumMSP(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + consortiumGroup, privKeys, err := baseConsortiumChannelGroup(t) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: consortiumGroup, + } + c := New(config) + + consortium1 := c.Consortium("Consortium1") + consortiumOrg1MSP, err := consortium1.Organization("Org1").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + consortiumOrg2MSP, err := consortium1.Organization("Org2").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + consortiumOrg1CertBase64, consortiumOrg1CRLBase64 := certCRLBase64(t, consortiumOrg1MSP) + consortiumOrg2CertBase64, consortiumOrg2CRLBase64 := certCRLBase64(t, consortiumOrg2MSP) + + newRootCert, newRootPrivKey := generateCACertAndPrivateKey(t, "anotherca-org1.example.com") + newRootCertBase64 := base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(newRootCert)) + consortiumOrg1MSP.RootCerts = append(consortiumOrg1MSP.RootCerts, newRootCert) + + newIntermediateCert, _ := generateIntermediateCACertAndPrivateKey(t, "anotherca-org1.example.com", newRootCert, newRootPrivKey) + newIntermediateCertBase64 := base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(newIntermediateCert)) + consortiumOrg1MSP.IntermediateCerts = append(consortiumOrg1MSP.IntermediateCerts, newIntermediateCert) + + cert := consortiumOrg1MSP.RootCerts[0] + privKey := privKeys[0] + certToRevoke, _ := generateCertAndPrivateKeyFromCACert(t, "org1.example.com", cert, privKey) + signingIdentity := &SigningIdentity{ + Certificate: cert, + PrivateKey: privKey, + MSPID: "MSPID", + } + newCRL, err := consortiumOrg1MSP.CreateMSPCRL(signingIdentity, certToRevoke) + gt.Expect(err).NotTo(HaveOccurred()) + pemNewCRL, err := pemEncodeCRL(newCRL) + gt.Expect(err).NotTo(HaveOccurred()) + newCRLBase64 := base64.StdEncoding.EncodeToString(pemNewCRL) + consortiumOrg1MSP.RevocationList = append(consortiumOrg1MSP.RevocationList, newCRL) + + err = consortium1.Organization("Org1").SetMSP(consortiumOrg1MSP) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedConfigJSON := fmt.Sprintf(` +{ + "channel_group": { + "groups": { + "Consortiums": { + "groups": { + "Consortium1": { + "groups": { + "Org1": { + 
"groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s", + "%[2]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[3]s", + "%[4]s" + ], + "root_certs": [ + "%[1]s", + "%[5]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + }, + "Org2": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[6]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[6]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[6]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[6]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[6]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[6]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[6]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[7]s" + ], + "root_certs": [ + "%[6]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[6]s" + ], + "tls_root_certs": [ + 
"%[6]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "/Channel/Orderer/Admins", + "policies": {}, + "values": { + "ChannelCreationPolicy": { + "mod_policy": "/Channel/Orderer/Admins", + "value": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Admins" + } + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "/Channel/Orderer/Admins", + "policies": { + "Admins": { + "mod_policy": "/Channel/Orderer/Admins", + "policy": { + "type": 1, + "value": { + "identities": [], + "rule": { + "n_out_of": { + "n": 0, + "rules": [] + } + }, + "version": 0 + } + }, + "version": "0" + } + }, + "values": {}, + "version": "0" + } + }, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + }, + "sequence": "0" +} +`, consortiumOrg1CertBase64, newIntermediateCertBase64, consortiumOrg1CRLBase64, newCRLBase64, newRootCertBase64, consortiumOrg2CertBase64, consortiumOrg2CRLBase64) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, c.updated) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(MatchJSON(expectedConfigJSON)) +} + +func TestSetConsortiumMSPFailure(t *testing.T) { + t.Parallel() + + tests := []struct { + spec string + mspMod func(MSP) MSP + consortiumName string + orgName string + expectedErr string + }{ + { + spec: "updating msp name", + mspMod: func(msp MSP) MSP { + msp.Name = "thiscantbegood" + return msp + }, + consortiumName: "Consortium1", + orgName: "Org1", + expectedErr: "MSP name cannot be changed", + }, + { + spec: "invalid root ca cert keyusage", + mspMod: func(msp MSP) MSP { + msp.RootCerts = []*x509.Certificate{ + { + SerialNumber: big.NewInt(7), + KeyUsage: x509.KeyUsageKeyAgreement, + }, + } + return msp + }, + consortiumName: "Consortium1", + orgName: "Org1", + expectedErr: "invalid root cert: KeyUsage must be x509.KeyUsageCertSign. 
serial number: 7", + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.spec, func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + consortiumGroup, _, err := baseConsortiumChannelGroup(t) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: consortiumGroup, + } + c := New(config) + + consortiumOrg1 := c.Consortium("Consortium1").Organization("Org1") + consortiumOrg1MSP, err := consortiumOrg1.MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + + consortiumOrg1MSP = tc.mspMod(consortiumOrg1MSP) + err = consortiumOrg1.SetMSP(consortiumOrg1MSP) + gt.Expect(err).To(MatchError(tc.expectedErr)) + }) + } +} + +func baseConsortiums(t *testing.T) ([]Consortium, []*ecdsa.PrivateKey) { + org1MSP, org1PrivKey := baseMSP(t) + org2MSP, org2PrivKey := baseMSP(t) + + return []Consortium{ + { + Name: "Consortium1", + Organizations: []Organization{ + { + Name: "Org1", + Policies: orgStandardPolicies(), + MSP: org1MSP, + }, + { + Name: "Org2", + Policies: orgStandardPolicies(), + MSP: org2MSP, + }, + }, + }, + }, []*ecdsa.PrivateKey{org1PrivKey, org2PrivKey} +} + +func baseConsortiumChannelGroup(t *testing.T) (*cb.ConfigGroup, []*ecdsa.PrivateKey, error) { + channelGroup := newConfigGroup() + + consortiums, privKeys := baseConsortiums(t) + consortiumsGroup, err := newConsortiumsGroup(consortiums) + if err != nil { + return nil, nil, err + } + + channelGroup.Groups[ConsortiumsGroupKey] = consortiumsGroup + + return channelGroup, privKeys, nil +} diff --git a/v2/configtx/constants.go b/v2/configtx/constants.go new file mode 100644 index 0000000..4addb35 --- /dev/null +++ b/v2/configtx/constants.go @@ -0,0 +1,90 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +const ( + // These values are fixed for the genesis block. + msgVersion = 0 + epoch = 0 + + // ConsortiumKey is the key for the ConfigValue of a + // Consortium. + ConsortiumKey = "Consortium" + + // HashingAlgorithmKey is the key for the ConfigValue of a + // HashingAlgorithm. + HashingAlgorithmKey = "HashingAlgorithm" + + // BlockDataHashingStructureKey is the key for the ConfigValue + // of a BlockDataHashingStructure. + BlockDataHashingStructureKey = "BlockDataHashingStructure" + + // CapabilitiesKey is the key for the ConfigValue, capabilities. + // CapabiltiesKey can be used at the channel, application, and orderer levels. + CapabilitiesKey = "Capabilities" + + // EndpointsKey is the key for the ConfigValue, Endpoints in + // a OrdererOrgGroup. + EndpointsKey = "Endpoints" + + // MSPKey is the key for the ConfigValue, MSP. + MSPKey = "MSP" + + // AdminsPolicyKey is the key used for the admin policy. + AdminsPolicyKey = "Admins" + + // ReadersPolicyKey is the key used for the read policy. + ReadersPolicyKey = "Readers" + + // WritersPolicyKey is the key used for the write policy. + WritersPolicyKey = "Writers" + + // EndorsementPolicyKey is the key used for the endorsement policy. + EndorsementPolicyKey = "Endorsement" + + // LifecycleEndorsementPolicyKey is the key used for the lifecycle endorsement + // policy. + LifecycleEndorsementPolicyKey = "LifecycleEndorsement" + + // BlockValidationPolicyKey is the key used for the block validation policy in + // the OrdererOrgGroup. + BlockValidationPolicyKey = "BlockValidation" + + // ChannelCreationPolicyKey is the key used in the consortium config to denote + // the policy to be used in evaluating whether a channel creation request + // is authorized. 
+ ChannelCreationPolicyKey = "ChannelCreationPolicy" + + // ChannelGroupKey is the group name for the channel config. + ChannelGroupKey = "Channel" + + // ConsortiumsGroupKey is the group name for the consortiums config. + ConsortiumsGroupKey = "Consortiums" + + // OrdererGroupKey is the group name for the orderer config. + OrdererGroupKey = "Orderer" + + // ApplicationGroupKey is the group name for the Application config. + ApplicationGroupKey = "Application" + + // ACLsKey is the name of the ACLs config. + ACLsKey = "ACLs" + + // AnchorPeersKey is the key name for the AnchorPeers ConfigValue. + AnchorPeersKey = "AnchorPeers" + + // ImplicitMetaPolicyType is the 'Type' string for implicit meta policies. + ImplicitMetaPolicyType = "ImplicitMeta" + + // SignaturePolicyType is the 'Type' string for signature policies. + SignaturePolicyType = "Signature" + + ordererAdminsPolicyName = "/Channel/Orderer/Admins" + + // OrdererAddressesKey is the key for the ConfigValue of OrdererAddresses. + OrdererAddressesKey = "OrdererAddresses" +) diff --git a/v2/configtx/example_test.go b/v2/configtx/example_test.go new file mode 100644 index 0000000..54edcc5 --- /dev/null +++ b/v2/configtx/example_test.go @@ -0,0 +1,1303 @@ +/* +Copyright IBM Corp All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx_test + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "log" + "math/big" + "testing" + "time" + + "github.com/hyperledger/fabric-config/v2/configtx" + "github.com/hyperledger/fabric-config/v2/configtx/membership" + "github.com/hyperledger/fabric-config/v2/configtx/orderer" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + mb "github.com/hyperledger/fabric-protos-go-apiv2/msp" + ob "github.com/hyperledger/fabric-protos-go-apiv2/orderer" + pb "github.com/hyperledger/fabric-protos-go-apiv2/peer" + . "github.com/onsi/gomega" + "google.golang.org/protobuf/proto" +) + +const ( + // Arbitrary valid pem encoded x509 certificate from crypto/x509 tests. + // The contents of the certifcate don't matter, we just need a valid certificate + // to pass marshaling/unmarshalling. + dummyCert = `-----BEGIN CERTIFICATE----- +MIIDATCCAemgAwIBAgIRAKQkkrFx1T/dgB/Go/xBM5swDQYJKoZIhvcNAQELBQAw +EjEQMA4GA1UEChMHQWNtZSBDbzAeFw0xNjA4MTcyMDM2MDdaFw0xNzA4MTcyMDM2 +MDdaMBIxEDAOBgNVBAoTB0FjbWUgQ28wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDAoJtjG7M6InsWwIo+l3qq9u+g2rKFXNu9/mZ24XQ8XhV6PUR+5HQ4 +jUFWC58ExYhottqK5zQtKGkw5NuhjowFUgWB/VlNGAUBHtJcWR/062wYrHBYRxJH +qVXOpYKbIWwFKoXu3hcpg/CkdOlDWGKoZKBCwQwUBhWE7MDhpVdQ+ZljUJWL+FlK +yQK5iRsJd5TGJ6VUzLzdT4fmN2DzeK6GLeyMpVpU3sWV90JJbxWQ4YrzkKzYhMmB +EcpXTG2wm+ujiHU/k2p8zlf8Sm7VBM/scmnMFt0ynNXop4FWvJzEm1G0xD2t+e2I +5Utr04dOZPCgkm++QJgYhtZvgW7ZZiGTAgMBAAGjUjBQMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDATAMBgNVHRMBAf8EAjAAMBsGA1UdEQQUMBKC +EHRlc3QuZXhhbXBsZS5jb20wDQYJKoZIhvcNAQELBQADggEBADpqKQxrthH5InC7 +X96UP0OJCu/lLEMkrjoEWYIQaFl7uLPxKH5AmQPH4lYwF7u7gksR7owVG9QU9fs6 +1fK7II9CVgCd/4tZ0zm98FmU4D0lHGtPARrrzoZaqVZcAvRnFTlPX5pFkPhVjjai +/mkxX9LpD8oK1445DFHxK5UjLMmPIIWd8EOi+v5a+hgGwnJpoW7hntSl8kHMtTmy +fnnktsblSUV4lRCit0ymC7Ojhe+gzCCwkgs5kDzVVag+tnl/0e2DloIjASwOhpbH +KVcg7fBd484ht/sS+l0dsB4KDOSpd8JzVDMF8OZqlaydizoJO0yWr9GbCN1+OKq5 +EhLrEqU= +-----END CERTIFICATE----- +` + + // Arbitrary valid pem encoded ec private key. + // The contents of the private key don't matter, we just need a valid + // EC private key to pass marshaling/unmarshalling. 
+ dummyPrivateKey = `-----BEGIN EC PRIVATE KEY----- +MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgDZUgDvKixfLi8cK8 +/TFLY97TDmQV3J2ygPpvuI8jSdihRANCAARRN3xgbPIR83dr27UuDaf2OJezpEJx +UC3v06+FD8MUNcRAboqt4akehaNNSh7MMZI+HdnsM4RXN2y8NePUQsPL +-----END EC PRIVATE KEY----- +` + + // Arbitrary valid pem encoded x509 crl. + // The contents of the CRL don't matter, we just need a valid + // CRL to pass marshaling/unmarshalling. + dummyCRL = `-----BEGIN X509 CRL----- +MIIBYDCBygIBATANBgkqhkiG9w0BAQUFADBDMRMwEQYKCZImiZPyLGQBGRYDY29t +MRcwFQYKCZImiZPyLGQBGRYHZXhhbXBsZTETMBEGA1UEAxMKRXhhbXBsZSBDQRcN +MDUwMjA1MTIwMDAwWhcNMDUwMjA2MTIwMDAwWjAiMCACARIXDTA0MTExOTE1NTcw +M1owDDAKBgNVHRUEAwoBAaAvMC0wHwYDVR0jBBgwFoAUCGivhTPIOUp6+IKTjnBq +SiCELDIwCgYDVR0UBAMCAQwwDQYJKoZIhvcNAQEFBQADgYEAItwYffcIzsx10NBq +m60Q9HYjtIFutW2+DvsVFGzIF20f7pAXom9g5L2qjFXejoRvkvifEBInr0rUL4Xi +NkR9qqNMJTgV/wD9Pn7uPSYS69jnK2LiK8NGgO94gtEVxtCccmrLznrtZ5mLbnCB +fUNCdMGmr8FVF6IzTNYGmCuk/C4= +-----END X509 CRL----- +` +) + +// This example shows the basic usage of the package: modifying, computing, and signing +// a config update. +func Example_basic() { + baseConfig := fetchSystemChannelConfig() + c := configtx.New(baseConfig) + + err := c.Consortium("SampleConsortium").SetChannelCreationPolicy(configtx.Policy{ + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }) + if err != nil { + panic(err) + } + + // Compute the delta + marshaledUpdate, err := c.ComputeMarshaledUpdate("testsyschannel") + if err != nil { + panic(err) + } + + // Collect the necessary signatures + // The example respresents a 2 peer 1 org channel, to meet the policies defined + // the transaction will be signed by both peers + configSignatures := []*cb.ConfigSignature{} + + peer1SigningIdentity := createSigningIdentity() + peer2SigningIdentity := createSigningIdentity() + + signingIdentities := []configtx.SigningIdentity{ + peer1SigningIdentity, + peer2SigningIdentity, + } + + for _, si := range signingIdentities { + // Create a signature for the config update with the specified signer identity + configSignature, err := si.CreateConfigSignature(marshaledUpdate) + if err != nil { + panic(err) + } + + configSignatures = append(configSignatures, configSignature) + } + + // Create the envelope with the list of config signatures + env, err := configtx.NewEnvelope(marshaledUpdate, configSignatures...) + if err != nil { + panic(err) + } + + // Sign the envelope with a signing identity + err = peer1SigningIdentity.SignEnvelope(env) + if err != nil { + panic(err) + } +} + +// This example updates an existing orderer configuration. +func ExampleOrdererGroup_SetConfiguration() { + baseConfig := fetchChannelConfig() + c := configtx.New(baseConfig) + o := c.Orderer() + + // Must retrieve the current orderer configuration from block and modify + // the desired values + oConfig, err := o.Configuration() + if err != nil { + panic(err) + } + + oConfig.Kafka.Brokers = []string{"kafka0:9092", "kafka1:9092", "kafka2:9092"} + oConfig.BatchSize.MaxMessageCount = 500 + + err = o.SetConfiguration(oConfig) + if err != nil { + panic(nil) + } +} + +// This example shows the addition and removal of ACLs. 
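The ACL example announced above works with a map of resource name to policy path. In the channel config tree those entries live under the Application group's "ACLs" value as a marshaled peer.ACLs message (the same shape fetchChannelConfig builds later in this file); a minimal sketch of reading them back from a fetched config, assuming only the imports already used here:

func printApplicationACLs(config *cb.Config) error {
	if config.ChannelGroup == nil {
		return fmt.Errorf("config has no channel group")
	}

	appGroup, ok := config.ChannelGroup.Groups[configtx.ApplicationGroupKey]
	if !ok {
		return fmt.Errorf("config has no %s group", configtx.ApplicationGroupKey)
	}

	aclsValue, ok := appGroup.Values[configtx.ACLsKey]
	if !ok {
		return fmt.Errorf("%s group has no %s value", configtx.ApplicationGroupKey, configtx.ACLsKey)
	}

	storedACLs := &pb.ACLs{}
	if err := proto.Unmarshal(aclsValue.Value, storedACLs); err != nil {
		return err
	}

	for name, resource := range storedACLs.Acls {
		fmt.Printf("%s -> %s\n", name, resource.PolicyRef)
	}

	return nil
}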
+func Example_aCLs() {
+	baseConfig := fetchChannelConfig()
+	c := configtx.New(baseConfig)
+	a := c.Application()
+
+	acls := map[string]string{
+		"peer/Propose": "/Channel/Application/Writers",
+	}
+
+	err := a.SetACLs(acls)
+	if err != nil {
+		panic(err)
+	}
+
+	aclsToDelete := []string{"event/Block"}
+
+	err = a.RemoveACLs(aclsToDelete)
+	if err != nil {
+		panic(err)
+	}
+}
+
+// This example shows the addition of an anchor peer and the removal of
+// an existing anchor peer.
+func Example_anchorPeers() {
+	baseConfig := fetchChannelConfig()
+	c := configtx.New(baseConfig)
+	applicationOrg1 := c.Application().Organization("Org1")
+
+	newAnchorPeer := configtx.Address{
+		Host: "127.0.0.2",
+		Port: 7051,
+	}
+
+	// Add a new anchor peer
+	err := applicationOrg1.AddAnchorPeer(newAnchorPeer)
+	if err != nil {
+		panic(err)
+	}
+
+	oldAnchorPeer := configtx.Address{
+		Host: "127.0.0.1",
+		Port: 7051,
+	}
+
+	// Remove an anchor peer
+	err = applicationOrg1.RemoveAnchorPeer(oldAnchorPeer)
+	if err != nil {
+		panic(err)
+	}
+}
+
+// This example shows the addition and removal of policies from different
+// config groups.
+func Example_policies() {
+	baseConfig := fetchChannelConfig()
+	c := configtx.New(baseConfig)
+	applicationOrg1 := c.Application().Organization("Org1")
+
+	err := applicationOrg1.SetPolicy(
+		"TestPolicy",
+		configtx.Policy{
+			Type: configtx.ImplicitMetaPolicyType,
+			Rule: "MAJORITY Endorsement",
+		})
+	if err != nil {
+		panic(err)
+	}
+
+	err = applicationOrg1.RemovePolicy(configtx.WritersPolicyKey)
+	if err != nil {
+		panic(err)
+	}
+
+	o := c.Orderer()
+	ordererOrg := o.Organization("OrdererOrg")
+
+	err = ordererOrg.RemovePolicy(configtx.WritersPolicyKey)
+	if err != nil {
+		panic(err)
+	}
+
+	err = ordererOrg.SetPolicy(
+		"TestPolicy",
+		configtx.Policy{
+			Type: configtx.ImplicitMetaPolicyType,
+			Rule: "MAJORITY Endorsement",
+		})
+	if err != nil {
+		panic(err)
+	}
+
+	err = o.RemovePolicy(configtx.WritersPolicyKey)
+	if err != nil {
+		panic(err)
+	}
+
+	err = o.SetPolicy("TestPolicy", configtx.Policy{
+		Type: configtx.ImplicitMetaPolicyType,
+		Rule: "MAJORITY Endorsement",
+	})
+	if err != nil {
+		panic(err)
+	}
+}
+
+// This example shows the bulk replacement of multiple policies
+// for different config groups (see the note on policy Type values below).
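Both policy Type values used by the examples in this file follow a small, fixed syntax: an ImplicitMeta rule is a threshold keyword (ANY, ALL, or MAJORITY) followed by the name of a sub-policy, while a Signature rule uses the principal DSL that the internal policydsl package added later in this patch parses. A minimal sketch of both forms (MSP IDs are illustrative):

	implicitMeta := configtx.Policy{
		Type: configtx.ImplicitMetaPolicyType,
		Rule: "MAJORITY Admins", // <ANY|ALL|MAJORITY> <sub-policy name>
	}

	signature := configtx.Policy{
		Type: configtx.SignaturePolicyType,
		Rule: "OR('Org1MSP.admin', AND('Org2MSP.member', 'Org2MSP.peer'))",
	}

	_, _ = implicitMeta, signature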
+func Example_policies2() { + baseConfig := fetchChannelConfig() + c := configtx.New(baseConfig) + + a := c.Application() + newAppPolicies := map[string]configtx.Policy{ + configtx.ReadersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + configtx.WritersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + configtx.AdminsPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + configtx.EndorsementPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + }, + configtx.LifecycleEndorsementPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + }, + "TestPolicy1": { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + }, + "TestPolicy2": { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + } + err := a.SetPolicies(newAppPolicies) + if err != nil { + panic(err) + } + + o := c.Orderer() + newOrdererPolicies := map[string]configtx.Policy{ + configtx.ReadersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + configtx.WritersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + configtx.AdminsPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + configtx.BlockValidationPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + "TestPolicy1": { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + "TestPolicy2": { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Writers", + }, + } + err = o.SetPolicies(newOrdererPolicies) + if err != nil { + panic(err) + } +} + +// This example shows the addition of an orderer endpoint and the removal of +// an existing orderer endpoint. +func Example_ordererEndpoints() { + baseConfig := fetchChannelConfig() + c := configtx.New(baseConfig) + ordererOrg := c.Orderer().Organization("OrdererOrg") + + err := ordererOrg.SetEndpoint( + configtx.Address{Host: "127.0.0.3", Port: 8050}, + ) + if err != nil { + panic(err) + } + + err = ordererOrg.RemoveEndpoint( + configtx.Address{Host: "127.0.0.1", Port: 9050}, + ) + if err != nil { + panic(err) + } +} + +// This example shows the addition and removal of organizations from +// config groups. 
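The organization example below passes baseMSP(&testing.T{}) purely to keep the example short; outside of tests the MSP is assembled from real PEM material. A minimal, illustrative construction (rootPEM is a placeholder, and a production MSP would typically also carry admins, TLS roots, and NodeOUs, as baseMSP does later in this file):

func mspFromRootPEM(rootPEM []byte) (configtx.MSP, error) {
	block, _ := pem.Decode(rootPEM)
	if block == nil {
		return configtx.MSP{}, fmt.Errorf("failed to decode root certificate PEM")
	}

	rootCert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return configtx.MSP{}, err
	}

	return configtx.MSP{
		Name:      "Org2MSP", // illustrative MSP ID
		RootCerts: []*x509.Certificate{rootCert},
		CryptoConfig: membership.CryptoConfig{
			SignatureHashFamily:            "SHA2", // "SHA2" or "SHA3"
			IdentityIdentifierHashFunction: "SHA256",
		},
	}, nil
}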
+func Example_organizations() { + baseConfig := fetchChannelConfig() + c := configtx.New(baseConfig) + a := c.Application() + + appOrg := configtx.Organization{ + Name: "Org2", + MSP: baseMSP(&testing.T{}), + Policies: map[string]configtx.Policy{ + configtx.AdminsPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + configtx.EndorsementPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + }, + configtx.LifecycleEndorsementPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + }, + configtx.ReadersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + configtx.WritersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + }, + AnchorPeers: []configtx.Address{ + { + Host: "127.0.0.1", + Port: 7051, + }, + }, + } + + err := a.SetOrganization(appOrg) + if err != nil { + panic(err) + } + + a.RemoveOrganization("Org2") + + o := c.Orderer() + + // Orderer Organization + ordererOrg := appOrg + ordererOrg.Name = "OrdererOrg2" + ordererOrg.AnchorPeers = nil + + err = o.SetOrganization(ordererOrg) + if err != nil { + panic(err) + } + + o.RemoveOrganization("OrdererOrg2") +} + +// This example shows updating the individual orderer configuration values. +func ExampleOrdererGroup_SetConfiguration_individual() { + baseConfig := fetchChannelConfig() + c := configtx.New(baseConfig) + o := c.Orderer() + + err := o.BatchSize().SetMaxMessageCount(500) + if err != nil { + panic(err) + } + + err = o.AddConsenter(orderer.Consenter{ + Address: orderer.EtcdAddress{Host: "host1", Port: 7050}, + ClientTLSCert: generateCert(), + ServerTLSCert: generateCert(), + }) + if err != nil { + panic(err) + } + + err = o.EtcdRaftOptions().SetElectionInterval(50) + if err != nil { + panic(err) + } +} + +func ExampleNewSystemChannelGenesisBlock() { + channel := configtx.Channel{ + Consortiums: []configtx.Consortium{ + { + Name: "Consortium1", + Organizations: []configtx.Organization{ + { + Name: "Org1MSP", + Policies: map[string]configtx.Policy{ + configtx.ReadersPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('Org1MSP.admin', 'Org1MSP.peer'," + + "'Org1MSP.client')", + }, + configtx.WritersPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('Org1MSP.admin', 'Org1MSP.client')", + }, + configtx.AdminsPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('Org1MSP.admin')", + }, + configtx.EndorsementPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('Org1MSP.peer')", + }, + }, + MSP: baseMSP(&testing.T{}), + }, + }, + }, + }, + Orderer: configtx.Orderer{ + Policies: map[string]configtx.Policy{ + configtx.ReadersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + configtx.WritersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + configtx.AdminsPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + configtx.BlockValidationPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + }, + OrdererType: orderer.ConsensusTypeSolo, + Organizations: []configtx.Organization{ + { + Name: "OrdererMSP", + Policies: map[string]configtx.Policy{ + configtx.ReadersPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('OrdererMSP.member')", + }, + configtx.WritersPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('OrdererMSP.member')", + }, + configtx.AdminsPolicyKey: { + Type: configtx.SignaturePolicyType, + 
Rule: "OR('OrdererMSP.admin')", + }, + }, + OrdererEndpoints: []string{ + "localhost:123", + }, + MSP: baseMSP(&testing.T{}), + }, + }, + Capabilities: []string{"V1_3"}, + BatchSize: orderer.BatchSize{ + MaxMessageCount: 100, + AbsoluteMaxBytes: 100, + PreferredMaxBytes: 100, + }, + BatchTimeout: 2 * time.Second, + State: orderer.ConsensusStateNormal, + }, + Capabilities: []string{"V2_0"}, + Policies: map[string]configtx.Policy{ + configtx.ReadersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + configtx.WritersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + configtx.AdminsPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + }, + Consortium: "", + } + + channelID := "testSystemChannel" + _, err := configtx.NewSystemChannelGenesisBlock(channel, channelID) + if err != nil { + panic(err) + } +} + +func ExampleNewApplicationChannelGenesisBlock() { + channel := configtx.Channel{ + Orderer: configtx.Orderer{ + OrdererType: orderer.ConsensusTypeEtcdRaft, + Organizations: []configtx.Organization{ + { + Name: "OrdererMSP", + Policies: map[string]configtx.Policy{ + configtx.ReadersPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('OrdererMSP.member')", + }, + configtx.WritersPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('OrdererMSP.member')", + }, + configtx.AdminsPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('OrdererMSP.admin')", + }, + }, + OrdererEndpoints: []string{ + "host1:7050", + }, + MSP: baseMSP(&testing.T{}), + }, + }, + EtcdRaft: orderer.EtcdRaft{ + Consenters: []orderer.Consenter{ + { + Address: orderer.EtcdAddress{ + Host: "host1", + Port: 7050, + }, + ClientTLSCert: generateCert(), + ServerTLSCert: generateCert(), + }, + }, + Options: orderer.EtcdRaftOptions{ + TickInterval: "500ms", + ElectionTick: 10, + HeartbeatTick: 1, + MaxInflightBlocks: 5, + SnapshotIntervalSize: 16 * 1024 * 1024, + }, + }, + Policies: map[string]configtx.Policy{ + configtx.ReadersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + configtx.WritersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + configtx.AdminsPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + configtx.BlockValidationPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + }, + Capabilities: []string{"V2_0"}, + BatchSize: orderer.BatchSize{ + MaxMessageCount: 100, + AbsoluteMaxBytes: 100, + PreferredMaxBytes: 100, + }, + BatchTimeout: 2 * time.Second, + State: orderer.ConsensusStateNormal, + }, + Application: configtx.Application{ + Organizations: []configtx.Organization{ + { + Name: "Org1MSP", + Policies: map[string]configtx.Policy{ + configtx.ReadersPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('Org1MSP.admin', 'Org1MSP.peer'," + + "'Org1MSP.client')", + }, + configtx.WritersPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('Org1MSP.admin', 'Org1MSP.client')", + }, + configtx.AdminsPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('Org1MSP.admin')", + }, + configtx.EndorsementPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('Org1MSP.peer')", + }, + }, + MSP: baseMSP(&testing.T{}), + }, + { + Name: "Org2MSP", + Policies: map[string]configtx.Policy{ + configtx.ReadersPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('Org2MSP.admin', 'Org2MSP.peer'," + + "'Org2MSP.client')", + }, + 
configtx.WritersPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('Org2MSP.admin', 'Org2MSP.client')", + }, + configtx.AdminsPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('Org2MSP.admin')", + }, + configtx.EndorsementPolicyKey: { + Type: configtx.SignaturePolicyType, + Rule: "OR('Org2MSP.peer')", + }, + }, + MSP: baseMSP(&testing.T{}), + }, + }, + Capabilities: []string{"V2_0"}, + ACLs: map[string]string{ + "event/Block": "/Channel/Application/Readers", + }, + Policies: map[string]configtx.Policy{ + configtx.ReadersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + configtx.WritersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + configtx.AdminsPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + configtx.EndorsementPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + }, + configtx.LifecycleEndorsementPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + }, + }, + }, + Capabilities: []string{"V2_0"}, + Policies: map[string]configtx.Policy{ + configtx.ReadersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + configtx.WritersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + configtx.AdminsPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + }, + } + + channelID := "testchannel" + _, err := configtx.NewApplicationChannelGenesisBlock(channel, channelID) + if err != nil { + panic(err) + } +} + +func ExampleNewMarshaledCreateChannelTx() { + channel := configtx.Channel{ + Consortium: "SampleConsortium", + Application: configtx.Application{ + Organizations: []configtx.Organization{ + { + Name: "Org1", + }, + { + Name: "Org2", + }, + }, + Capabilities: []string{"V1_3"}, + ACLs: map[string]string{ + "event/Block": "/Channel/Application/Readers", + }, + Policies: map[string]configtx.Policy{ + configtx.ReadersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + configtx.WritersPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + configtx.AdminsPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + configtx.EndorsementPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + }, + configtx.LifecycleEndorsementPolicyKey: { + Type: configtx.ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + }, + }, + }, + } + + _, err := configtx.NewMarshaledCreateChannelTx(channel, "testchannel") + if err != nil { + panic(err) + } +} + +// This example shows the addition of a certificate to an application org's intermediate +// certificate list. +func ExampleApplicationOrg_SetMSP() { + baseConfig := fetchChannelConfig() + c := configtx.New(baseConfig) + applicationOrg1 := c.Application().Organization("Org1") + + msp, err := applicationOrg1.MSP().Configuration() + if err != nil { + panic(err) + } + + newIntermediateCert := &x509.Certificate{ + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + IsCA: true, + } + + msp.IntermediateCerts = append(msp.IntermediateCerts, newIntermediateCert) + + err = applicationOrg1.SetMSP(msp) + if err != nil { + panic(err) + } +} + +// This example shows the addition of a certificate to an orderer org's intermediate +// certificate list. 
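Before the orderer-org MSP example continues below, a note on the genesis-block helpers above: the examples discard the returned block only for brevity. In practice the block is serialized and used to bootstrap ordering nodes or to join the channel; a sketch, assuming the helper returns a protobuf block message as its v1 counterpart does:

func applicationGenesisBlockBytes(channel configtx.Channel, channelID string) ([]byte, error) {
	block, err := configtx.NewApplicationChannelGenesisBlock(channel, channelID)
	if err != nil {
		return nil, err
	}

	// The resulting bytes would typically be written to disk (for example with
	// os.WriteFile) and handed to the ordering service.
	return proto.Marshal(block)
}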
+func ExampleOrdererOrg_SetMSP() { + baseConfig := fetchChannelConfig() + c := configtx.New(baseConfig) + ordererOrg := c.Orderer().Organization("OrdererOrg") + + msp, err := ordererOrg.MSP().Configuration() + if err != nil { + panic(err) + } + + newIntermediateCert := &x509.Certificate{ + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + IsCA: true, + } + + msp.IntermediateCerts = append(msp.IntermediateCerts, newIntermediateCert) + + err = ordererOrg.SetMSP(msp) + if err != nil { + panic(err) + } +} + +// This example shows the addition of a certificate to a consortium org's intermediate +// certificate list. +func ExampleConsortiumOrg_SetMSP() { + baseConfig := fetchSystemChannelConfig() + c := configtx.New(baseConfig) + + sampleConsortiumOrg1 := c.Consortium("SampleConsortium").Organization("Org1") + + msp, err := sampleConsortiumOrg1.MSP().Configuration() + if err != nil { + panic(err) + } + + newIntermediateCert := &x509.Certificate{ + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + IsCA: true, + } + + msp.IntermediateCerts = append(msp.IntermediateCerts, newIntermediateCert) + + err = sampleConsortiumOrg1.SetMSP(msp) + if err != nil { + panic(err) + } +} + +func ExampleOrdererGroup_RemoveLegacyKafkaBrokers() { + baseConfig := fetchChannelConfig() + c := configtx.New(baseConfig) + ordererConfig, err := c.Orderer().Configuration() + if err != nil { + panic(err) + } + ordererConfig.OrdererType = orderer.ConsensusTypeEtcdRaft + ordererConfig.EtcdRaft = orderer.EtcdRaft{ + Consenters: []orderer.Consenter{ + { + Address: orderer.EtcdAddress{ + Host: "host1", + Port: 7050, + }, + ClientTLSCert: generateCert(), + ServerTLSCert: generateCert(), + }, + }, + } + c.Orderer().RemoveLegacyKafkaBrokers() +} + +// fetchChannelConfig mocks retrieving the config transaction from the most recent configuration block. 
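fetchChannelConfig below hand-builds a config so the examples stay self-contained. Against a running network, the *cb.Config handed to configtx.New is instead unpacked from the channel's latest config block, retrieved out of band (for example with the peer CLI or an SDK); a sketch of that unpacking, using only the common protobuf types already imported here:

func configFromBlock(block *cb.Block) (*cb.Config, error) {
	if block == nil || block.Data == nil || len(block.Data.Data) == 0 {
		return nil, fmt.Errorf("block contains no transactions")
	}

	envelope := &cb.Envelope{}
	if err := proto.Unmarshal(block.Data.Data[0], envelope); err != nil {
		return nil, err
	}

	payload := &cb.Payload{}
	if err := proto.Unmarshal(envelope.Payload, payload); err != nil {
		return nil, err
	}

	configEnvelope := &cb.ConfigEnvelope{}
	if err := proto.Unmarshal(payload.Data, configEnvelope); err != nil {
		return nil, err
	}

	return configEnvelope.Config, nil
}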
+func fetchChannelConfig() *cb.Config { + return &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + configtx.OrdererGroupKey: { + Version: 1, + Groups: map[string]*cb.ConfigGroup{ + "OrdererOrg": { + Groups: map[string]*cb.ConfigGroup{}, + Values: map[string]*cb.ConfigValue{ + configtx.EndpointsKey: { + ModPolicy: configtx.AdminsPolicyKey, + Value: marshalOrPanic(&cb.OrdererAddresses{ + Addresses: []string{"127.0.0.1:7050"}, + }), + }, + configtx.MSPKey: { + ModPolicy: configtx.AdminsPolicyKey, + Value: marshalOrPanic(&mb.MSPConfig{ + Config: []byte{}, + }), + }, + }, + Policies: map[string]*cb.ConfigPolicy{ + configtx.AdminsPolicyKey: { + ModPolicy: configtx.AdminsPolicyKey, + Policy: &cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_MAJORITY, + SubPolicy: configtx.AdminsPolicyKey, + }), + }, + }, + configtx.ReadersPolicyKey: { + ModPolicy: configtx.AdminsPolicyKey, + Policy: &cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_ANY, + SubPolicy: configtx.ReadersPolicyKey, + }), + }, + }, + configtx.WritersPolicyKey: { + ModPolicy: configtx.AdminsPolicyKey, + Policy: &cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_ANY, + SubPolicy: configtx.WritersPolicyKey, + }), + }, + }, + }, + ModPolicy: configtx.AdminsPolicyKey, + }, + }, + Values: map[string]*cb.ConfigValue{ + orderer.ConsensusTypeKey: { + ModPolicy: configtx.AdminsPolicyKey, + Value: marshalOrPanic(&ob.ConsensusType{ + Type: orderer.ConsensusTypeKafka, + }), + }, + orderer.ChannelRestrictionsKey: { + ModPolicy: configtx.AdminsPolicyKey, + Value: marshalOrPanic(&ob.ChannelRestrictions{ + MaxCount: 1, + }), + }, + configtx.CapabilitiesKey: { + ModPolicy: configtx.AdminsPolicyKey, + Value: marshalOrPanic(&cb.Capabilities{ + Capabilities: map[string]*cb.Capability{ + "V1_3": {}, + }, + }), + }, + orderer.KafkaBrokersKey: { + ModPolicy: configtx.AdminsPolicyKey, + Value: marshalOrPanic(&ob.KafkaBrokers{ + Brokers: []string{"kafka0:9092", "kafka1:9092"}, + }), + }, + orderer.BatchTimeoutKey: { + Value: marshalOrPanic(&ob.BatchTimeout{ + Timeout: "15s", + }), + }, + orderer.BatchSizeKey: { + Value: marshalOrPanic(&ob.BatchSize{ + MaxMessageCount: 100, + AbsoluteMaxBytes: 100, + PreferredMaxBytes: 100, + }), + }, + }, + Policies: map[string]*cb.ConfigPolicy{ + configtx.AdminsPolicyKey: { + ModPolicy: configtx.AdminsPolicyKey, + Policy: &cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_MAJORITY, + SubPolicy: configtx.AdminsPolicyKey, + }), + }, + }, + configtx.ReadersPolicyKey: { + ModPolicy: configtx.AdminsPolicyKey, + Policy: &cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_ANY, + SubPolicy: configtx.ReadersPolicyKey, + }), + }, + }, + configtx.WritersPolicyKey: { + ModPolicy: configtx.AdminsPolicyKey, + Policy: &cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_ANY, + SubPolicy: configtx.WritersPolicyKey, + }), + }, + }, + configtx.BlockValidationPolicyKey: { + ModPolicy: configtx.AdminsPolicyKey, + Policy: &cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_ANY, + SubPolicy: configtx.WritersPolicyKey, + }), + }, + }, + }, + }, + configtx.ApplicationGroupKey: { + Groups: map[string]*cb.ConfigGroup{ + "Org1": { + Groups: map[string]*cb.ConfigGroup{}, + 
Values: map[string]*cb.ConfigValue{ + configtx.AnchorPeersKey: { + ModPolicy: configtx.AdminsPolicyKey, + Value: marshalOrPanic(&pb.AnchorPeers{ + AnchorPeers: []*pb.AnchorPeer{ + {Host: "127.0.0.1", Port: 7050}, + }, + }), + }, + configtx.MSPKey: { + ModPolicy: configtx.AdminsPolicyKey, + Value: marshalOrPanic(&mb.MSPConfig{ + Config: []byte{}, + }), + }, + }, + }, + }, + Values: map[string]*cb.ConfigValue{ + configtx.ACLsKey: { + ModPolicy: configtx.AdminsPolicyKey, + Value: marshalOrPanic(&pb.ACLs{ + Acls: map[string]*pb.APIResource{ + "event/block": {PolicyRef: "/Channel/Application/Readers"}, + }, + }), + }, + configtx.CapabilitiesKey: { + ModPolicy: configtx.AdminsPolicyKey, + Value: marshalOrPanic(&cb.Capabilities{ + Capabilities: map[string]*cb.Capability{ + "V1_3": {}, + }, + }), + }, + }, + Policies: map[string]*cb.ConfigPolicy{ + configtx.LifecycleEndorsementPolicyKey: { + ModPolicy: configtx.AdminsPolicyKey, + Policy: &cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_MAJORITY, + SubPolicy: configtx.AdminsPolicyKey, + }), + }, + }, + configtx.AdminsPolicyKey: { + ModPolicy: configtx.AdminsPolicyKey, + Policy: &cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_MAJORITY, + SubPolicy: configtx.AdminsPolicyKey, + }), + }, + }, + configtx.ReadersPolicyKey: { + ModPolicy: configtx.AdminsPolicyKey, + Policy: &cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_ANY, + SubPolicy: configtx.ReadersPolicyKey, + }), + }, + }, + configtx.WritersPolicyKey: { + ModPolicy: configtx.AdminsPolicyKey, + Policy: &cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_ANY, + SubPolicy: configtx.WritersPolicyKey, + }), + }, + }, + }, + }, + }, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{ + configtx.AdminsPolicyKey: { + ModPolicy: configtx.AdminsPolicyKey, + Policy: &cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_MAJORITY, + SubPolicy: configtx.AdminsPolicyKey, + }), + }, + }, + configtx.ReadersPolicyKey: { + ModPolicy: configtx.AdminsPolicyKey, + Policy: &cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_ANY, + SubPolicy: configtx.ReadersPolicyKey, + }), + }, + }, + configtx.WritersPolicyKey: { + ModPolicy: configtx.AdminsPolicyKey, + Policy: &cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_ANY, + SubPolicy: configtx.WritersPolicyKey, + }), + }, + }, + }, + }, + } +} + +// marshalOrPanic is a helper for proto marshal. +func marshalOrPanic(pb proto.Message) []byte { + data, err := proto.Marshal(pb) + if err != nil { + panic(err) + } + + return data +} + +// createSigningIdentity returns a identity that can be used for signing transactions. +// Signing identity can be retrieved from MSP configuration for each peer. +func createSigningIdentity() configtx.SigningIdentity { + privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + panic(fmt.Sprintf("Failed to generate private key: %v", err)) + } + + return configtx.SigningIdentity{ + Certificate: generateCert(), + PrivateKey: privKey, + MSPID: "Org1MSP", + } +} + +// generateCert creates a certificate for the SigningIdentity. 
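generateCert below only fills in a certificate template and never actually issues it, which is all these examples need. Where a parseable, verifiable certificate is required, the template can be self-signed with the crypto packages already imported in this file (self-signing is illustrative; a real identity would be issued by the organization's CA):

func selfSignTemplate() *x509.Certificate {
	template := generateCert()

	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	der, err := x509.CreateCertificate(rand.Reader, template, template, &priv.PublicKey, priv)
	if err != nil {
		panic(err)
	}

	signedCert, err := x509.ParseCertificate(der)
	if err != nil {
		panic(err)
	}

	return signedCert
}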
+func generateCert() *x509.Certificate { + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + log.Fatalf("Failed to generate serial number: %s", err) + } + + return &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: "Wile E. Coyote", + Organization: []string{"Acme Co"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * 24 * time.Hour), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } +} + +func fetchSystemChannelConfig() *cb.Config { + return &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + configtx.ConsortiumsGroupKey: { + Groups: map[string]*cb.ConfigGroup{ + "SampleConsortium": { + Groups: map[string]*cb.ConfigGroup{ + "Org1": { + Groups: map[string]*cb.ConfigGroup{}, + Policies: map[string]*cb.ConfigPolicy{}, + Values: map[string]*cb.ConfigValue{}, + ModPolicy: "Admins", + Version: 0, + }, + "Org2": { + Groups: map[string]*cb.ConfigGroup{}, + Policies: map[string]*cb.ConfigPolicy{}, + Values: map[string]*cb.ConfigValue{}, + ModPolicy: "Admins", + Version: 0, + }, + }, + Values: map[string]*cb.ConfigValue{ + configtx.ChannelCreationPolicyKey: { + ModPolicy: "/Channel/Orderer/Admins", + Value: marshalOrPanic(&cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_ANY, + SubPolicy: configtx.AdminsPolicyKey, + }), + }), + }, + }, + }, + "SampleConsortium2": { + Groups: map[string]*cb.ConfigGroup{}, + Values: map[string]*cb.ConfigValue{ + configtx.ChannelCreationPolicyKey: { + ModPolicy: "/Channel/Orderer/Admins", + Value: marshalOrPanic(&cb.Policy{ + Type: 3, + Value: marshalOrPanic(&cb.ImplicitMetaPolicy{ + Rule: cb.ImplicitMetaPolicy_ANY, + SubPolicy: configtx.AdminsPolicyKey, + }), + }), + }, + }, + }, + }, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{}, + }, + }, + }, + } +} + +// baseMSP creates a basic MSP struct for organization. 
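baseMSP below assembles every MSP field from the dummy PEM constants. When building an MSP from real material, note that SetMSP rejects root certificates that cannot act as CAs (TestSetConsortiumMSPFailure earlier in this patch expects "KeyUsage must be x509.KeyUsageCertSign"), so a caller may want to pre-check the same property:

func checkRootCerts(msp configtx.MSP) error {
	for _, root := range msp.RootCerts {
		if root.KeyUsage&x509.KeyUsageCertSign == 0 {
			return fmt.Errorf("root cert with serial number %v lacks x509.KeyUsageCertSign", root.SerialNumber)
		}
	}

	return nil
}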
+func baseMSP(t *testing.T) configtx.MSP { + gt := NewGomegaWithT(t) + + certBlock, _ := pem.Decode([]byte(dummyCert)) + gt.Expect(certBlock).NotTo(BeNil()) + cert, err := x509.ParseCertificate(certBlock.Bytes) + gt.Expect(err).NotTo(HaveOccurred()) + + crlBlock, _ := pem.Decode([]byte(dummyCRL)) + gt.Expect(crlBlock).NotTo(BeNil()) + crl, err := x509.ParseCRL(crlBlock.Bytes) + gt.Expect(err).NotTo(HaveOccurred()) + + return configtx.MSP{ + Name: "MSPID", + RootCerts: []*x509.Certificate{cert}, + IntermediateCerts: []*x509.Certificate{cert}, + Admins: []*x509.Certificate{cert}, + RevocationList: []*pkix.CertificateList{crl}, + OrganizationalUnitIdentifiers: []membership.OUIdentifier{ + { + Certificate: cert, + OrganizationalUnitIdentifier: "OUID", + }, + }, + CryptoConfig: membership.CryptoConfig{ + SignatureHashFamily: "SHA3", + IdentityIdentifierHashFunction: "SHA256", + }, + TLSRootCerts: []*x509.Certificate{cert}, + TLSIntermediateCerts: []*x509.Certificate{cert}, + NodeOUs: membership.NodeOUs{ + ClientOUIdentifier: membership.OUIdentifier{ + Certificate: cert, + OrganizationalUnitIdentifier: "OUID", + }, + PeerOUIdentifier: membership.OUIdentifier{ + Certificate: cert, + OrganizationalUnitIdentifier: "OUID", + }, + AdminOUIdentifier: membership.OUIdentifier{ + Certificate: cert, + OrganizationalUnitIdentifier: "OUID", + }, + OrdererOUIdentifier: membership.OUIdentifier{ + Certificate: cert, + OrganizationalUnitIdentifier: "OUID", + }, + }, + } +} diff --git a/v2/configtx/internal/policydsl/policyparser.go b/v2/configtx/internal/policydsl/policyparser.go new file mode 100644 index 0000000..a22c09a --- /dev/null +++ b/v2/configtx/internal/policydsl/policyparser.go @@ -0,0 +1,384 @@ +/* +Copyright IBM Corp. 2017 All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package policydsl + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/Knetic/govaluate" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + mb "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "google.golang.org/protobuf/proto" +) + +// Gate values +const ( + GateAnd = "And" + GateOr = "Or" + GateOutOf = "OutOf" +) + +// Role values for principals +const ( + RoleAdmin = "admin" + RoleMember = "member" + RoleClient = "client" + RolePeer = "peer" + RoleOrderer = "orderer" +) + +var ( + regex = regexp.MustCompile( + fmt.Sprintf("^([[:alnum:].-]+)([.])(%s|%s|%s|%s|%s)$", + RoleAdmin, RoleMember, RoleClient, RolePeer, RoleOrderer), + ) + regexErr = regexp.MustCompile("^No parameter '([^']+)' found[.]$") +) + +// SignedBy creates a SignaturePolicy requiring a given signer's signature +func SignedBy(index int32) *cb.SignaturePolicy { + return &cb.SignaturePolicy{ + Type: &cb.SignaturePolicy_SignedBy{ + SignedBy: index, + }, + } +} + +// And is a convenience method which utilizes NOutOf to produce And equivalent behavior +func And(lhs, rhs *cb.SignaturePolicy) *cb.SignaturePolicy { + return NOutOf(2, []*cb.SignaturePolicy{lhs, rhs}) +} + +// Or is a convenience method which utilizes NOutOf to produce Or equivalent behavior +func Or(lhs, rhs *cb.SignaturePolicy) *cb.SignaturePolicy { + return NOutOf(1, []*cb.SignaturePolicy{lhs, rhs}) +} + +// NOutOf creates a policy which requires N out of the slice of policies to evaluate to true +func NOutOf(n int32, policies []*cb.SignaturePolicy) *cb.SignaturePolicy { + return &cb.SignaturePolicy{ + Type: &cb.SignaturePolicy_NOutOf_{ + NOutOf: &cb.SignaturePolicy_NOutOf{ + N: n, + Rules: policies, + }, + }, + } +} + +// a stub function - it 
returns the same string as it's passed. +// This will be evaluated by second/third passes to convert to a proto policy +func outof(args ...interface{}) (interface{}, error) { + toret := "outof(" + + if len(args) < 2 { + return nil, fmt.Errorf("expected at least two arguments to NOutOf. Given %d", len(args)) + } + + arg0 := args[0] + // govaluate treats all numbers as float64 only. But and/or may pass int/string. Allowing int/string for flexibility of caller + if n, ok := arg0.(float64); ok { + toret += strconv.Itoa(int(n)) + } else if n, ok := arg0.(int); ok { + toret += strconv.Itoa(n) + } else if n, ok := arg0.(string); ok { + toret += n + } else { + return nil, fmt.Errorf("unexpected type %s", reflect.TypeOf(arg0)) + } + + for _, arg := range args[1:] { + toret += ", " + + switch t := arg.(type) { + case string: + if regex.MatchString(t) { + toret += "'" + t + "'" + } else { + toret += t + } + default: + return nil, fmt.Errorf("unexpected type %s", reflect.TypeOf(arg)) + } + } + + return toret + ")", nil +} + +func and(args ...interface{}) (interface{}, error) { + args = append([]interface{}{len(args)}, args...) + return outof(args...) +} + +func or(args ...interface{}) (interface{}, error) { + args = append([]interface{}{1}, args...) + return outof(args...) +} + +func firstPass(args ...interface{}) (interface{}, error) { + toret := "outof(ID" + for _, arg := range args { + toret += ", " + + switch t := arg.(type) { + case string: + if regex.MatchString(t) { + toret += "'" + t + "'" + } else { + toret += t + } + case float32: + case float64: + toret += strconv.Itoa(int(t)) + default: + return nil, fmt.Errorf("unexpected type %s", reflect.TypeOf(arg)) + } + } + + return toret + ")", nil +} + +func secondPass(args ...interface{}) (interface{}, error) { + /* general sanity check, we expect at least 3 args */ + if len(args) < 3 { + return nil, fmt.Errorf("at least 3 arguments expected, got %d", len(args)) + } + + /* get the first argument, we expect it to be the context */ + var ctx *context + switch v := args[0].(type) { + case *context: + ctx = v + default: + return nil, fmt.Errorf("unrecognized type, expected the context, got %s", reflect.TypeOf(args[0])) + } + + /* get the second argument, we expect an integer telling us + how many of the remaining we expect to have*/ + var t int + switch arg := args[1].(type) { + case float64: + t = int(arg) + default: + return nil, fmt.Errorf("unrecognized type, expected a number, got %s", reflect.TypeOf(args[1])) + } + + /* get the n in the t out of n */ + n := len(args) - 2 + + /* sanity check - t should be positive, permit equal to n+1, but disallow over n+1 */ + if t < 0 || t > n+1 { + return nil, fmt.Errorf("invalid t-out-of-n predicate, t %d, n %d", t, n) + } + + policies := make([]*cb.SignaturePolicy, 0) + + /* handle the rest of the arguments */ + for _, principal := range args[2:] { + switch t := principal.(type) { + /* if it's a string, we expect it to be formed as + . 
, where MSP_ID is the MSP identifier + and ROLE is either a member, an admin, a client, a peer or an orderer*/ + case string: + /* split the string */ + subm := regex.FindAllStringSubmatch(t, -1) + if subm == nil || len(subm) != 1 || len(subm[0]) != 4 { + return nil, fmt.Errorf("error parsing principal %s", t) + } + + /* get the right role */ + var r mb.MSPRole_MSPRoleType + + switch subm[0][3] { + case RoleMember: + r = mb.MSPRole_MEMBER + case RoleAdmin: + r = mb.MSPRole_ADMIN + case RoleClient: + r = mb.MSPRole_CLIENT + case RolePeer: + r = mb.MSPRole_PEER + case RoleOrderer: + r = mb.MSPRole_ORDERER + default: + return nil, fmt.Errorf("error parsing role %s", t) + } + + /* build the principal we've been told */ + mspRole, err := proto.Marshal(&mb.MSPRole{MspIdentifier: subm[0][1], Role: r}) + if err != nil { + return nil, fmt.Errorf("error marshalling msp role: %s", err) + } + + p := &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: mspRole, + } + ctx.principals = append(ctx.principals, p) + + /* create a SignaturePolicy that requires a signature from + the principal we've just built*/ + dapolicy := SignedBy(int32(ctx.IDNum)) + policies = append(policies, dapolicy) + + /* increment the identity counter. Note that this is + suboptimal as we are not reusing identities. We + can deduplicate them easily and make this puppy + smaller. For now it's fine though */ + // TODO: deduplicate principals + ctx.IDNum++ + + /* if we've already got a policy we're good, just append it */ + case *cb.SignaturePolicy: + policies = append(policies, t) + + default: + return nil, fmt.Errorf("unrecognized type, expected a principal or a policy, got %s", reflect.TypeOf(principal)) + } + } + + return NOutOf(int32(t), policies), nil +} + +type context struct { + IDNum int + principals []*mb.MSPPrincipal +} + +func newContext() *context { + return &context{IDNum: 0, principals: make([]*mb.MSPPrincipal, 0)} +} + +// FromString takes a string representation of the policy, +// parses it and returns a SignaturePolicyEnvelope that +// implements that policy. The supported language is as follows: +// +// GATE(P[, P]) +// +// where: +// - GATE is either "and" or "or" +// - P is either a principal or another nested call to GATE +// +// A principal is defined as: +// +// # ORG.ROLE +// +// where: +// - ORG is a string (representing the MSP identifier) +// - ROLE takes the value of any of the RoleXXX constants representing +// the required role +func FromString(policy string) (*cb.SignaturePolicyEnvelope, error) { + // first we translate the and/or business into outof gates + intermediate, err := govaluate.NewEvaluableExpressionWithFunctions( + policy, map[string]govaluate.ExpressionFunction{ + GateAnd: and, + strings.ToLower(GateAnd): and, + strings.ToUpper(GateAnd): and, + GateOr: or, + strings.ToLower(GateOr): or, + strings.ToUpper(GateOr): or, + GateOutOf: outof, + strings.ToLower(GateOutOf): outof, + strings.ToUpper(GateOutOf): outof, + }, + ) + if err != nil { + return nil, err + } + + intermediateRes, err := intermediate.Evaluate(map[string]interface{}{}) + if err != nil { + // attempt to produce a meaningful error + if regexErr.MatchString(err.Error()) { + sm := regexErr.FindStringSubmatch(err.Error()) + if len(sm) == 2 { + return nil, fmt.Errorf("unrecognized token '%s' in policy string", sm[1]) + } + } + + return nil, err + } + + resStr, ok := intermediateRes.(string) + if !ok { + return nil, fmt.Errorf("invalid policy string '%s'", policy) + } + + // we still need two passes. 
The first pass just adds an extra + // argument ID to each of the outof calls. This is + // required because govaluate has no means of giving context + // to user-implemented functions other than via arguments. + // We need this argument because we need a global place where + // we put the identities that the policy requires + exp, err := govaluate.NewEvaluableExpressionWithFunctions( + resStr, + map[string]govaluate.ExpressionFunction{"outof": firstPass}, + ) + if err != nil { + return nil, err + } + + res, err := exp.Evaluate(map[string]interface{}{}) + if err != nil { + // attempt to produce a meaningful error + if regexErr.MatchString(err.Error()) { + sm := regexErr.FindStringSubmatch(err.Error()) + if len(sm) == 2 { + return nil, fmt.Errorf("unrecognized token '%s' in policy string", sm[1]) + } + } + + return nil, err + } + + resStr, ok = res.(string) + if !ok { + return nil, fmt.Errorf("invalid policy string '%s'", policy) + } + + ctx := newContext() + parameters := make(map[string]interface{}, 1) + parameters["ID"] = ctx + + exp, err = govaluate.NewEvaluableExpressionWithFunctions( + resStr, + map[string]govaluate.ExpressionFunction{"outof": secondPass}, + ) + if err != nil { + return nil, err + } + + res, err = exp.Evaluate(parameters) + if err != nil { + // attempt to produce a meaningful error + if regexErr.MatchString(err.Error()) { + sm := regexErr.FindStringSubmatch(err.Error()) + if len(sm) == 2 { + return nil, fmt.Errorf("unrecognized token '%s' in policy string", sm[1]) + } + } + + return nil, err + } + + rule, ok := res.(*cb.SignaturePolicy) + if !ok { + return nil, fmt.Errorf("invalid policy string '%s'", policy) + } + + p := &cb.SignaturePolicyEnvelope{ + Identities: ctx.principals, + Version: 0, + Rule: rule, + } + + return p, nil +} diff --git a/v2/configtx/internal/policydsl/policyparser_test.go b/v2/configtx/internal/policydsl/policyparser_test.go new file mode 100644 index 0000000..a788b30 --- /dev/null +++ b/v2/configtx/internal/policydsl/policyparser_test.go @@ -0,0 +1,487 @@ +/* +Copyright IBM Corp. 2017 All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package policydsl_test + +import ( + "testing" + + "github.com/hyperledger/fabric-config/v2/configtx/internal/policydsl" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + mb "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "google.golang.org/protobuf/proto" + + . 
"github.com/onsi/gomega" +) + +func TestOutOf1(t *testing.T) { + gt := NewGomegaWithT(t) + + p1, err := policydsl.FromString("OutOf(1, 'A.member', 'B.member')") + gt.Expect(err).NotTo(HaveOccurred()) + + principals := make([]*mb.MSPPrincipal, 0) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "A"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "B"}), + }) + + p2 := &cb.SignaturePolicyEnvelope{ + Version: 0, + Rule: policydsl.NOutOf(1, []*cb.SignaturePolicy{policydsl.SignedBy(0), policydsl.SignedBy(1)}), + Identities: principals, + } + + gt.Expect(p1).To(Equal(p2)) +} + +func TestOutOf2(t *testing.T) { + gt := NewGomegaWithT(t) + + p1, err := policydsl.FromString("OutOf(2, 'A.member', 'B.member')") + gt.Expect(err).NotTo(HaveOccurred()) + + principals := make([]*mb.MSPPrincipal, 0) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "A"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "B"}), + }) + + p2 := &cb.SignaturePolicyEnvelope{ + Version: 0, + Rule: policydsl.NOutOf(2, []*cb.SignaturePolicy{policydsl.SignedBy(0), policydsl.SignedBy(1)}), + Identities: principals, + } + + gt.Expect(p1).To(Equal(p2)) +} + +func TestAnd(t *testing.T) { + gt := NewGomegaWithT(t) + + p1, err := policydsl.FromString("AND('A.member', 'B.member')") + gt.Expect(err).NotTo(HaveOccurred()) + + principals := make([]*mb.MSPPrincipal, 0) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "A"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "B"}), + }) + + p2 := &cb.SignaturePolicyEnvelope{ + Version: 0, + Rule: policydsl.And(policydsl.SignedBy(0), policydsl.SignedBy(1)), + Identities: principals, + } + + gt.Expect(p1).To(Equal(p2)) +} + +func TestAndClientPeerOrderer(t *testing.T) { + gt := NewGomegaWithT(t) + + p1, err := policydsl.FromString("AND('A.client', 'B.peer')") + gt.Expect(err).NotTo(HaveOccurred()) + + principals := make([]*mb.MSPPrincipal, 0) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_CLIENT, MspIdentifier: "A"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_PEER, MspIdentifier: "B"}), + }) + + p2 := &cb.SignaturePolicyEnvelope{ + Version: 0, + Rule: policydsl.And(policydsl.SignedBy(0), policydsl.SignedBy(1)), + Identities: principals, + } + + gt.Expect(p1).To(Equal(p2)) +} + +func TestOr(t *testing.T) { + gt := NewGomegaWithT(t) + + p1, err := policydsl.FromString("OR('A.member', 'B.member')") + gt.Expect(err).NotTo(HaveOccurred()) + + principals := make([]*mb.MSPPrincipal, 0) + + principals = append(principals, 
&mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "A"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "B"}), + }) + + p2 := &cb.SignaturePolicyEnvelope{ + Version: 0, + Rule: policydsl.Or(policydsl.SignedBy(0), policydsl.SignedBy(1)), + Identities: principals, + } + + gt.Expect(p1).To(Equal(p2)) +} + +func TestComplex1(t *testing.T) { + gt := NewGomegaWithT(t) + + p1, err := policydsl.FromString("OR('A.member', AND('B.member', 'C.member'))") + gt.Expect(err).NotTo(HaveOccurred()) + + principals := make([]*mb.MSPPrincipal, 0) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "B"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "C"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "A"}), + }) + + p2 := &cb.SignaturePolicyEnvelope{ + Version: 0, + Rule: policydsl.Or(policydsl.SignedBy(2), policydsl.And(policydsl.SignedBy(0), policydsl.SignedBy(1))), + Identities: principals, + } + + gt.Expect(p1).To(Equal(p2)) +} + +func TestComplex2(t *testing.T) { + gt := NewGomegaWithT(t) + + p1, err := policydsl.FromString("OR(AND('A.member', 'B.member'), OR('C.admin', 'D.member'))") + gt.Expect(err).NotTo(HaveOccurred()) + + principals := make([]*mb.MSPPrincipal, 0) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "A"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "B"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_ADMIN, MspIdentifier: "C"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "D"}), + }) + + p2 := &cb.SignaturePolicyEnvelope{ + Version: 0, + Rule: policydsl.Or(policydsl.And(policydsl.SignedBy(0), policydsl.SignedBy(1)), policydsl.Or(policydsl.SignedBy(2), policydsl.SignedBy(3))), + Identities: principals, + } + + gt.Expect(p1).To(Equal(p2)) +} + +func TestMSPIDWIthSpecialChars(t *testing.T) { + gt := NewGomegaWithT(t) + + p1, err := policydsl.FromString("OR('MSP.member', 'MSP.WITH.DOTS.member', 'MSP-WITH-DASHES.member')") + gt.Expect(err).NotTo(HaveOccurred()) + + principals := make([]*mb.MSPPrincipal, 0) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "MSP"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, 
MspIdentifier: "MSP.WITH.DOTS"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "MSP-WITH-DASHES"}), + }) + + p2 := &cb.SignaturePolicyEnvelope{ + Version: 0, + Rule: policydsl.NOutOf(1, []*cb.SignaturePolicy{policydsl.SignedBy(0), policydsl.SignedBy(1), policydsl.SignedBy(2)}), + Identities: principals, + } + + gt.Expect(p1).To(Equal(p2)) +} + +func TestBadStringsNoPanic(t *testing.T) { + gt := NewGomegaWithT(t) + + _, err := policydsl.FromString("OR('A.member', Bmember)") // error after 1st Evaluate() + gt.Expect(err).To(MatchError("unrecognized token 'Bmember' in policy string")) + + _, err = policydsl.FromString("OR('A.member', 'Bmember')") // error after 2nd Evalute() + gt.Expect(err).To(MatchError("unrecognized token 'Bmember' in policy string")) + + _, err = policydsl.FromString(`OR('A.member', '\'Bmember\'')`) // error after 3rd Evalute() + gt.Expect(err).To(MatchError("unrecognized token 'Bmember' in policy string")) +} + +func TestNodeOUs(t *testing.T) { + gt := NewGomegaWithT(t) + + p1, err := policydsl.FromString("OR('A.peer', 'B.admin', 'C.orderer', 'D.client')") + gt.Expect(err).NotTo(HaveOccurred()) + + principals := make([]*mb.MSPPrincipal, 0) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_PEER, MspIdentifier: "A"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_ADMIN, MspIdentifier: "B"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_ORDERER, MspIdentifier: "C"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_CLIENT, MspIdentifier: "D"}), + }) + + p2 := &cb.SignaturePolicyEnvelope{ + Version: 0, + Rule: policydsl.NOutOf(1, []*cb.SignaturePolicy{policydsl.SignedBy(0), policydsl.SignedBy(1), policydsl.SignedBy(2), policydsl.SignedBy(3)}), + Identities: principals, + } + + gt.Expect(p1).To(Equal(p2)) +} + +func TestOutOfNumIsString(t *testing.T) { + gt := NewGomegaWithT(t) + + p1, err := policydsl.FromString("OutOf('1', 'A.member', 'B.member')") + gt.Expect(err).NotTo(HaveOccurred()) + + principals := make([]*mb.MSPPrincipal, 0) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "A"}), + }) + + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "B"}), + }) + + p2 := &cb.SignaturePolicyEnvelope{ + Version: 0, + Rule: policydsl.NOutOf(1, []*cb.SignaturePolicy{policydsl.SignedBy(0), policydsl.SignedBy(1)}), + Identities: principals, + } + + gt.Expect(p1).To(Equal(p2)) +} + +func TestOutOfErrorCase(t *testing.T) { + tests := []struct { + testName string + policyString string + expectedErr string + }{ + { + testName: "1st NewEvaluableExpressionWithFunctions() returns an error", + policyString: "", + expectedErr: "Unexpected end of expression", + }, + { + testName: "outof() if len(args)<2", 
+ policyString: "OutOf(1)", + expectedErr: "expected at least two arguments to NOutOf. Given 1", + }, + { + testName: "outof() }else{. 1st arg is non of float, int, string", + policyString: "OutOf(true, 'A.member')", + expectedErr: "unexpected type bool", + }, + { + testName: "oufof() switch default. 2nd arg is not string.", + policyString: "OutOf(1, 2)", + expectedErr: "unexpected type float64", + }, + { + testName: "firstPass() switch default", + policyString: "OutOf(1, 'true')", + expectedErr: "unexpected type bool", + }, + { + testName: "secondPass() switch args[1].(type) default", + policyString: `OutOf('\'\\\'A\\\'\'', 'B.member')`, + expectedErr: "unrecognized type, expected a number, got string", + }, + { + testName: "secondPass() switch args[1].(type) default", + policyString: `OutOf(1, '\'1\'')`, + expectedErr: "unrecognized type, expected a principal or a policy, got float64", + }, + { + testName: "2nd NewEvaluateExpressionWithFunction() returns an error", + policyString: `''`, + expectedErr: "Unexpected end of expression", + }, + { + testName: "3rd NewEvaluateExpressionWithFunction() returns an error", + policyString: `'\'\''`, + expectedErr: "Unexpected end of expression", + }, + } + + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + gt := NewGomegaWithT(t) + + p, err := policydsl.FromString(tt.policyString) + gt.Expect(p).To(BeNil()) + gt.Expect(err).To(MatchError(tt.expectedErr)) + }) + } +} + +func TestBadStringBeforeFAB11404_ThisCanDeleteAfterFAB11404HasMerged(t *testing.T) { + tests := []struct { + testName string + policyString string + expectedErr string + }{ + { + testName: "integer in string", + policyString: "1", + expectedErr: `invalid policy string '1'`, + }, + { + testName: "quoted integer in string", + policyString: "'1'", + expectedErr: `invalid policy string ''1''`, + }, + { + testName: "nested quoted integer in string", + policyString: `'\'1\''`, + expectedErr: `invalid policy string ''\'1\'''`, + }, + } + + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + gt := NewGomegaWithT(t) + + p, err := policydsl.FromString(tt.policyString) + gt.Expect(p).To(BeNil()) + gt.Expect(err).To(MatchError(tt.expectedErr)) + }) + } +} + +func TestSecondPassBoundaryCheck(t *testing.T) { + gt := NewGomegaWithT(t) + + // Check lower boundary + // Prohibit t<0 + p0, err0 := policydsl.FromString("OutOf(-1, 'A.member', 'B.member')") + gt.Expect(p0).To(BeNil()) + gt.Expect(err0).To(MatchError("invalid t-out-of-n predicate, t -1, n 2")) + + // Permit t==0 : always satisfied policy + // There is no clear usecase of t=0, but somebody may already use it, so we don't treat as an error. 
+ p1, err1 := policydsl.FromString("OutOf(0, 'A.member', 'B.member')") + gt.Expect(err1).NotTo(HaveOccurred()) + principals := make([]*mb.MSPPrincipal, 0) + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "A"}), + }) + principals = append(principals, &mb.MSPPrincipal{ + PrincipalClassification: mb.MSPPrincipal_ROLE, + Principal: protoMarshalOrPanic(&mb.MSPRole{Role: mb.MSPRole_MEMBER, MspIdentifier: "B"}), + }) + expected1 := &cb.SignaturePolicyEnvelope{ + Version: 0, + Rule: policydsl.NOutOf(0, []*cb.SignaturePolicy{policydsl.SignedBy(0), policydsl.SignedBy(1)}), + Identities: principals, + } + gt.Expect(p1).To(Equal(expected1)) + + // Check upper boundary + // Permit t==n+1 : never satisfied policy + // Usecase: To create immutable ledger key + p2, err2 := policydsl.FromString("OutOf(3, 'A.member', 'B.member')") + gt.Expect(err2).NotTo(HaveOccurred()) + expected2 := &cb.SignaturePolicyEnvelope{ + Version: 0, + Rule: policydsl.NOutOf(3, []*cb.SignaturePolicy{policydsl.SignedBy(0), policydsl.SignedBy(1)}), + Identities: principals, + } + gt.Expect(p2).To(Equal(expected2)) + + // Prohibit t>n + 1 + p3, err3 := policydsl.FromString("OutOf(4, 'A.member', 'B.member')") + gt.Expect(p3).To(BeNil()) + gt.Expect(err3).To(MatchError("invalid t-out-of-n predicate, t 4, n 2")) +} + +// protoMarshalOrPanic serializes a protobuf message and panics if this +// operation fails +func protoMarshalOrPanic(pb proto.Message) []byte { + data, err := proto.Marshal(pb) + if err != nil { + panic(err) + } + + return data +} diff --git a/v2/configtx/membership/membership.go b/v2/configtx/membership/membership.go new file mode 100644 index 0000000..3883e6d --- /dev/null +++ b/v2/configtx/membership/membership.go @@ -0,0 +1,86 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package membership + +import ( + "crypto" + "crypto/x509" +) + +// KeyInfo represents a (secret) key that is either already stored +// in the bccsp/keystore or key material to be imported to the +// bccsp key-store. In later versions it may contain also a +// keystore identifier. +type KeyInfo struct { + // Identifier of the key inside the default keystore; this for + // the case of Software BCCSP as well as the HSM BCCSP would be + // the SKI of the key. + KeyIdentifier string + // KeyMaterial (optional) for the key to be imported; this + // must be a supported PKCS#8 private key type of either + // *rsa.PrivateKey, *ecdsa.PrivateKey, or ed25519.PrivateKey. + KeyMaterial crypto.PrivateKey +} + +// SigningIdentityInfo represents the configuration information +// related to the signing identity the peer is to use for generating +// endorsements. +type SigningIdentityInfo struct { + // PublicSigner carries the public information of the signing + // identity. For an X.509 provider this would be represented by + // an X.509 certificate. + PublicSigner *x509.Certificate + // PrivateSigner denotes a reference to the private key of the + // peer's signing identity. + PrivateSigner KeyInfo +} + +// CryptoConfig contains configuration parameters +// for the cryptographic algorithms used by the MSP +// this configuration refers to. +type CryptoConfig struct { + // SignatureHashFamily is a string representing the hash family to be used + // during sign and verify operations. + // Allowed values are "SHA2" and "SHA3". 
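+ // For example, one valid combination (the one used by this module's test
+ // fixtures) pairs a SignatureHashFamily of "SHA3" with an
+ // IdentityIdentifierHashFunction of "SHA256".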
+ SignatureHashFamily string + // IdentityIdentifierHashFunction is a string representing the hash function + // to be used during the computation of the identity identifier of an MSP identity. + // Allowed values are "SHA256", "SHA384" and "SHA3_256", "SHA3_384". + IdentityIdentifierHashFunction string +} + +// OUIdentifier represents an organizational unit and +// its related chain of trust identifier. +type OUIdentifier struct { + // Certificate represents the second certificate in a certification chain. + // (Notice that the first certificate in a certification chain is supposed + // to be the certificate of an identity). + // It must correspond to the certificate of root or intermediate CA + // recognized by the MSP this message belongs to. + // Starting from this certificate, a certification chain is computed + // and bound to the OrganizationUnitIdentifier specified. + Certificate *x509.Certificate + // OrganizationUnitIdentifier defines the organizational unit under the + // MSP identified with MSPIdentifier. + OrganizationalUnitIdentifier string +} + +// NodeOUs contains configuration to tell apart clients from peers from orderers +// based on OUs. If NodeOUs recognition is enabled then an msp identity +// that does not contain any of the specified OU will be considered invalid. +type NodeOUs struct { + // If true then an msp identity that does not contain any of the specified OU will be considered invalid. + Enable bool + // OU Identifier of the clients. + ClientOUIdentifier OUIdentifier + // OU Identifier of the peers. + PeerOUIdentifier OUIdentifier + // OU Identifier of the admins. + AdminOUIdentifier OUIdentifier + // OU Identifier of the orderers. + OrdererOUIdentifier OUIdentifier +} diff --git a/v2/configtx/msp.go b/v2/configtx/msp.go new file mode 100644 index 0000000..6401c82 --- /dev/null +++ b/v2/configtx/msp.go @@ -0,0 +1,947 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "crypto" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/pem" + "fmt" + "reflect" + "time" + + "github.com/hyperledger/fabric-config/v2/configtx/membership" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + mb "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "google.golang.org/protobuf/proto" +) + +// MSP is the configuration information for a Fabric MSP. +// Here we assume a default certificate validation policy, where +// any certificate signed by any of the listed rootCA certs would +// be considered as valid under this MSP. +// This MSP may or may not come with a signing identity. If it does, +// it can also issue signing identities. If it does not, it can only +// be used to validate and verify certificates. +type MSP struct { + // Name holds the identifier of the MSP; MSP identifier + // is chosen by the application that governs this MSP. + // For example, and assuming the default implementation of MSP, + // that is X.509-based and considers a single Issuer, + // this can refer to the Subject OU field or the Issuer OU field. + Name string + // List of root certificates trusted by this MSP + // they are used upon certificate validation (see + // comment for IntermediateCerts below). 
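+ // The Add*/Remove* helpers on OrganizationMSP expect these to be CA
+ // certificates carrying the x509.KeyUsageCertSign key usage; see
+ // validateCACerts below.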
+ RootCerts []*x509.Certificate + // List of intermediate certificates trusted by this MSP; + // they are used upon certificate validation as follows: + // validation attempts to build a path from the certificate + // to be validated (which is at one end of the path) and + // one of the certs in the RootCerts field (which is at + // the other end of the path). If the path is longer than + // 2, certificates in the middle are searched within the + // IntermediateCerts pool. + IntermediateCerts []*x509.Certificate + // Identity denoting the administrator of this MSP. + Admins []*x509.Certificate + // Identity revocation list. + RevocationList []*pkix.CertificateList + // OrganizationalUnitIdentifiers holds one or more + // fabric organizational unit identifiers that belong to + // this MSP configuration. + OrganizationalUnitIdentifiers []membership.OUIdentifier + // CryptoConfig contains the configuration parameters + // for the cryptographic algorithms used by this MSP. + CryptoConfig membership.CryptoConfig + // List of TLS root certificates trusted by this MSP. + // They are returned by GetTLSRootCerts. + TLSRootCerts []*x509.Certificate + // List of TLS intermediate certificates trusted by this MSP; + // They are returned by GetTLSIntermediateCerts. + TLSIntermediateCerts []*x509.Certificate + // Contains the configuration to distinguish clients + // from peers from orderers based on the OUs. + NodeOUs membership.NodeOUs +} + +// YEAR is a time duration for a standard 365 day year. +const YEAR = 365 * 24 * time.Hour + +// OrganizationMSP encapsulates the configuration functions used to modify an organization MSP. +type OrganizationMSP struct { + configGroup *cb.ConfigGroup +} + +// Configuration returns the MSP value for a organization in the updated config. +func (m *OrganizationMSP) Configuration() (MSP, error) { + return getMSPConfig(m.configGroup) +} + +// AddAdminCert adds an administator identity to the organization MSP. +func (m *OrganizationMSP) AddAdminCert(cert *x509.Certificate) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + for _, c := range msp.Admins { + if c.Equal(cert) { + return nil + } + } + + msp.Admins = append(msp.Admins, cert) + + return msp.setConfig(m.configGroup) +} + +// RemoveAdminCert removes an administator identity from the organization MSP. +func (m *OrganizationMSP) RemoveAdminCert(cert *x509.Certificate) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + certs := msp.Admins[:] + for i, c := range msp.Admins { + if c.Equal(cert) { + certs = append(certs[:i], certs[i+1:]...) + break + } + } + + msp.Admins = certs + + return msp.setConfig(m.configGroup) +} + +// AddRootCert adds a root certificate trusted by the organization MSP. +func (m *OrganizationMSP) AddRootCert(cert *x509.Certificate) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + for _, c := range msp.RootCerts { + if c.Equal(cert) { + return nil + } + } + + msp.RootCerts = append(msp.RootCerts, cert) + + err = msp.validateCACerts() + if err != nil { + return err + } + + return msp.setConfig(m.configGroup) +} + +// RemoveRootCert removes a trusted root certificate from the organization MSP. +func (m *OrganizationMSP) RemoveRootCert(cert *x509.Certificate) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + certs := msp.RootCerts[:] + for i, c := range msp.RootCerts { + if c.Equal(cert) { + certs = append(certs[:i], certs[i+1:]...) 
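+ // remove only the first matching certificate; AddRootCert skips
+ // duplicates, so more than one match is not expected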
+ break + } + } + + msp.RootCerts = certs + + err = msp.validateCACerts() + if err != nil { + return err + } + + return msp.setConfig(m.configGroup) +} + +// AddIntermediateCert adds an intermediate certificate trusted by the organization MSP. +func (m *OrganizationMSP) AddIntermediateCert(cert *x509.Certificate) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + for _, c := range msp.IntermediateCerts { + if c.Equal(cert) { + return nil + } + } + + msp.IntermediateCerts = append(msp.IntermediateCerts, cert) + + err = msp.validateCACerts() + if err != nil { + return err + } + + return msp.setConfig(m.configGroup) +} + +// RemoveIntermediateCert removes a trusted intermediate certificate from the organization MSP. +func (m *OrganizationMSP) RemoveIntermediateCert(cert *x509.Certificate) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + certs := msp.IntermediateCerts[:] + for i, c := range msp.IntermediateCerts { + if c.Equal(cert) { + certs = append(certs[:i], certs[i+1:]...) + break + } + } + + msp.IntermediateCerts = certs + + err = msp.validateCACerts() + if err != nil { + return err + } + + return msp.setConfig(m.configGroup) +} + +// AddOUIdentifier adds a custom organizational unit identifier to the organization MSP. +func (m *OrganizationMSP) AddOUIdentifier(ou membership.OUIdentifier) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + for _, o := range msp.OrganizationalUnitIdentifiers { + if reflect.DeepEqual(o, ou) { + return nil + } + } + + msp.OrganizationalUnitIdentifiers = append(msp.OrganizationalUnitIdentifiers, ou) + + return msp.setConfig(m.configGroup) +} + +// RemoveOUIdentifier removes an existing organizational unit identifier from the organization MSP. +func (m *OrganizationMSP) RemoveOUIdentifier(ou membership.OUIdentifier) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + ous := msp.OrganizationalUnitIdentifiers[:] + for i, o := range msp.OrganizationalUnitIdentifiers { + if reflect.DeepEqual(o, ou) { + ous = append(ous[:i], ous[i+1:]...) + break + } + } + + msp.OrganizationalUnitIdentifiers = ous + + return msp.setConfig(m.configGroup) +} + +// SetCryptoConfig sets the configuration for the cryptographic algorithms for the organization MSP. +func (m *OrganizationMSP) SetCryptoConfig(cryptoConfig membership.CryptoConfig) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + msp.CryptoConfig = cryptoConfig + + return msp.setConfig(m.configGroup) +} + +// AddTLSRootCert adds a TLS root certificate trusted by the organization MSP. +func (m *OrganizationMSP) AddTLSRootCert(cert *x509.Certificate) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + for _, c := range msp.TLSRootCerts { + if c.Equal(cert) { + return nil + } + } + + msp.TLSRootCerts = append(msp.TLSRootCerts, cert) + + err = msp.validateCACerts() + if err != nil { + return err + } + + return msp.setConfig(m.configGroup) +} + +// RemoveTLSRootCert removes a trusted TLS root certificate from the organization MSP. +func (m *OrganizationMSP) RemoveTLSRootCert(cert *x509.Certificate) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + certs := msp.TLSRootCerts[:] + for i, c := range msp.TLSRootCerts { + if c.Equal(cert) { + certs = append(certs[:i], certs[i+1:]...) 
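+ // as in RemoveRootCert above, only the first matching TLS root cert is dropped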
+ break + } + } + + msp.TLSRootCerts = certs + + err = msp.validateCACerts() + if err != nil { + return err + } + + return msp.setConfig(m.configGroup) +} + +// AddTLSIntermediateCert adds a TLS intermediate cert trusted by the organization MSP. +func (m *OrganizationMSP) AddTLSIntermediateCert(cert *x509.Certificate) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + for _, c := range msp.TLSIntermediateCerts { + if c.Equal(cert) { + return nil + } + } + + msp.TLSIntermediateCerts = append(msp.TLSIntermediateCerts, cert) + + err = msp.validateCACerts() + if err != nil { + return err + } + + return msp.setConfig(m.configGroup) +} + +// RemoveTLSIntermediateCert removes a trusted TLS intermediate cert from the organization MSP. +func (m *OrganizationMSP) RemoveTLSIntermediateCert(cert *x509.Certificate) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + certs := msp.TLSIntermediateCerts[:] + for i, c := range msp.TLSIntermediateCerts { + if c.Equal(cert) { + certs = append(certs[:i], certs[i+1:]...) + break + } + } + + msp.TLSIntermediateCerts = certs + + err = msp.validateCACerts() + if err != nil { + return err + } + + return msp.setConfig(m.configGroup) +} + +// SetClientOUIdentifier sets the NodeOUs client ou identifier for the organization MSP. +func (m *OrganizationMSP) SetClientOUIdentifier(clientOU membership.OUIdentifier) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + msp.NodeOUs.ClientOUIdentifier = clientOU + + return msp.setConfig(m.configGroup) +} + +// SetPeerOUIdentifier sets the NodeOUs peer ou identifier for the organization MSP. +func (m *OrganizationMSP) SetPeerOUIdentifier(peerOU membership.OUIdentifier) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + msp.NodeOUs.PeerOUIdentifier = peerOU + + return msp.setConfig(m.configGroup) +} + +// SetAdminOUIdentifier sets the NodeOUs admin ou identifier for the organization MSP. +func (m *OrganizationMSP) SetAdminOUIdentifier(adminOU membership.OUIdentifier) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + msp.NodeOUs.AdminOUIdentifier = adminOU + + return msp.setConfig(m.configGroup) +} + +// SetOrdererOUIdentifier sets the NodeOUs orderer ou identifier for the organization MSP. +func (m *OrganizationMSP) SetOrdererOUIdentifier(ordererOU membership.OUIdentifier) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + msp.NodeOUs.OrdererOUIdentifier = ordererOU + + return msp.setConfig(m.configGroup) +} + +// SetEnableNodeOUs sets the NodeOUs recognition, if NodeOUs recognition is enabled then an msp identity +// that does not contain exactly one of the fabric Node OU Identifiers will be considered invalid. +func (m *OrganizationMSP) SetEnableNodeOUs(isEnabled bool) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + msp.NodeOUs.Enable = isEnabled + + return msp.setConfig(m.configGroup) +} + +// AddCRL adds a CRL to the identity revocation list for the organization MSP. 
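+//
+// A minimal usage sketch, assuming c is a ConfigTx obtained from New(...);
+// the organization name and the crl variable are illustrative only:
+//
+//	orgMSP := c.Application().Organization("Org1").MSP()
+//	if err := orgMSP.AddCRL(crl); err != nil {
+//		// handle error
+//	}
+//
+// To build and register a CRL in one step, see AddCRLFromSigningIdentity below.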
+func (m *OrganizationMSP) AddCRL(crl *pkix.CertificateList) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + msp.RevocationList = append(msp.RevocationList, crl) + + return msp.setConfig(m.configGroup) +} + +// AddCRLFromSigningIdentity creates a CRL from the provided signing identity and associated certs and then adds the CRL to +// the identity revocation list for the organization MSP. +func (m *OrganizationMSP) AddCRLFromSigningIdentity(signingIdentity *SigningIdentity, certs ...*x509.Certificate) error { + msp, err := getMSPConfig(m.configGroup) + if err != nil { + return err + } + + crl, err := msp.CreateMSPCRL(signingIdentity, certs...) + if err != nil { + return err + } + msp.RevocationList = append(msp.RevocationList, crl) + + return msp.setConfig(m.configGroup) +} + +// CreateMSPCRL creates a CRL that revokes the provided certificates +// for the specified organization's msp signed by the provided SigningIdentity. +func (m *MSP) CreateMSPCRL(signingIdentity *SigningIdentity, certs ...*x509.Certificate) (*pkix.CertificateList, error) { + return m.newMSPCRL(signingIdentity, certs...) +} + +// newMSPCRL creates a CRL that revokes the provided certificates for the specified org +// signed by the provided SigningIdentity. If any of the provided certs were +// not signed by any of the root/intermediate CA cets in the MSP configuration, +// it will return an error. +func (m *MSP) newMSPCRL(signingIdentity *SigningIdentity, certs ...*x509.Certificate) (*pkix.CertificateList, error) { + if err := m.validateCertificates(signingIdentity.Certificate, certs...); err != nil { + return nil, err + } + + revokeTime := time.Now().UTC() + + revokedCertificates := make([]pkix.RevokedCertificate, len(certs)) + for i, cert := range certs { + revokedCertificates[i] = pkix.RevokedCertificate{ + SerialNumber: cert.SerialNumber, + RevocationTime: revokeTime, + } + } + + crlBytes, err := signingIdentity.Certificate.CreateCRL(rand.Reader, signingIdentity.PrivateKey, revokedCertificates, revokeTime, revokeTime.Add(YEAR)) + if err != nil { + return nil, err + } + + crl, err := x509.ParseCRL(crlBytes) + if err != nil { + return nil, err + } + + return crl, nil +} + +// validateCertificates first validates that the signing certificate is either +// a root or intermediate CA certificate for the specified application org. It +// then validates that the certificates to add to the CRL were signed by that +// signing certificate. +func (m *MSP) validateCertificates(signingCert *x509.Certificate, certs ...*x509.Certificate) error { + err := m.isCACert(signingCert) + if err != nil { + return err + } + for _, cert := range certs { + if err := cert.CheckSignatureFrom(signingCert); err != nil { + return fmt.Errorf("certificate not issued by this MSP. 
serial number: %d", cert.SerialNumber) + } + } + + return nil +} + +func (m *MSP) isCACert(signingCert *x509.Certificate) error { + for _, rootCert := range m.RootCerts { + if signingCert.Equal(rootCert) { + return nil + } + } + + for _, intermediateCert := range m.IntermediateCerts { + if signingCert.Equal(intermediateCert) { + return nil + } + } + return fmt.Errorf("signing cert is not a root/intermediate cert for this MSP: %s", m.Name) +} + +func (m *MSP) setConfig(configGroup *cb.ConfigGroup) error { + mspConfig, err := newMSPConfig(*m) + if err != nil { + return fmt.Errorf("new msp config: %v", err) + } + + err = setValue(configGroup, mspValue(mspConfig), AdminsPolicyKey) + if err != nil { + return err + } + + return nil +} + +// getMSPConfig parses the MSP value in a config group returns +// the configuration as an MSP type. +func getMSPConfig(configGroup *cb.ConfigGroup) (MSP, error) { + mspValueProto := &mb.MSPConfig{} + + err := unmarshalConfigValueAtKey(configGroup, MSPKey, mspValueProto) + if err != nil { + return MSP{}, err + } + + fabricMSPConfig := &mb.FabricMSPConfig{} + + err = proto.Unmarshal(mspValueProto.Config, fabricMSPConfig) + if err != nil { + return MSP{}, fmt.Errorf("unmarshaling fabric msp config: %v", err) + } + + // ROOT CERTS + rootCerts, err := parseCertificateListFromBytes(fabricMSPConfig.RootCerts) + if err != nil { + return MSP{}, fmt.Errorf("parsing root certs: %v", err) + } + + // INTERMEDIATE CERTS + intermediateCerts, err := parseCertificateListFromBytes(fabricMSPConfig.IntermediateCerts) + if err != nil { + return MSP{}, fmt.Errorf("parsing intermediate certs: %v", err) + } + + // ADMIN CERTS + adminCerts, err := parseCertificateListFromBytes(fabricMSPConfig.Admins) + if err != nil { + return MSP{}, fmt.Errorf("parsing admin certs: %v", err) + } + + // REVOCATION LIST + revocationList, err := parseCRL(fabricMSPConfig.RevocationList) + if err != nil { + return MSP{}, err + } + + // OU IDENTIFIERS + ouIdentifiers, err := parseOUIdentifiers(fabricMSPConfig.OrganizationalUnitIdentifiers) + if err != nil { + return MSP{}, fmt.Errorf("parsing ou identifiers: %v", err) + } + + // TLS ROOT CERTS + tlsRootCerts, err := parseCertificateListFromBytes(fabricMSPConfig.TlsRootCerts) + if err != nil { + return MSP{}, fmt.Errorf("parsing tls root certs: %v", err) + } + + // TLS INTERMEDIATE CERTS + tlsIntermediateCerts, err := parseCertificateListFromBytes(fabricMSPConfig.TlsIntermediateCerts) + if err != nil { + return MSP{}, fmt.Errorf("parsing tls intermediate certs: %v", err) + } + + // NODE OUS + nodeOUs := membership.NodeOUs{} + if fabricMSPConfig.FabricNodeOus != nil { + clientOUIdentifierCert, err := parseCertificateFromBytes(fabricMSPConfig.FabricNodeOus.ClientOuIdentifier.Certificate) + if err != nil { + return MSP{}, fmt.Errorf("parsing client ou identifier cert: %v", err) + } + + peerOUIdentifierCert, err := parseCertificateFromBytes(fabricMSPConfig.FabricNodeOus.PeerOuIdentifier.Certificate) + if err != nil { + return MSP{}, fmt.Errorf("parsing peer ou identifier cert: %v", err) + } + + adminOUIdentifierCert, err := parseCertificateFromBytes(fabricMSPConfig.FabricNodeOus.AdminOuIdentifier.Certificate) + if err != nil { + return MSP{}, fmt.Errorf("parsing admin ou identifier cert: %v", err) + } + + ordererOUIdentifierCert, err := parseCertificateFromBytes(fabricMSPConfig.FabricNodeOus.OrdererOuIdentifier.Certificate) + if err != nil { + return MSP{}, fmt.Errorf("parsing orderer ou identifier cert: %v", err) + } + + nodeOUs = membership.NodeOUs{ + Enable: 
fabricMSPConfig.FabricNodeOus.Enable, + ClientOUIdentifier: membership.OUIdentifier{ + Certificate: clientOUIdentifierCert, + OrganizationalUnitIdentifier: fabricMSPConfig.FabricNodeOus.ClientOuIdentifier.OrganizationalUnitIdentifier, + }, + PeerOUIdentifier: membership.OUIdentifier{ + Certificate: peerOUIdentifierCert, + OrganizationalUnitIdentifier: fabricMSPConfig.FabricNodeOus.PeerOuIdentifier.OrganizationalUnitIdentifier, + }, + AdminOUIdentifier: membership.OUIdentifier{ + Certificate: adminOUIdentifierCert, + OrganizationalUnitIdentifier: fabricMSPConfig.FabricNodeOus.AdminOuIdentifier.OrganizationalUnitIdentifier, + }, + OrdererOUIdentifier: membership.OUIdentifier{ + Certificate: ordererOUIdentifierCert, + OrganizationalUnitIdentifier: fabricMSPConfig.FabricNodeOus.OrdererOuIdentifier.OrganizationalUnitIdentifier, + }, + } + } + + return MSP{ + Name: fabricMSPConfig.Name, + RootCerts: rootCerts, + IntermediateCerts: intermediateCerts, + Admins: adminCerts, + RevocationList: revocationList, + OrganizationalUnitIdentifiers: ouIdentifiers, + CryptoConfig: membership.CryptoConfig{ + SignatureHashFamily: fabricMSPConfig.CryptoConfig.SignatureHashFamily, + IdentityIdentifierHashFunction: fabricMSPConfig.CryptoConfig.IdentityIdentifierHashFunction, + }, + TLSRootCerts: tlsRootCerts, + TLSIntermediateCerts: tlsIntermediateCerts, + NodeOUs: nodeOUs, + }, nil +} + +func parseCertificateListFromBytes(certs [][]byte) ([]*x509.Certificate, error) { + certificateList := []*x509.Certificate{} + + for _, cert := range certs { + certificate, err := parseCertificateFromBytes(cert) + if err != nil { + return certificateList, err + } + + certificateList = append(certificateList, certificate) + } + + return certificateList, nil +} + +func parseCertificateFromBytes(cert []byte) (*x509.Certificate, error) { + pemBlock, _ := pem.Decode(cert) + if pemBlock == nil { + return &x509.Certificate{}, fmt.Errorf("no PEM data found in cert[% x]", cert) + } + + certificate, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return &x509.Certificate{}, err + } + + return certificate, nil +} + +func parseCRL(crls [][]byte) ([]*pkix.CertificateList, error) { + certificateLists := []*pkix.CertificateList{} + + for _, crl := range crls { + pemBlock, _ := pem.Decode(crl) + if pemBlock == nil { + return certificateLists, fmt.Errorf("no PEM data found in CRL[% x]", crl) + } + + certificateList, err := x509.ParseCRL(pemBlock.Bytes) + if err != nil { + return certificateLists, fmt.Errorf("parsing crl: %v", err) + } + + certificateLists = append(certificateLists, certificateList) + } + + return certificateLists, nil +} + +func parsePrivateKeyFromBytes(priv []byte) (crypto.PrivateKey, error) { + if len(priv) == 0 { + return nil, nil + } + + pemBlock, _ := pem.Decode(priv) + if pemBlock == nil { + return nil, fmt.Errorf("no PEM data found in private key[% x]", priv) + } + + privateKey, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("failed parsing PKCS#8 private key: %v", err) + } + + return privateKey, nil +} + +func parseOUIdentifiers(identifiers []*mb.FabricOUIdentifier) ([]membership.OUIdentifier, error) { + fabricIdentifiers := []membership.OUIdentifier{} + + for _, identifier := range identifiers { + cert, err := parseCertificateFromBytes(identifier.Certificate) + if err != nil { + return fabricIdentifiers, err + } + + fabricOUIdentifier := membership.OUIdentifier{ + Certificate: cert, + OrganizationalUnitIdentifier: identifier.OrganizationalUnitIdentifier, + } + 
+ fabricIdentifiers = append(fabricIdentifiers, fabricOUIdentifier) + } + + return fabricIdentifiers, nil +} + +// toProto converts an MSP configuration to an mb.FabricMSPConfig proto. +// It pem encodes x509 certificates and ECDSA private keys to byte slices. +func (m *MSP) toProto() (*mb.FabricMSPConfig, error) { + revocationList, err := buildPemEncodedRevocationList(m.RevocationList) + if err != nil { + return nil, fmt.Errorf("building pem encoded revocation list: %v", err) + } + + ouIdentifiers := buildOUIdentifiers(m.OrganizationalUnitIdentifiers) + + var fabricNodeOUs *mb.FabricNodeOUs + if m.NodeOUs != (membership.NodeOUs{}) { + fabricNodeOUs = &mb.FabricNodeOUs{ + Enable: m.NodeOUs.Enable, + ClientOuIdentifier: &mb.FabricOUIdentifier{ + Certificate: pemEncodeX509Certificate(m.NodeOUs.ClientOUIdentifier.Certificate), + OrganizationalUnitIdentifier: m.NodeOUs.ClientOUIdentifier.OrganizationalUnitIdentifier, + }, + PeerOuIdentifier: &mb.FabricOUIdentifier{ + Certificate: pemEncodeX509Certificate(m.NodeOUs.PeerOUIdentifier.Certificate), + OrganizationalUnitIdentifier: m.NodeOUs.PeerOUIdentifier.OrganizationalUnitIdentifier, + }, + AdminOuIdentifier: &mb.FabricOUIdentifier{ + Certificate: pemEncodeX509Certificate(m.NodeOUs.AdminOUIdentifier.Certificate), + OrganizationalUnitIdentifier: m.NodeOUs.AdminOUIdentifier.OrganizationalUnitIdentifier, + }, + OrdererOuIdentifier: &mb.FabricOUIdentifier{ + Certificate: pemEncodeX509Certificate(m.NodeOUs.OrdererOUIdentifier.Certificate), + OrganizationalUnitIdentifier: m.NodeOUs.OrdererOUIdentifier.OrganizationalUnitIdentifier, + }, + } + } + + return &mb.FabricMSPConfig{ + Name: m.Name, + RootCerts: buildPemEncodedCertListFromX509(m.RootCerts), + IntermediateCerts: buildPemEncodedCertListFromX509(m.IntermediateCerts), + Admins: buildPemEncodedCertListFromX509(m.Admins), + RevocationList: revocationList, + OrganizationalUnitIdentifiers: ouIdentifiers, + CryptoConfig: &mb.FabricCryptoConfig{ + SignatureHashFamily: m.CryptoConfig.SignatureHashFamily, + IdentityIdentifierHashFunction: m.CryptoConfig.IdentityIdentifierHashFunction, + }, + TlsRootCerts: buildPemEncodedCertListFromX509(m.TLSRootCerts), + TlsIntermediateCerts: buildPemEncodedCertListFromX509(m.TLSIntermediateCerts), + FabricNodeOus: fabricNodeOUs, + }, nil +} + +func buildOUIdentifiers(identifiers []membership.OUIdentifier) []*mb.FabricOUIdentifier { + fabricIdentifiers := []*mb.FabricOUIdentifier{} + + for _, identifier := range identifiers { + fabricOUIdentifier := &mb.FabricOUIdentifier{ + Certificate: pemEncodeX509Certificate(identifier.Certificate), + OrganizationalUnitIdentifier: identifier.OrganizationalUnitIdentifier, + } + + fabricIdentifiers = append(fabricIdentifiers, fabricOUIdentifier) + } + + return fabricIdentifiers +} + +// buildPemEncodedRevocationList returns a byte slice of the pem-encoded +// CRLs for a revocation list. 
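+// Each CRL is marshaled to ASN.1 DER and wrapped in an "X509 CRL" PEM block
+// (see pemEncodeCRL below), i.e. output of the form:
+//
+//	-----BEGIN X509 CRL-----
+//	...base64-encoded DER...
+//	-----END X509 CRL-----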
+func buildPemEncodedRevocationList(crls []*pkix.CertificateList) ([][]byte, error) { + pemEncodedRevocationList := [][]byte{} + + for _, crl := range crls { + // asn1MarshalledBytes, err := asn1.Marshal(*crl) + pemCRL, err := pemEncodeCRL(crl) + if err != nil { + return nil, err + } + + pemEncodedRevocationList = append(pemEncodedRevocationList, pemCRL) + } + + return pemEncodedRevocationList, nil +} + +func pemEncodeCRL(crl *pkix.CertificateList) ([]byte, error) { + asn1MarshalledBytes, err := asn1.Marshal(*crl) + if err != nil { + return nil, err + } + return pem.EncodeToMemory(&pem.Block{Type: "X509 CRL", Bytes: asn1MarshalledBytes}), nil +} + +func buildPemEncodedCertListFromX509(certList []*x509.Certificate) [][]byte { + certs := [][]byte{} + for _, cert := range certList { + certs = append(certs, pemEncodeX509Certificate(cert)) + } + + return certs +} + +func pemEncodeX509Certificate(cert *x509.Certificate) []byte { + return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) +} + +func pemEncodePKCS8PrivateKey(priv crypto.PrivateKey) ([]byte, error) { + privBytes, err := x509.MarshalPKCS8PrivateKey(priv) + if err != nil { + return nil, fmt.Errorf("marshaling PKCS#8 private key: %v", err) + } + + return pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}), nil +} + +// newMSPConfig returns an config for a msp. +func newMSPConfig(updatedMSP MSP) (*mb.MSPConfig, error) { + fabricMSPConfig, err := updatedMSP.toProto() + if err != nil { + return nil, err + } + + conf, err := proto.Marshal(fabricMSPConfig) + if err != nil { + return nil, fmt.Errorf("marshaling msp config: %v", err) + } + + mspConfig := &mb.MSPConfig{ + Config: conf, + } + + return mspConfig, nil +} + +func (m *MSP) validateCACerts() error { + err := validateCACerts(m.RootCerts) + if err != nil { + return fmt.Errorf("invalid root cert: %v", err) + } + + err = validateCACerts(m.IntermediateCerts) + if err != nil { + return fmt.Errorf("invalid intermediate cert: %v", err) + } + + // TODO: follow the workaround that msp code use to incorporate cert.Verify() + for _, ic := range m.IntermediateCerts { + validIntermediateCert := false + for _, rc := range m.RootCerts { + err := ic.CheckSignatureFrom(rc) + if err == nil { + validIntermediateCert = true + break + } + } + if !validIntermediateCert { + return fmt.Errorf("intermediate cert not signed by any root certs of this MSP. serial number: %d", ic.SerialNumber) + } + } + + err = validateCACerts(m.TLSRootCerts) + if err != nil { + return fmt.Errorf("invalid tls root cert: %v", err) + } + + err = validateCACerts(m.TLSIntermediateCerts) + if err != nil { + return fmt.Errorf("invalid tls intermediate cert: %v", err) + } + + tlsRootPool := x509.NewCertPool() + for _, rootCert := range m.TLSRootCerts { + tlsRootPool.AddCert(rootCert) + } + + for _, ic := range m.TLSIntermediateCerts { + _, err := ic.Verify(x509.VerifyOptions{ + Roots: tlsRootPool, + }) + if err != nil { + return err + } + } + + return nil +} + +func validateCACerts(caCerts []*x509.Certificate) error { + for _, caCert := range caCerts { + if (caCert.KeyUsage & x509.KeyUsageCertSign) == 0 { + return fmt.Errorf("KeyUsage must be x509.KeyUsageCertSign. serial number: %d", caCert.SerialNumber) + } + + if !caCert.IsCA { + return fmt.Errorf("must be a CA certificate. 
serial number: %d", caCert.SerialNumber) + } + } + + return nil +} diff --git a/v2/configtx/msp_test.go b/v2/configtx/msp_test.go new file mode 100644 index 0000000..c4d254b --- /dev/null +++ b/v2/configtx/msp_test.go @@ -0,0 +1,1693 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "bytes" + "crypto/ecdsa" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "fmt" + "math/big" + "testing" + "time" + + "github.com/hyperledger/fabric-config/v2/configtx/membership" + "github.com/hyperledger/fabric-config/v2/configtx/orderer" + "github.com/hyperledger/fabric-config/v2/protolator" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + mb "github.com/hyperledger/fabric-protos-go-apiv2/msp" + . "github.com/onsi/gomega" + "google.golang.org/protobuf/proto" +) + +func TestMSPConfigurationFailures(t *testing.T) { + t.Parallel() + + badCert := &x509.Certificate{} + + tests := []struct { + name string + orgType string + consortiumName string + orgName string + mspMod func(*MSP) + expectedErr string + }{ + { + name: "Bad root cert", + orgType: OrdererGroupKey, + orgName: "OrdererOrg", + mspMod: func(msp *MSP) { + badCert := &x509.Certificate{} + msp.RootCerts = append(msp.RootCerts, badCert) + }, + expectedErr: "parsing root certs: x509: malformed certificate", + }, + { + name: "Bad intermediate cert", + orgType: OrdererGroupKey, + orgName: "OrdererOrg", + mspMod: func(msp *MSP) { + msp.IntermediateCerts = append(msp.IntermediateCerts, badCert) + }, + expectedErr: "parsing intermediate certs: x509: malformed certificate", + }, + { + name: "Bad admin cert", + orgType: OrdererGroupKey, + orgName: "OrdererOrg", + mspMod: func(msp *MSP) { + msp.Admins = append(msp.Admins, badCert) + }, + expectedErr: "parsing admin certs: x509: malformed certificate", + }, + { + name: "Bad OU Identifier cert", + orgType: OrdererGroupKey, + orgName: "OrdererOrg", + mspMod: func(msp *MSP) { + msp.OrganizationalUnitIdentifiers[0].Certificate = badCert + }, + expectedErr: "parsing ou identifiers: x509: malformed certificate", + }, + { + name: "Bad tls root cert", + orgType: OrdererGroupKey, + orgName: "OrdererOrg", + mspMod: func(msp *MSP) { + msp.TLSRootCerts = append(msp.TLSRootCerts, badCert) + }, + expectedErr: "parsing tls root certs: x509: malformed certificate", + }, + { + name: "Bad tls intermediate cert", + orgType: OrdererGroupKey, + orgName: "OrdererOrg", + mspMod: func(msp *MSP) { + msp.TLSIntermediateCerts = append(msp.TLSIntermediateCerts, badCert) + }, + expectedErr: "parsing tls intermediate certs: x509: malformed certificate", + }, + { + name: "Bad Client OU Identifier cert", + orgType: OrdererGroupKey, + orgName: "OrdererOrg", + mspMod: func(msp *MSP) { + msp.NodeOUs.ClientOUIdentifier.Certificate = badCert + }, + expectedErr: "parsing client ou identifier cert: x509: malformed certificate", + }, + { + name: "Bad Peer OU Identifier cert", + orgType: OrdererGroupKey, + orgName: "OrdererOrg", + mspMod: func(msp *MSP) { + msp.NodeOUs.PeerOUIdentifier.Certificate = badCert + }, + expectedErr: "parsing peer ou identifier cert: x509: malformed certificate", + }, + { + name: "Bad Admin OU Identifier cert", + orgType: OrdererGroupKey, + orgName: "OrdererOrg", + mspMod: func(msp *MSP) { + msp.NodeOUs.AdminOUIdentifier.Certificate = badCert + }, + expectedErr: "parsing admin ou identifier cert: x509: malformed certificate", + }, + { + name: "Bad Orderer OU Identifier cert", + orgType: OrdererGroupKey, + orgName: 
"OrdererOrg", + mspMod: func(msp *MSP) { + msp.NodeOUs.OrdererOUIdentifier.Certificate = badCert + }, + expectedErr: "parsing orderer ou identifier cert: x509: malformed certificate", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + orderer1, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(orderer1) + if orderer1.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + application, _ := baseApplication(t) + applicationGroup, err := newApplicationGroup(application) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ConsortiumsGroupKey: consortiumsGroup, + OrdererGroupKey: ordererGroup, + ApplicationGroupKey: applicationGroup, + }, + }, + } + + c := &ConfigTx{ + original: config, + updated: config, + } + if tt.mspMod != nil && tt.orgType != ConsortiumsGroupKey { + baseMSP, _ := baseMSP(t) + + tt.mspMod(&baseMSP) + + orgGroup := c.updated.ChannelGroup.Groups[tt.orgType].Groups[tt.orgName] + fabricMSPConfig, err := baseMSP.toProto() + gt.Expect(err).NotTo(HaveOccurred()) + + conf, err := proto.Marshal(fabricMSPConfig) + gt.Expect(err).NotTo(HaveOccurred()) + + mspConfig := &mb.MSPConfig{ + Config: conf, + } + + err = setValue(orgGroup, mspValue(mspConfig), AdminsPolicyKey) + gt.Expect(err).NotTo(HaveOccurred()) + } + + switch tt.orgType { + case ApplicationGroupKey: + _, err := c.Application().Organization(tt.orgName).MSP().Configuration() + gt.Expect(err).To(MatchError(tt.expectedErr)) + case OrdererGroupKey: + _, err := c.Orderer().Organization(tt.orgName).MSP().Configuration() + gt.Expect(err).To(MatchError(tt.expectedErr)) + case ConsortiumsGroupKey: + _, err := c.Consortium(tt.consortiumName).Organization(tt.orgName).MSP().Configuration() + gt.Expect(err).To(MatchError(tt.expectedErr)) + default: + t.Fatalf("invalid org type %s", tt.orgType) + } + }) + } +} + +func TestMSPToProto(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + msp, _ := baseMSP(t) + certBase64, crlBase64 := certCRLBase64(t, msp) + + expectedFabricMSPConfigProtoJSON := fmt.Sprintf(` +{ + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] +} +`, certBase64, crlBase64) + expectedFabricMSPConfigProto := &mb.FabricMSPConfig{} + err := 
protolator.DeepUnmarshalJSON(bytes.NewBufferString(expectedFabricMSPConfigProtoJSON), expectedFabricMSPConfigProto) + gt.Expect(err).NotTo(HaveOccurred()) + + fabricMSPConfigProto, err := msp.toProto() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(proto.Equal(fabricMSPConfigProto, expectedFabricMSPConfigProto)).To(BeTrue()) +} + +func TestMSPToProtoNoNodeOUs(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + msp, _ := baseMSP(t) + msp.NodeOUs = membership.NodeOUs{} + certBase64, crlBase64 := certCRLBase64(t, msp) + + expectedFabricMSPConfigProtoJSON := fmt.Sprintf(` +{ + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": null, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] +} +`, certBase64, crlBase64) + expectedFabricMSPConfigProto := &mb.FabricMSPConfig{} + err := protolator.DeepUnmarshalJSON(bytes.NewBufferString(expectedFabricMSPConfigProtoJSON), expectedFabricMSPConfigProto) + gt.Expect(err).NotTo(HaveOccurred()) + + fabricMSPConfigProto, err := msp.toProto() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(proto.Equal(fabricMSPConfigProto, expectedFabricMSPConfigProto)).To(BeTrue()) +} + +func TestParseCertificateFromBytesFailure(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + errCert := ` +-----END CERTIFICATE----- +` + + _, err := parseCertificateFromBytes([]byte(errCert)) + gt.Expect(err).NotTo(BeNil()) + gt.Expect(err.Error()).To(ContainSubstring("no PEM data found in cert[")) + + _, err = parseCertificateFromBytes(nil) + gt.Expect(err).To(MatchError("no PEM data found in cert[]")) +} + +func TestParseCRLFailure(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + errCRL := ` +-----END X509 CRL----- +` + + _, err := parseCRL([][]byte{[]byte(errCRL)}) + gt.Expect(err).NotTo(BeNil()) + gt.Expect(err.Error()).To(ContainSubstring("no PEM data found in CRL[")) + + _, err = parseCRL([][]byte{nil, []byte(errCRL)}) + gt.Expect(err).To(MatchError("no PEM data found in CRL[]")) +} + +func TestParsePrivateKeyFromBytesFailure(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + errPrivateKey := ` +-----END EC PRIVATE KEY----- +` + + _, err := parsePrivateKeyFromBytes([]byte(errPrivateKey)) + gt.Expect(err).NotTo(BeNil()) + gt.Expect(err.Error()).To(ContainSubstring("no PEM data found in private key[")) +} + +func TestAddAdminCert(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + newCert := generateCert(t, "anothercert-org1.example.com") + + err = ordererMSP.AddAdminCert(newCert) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + err = ordererMSP.AddAdminCert(msp.Admins[0]) + 
gt.Expect(err).NotTo(HaveOccurred()) + + msp, err = c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.Admins).Should(ContainElement(newCert)) + gt.Expect(msp.Admins).Should(HaveLen(2)) +} + +func TestAddAdminCertFailure(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + ordererMSP.configGroup = &cb.ConfigGroup{} + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + err = ordererMSP.AddAdminCert(msp.Admins[0]) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestRemoveAdminCert(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + msp, err := ordererMSP.Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + existingCert := msp.Admins[0] + + err = ordererMSP.RemoveAdminCert(existingCert) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err = c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.Admins).Should(HaveLen(0)) + gt.Expect(msp.Admins).ShouldNot(ContainElement(existingCert)) +} + +func TestRemoveAdminCertFailure(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + ordererMSP.configGroup = &cb.ConfigGroup{} + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + err = ordererMSP.RemoveAdminCert(msp.Admins[0]) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestAddRootCert(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + newCert, _ := generateCACertAndPrivateKey(t, "ca-org1.example.com") + + err = ordererMSP.AddRootCert(newCert) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err := 
c.Orderer().Organization("OrdererOrg").MSP().Configuration() + err = ordererMSP.AddRootCert(msp.RootCerts[0]) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err = c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.RootCerts).Should(ContainElement(newCert)) + gt.Expect(msp.RootCerts).Should(HaveLen(2)) +} + +func TestAddRootCertFailure(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + err = ordererMSP.AddRootCert(&x509.Certificate{}) + gt.Expect(err).To(MatchError("invalid root cert: KeyUsage must be x509.KeyUsageCertSign. serial number: ")) + + ordererMSP.configGroup = &cb.ConfigGroup{} + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + err = ordererMSP.AddRootCert(msp.RootCerts[0]) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestRemoveRootCert(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + msp, err := ordererMSP.Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + + newCert, _ := generateCACertAndPrivateKey(t, "ca-org1.example.com") + + err = ordererMSP.AddRootCert(newCert) + gt.Expect(err).NotTo(HaveOccurred()) + + err = ordererMSP.RemoveRootCert(newCert) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err = c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.RootCerts).Should(HaveLen(1)) + gt.Expect(msp.RootCerts).ShouldNot(ContainElement(newCert)) +} + +func TestRemoveRootCertFailure(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + ordererMSP.configGroup = &cb.ConfigGroup{} + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + err = ordererMSP.RemoveRootCert(msp.RootCerts[0]) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestAddIntermediateCert(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, privKeys, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + 
gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + msp, err := ordererMSP.Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + newIntermediateCert, _ := generateIntermediateCACertAndPrivateKey(t, "ca-org1.example.com", msp.RootCerts[0], privKeys[0]) + + err = ordererMSP.AddIntermediateCert(newIntermediateCert) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err = c.Orderer().Organization("OrdererOrg").MSP().Configuration() + err = ordererMSP.AddIntermediateCert(msp.IntermediateCerts[0]) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err = c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.IntermediateCerts).Should(ContainElement(newIntermediateCert)) + gt.Expect(msp.IntermediateCerts).Should(HaveLen(2)) +} + +func TestAddIntermediateCertFailure(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + ordererMSP.configGroup = &cb.ConfigGroup{} + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + err = ordererMSP.AddIntermediateCert(msp.IntermediateCerts[0]) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestRemoveIntermediateCert(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + msp, err := ordererMSP.Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + existingCert := msp.IntermediateCerts[0] + + err = ordererMSP.RemoveIntermediateCert(existingCert) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err = c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.IntermediateCerts).Should(HaveLen(0)) + gt.Expect(msp.IntermediateCerts).ShouldNot(ContainElement(existingCert)) +} + +func TestRemoveIntermediateCertFailure(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + ordererMSP.configGroup = &cb.ConfigGroup{} + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + err = 
ordererMSP.RemoveIntermediateCert(msp.IntermediateCerts[0]) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestAddOUIdentifier(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + newCert := generateCert(t, "anothercert-org1.example.com") + newOU := membership.OUIdentifier{ + Certificate: newCert, + } + + err = ordererMSP.AddOUIdentifier(newOU) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + err = ordererMSP.AddOUIdentifier(msp.OrganizationalUnitIdentifiers[0]) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err = c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.OrganizationalUnitIdentifiers).Should(ContainElement(newOU)) +} + +func TestAddOUIdentifierFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + newCert := generateCert(t, "anothercert-org1.example.com") + newOU := membership.OUIdentifier{ + Certificate: newCert, + } + + ordererMSP.configGroup = &cb.ConfigGroup{} + err = ordererMSP.AddOUIdentifier(newOU) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestRemoveOUIdentifier(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + msp, err := ordererMSP.Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + existingOU := msp.OrganizationalUnitIdentifiers[0] + + err = ordererMSP.RemoveOUIdentifier(existingOU) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err = c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.OrganizationalUnitIdentifiers).Should(HaveLen(0)) + gt.Expect(msp.OrganizationalUnitIdentifiers).ShouldNot(ContainElement(existingOU)) +} + +func TestRemoveOUIdentifierFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer 
supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + newCert := generateCert(t, "anothercert-org1.example.com") + newOU := membership.OUIdentifier{ + Certificate: newCert, + } + + ordererMSP.configGroup = &cb.ConfigGroup{} + err = ordererMSP.RemoveOUIdentifier(newOU) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestSetCryptoConfig(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + cryptoConfig := membership.CryptoConfig{} + + err = ordererMSP.SetCryptoConfig(cryptoConfig) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.CryptoConfig).To(Equal(cryptoConfig)) +} + +func TestSetCryptoConfigFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + cryptoConfig := membership.CryptoConfig{} + ordererMSP.configGroup = &cb.ConfigGroup{} + err = ordererMSP.SetCryptoConfig(cryptoConfig) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestAddTLSRootCert(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + newCert, _ := generateCACertAndPrivateKey(t, "ca-org1.example.com") + + err = ordererMSP.AddTLSRootCert(newCert) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.TLSRootCerts).Should(ContainElement(newCert)) +} + +func TestAddTLSRootCertFailure(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + ordererMSP.configGroup = 
&cb.ConfigGroup{} + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + err = ordererMSP.AddTLSRootCert(msp.TLSRootCerts[0]) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestRemoveTLSRootCert(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + msp, err := ordererMSP.Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + + newCert, _ := generateCACertAndPrivateKey(t, "ca-org1.example.com") + + err = ordererMSP.AddTLSRootCert(newCert) + gt.Expect(err).NotTo(HaveOccurred()) + + err = ordererMSP.RemoveTLSRootCert(newCert) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err = c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.TLSRootCerts).Should(HaveLen(1)) + gt.Expect(msp.TLSRootCerts).ShouldNot(ContainElement(newCert)) +} + +func TestRemoveTLSRootCertFailure(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + ordererMSP.configGroup = &cb.ConfigGroup{} + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + err = ordererMSP.RemoveTLSRootCert(msp.TLSRootCerts[0]) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestRemoveTLSRootCertVerifyFailure(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + msp, err := ordererMSP.Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + + newCert, _ := generateCACertAndPrivateKey(t, "org1.example.com") + newCert.SerialNumber = big.NewInt(7) + + msp.TLSIntermediateCerts = append(msp.TLSIntermediateCerts, newCert) + + err = ordererMSP.RemoveTLSRootCert(msp.TLSRootCerts[0]) + gt.Expect(err).To(MatchError("x509: certificate signed by unknown authority")) +} + +func TestAddTLSIntermediateCert(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, privKeys, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + 
ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + msp, err := ordererMSP.Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + newTLSIntermediateCert, _ := generateIntermediateCACertAndPrivateKey(t, "ca-org1.example.com", msp.RootCerts[0], privKeys[0]) + + err = ordererMSP.AddTLSIntermediateCert(newTLSIntermediateCert) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err = c.Orderer().Organization("OrdererOrg").MSP().Configuration() + err = ordererMSP.AddTLSIntermediateCert(msp.TLSIntermediateCerts[0]) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.TLSIntermediateCerts).Should(ContainElement(newTLSIntermediateCert)) + gt.Expect(msp.TLSIntermediateCerts).Should(HaveLen(2)) +} + +func TestAddTLSIntermediateCertFailure(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + ordererMSP.configGroup = &cb.ConfigGroup{} + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + err = ordererMSP.AddTLSIntermediateCert(msp.TLSIntermediateCerts[0]) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestRemoveTLSIntermediateCert(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + msp, err := ordererMSP.Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + existingCert := msp.TLSIntermediateCerts[0] + + err = ordererMSP.RemoveTLSIntermediateCert(existingCert) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err = c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.TLSIntermediateCerts).Should(HaveLen(0)) + gt.Expect(msp.TLSIntermediateCerts).ShouldNot(ContainElement(existingCert)) +} + +func TestRemoveTLSIntermediateCertFailure(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + ordererMSP.configGroup = &cb.ConfigGroup{} + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + err = ordererMSP.RemoveTLSIntermediateCert(msp.TLSIntermediateCerts[0]) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestSetClientOUIdentifier(t *testing.T) { + 
t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + newCert := generateCert(t, "anothercert-org1.example.com") + newOU := membership.OUIdentifier{ + Certificate: newCert, + } + + err = ordererMSP.SetClientOUIdentifier(newOU) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.NodeOUs.ClientOUIdentifier).To(Equal(newOU)) +} + +func TestSetClientOUIdentifierFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + newCert := generateCert(t, "anothercert-org1.example.com") + newOU := membership.OUIdentifier{ + Certificate: newCert, + } + + ordererMSP.configGroup = &cb.ConfigGroup{} + err = ordererMSP.SetClientOUIdentifier(newOU) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestSetPeerOUIdentifier(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + newCert := generateCert(t, "anothercert-org1.example.com") + newOU := membership.OUIdentifier{ + Certificate: newCert, + } + + err = ordererMSP.SetPeerOUIdentifier(newOU) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.NodeOUs.PeerOUIdentifier).To(Equal(newOU)) +} + +func TestSetPeerOUIdentifierFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + newCert := generateCert(t, "anothercert-org1.example.com") + newOU := membership.OUIdentifier{ + Certificate: newCert, + } + + ordererMSP.configGroup = &cb.ConfigGroup{} + err = ordererMSP.SetPeerOUIdentifier(newOU) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func 
TestSetAdminOUIdentifier(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + newCert := generateCert(t, "anothercert-org1.example.com") + newOU := membership.OUIdentifier{ + Certificate: newCert, + } + + err = ordererMSP.SetAdminOUIdentifier(newOU) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.NodeOUs.AdminOUIdentifier).To(Equal(newOU)) +} + +func TestSetAdminOUIdentifierFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + newCert := generateCert(t, "anothercert-org1.example.com") + newOU := membership.OUIdentifier{ + Certificate: newCert, + } + + ordererMSP.configGroup = &cb.ConfigGroup{} + err = ordererMSP.SetAdminOUIdentifier(newOU) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestSetOrdererOUIdentifier(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + newCert := generateCert(t, "anothercert-org1.example.com") + newOU := membership.OUIdentifier{ + Certificate: newCert, + } + + err = ordererMSP.SetOrdererOUIdentifier(newOU) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.NodeOUs.OrdererOUIdentifier).To(Equal(newOU)) +} + +func TestSetOrdererOUIdentifierFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + newCert := generateCert(t, "anothercert-org1.example.com") + newOU := membership.OUIdentifier{ + Certificate: newCert, + } + + ordererMSP.configGroup = &cb.ConfigGroup{} + err = ordererMSP.SetOrdererOUIdentifier(newOU) + 
gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestSetEnableNodeOUs(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + + err = ordererMSP.SetEnableNodeOUs(true) + gt.Expect(err).NotTo(HaveOccurred()) + + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp.NodeOUs.Enable).To(BeTrue()) +} + +func TestSetEnableNodeOUsFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP := c.Orderer().Organization("OrdererOrg").MSP() + ordererMSP.configGroup = &cb.ConfigGroup{} + err = ordererMSP.SetEnableNodeOUs(true) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestAddCRL(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, privKeys, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + msp := c.Orderer().Organization("OrdererOrg").MSP() + ordererMSP, _ := msp.Configuration() + + cert := ordererMSP.RootCerts[0] + certToRevoke, _ := generateCertAndPrivateKeyFromCACert(t, "org1.example.com", cert, privKeys[0]) + signingIdentity := &SigningIdentity{ + Certificate: cert, + PrivateKey: privKeys[0], + MSPID: "MSPID", + } + newCRL, err := ordererMSP.CreateMSPCRL(signingIdentity, certToRevoke) + + err = msp.AddCRL(newCRL) + gt.Expect(err).NotTo(HaveOccurred()) + + ordererMSP, err = c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(ordererMSP.RevocationList).Should(ContainElement(newCRL)) +} + +func TestAddCRLFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + msp := c.Orderer().Organization("OrdererOrg").MSP() + + msp.configGroup = &cb.ConfigGroup{} + err = msp.AddCRL(&pkix.CertificateList{}) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestAddCRLFromSigningIdentityFailures(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType 
:= orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + msp := c.Orderer().Organization("OrdererOrg").MSP() + + msp.configGroup = &cb.ConfigGroup{} + err = msp.AddCRLFromSigningIdentity(nil, nil) + gt.Expect(err).To(MatchError("config does not contain value for MSP")) +} + +func TestAddCRLFromSigningIdentity(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, privKeys, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + msp := c.Orderer().Organization("OrdererOrg").MSP() + ordererMSP, _ := msp.Configuration() + + cert := ordererMSP.RootCerts[0] + certToRevoke, _ := generateCertAndPrivateKeyFromCACert(t, "org1.example.com", cert, privKeys[0]) + signingIdentity := &SigningIdentity{ + Certificate: cert, + PrivateKey: privKeys[0], + MSPID: "MSPID", + } + + // newCRL, err := ordererMSP.CreateMSPCRL(signingIdentity, certToRevoke) + + err = msp.AddCRLFromSigningIdentity(signingIdentity, certToRevoke) + gt.Expect(err).NotTo(HaveOccurred()) + + ordererMSP, err = c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + // gt.Expect(ordererMSP.RevocationList).Should(ContainElement(newCRL)) +} + +func baseMSP(t *testing.T) (MSP, *ecdsa.PrivateKey) { + gt := NewGomegaWithT(t) + + cert, privKey := generateCACertAndPrivateKey(t, "org1.example.com") + crlBytes, err := cert.CreateCRL(rand.Reader, privKey, nil, time.Now(), time.Now().Add(YEAR)) + gt.Expect(err).NotTo(HaveOccurred()) + + crl, err := x509.ParseCRL(crlBytes) + gt.Expect(err).NotTo(HaveOccurred()) + + return MSP{ + Name: "MSPID", + RootCerts: []*x509.Certificate{cert}, + IntermediateCerts: []*x509.Certificate{cert}, + Admins: []*x509.Certificate{cert}, + RevocationList: []*pkix.CertificateList{crl}, + OrganizationalUnitIdentifiers: []membership.OUIdentifier{ + { + Certificate: cert, + OrganizationalUnitIdentifier: "OUID", + }, + }, + CryptoConfig: membership.CryptoConfig{ + SignatureHashFamily: "SHA3", + IdentityIdentifierHashFunction: "SHA256", + }, + TLSRootCerts: []*x509.Certificate{cert}, + TLSIntermediateCerts: []*x509.Certificate{cert}, + NodeOUs: membership.NodeOUs{ + ClientOUIdentifier: membership.OUIdentifier{ + Certificate: cert, + OrganizationalUnitIdentifier: "OUID", + }, + PeerOUIdentifier: membership.OUIdentifier{ + Certificate: cert, + OrganizationalUnitIdentifier: "OUID", + }, + AdminOUIdentifier: membership.OUIdentifier{ + Certificate: cert, + OrganizationalUnitIdentifier: "OUID", + }, + OrdererOUIdentifier: membership.OUIdentifier{ + Certificate: cert, + OrganizationalUnitIdentifier: "OUID", + }, + }, + }, privKey +} + +// certCRLBase64 returns a base64 encoded representation of +// the first root certificate, the private key, and the first revocation list +// for the specified MSP. These are intended for use when formatting the +// expected config in JSON format. 
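+// Only the certificate and CRL encodings are produced here; the private key
+// returned by baseMSP is not encoded by this helper.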
+func certCRLBase64(t *testing.T, msp MSP) (string, string) { + gt := NewGomegaWithT(t) + + cert := msp.RootCerts[0] + crl := msp.RevocationList[0] + + certBase64 := base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(cert)) + pemCRLBytes, err := buildPemEncodedRevocationList([]*pkix.CertificateList{crl}) + gt.Expect(err).NotTo(HaveOccurred()) + crlBase64 := base64.StdEncoding.EncodeToString(pemCRLBytes[0]) + + return certBase64, crlBase64 +} diff --git a/v2/configtx/orderer.go b/v2/configtx/orderer.go new file mode 100644 index 0000000..3fc415e --- /dev/null +++ b/v2/configtx/orderer.go @@ -0,0 +1,1063 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "math" + "reflect" + "time" + + "github.com/hyperledger/fabric-config/v2/configtx/orderer" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + ob "github.com/hyperledger/fabric-protos-go-apiv2/orderer" + eb "github.com/hyperledger/fabric-protos-go-apiv2/orderer/etcdraft" + "google.golang.org/protobuf/proto" +) + +const ( + defaultHashingAlgorithm = "SHA256" + defaultBlockDataHashingStructureWidth = math.MaxUint32 +) + +// Orderer configures the ordering service behavior for a channel. +type Orderer struct { + // OrdererType is the type of orderer + // Options: `ConsensusTypeSolo`, `ConsensusTypeKafka` or `ConsensusTypeEtcdRaft` + OrdererType string + // BatchTimeout is the wait time between transactions. + BatchTimeout time.Duration + BatchSize orderer.BatchSize + Kafka orderer.Kafka + EtcdRaft orderer.EtcdRaft + Organizations []Organization + // MaxChannels is the maximum count of channels an orderer supports. + MaxChannels uint64 + // Capabilities is a map of the capabilities the orderer supports. + Capabilities []string + Policies map[string]Policy + // Options: `ConsensusStateNormal` and `ConsensusStateMaintenance` + State orderer.ConsensusState + ModPolicy string +} + +// OrdererGroup encapsulates the parts of the config that control +// the orderering service behavior. +type OrdererGroup struct { + channelGroup *cb.ConfigGroup + ordererGroup *cb.ConfigGroup +} + +// OrdererOrg encapsulates the parts of the config that control +// an orderer organization's configuration. +type OrdererOrg struct { + orgGroup *cb.ConfigGroup + name string +} + +// MSP returns an OrganizationMSP object that can be used to configure the organization's MSP. +func (o *OrdererOrg) MSP() *OrganizationMSP { + return &OrganizationMSP{ + configGroup: o.orgGroup, + } +} + +// EtcdRaftOptionsValue encapsulates the configuration functions used to modify an etcdraft configuration's options. +type EtcdRaftOptionsValue struct { + value *cb.ConfigValue +} + +// BatchSizeValue encapsulates the configuration functions used to modify an orderer configuration's batch size values. +type BatchSizeValue struct { + value *cb.ConfigValue +} + +// Orderer returns the orderer group from the updated config. +func (c *ConfigTx) Orderer() *OrdererGroup { + channelGroup := c.updated.ChannelGroup + ordererGroup := channelGroup.Groups[OrdererGroupKey] + return &OrdererGroup{channelGroup: channelGroup, ordererGroup: ordererGroup} +} + +// Organization returns the orderer org from the updated config. 
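+// It returns nil when no org group with the given name exists, so callers
+// should check the result before chaining further calls. A minimal sketch,
+// assuming c is a ConfigTx created with New(config):
+//
+//	org := c.Orderer().Organization("OrdererOrg")
+//	if org == nil {
+//		// the org is not part of the orderer configuration
+//	}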
+func (o *OrdererGroup) Organization(name string) *OrdererOrg { + orgGroup, ok := o.ordererGroup.Groups[name] + if !ok { + return nil + } + return &OrdererOrg{name: name, orgGroup: orgGroup} +} + +// Configuration returns the existing orderer configuration values from the updated +// config in a config transaction as an Orderer type. This can be used to retrieve +// existing values for the orderer prior to updating the orderer configuration. +func (o *OrdererGroup) Configuration() (Orderer, error) { + // CONSENSUS TYPE, STATE, AND METADATA + var etcdRaft orderer.EtcdRaft + kafkaBrokers := orderer.Kafka{} + + consensusTypeProto := &ob.ConsensusType{} + err := unmarshalConfigValueAtKey(o.ordererGroup, orderer.ConsensusTypeKey, consensusTypeProto) + if err != nil { + return Orderer{}, errors.New("cannot determine consensus type of orderer") + } + + ordererType := consensusTypeProto.Type + state := orderer.ConsensusState(ob.ConsensusType_State_name[int32(consensusTypeProto.State)]) + + switch consensusTypeProto.Type { + case orderer.ConsensusTypeSolo: + case orderer.ConsensusTypeKafka: + kafkaBrokersValue, ok := o.ordererGroup.Values[orderer.KafkaBrokersKey] + if !ok { + return Orderer{}, errors.New("unable to find kafka brokers for kafka orderer") + } + + kafkaBrokersProto := &ob.KafkaBrokers{} + err := proto.Unmarshal(kafkaBrokersValue.Value, kafkaBrokersProto) + if err != nil { + return Orderer{}, fmt.Errorf("unmarshaling kafka brokers: %v", err) + } + + kafkaBrokers.Brokers = kafkaBrokersProto.Brokers + case orderer.ConsensusTypeEtcdRaft: + etcdRaft, err = unmarshalEtcdRaftMetadata(consensusTypeProto.Metadata) + if err != nil { + return Orderer{}, fmt.Errorf("unmarshaling etcd raft metadata: %v", err) + } + default: + return Orderer{}, fmt.Errorf("config contains unknown consensus type '%s'", consensusTypeProto.Type) + } + + // BATCHSIZE AND TIMEOUT + batchSize := &ob.BatchSize{} + err = unmarshalConfigValueAtKey(o.ordererGroup, orderer.BatchSizeKey, batchSize) + if err != nil { + return Orderer{}, err + } + + batchTimeoutProto := &ob.BatchTimeout{} + err = unmarshalConfigValueAtKey(o.ordererGroup, orderer.BatchTimeoutKey, batchTimeoutProto) + if err != nil { + return Orderer{}, err + } + + batchTimeout, err := time.ParseDuration(batchTimeoutProto.Timeout) + if err != nil { + return Orderer{}, fmt.Errorf("batch timeout configuration '%s' is not a duration string", batchTimeoutProto.Timeout) + } + + // ORDERER ORGS + var ordererOrgs []Organization + for orgName := range o.ordererGroup.Groups { + orgConfig, err := o.Organization(orgName).Configuration() + if err != nil { + return Orderer{}, fmt.Errorf("retrieving orderer org %s: %v", orgName, err) + } + + ordererOrgs = append(ordererOrgs, orgConfig) + } + + // MAX CHANNELS + channelRestrictions := &ob.ChannelRestrictions{} + err = unmarshalConfigValueAtKey(o.ordererGroup, orderer.ChannelRestrictionsKey, channelRestrictions) + if err != nil { + return Orderer{}, err + } + + // CAPABILITIES + capabilities, err := getCapabilities(o.ordererGroup) + if err != nil { + return Orderer{}, fmt.Errorf("retrieving orderer capabilities: %v", err) + } + + // POLICIES + policies, err := o.Policies() + if err != nil { + return Orderer{}, fmt.Errorf("retrieving orderer policies: %v", err) + } + + return Orderer{ + OrdererType: ordererType, + BatchTimeout: batchTimeout, + BatchSize: orderer.BatchSize{ + MaxMessageCount: batchSize.MaxMessageCount, + AbsoluteMaxBytes: batchSize.AbsoluteMaxBytes, + PreferredMaxBytes: batchSize.PreferredMaxBytes, + }, + Kafka: 
kafkaBrokers, + EtcdRaft: etcdRaft, + Organizations: ordererOrgs, + MaxChannels: channelRestrictions.MaxCount, + Capabilities: capabilities, + Policies: policies, + State: state, + ModPolicy: o.ordererGroup.GetModPolicy(), + }, nil +} + +// BatchSize returns a BatchSizeValue that can be used to configure an orderer configuration's batch size parameters. +func (o *OrdererGroup) BatchSize() *BatchSizeValue { + return &BatchSizeValue{ + value: o.ordererGroup.Values[orderer.BatchSizeKey], + } +} + +// SetMaxMessageCount sets an orderer configuration's batch size max message count. +func (b *BatchSizeValue) SetMaxMessageCount(maxMessageCount uint32) error { + batchSize := &ob.BatchSize{} + err := proto.Unmarshal(b.value.Value, batchSize) + if err != nil { + return err + } + + batchSize.MaxMessageCount = maxMessageCount + b.value.Value, err = proto.Marshal(batchSize) + + return err +} + +// SetAbsoluteMaxBytes sets an orderer configuration's batch size max block size. +func (b *BatchSizeValue) SetAbsoluteMaxBytes(maxBytes uint32) error { + batchSize := &ob.BatchSize{} + err := proto.Unmarshal(b.value.Value, batchSize) + if err != nil { + return err + } + + batchSize.AbsoluteMaxBytes = maxBytes + b.value.Value, err = proto.Marshal(batchSize) + + return err +} + +// SetPreferredMaxBytes sets an orderer configuration's batch size preferred size of blocks. +func (b *BatchSizeValue) SetPreferredMaxBytes(maxBytes uint32) error { + batchSize := &ob.BatchSize{} + err := proto.Unmarshal(b.value.Value, batchSize) + if err != nil { + return err + } + + batchSize.PreferredMaxBytes = maxBytes + b.value.Value, err = proto.Marshal(batchSize) + + return err +} + +// SetBatchTimeout sets the wait time between transactions. +func (o *OrdererGroup) SetBatchTimeout(timeout time.Duration) error { + return setValue(o.ordererGroup, batchTimeoutValue(timeout.String()), AdminsPolicyKey) +} + +// SetMaxChannels sets the maximum count of channels an orderer supports. +func (o *OrdererGroup) SetMaxChannels(max int) error { + return setValue(o.ordererGroup, channelRestrictionsValue(uint64(max)), AdminsPolicyKey) +} + +// SetEtcdRaftConsensusType sets the orderer consensus type to etcdraft, sets etcdraft metadata, and consensus state. +func (o *OrdererGroup) SetEtcdRaftConsensusType(consensusMetadata orderer.EtcdRaft, consensusState orderer.ConsensusState) error { + consensusMetadataBytes, err := marshalEtcdRaftMetadata(consensusMetadata) + if err != nil { + return fmt.Errorf("marshaling etcdraft metadata: %v", err) + } + + return setValue(o.ordererGroup, consensusTypeValue(orderer.ConsensusTypeEtcdRaft, consensusMetadataBytes, ob.ConsensusType_State_value[string(consensusState)]), AdminsPolicyKey) +} + +// SetConsensusState sets the consensus state. +func (o *OrdererGroup) SetConsensusState(consensusState orderer.ConsensusState) error { + consensusTypeProto := &ob.ConsensusType{} + err := unmarshalConfigValueAtKey(o.ordererGroup, orderer.ConsensusTypeKey, consensusTypeProto) + if err != nil { + return err + } + + return setValue(o.ordererGroup, consensusTypeValue(consensusTypeProto.Type, consensusTypeProto.Metadata, ob.ConsensusType_State_value[string(consensusState)]), AdminsPolicyKey) +} + +// EtcdRaftOptions returns an EtcdRaftOptionsValue that can be used to configure an etcdraft configuration's options. 
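+// A usage sketch, assuming c is a ConfigTx for an etcdraft channel
+// (the interval size below is illustrative):
+//
+//	opts := c.Orderer().EtcdRaftOptions()
+//	if err := opts.SetSnapshotIntervalSize(16 * 1024 * 1024); err != nil {
+//		// handle error
+//	}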
+func (o *OrdererGroup) EtcdRaftOptions() *EtcdRaftOptionsValue { + return &EtcdRaftOptionsValue{ + value: o.ordererGroup.Values[orderer.ConsensusTypeKey], + } +} + +func (e *EtcdRaftOptionsValue) etcdRaftConfig(consensusTypeProto *ob.ConsensusType) (orderer.EtcdRaft, error) { + err := proto.Unmarshal(e.value.Value, consensusTypeProto) + if err != nil { + return orderer.EtcdRaft{}, err + } + + return unmarshalEtcdRaftMetadata(consensusTypeProto.Metadata) +} + +func (e *EtcdRaftOptionsValue) setEtcdRaftConfig(consensusTypeProto *ob.ConsensusType, etcdRaft orderer.EtcdRaft) error { + consensusMetadata, err := marshalEtcdRaftMetadata(etcdRaft) + if err != nil { + return fmt.Errorf("marshaling etcdraft metadata: %v", err) + } + + consensusTypeProto.Metadata = consensusMetadata + + e.value.Value, err = proto.Marshal(consensusTypeProto) + return err +} + +// SetTickInterval sets the Etcdraft's tick interval. +func (e *EtcdRaftOptionsValue) SetTickInterval(interval string) error { + consensusTypeProto := &ob.ConsensusType{} + etcdRaft, err := e.etcdRaftConfig(consensusTypeProto) + if err != nil { + return nil + } + + etcdRaft.Options.TickInterval = interval + return e.setEtcdRaftConfig(consensusTypeProto, etcdRaft) +} + +// SetElectionInterval sets the Etcdraft's election interval. +func (e *EtcdRaftOptionsValue) SetElectionInterval(interval uint32) error { + consensusTypeProto := &ob.ConsensusType{} + etcdRaft, err := e.etcdRaftConfig(consensusTypeProto) + if err != nil { + return nil + } + + etcdRaft.Options.ElectionTick = interval + return e.setEtcdRaftConfig(consensusTypeProto, etcdRaft) +} + +// SetHeartbeatTick sets the Etcdraft's heartbeat tick interval. +func (e *EtcdRaftOptionsValue) SetHeartbeatTick(tick uint32) error { + consensusTypeProto := &ob.ConsensusType{} + etcdRaft, err := e.etcdRaftConfig(consensusTypeProto) + if err != nil { + return nil + } + + etcdRaft.Options.HeartbeatTick = tick + return e.setEtcdRaftConfig(consensusTypeProto, etcdRaft) +} + +// SetMaxInflightBlocks sets the Etcdraft's max inflight blocks. +func (e *EtcdRaftOptionsValue) SetMaxInflightBlocks(maxBlks uint32) error { + consensusTypeProto := &ob.ConsensusType{} + etcdRaft, err := e.etcdRaftConfig(consensusTypeProto) + if err != nil { + return nil + } + + etcdRaft.Options.MaxInflightBlocks = maxBlks + return e.setEtcdRaftConfig(consensusTypeProto, etcdRaft) +} + +// SetSnapshotIntervalSize sets the Etcdraft's snapshot interval size. +func (e *EtcdRaftOptionsValue) SetSnapshotIntervalSize(intervalSize uint32) error { + consensusTypeProto := &ob.ConsensusType{} + etcdRaft, err := e.etcdRaftConfig(consensusTypeProto) + if err != nil { + return nil + } + + etcdRaft.Options.SnapshotIntervalSize = intervalSize + return e.setEtcdRaftConfig(consensusTypeProto, etcdRaft) +} + +// Configuration retrieves an existing org's configuration from an +// orderer organization config group in the updated config. 
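+// The returned Organization always has AnchorPeers cleared, and
+// OrdererEndpoints is populated only when the org group carries an
+// Endpoints value, for example (sketch):
+//
+//	org, err := c.Orderer().Organization("OrdererOrg").Configuration()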
+func (o *OrdererOrg) Configuration() (Organization, error) { + org, err := getOrganization(o.orgGroup, o.name) + if err != nil { + return Organization{}, err + } + + // OrdererEndpoints are optional when retrieving from an existing config + org.OrdererEndpoints = nil + _, ok := o.orgGroup.Values[EndpointsKey] + if ok { + endpointsProtos := &cb.OrdererAddresses{} + err = unmarshalConfigValueAtKey(o.orgGroup, EndpointsKey, endpointsProtos) + if err != nil { + return Organization{}, err + } + ordererEndpoints := make([]string, len(endpointsProtos.Addresses)) + for i, address := range endpointsProtos.Addresses { + ordererEndpoints[i] = address + } + org.OrdererEndpoints = ordererEndpoints + } + + // Remove AnchorPeers which are application org specific. + org.AnchorPeers = nil + + return org, nil +} + +// SetOrganization sets the organization config group for the given orderer +// org key in an existing Orderer configuration's Groups map. +// If the orderer org already exists in the current configuration, its value will be overwritten. +func (o *OrdererGroup) SetOrganization(org Organization) error { + orgGroup, err := newOrdererOrgConfigGroup(org) + if err != nil { + return fmt.Errorf("failed to create orderer org %s: %v", org.Name, err) + } + + o.ordererGroup.Groups[org.Name] = orgGroup + + return nil +} + +// RemoveOrganization removes an org from the Orderer group. +// Removal will panic if the orderer group does not exist. +func (o *OrdererGroup) RemoveOrganization(name string) { + delete(o.ordererGroup.Groups, name) +} + +// SetConfiguration modifies an updated config's Orderer configuration +// via the passed in Orderer values. It skips updating OrdererOrgGroups and Policies. +func (o *OrdererGroup) SetConfiguration(ord Orderer) error { + // update orderer values + err := addOrdererValues(o.ordererGroup, ord) + if err != nil { + return err + } + + return nil +} + +// AddConsenter adds a consenter to an etcdraft configuration. +func (o *OrdererGroup) AddConsenter(consenter orderer.Consenter) error { + cfg, err := o.Configuration() + if err != nil { + return err + } + + if cfg.OrdererType != orderer.ConsensusTypeEtcdRaft { + return fmt.Errorf("consensus type %s is not etcdraft", cfg.OrdererType) + } + + for _, c := range cfg.EtcdRaft.Consenters { + if reflect.DeepEqual(c, consenter) { + return nil + } + } + + cfg.EtcdRaft.Consenters = append(cfg.EtcdRaft.Consenters, consenter) + + consensusMetadata, err := marshalEtcdRaftMetadata(cfg.EtcdRaft) + if err != nil { + return fmt.Errorf("marshaling etcdraft metadata: %v", err) + } + + consensusState, ok := ob.ConsensusType_State_value[string(cfg.State)] + if !ok { + return fmt.Errorf("unknown consensus state '%s'", cfg.State) + } + + err = setValue(o.ordererGroup, consensusTypeValue(cfg.OrdererType, consensusMetadata, consensusState), AdminsPolicyKey) + if err != nil { + return err + } + + return nil +} + +// RemoveConsenter removes a consenter from an etcdraft configuration. +func (o *OrdererGroup) RemoveConsenter(consenter orderer.Consenter) error { + cfg, err := o.Configuration() + if err != nil { + return err + } + + if cfg.OrdererType != orderer.ConsensusTypeEtcdRaft { + return fmt.Errorf("consensus type %s is not etcdraft", cfg.OrdererType) + } + + consenters := cfg.EtcdRaft.Consenters[:] + for i, c := range cfg.EtcdRaft.Consenters { + if reflect.DeepEqual(c, consenter) { + consenters = append(consenters[:i], consenters[i+1:]...) 
+ break + } + } + + cfg.EtcdRaft.Consenters = consenters + + consensusMetadata, err := marshalEtcdRaftMetadata(cfg.EtcdRaft) + if err != nil { + return fmt.Errorf("marshaling etcdraft metadata: %v", err) + } + + consensusState, ok := ob.ConsensusType_State_value[string(cfg.State)] + if !ok { + return fmt.Errorf("unknown consensus state '%s'", cfg.State) + } + + err = setValue(o.ordererGroup, consensusTypeValue(cfg.OrdererType, consensusMetadata, consensusState), AdminsPolicyKey) + if err != nil { + return err + } + + return nil +} + +// Capabilities returns a map of enabled orderer capabilities +// from the updated config. +func (o *OrdererGroup) Capabilities() ([]string, error) { + capabilities, err := getCapabilities(o.ordererGroup) + if err != nil { + return nil, fmt.Errorf("retrieving orderer capabilities: %v", err) + } + + return capabilities, nil +} + +// AddCapability adds capability to the provided channel config. +// If the provided capability already exists in current configuration, this action +// will be a no-op. +func (o *OrdererGroup) AddCapability(capability string) error { + capabilities, err := o.Capabilities() + if err != nil { + return err + } + + err = addCapability(o.ordererGroup, capabilities, AdminsPolicyKey, capability) + if err != nil { + return err + } + + return nil +} + +// RemoveCapability removes capability to the provided channel config. +func (o *OrdererGroup) RemoveCapability(capability string) error { + capabilities, err := o.Capabilities() + if err != nil { + return err + } + + err = removeCapability(o.ordererGroup, capabilities, AdminsPolicyKey, capability) + if err != nil { + return err + } + + return nil +} + +// SetEndpoint adds an orderer's endpoint to an existing channel config transaction. +// If the same endpoint already exists in current configuration, this will be a no-op. +func (o *OrdererOrg) SetEndpoint(endpoint Address) error { + ordererAddrProto := &cb.OrdererAddresses{} + + if ordererAddrConfigValue, ok := o.orgGroup.Values[EndpointsKey]; ok { + err := proto.Unmarshal(ordererAddrConfigValue.Value, ordererAddrProto) + if err != nil { + return fmt.Errorf("failed unmarshaling endpoints for orderer org %s: %v", o.name, err) + } + } + + endpointToAdd := fmt.Sprintf("%s:%d", endpoint.Host, endpoint.Port) + + existingOrdererEndpoints := ordererAddrProto.Addresses + for _, e := range existingOrdererEndpoints { + if e == endpointToAdd { + return nil + } + } + + existingOrdererEndpoints = append(existingOrdererEndpoints, endpointToAdd) + + // Add orderer endpoints config value back to orderer org + err := setValue(o.orgGroup, endpointsValue(existingOrdererEndpoints), AdminsPolicyKey) + if err != nil { + return fmt.Errorf("failed to add endpoint %v to orderer org %s: %v", endpoint, o.name, err) + } + + return nil +} + +// RemoveEndpoint removes an orderer's endpoint from an existing channel config transaction. +// Removal will panic if either the orderer group or orderer org group does not exist. 
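+// A minimal sketch (the host and port are illustrative):
+//
+//	err := c.Orderer().Organization("OrdererOrg").RemoveEndpoint(Address{
+//		Host: "orderer0.example.com",
+//		Port: 7050,
+//	})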
+func (o *OrdererOrg) RemoveEndpoint(endpoint Address) error { + ordererAddrProto := &cb.OrdererAddresses{} + + if ordererAddrConfigValue, ok := o.orgGroup.Values[EndpointsKey]; ok { + err := proto.Unmarshal(ordererAddrConfigValue.Value, ordererAddrProto) + if err != nil { + return fmt.Errorf("failed unmarshaling endpoints for orderer org %s: %v", o.name, err) + } + } + + endpointToRemove := fmt.Sprintf("%s:%d", endpoint.Host, endpoint.Port) + + existingEndpoints := ordererAddrProto.Addresses[:0] + for _, e := range ordererAddrProto.Addresses { + if e != endpointToRemove { + existingEndpoints = append(existingEndpoints, e) + } + } + + // Add orderer endpoints config value back to orderer org + err := setValue(o.orgGroup, endpointsValue(existingEndpoints), AdminsPolicyKey) + if err != nil { + return fmt.Errorf("failed to remove endpoint %v from orderer org %s: %v", endpoint, o.name, err) + } + + return nil +} + +// SetModPolicy sets the specified modification policy for the orderer group. +func (o *OrdererGroup) SetModPolicy(modPolicy string) error { + if modPolicy == "" { + return errors.New("non empty mod policy is required") + } + + o.ordererGroup.ModPolicy = modPolicy + + return nil +} + +// SetPolicy sets the specified policy in the orderer group's config policy map. +// If the policy already exists in current configuration, its value will be overwritten. +func (o *OrdererGroup) SetPolicy(policyName string, policy Policy) error { + err := setPolicy(o.ordererGroup, policyName, policy) + if err != nil { + return fmt.Errorf("failed to set policy '%s': %v", policyName, err) + } + + return nil +} + +// SetPolicies sets the specified policy in the orderer group's config policy map. +// If the policies already exist in current configuration, the values will be replaced with new policies. +func (o *OrdererGroup) SetPolicies(policies map[string]Policy) error { + if _, ok := policies[BlockValidationPolicyKey]; !ok { + return errors.New("BlockValidation policy must be defined") + } + + err := setPolicies(o.ordererGroup, policies) + if err != nil { + return fmt.Errorf("failed to set policies: %v", err) + } + + return nil +} + +// RemovePolicy removes an existing orderer policy configuration. +func (o *OrdererGroup) RemovePolicy(policyName string) error { + if policyName == BlockValidationPolicyKey { + return errors.New("BlockValidation policy must be defined") + } + + policies, err := o.Policies() + if err != nil { + return err + } + + removePolicy(o.ordererGroup, policyName, policies) + return nil +} + +// Policies returns a map of policies for channel orderer in the +// updated config. +func (o *OrdererGroup) Policies() (map[string]Policy, error) { + return getPolicies(o.ordererGroup.Policies) +} + +// SetMSP updates the MSP config for the specified orderer org +// in the updated config. +func (o *OrdererOrg) SetMSP(updatedMSP MSP) error { + currentMSP, err := o.MSP().Configuration() + if err != nil { + return fmt.Errorf("retrieving msp: %v", err) + } + + if currentMSP.Name != updatedMSP.Name { + return errors.New("MSP name cannot be changed") + } + + err = updatedMSP.validateCACerts() + if err != nil { + return err + } + + err = updatedMSP.setConfig(o.orgGroup) + if err != nil { + return err + } + + return nil +} + +// SetModPolicy sets the specified modification policy for the orderer org group. 
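+// An empty mod policy is rejected and the existing value is left unchanged.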
+func (o *OrdererOrg) SetModPolicy(modPolicy string) error { + if modPolicy == "" { + return errors.New("non empty mod policy is required") + } + + o.orgGroup.ModPolicy = modPolicy + + return nil +} + +// SetPolicy sets the specified policy in the orderer org group's config policy map. +// If the policy already exists in current configuration, its value will be overwritten. +func (o *OrdererOrg) SetPolicy(policyName string, policy Policy) error { + return setPolicy(o.orgGroup, policyName, policy) +} + +// SetPolicies sets the specified policies in the orderer org group's config policy map. +// If the policies already exist in current configuration, the values will be replaced with new policies. +func (o *OrdererOrg) SetPolicies(policies map[string]Policy) error { + return setPolicies(o.orgGroup, policies) +} + +// RemovePolicy removes an existing policy from an orderer organization. +func (o *OrdererOrg) RemovePolicy(policyName string) error { + policies, err := o.Policies() + if err != nil { + return err + } + + removePolicy(o.orgGroup, policyName, policies) + return nil +} + +// Policies returns a map of policies for a specific orderer org +// in the updated config. +func (o *OrdererOrg) Policies() (map[string]Policy, error) { + return getPolicies(o.orgGroup.Policies) +} + +// RemoveLegacyKafkaBrokers removes the legacy kafka brokers config key and value from config. +// In fabric 2.0, kafka was deprecated as a consensus type. +func (o *OrdererGroup) RemoveLegacyKafkaBrokers() { + delete(o.ordererGroup.Values, orderer.KafkaBrokersKey) +} + +// newOrdererGroup returns the orderer component of the channel configuration. +// It defines parameters of the ordering service about how large blocks should be, +// how frequently they should be emitted, etc. as well as the organizations of the ordering network. +// It sets the mod_policy of all elements to "Admins". +// This group is always present in any channel configuration. +func newOrdererGroup(orderer Orderer) (*cb.ConfigGroup, error) { + ordererGroup := newConfigGroup() + ordererGroup.ModPolicy = AdminsPolicyKey + + if orderer.ModPolicy != "" { + ordererGroup.ModPolicy = orderer.ModPolicy + } + + if err := setOrdererPolicies(ordererGroup, orderer.Policies, AdminsPolicyKey); err != nil { + return nil, err + } + + // add orderer values + err := addOrdererValues(ordererGroup, orderer) + if err != nil { + return nil, err + } + + // add orderer groups + for _, org := range orderer.Organizations { + // As of fabric v1.4 we expect new system channels to contain orderer endpoints at the org level + if len(org.OrdererEndpoints) == 0 { + return nil, fmt.Errorf("orderer endpoints are not defined for org %s", org.Name) + } + + ordererGroup.Groups[org.Name], err = newOrdererOrgConfigGroup(org) + if err != nil { + return nil, fmt.Errorf("org group '%s': %v", org.Name, err) + } + } + + return ordererGroup, nil +} + +// addOrdererValues adds configuration specified in Orderer to an orderer +// *cb.ConfigGroup's Values map. 
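+// It sets the batch size, batch timeout, channel restrictions, optional
+// capabilities, and consensus type values, and returns an error for the
+// deprecated solo and kafka consensus types.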
+func addOrdererValues(ordererGroup *cb.ConfigGroup, o Orderer) error { + err := setValue(ordererGroup, batchSizeValue( + o.BatchSize.MaxMessageCount, + o.BatchSize.AbsoluteMaxBytes, + o.BatchSize.PreferredMaxBytes, + ), AdminsPolicyKey) + if err != nil { + return err + } + + err = setValue(ordererGroup, batchTimeoutValue(o.BatchTimeout.String()), AdminsPolicyKey) + if err != nil { + return err + } + + err = setValue(ordererGroup, channelRestrictionsValue(o.MaxChannels), AdminsPolicyKey) + if err != nil { + return err + } + + if len(o.Capabilities) > 0 { + err = setValue(ordererGroup, capabilitiesValue(o.Capabilities), AdminsPolicyKey) + if err != nil { + return err + } + } + + var consensusMetadata []byte + + switch o.OrdererType { + case orderer.ConsensusTypeSolo: + return fmt.Errorf("the solo consensus type is no longer supported") + case orderer.ConsensusTypeKafka: + return fmt.Errorf("the kafka consensus type is no longer supported") + case orderer.ConsensusTypeEtcdRaft: + if consensusMetadata, err = marshalEtcdRaftMetadata(o.EtcdRaft); err != nil { + return fmt.Errorf("marshaling etcdraft metadata for orderer type '%s': %v", orderer.ConsensusTypeEtcdRaft, err) + } + default: + return fmt.Errorf("unknown orderer type '%s'", o.OrdererType) + } + + consensusState, ok := ob.ConsensusType_State_value[string(o.State)] + if !ok { + return fmt.Errorf("unknown consensus state '%s'", o.State) + } + + err = setValue(ordererGroup, consensusTypeValue(o.OrdererType, consensusMetadata, consensusState), AdminsPolicyKey) + if err != nil { + return err + } + + return nil +} + +// setOrdererPolicies adds *cb.ConfigPolicies to the passed Orderer *cb.ConfigGroup's Policies map. +// It checks that the BlockValidation policy is defined alongside the standard policy checks. +func setOrdererPolicies(cg *cb.ConfigGroup, policyMap map[string]Policy, modPolicy string) error { + if policyMap == nil { + return errors.New("no policies defined") + } + if _, ok := policyMap[BlockValidationPolicyKey]; !ok { + return errors.New("no BlockValidation policy defined") + } + + return setPolicies(cg, policyMap) +} + +// batchSizeValue returns the config definition for the orderer batch size. +// It is a value for the /Channel/Orderer group. +func batchSizeValue(maxMessages, absoluteMaxBytes, preferredMaxBytes uint32) *standardConfigValue { + return &standardConfigValue{ + key: orderer.BatchSizeKey, + value: &ob.BatchSize{ + MaxMessageCount: maxMessages, + AbsoluteMaxBytes: absoluteMaxBytes, + PreferredMaxBytes: preferredMaxBytes, + }, + } +} + +// batchTimeoutValue returns the config definition for the orderer batch timeout. +// It is a value for the /Channel/Orderer group. +func batchTimeoutValue(timeout string) *standardConfigValue { + return &standardConfigValue{ + key: orderer.BatchTimeoutKey, + value: &ob.BatchTimeout{ + Timeout: timeout, + }, + } +} + +// endpointsValue returns the config definition for the orderer addresses at an org scoped level. +// It is a value for the /Channel/Orderer/ group. +func endpointsValue(addresses []string) *standardConfigValue { + return &standardConfigValue{ + key: EndpointsKey, + value: &cb.OrdererAddresses{ + Addresses: addresses, + }, + } +} + +// channelRestrictionsValue returns the config definition for the orderer channel restrictions. +// It is a value for the /Channel/Orderer group. 
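+// A MaxCount of zero is conventionally interpreted by the orderer as no
+// limit on the number of channels.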
+func channelRestrictionsValue(maxChannelCount uint64) *standardConfigValue { + return &standardConfigValue{ + key: orderer.ChannelRestrictionsKey, + value: &ob.ChannelRestrictions{ + MaxCount: maxChannelCount, + }, + } +} + +// kafkaBrokersValue returns the config definition for the addresses of the ordering service's Kafka brokers. +// It is a value for the /Channel/Orderer group. +// Deprecated: the kafka consensus type is no longer supported +func kafkaBrokersValue(brokers []string) *standardConfigValue { + return &standardConfigValue{ + key: orderer.KafkaBrokersKey, + value: &ob.KafkaBrokers{ + Brokers: brokers, + }, + } +} + +// consensusTypeValue returns the config definition for the orderer consensus type. +// It is a value for the /Channel/Orderer group. +func consensusTypeValue(consensusType string, consensusMetadata []byte, consensusState int32) *standardConfigValue { + return &standardConfigValue{ + key: orderer.ConsensusTypeKey, + value: &ob.ConsensusType{ + Type: consensusType, + Metadata: consensusMetadata, + State: ob.ConsensusType_State(consensusState), + }, + } +} + +// marshalEtcdRaftMetadata serializes etcd RAFT metadata. +func marshalEtcdRaftMetadata(md orderer.EtcdRaft) ([]byte, error) { + var consenters []*eb.Consenter + + if len(md.Consenters) == 0 { + return nil, errors.New("consenters are required") + } + + for _, c := range md.Consenters { + host := c.Address.Host + port := c.Address.Port + + if c.ClientTLSCert == nil { + return nil, fmt.Errorf("client tls cert for consenter %s:%d is required", host, port) + } + + if c.ServerTLSCert == nil { + return nil, fmt.Errorf("server tls cert for consenter %s:%d is required", host, port) + } + + consenter := &eb.Consenter{ + Host: host, + Port: uint32(port), + ClientTlsCert: pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: c.ClientTLSCert.Raw, + }), + ServerTlsCert: pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: c.ServerTLSCert.Raw, + }), + } + + consenters = append(consenters, consenter) + } + + configMetadata := &eb.ConfigMetadata{ + Consenters: consenters, + Options: &eb.Options{ + TickInterval: md.Options.TickInterval, + ElectionTick: md.Options.ElectionTick, + HeartbeatTick: md.Options.HeartbeatTick, + MaxInflightBlocks: md.Options.MaxInflightBlocks, + SnapshotIntervalSize: md.Options.SnapshotIntervalSize, + }, + } + + data, err := proto.Marshal(configMetadata) + if err != nil { + return nil, fmt.Errorf("marshaling config metadata: %v", err) + } + + return data, nil +} + +// unmarshalEtcdRaftMetadata deserializes etcd RAFT metadata. 
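+// It is the inverse of marshalEtcdRaftMetadata: each consenter's PEM-encoded
+// client and server TLS certificates are decoded and parsed back into
+// *x509.Certificate values, and an error is returned if the metadata options
+// are missing or a certificate cannot be decoded or parsed.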
+func unmarshalEtcdRaftMetadata(mdBytes []byte) (orderer.EtcdRaft, error) { + etcdRaftMetadata := &eb.ConfigMetadata{} + err := proto.Unmarshal(mdBytes, etcdRaftMetadata) + if err != nil { + return orderer.EtcdRaft{}, fmt.Errorf("unmarshaling etcd raft metadata: %v", err) + } + + consenters := []orderer.Consenter{} + + for _, c := range etcdRaftMetadata.Consenters { + clientTLSCertBlock, _ := pem.Decode(c.ClientTlsCert) + if clientTLSCertBlock == nil { + return orderer.EtcdRaft{}, fmt.Errorf("no PEM data found in client TLS cert[% x]", c.ClientTlsCert) + } + clientTLSCert, err := x509.ParseCertificate(clientTLSCertBlock.Bytes) + if err != nil { + return orderer.EtcdRaft{}, fmt.Errorf("unable to parse client tls cert: %v", err) + } + serverTLSCertBlock, _ := pem.Decode(c.ServerTlsCert) + if serverTLSCertBlock == nil { + return orderer.EtcdRaft{}, fmt.Errorf("no PEM data found in server TLS cert[% x]", c.ServerTlsCert) + } + serverTLSCert, err := x509.ParseCertificate(serverTLSCertBlock.Bytes) + if err != nil { + return orderer.EtcdRaft{}, fmt.Errorf("unable to parse server tls cert: %v", err) + } + + consenter := orderer.Consenter{ + Address: orderer.EtcdAddress{ + Host: c.Host, + Port: int(c.Port), + }, + ClientTLSCert: clientTLSCert, + ServerTLSCert: serverTLSCert, + } + + consenters = append(consenters, consenter) + } + + if etcdRaftMetadata.Options == nil { + return orderer.EtcdRaft{}, errors.New("missing etcdraft metadata options in config") + } + + return orderer.EtcdRaft{ + Consenters: consenters, + Options: orderer.EtcdRaftOptions{ + TickInterval: etcdRaftMetadata.Options.TickInterval, + ElectionTick: etcdRaftMetadata.Options.ElectionTick, + HeartbeatTick: etcdRaftMetadata.Options.HeartbeatTick, + MaxInflightBlocks: etcdRaftMetadata.Options.MaxInflightBlocks, + SnapshotIntervalSize: etcdRaftMetadata.Options.SnapshotIntervalSize, + }, + }, nil +} + +// getOrdererOrg returns the organization config group for an orderer org in the +// provided config. It returns nil if the org doesn't exist in the config. +func getOrdererOrg(config *cb.Config, orgName string) *cb.ConfigGroup { + return config.ChannelGroup.Groups[OrdererGroupKey].Groups[orgName] +} + +// hashingAlgorithm returns the only currently valid hashing algorithm. +// It is a value for the /Channel group. +func hashingAlgorithmValue() *standardConfigValue { + return &standardConfigValue{ + key: HashingAlgorithmKey, + value: &cb.HashingAlgorithm{ + Name: defaultHashingAlgorithm, + }, + } +} + +// blockDataHashingStructureValue returns the only currently valid block data hashing structure. +// It is a value for the /Channel group. +func blockDataHashingStructureValue() *standardConfigValue { + return &standardConfigValue{ + key: BlockDataHashingStructureKey, + value: &cb.BlockDataHashingStructure{ + Width: defaultBlockDataHashingStructureWidth, + }, + } +} diff --git a/v2/configtx/orderer/orderer.go b/v2/configtx/orderer/orderer.go new file mode 100644 index 0000000..3eb405f --- /dev/null +++ b/v2/configtx/orderer/orderer.go @@ -0,0 +1,100 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package orderer + +import ( + "crypto/x509" +) + +const ( + + // ConsensusStateNormal indicates normal orderer operation. + ConsensusStateNormal ConsensusState = "STATE_NORMAL" + + // ConsensusStateMaintenance indicates the orderer is in consensus type migration. + ConsensusStateMaintenance ConsensusState = "STATE_MAINTENANCE" + + // ConsensusTypeSolo identifies the solo consensus implementation. 
+ // Deprecated: the solo consensus type is no longer supported + ConsensusTypeSolo = "solo" + + // ConsensusTypeKafka identifies the Kafka-based consensus implementation. + // Deprecated: the kafka consensus type is no longer supported + ConsensusTypeKafka = "kafka" + + // ConsensusTypeEtcdRaft identifies the Raft-based consensus implementation. + ConsensusTypeEtcdRaft = "etcdraft" + + // KafkaBrokersKey is the common.ConfigValue type key name for the KafkaBrokers message. + // Deprecated: the kafka consensus type is no longer supported + KafkaBrokersKey = "KafkaBrokers" + + // ConsensusTypeKey is the common.ConfigValue type key name for the ConsensusType message. + ConsensusTypeKey = "ConsensusType" + + // BatchSizeKey is the common.ConfigValue type key name for the BatchSize message. + BatchSizeKey = "BatchSize" + + // BatchTimeoutKey is the common.ConfigValue type key name for the BatchTimeout message. + BatchTimeoutKey = "BatchTimeout" + + // ChannelRestrictionsKey is the key name for the ChannelRestrictions message. + ChannelRestrictionsKey = "ChannelRestrictions" +) + +// ConsensusState defines the orderer mode of operation. +// Options: `ConsensusStateNormal` and `ConsensusStateMaintenance` +type ConsensusState string + +// BatchSize is the configuration affecting the size of batches. +type BatchSize struct { + // MaxMessageCount is the max message count. + MaxMessageCount uint32 + // AbsoluteMaxBytes is the max block size (not including headers). + AbsoluteMaxBytes uint32 + // PreferredMaxBytes is the preferred size of blocks. + PreferredMaxBytes uint32 +} + +// Kafka is a list of Kafka broker endpoints. +// Deprecated: the kafka consensus type is no longer supported +type Kafka struct { + // Brokers contains the addresses of *at least two* kafka brokers + // Must be in `IP:port` notation + Brokers []string +} + +// EtcdRaft is serialized and set as the value of ConsensusType.Metadata in +// a channel configuration when the ConsensusType.Type is set to "etcdraft". +type EtcdRaft struct { + Consenters []Consenter + Options EtcdRaftOptions +} + +// EtcdRaftOptions to be specified for all the etcd/raft nodes. +// These can be modified on a per-channel basis. +type EtcdRaftOptions struct { + TickInterval string + ElectionTick uint32 + HeartbeatTick uint32 + MaxInflightBlocks uint32 + // Take snapshot when cumulative data exceeds certain size in bytes. + SnapshotIntervalSize uint32 +} + +// Consenter represents a consenting node (i.e. replica). +type Consenter struct { + Address EtcdAddress + ClientTLSCert *x509.Certificate + ServerTLSCert *x509.Certificate +} + +// EtcdAddress contains the hostname and port for an endpoint. +type EtcdAddress struct { + Host string + Port int +} diff --git a/v2/configtx/orderer_test.go b/v2/configtx/orderer_test.go new file mode 100644 index 0000000..1c600ba --- /dev/null +++ b/v2/configtx/orderer_test.go @@ -0,0 +1,6619 @@ +/* +Copyright IBM Corp All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "bytes" + "crypto/ecdsa" + "crypto/x509" + "encoding/base64" + "fmt" + "math/big" + "testing" + "time" + + "github.com/hyperledger/fabric-config/v2/configtx/orderer" + "github.com/hyperledger/fabric-config/v2/protolator" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/ordererext" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + ob "github.com/hyperledger/fabric-protos-go-apiv2/orderer" + . 
"github.com/onsi/gomega" + "google.golang.org/protobuf/proto" +) + +func TestNewOrdererGroup(t *testing.T) { + t.Parallel() + + tests := []struct { + ordererType string + numOrdererGroupValues int + expectedConfigJSONGen func(Orderer) string + }{ + { + ordererType: orderer.ConsensusTypeSolo, + numOrdererGroupValues: 5, + expectedConfigJSONGen: func(o Orderer) string { + certBase64, crlBase64 := certCRLBase64(t, o.Organizations[0].MSP) + return fmt.Sprintf(`{ + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 100, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": 
"Admins", + "value": { + "max_count": "0" + }, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": null, + "state": "STATE_NORMAL", + "type": "solo" + }, + "version": "0" + } + }, + "version": "0" +} +`, certBase64, crlBase64) + }, + }, + { + ordererType: orderer.ConsensusTypeEtcdRaft, + numOrdererGroupValues: 5, + expectedConfigJSONGen: func(o Orderer) string { + certBase64, crlBase64 := certCRLBase64(t, o.Organizations[0].MSP) + etcdRaftCert := o.EtcdRaft.Consenters[0].ClientTLSCert + etcdRaftCertBase64 := base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(etcdRaftCert)) + return fmt.Sprintf(`{ + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 100, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + 
"version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": { + "max_count": "0" + }, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": { + "consenters": [ + { + "client_tls_cert": "%[3]s", + "host": "node-1.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-2.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-3.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + } + ], + "options": { + "election_tick": 0, + "heartbeat_tick": 0, + "max_inflight_blocks": 0, + "snapshot_interval_size": 0, + "tick_interval": "" + } + }, + "state": "STATE_NORMAL", + "type": "etcdraft" + }, + "version": "0" + } + }, + "version": "0" +} +`, certBase64, crlBase64, etcdRaftCertBase64) + }, + }, + { + ordererType: orderer.ConsensusTypeKafka, + numOrdererGroupValues: 6, + expectedConfigJSONGen: func(o Orderer) string { + certBase64, crlBase64 := certCRLBase64(t, o.Organizations[0].MSP) + return fmt.Sprintf(`{ + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": 
{ + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 100, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": { + "max_count": "0" + }, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": null, + "state": "STATE_NORMAL", + "type": "kafka" + }, + "version": "0" + }, + "KafkaBrokers": { + "mod_policy": "Admins", + "value": { + "brokers": [ + "broker1", + "broker2" + ] + }, + "version": "0" + } + }, + "version": "0" +} +`, certBase64, crlBase64) + }, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.ordererType, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + ordererConf, _ := baseOrdererOfType(t, tt.ordererType) + + ordererGroup, err := newOrdererGroup(ordererConf) + if tt.ordererType != orderer.ConsensusTypeKafka && tt.ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("consensus type is no longer supported")) + return + } + + expectedConfigJSON := tt.expectedConfigJSONGen(ordererConf) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &ordererext.DynamicOrdererGroup{ConfigGroup: ordererGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(buf.String()).To(Equal(expectedConfigJSON)) + }) + } +} + +func TestNewOrdererGroupFailure(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + ordererMod func(*Orderer) + err string + }{ + { + testName: "When orderer group policy is empty", + ordererMod: func(o *Orderer) { + o.Policies = nil + }, + err: "no policies defined", + }, + { + testName: "When orderer type is unknown", + ordererMod: func(o *Orderer) { + o.OrdererType = "ConsensusTypeGreen" + }, + err: "unknown orderer type 'ConsensusTypeGreen'", + }, + { + testName: "When adding policies to orderer org group", + ordererMod: func(o *Orderer) { + o.Organizations[0].Policies = nil + }, + err: "org group 'OrdererOrg': no policies defined", + }, + { + testName: "When missing consenters in EtcdRaft for consensus type etcdraft", + ordererMod: func(o *Orderer) { + o.OrdererType = orderer.ConsensusTypeEtcdRaft + o.EtcdRaft = orderer.EtcdRaft{ + Consenters: nil, + } + }, + err: "marshaling etcdraft metadata for orderer type 'etcdraft': consenters are required", + }, + { + testName: "When missing a client tls cert in EtcdRaft for consensus type etcdraft", + ordererMod: func(o *Orderer) { + o.OrdererType = orderer.ConsensusTypeEtcdRaft + o.EtcdRaft = orderer.EtcdRaft{ + Consenters: []orderer.Consenter{ + { + Address: orderer.EtcdAddress{ + Host: "host1", + Port: 123, + }, + ClientTLSCert: nil, + }, + }, + } + }, + err: "marshaling etcdraft metadata for orderer type 'etcdraft': client tls cert for consenter host1:123 is required", + }, + { + testName: "When missing a server tls cert in EtcdRaft for consensus type etcdraft", + ordererMod: func(o *Orderer) 
{ + o.OrdererType = orderer.ConsensusTypeEtcdRaft + o.EtcdRaft = orderer.EtcdRaft{ + Consenters: []orderer.Consenter{ + { + Address: orderer.EtcdAddress{ + Host: "host1", + Port: 123, + }, + ClientTLSCert: &x509.Certificate{}, + ServerTLSCert: nil, + }, + }, + } + }, + err: "marshaling etcdraft metadata for orderer type 'etcdraft': server tls cert for consenter host1:123 is required", + }, + { + testName: "When consensus state is invalid", + ordererMod: func(o *Orderer) { + o.State = "invalid state" + }, + err: "unknown consensus state 'invalid state'", + }, + { + testName: "When consensus state is invalid", + ordererMod: func(o *Orderer) { + o.State = "invalid state" + }, + err: "unknown consensus state 'invalid state'", + }, + } + + for _, tt := range tests { + tt := tt // capture range variable + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + ordererConf, _ := baseSoloOrderer(t) + tt.ordererMod(&ordererConf) + + ordererGroup, err := newOrdererGroup(ordererConf) + gt.Expect(err).To(HaveOccurred()) + gt.Expect(ordererGroup).To(BeNil()) + }) + } +} + +func TestSetOrdererConfiguration(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + certBase64, crlBase64 := certCRLBase64(t, baseOrdererConf.Organizations[0].MSP) + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + imp, err := implicitMetaFromString(baseOrdererConf.Policies[AdminsPolicyKey].Rule) + gt.Expect(err).NotTo(HaveOccurred()) + + originalAdminsPolicy, err := proto.Marshal(imp) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{ + AdminsPolicyKey: { + Policy: &cb.Policy{ + Type: int32(cb.Policy_IMPLICIT_META), + Value: originalAdminsPolicy, + }, + ModPolicy: AdminsPolicyKey, + }, + }, + }, + } + + updatedOrdererConf := baseOrdererConf + + // Modify MaxMessageCount and ConesnsusType to etcdraft + updatedOrdererConf.BatchSize.MaxMessageCount = 10000 + updatedOrdererConf.OrdererType = orderer.ConsensusTypeEtcdRaft + updatedOrdererConf.EtcdRaft = orderer.EtcdRaft{ + Consenters: []orderer.Consenter{ + { + Address: orderer.EtcdAddress{ + Host: "host1", + Port: 123, + }, + ClientTLSCert: &x509.Certificate{}, + ServerTLSCert: &x509.Certificate{}, + }, + }, + Options: orderer.EtcdRaftOptions{}, + } + + c := New(config) + + err = c.Orderer().SetConfiguration(updatedOrdererConf) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedConfigJSON := fmt.Sprintf(` +{ + "channel_group": { + "groups": { + "Orderer": { + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + 
"type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 10000, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": { + "max_count": "0" + }, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": { + "consenters": [ + { + "client_tls_cert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", + "host": "host1", + "port": 123, + "server_tls_cert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" + } + ], + "options": { + "election_tick": 0, + "heartbeat_tick": 0, + "max_inflight_blocks": 0, + "snapshot_interval_size": 0, + "tick_interval": "" + } + }, + "state": "STATE_NORMAL", + "type": "etcdraft" + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + } + }, + "values": {}, + "version": "0" + }, + "sequence": "0" +} +`, certBase64, crlBase64) + + buf := &bytes.Buffer{} + err = protolator.DeepMarshalJSON(buf, c.updated) + gt.Expect(err).NotTo(HaveOccurred()) + + 
gt.Expect(buf.String()).To(MatchJSON(expectedConfigJSON)) +} + +func TestOrdererConfiguration(t *testing.T) { + t.Parallel() + + tests := []struct { + ordererType string + }{ + { + ordererType: orderer.ConsensusTypeSolo, + }, + { + ordererType: orderer.ConsensusTypeKafka, + }, + { + ordererType: orderer.ConsensusTypeEtcdRaft, + }, + } + + for _, tt := range tests { + tt := tt // capture range variable + t.Run(tt.ordererType, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseOrdererOfType(t, tt.ordererType) + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if tt.ordererType != orderer.ConsensusTypeKafka && tt.ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + Values: map[string]*cb.ConfigValue{}, + }, + } + + c := New(config) + + ordererConf, err := c.Orderer().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(ordererConf).To(Equal(baseOrdererConf)) + }) + } +} + +func TestOrdererConfigurationNoOrdererEndpoints(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + baseOrdererConf, _ := baseOrdererOfType(t, ordererType) + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + Values: map[string]*cb.ConfigValue{}, + }, + } + + delete(config.ChannelGroup.Groups[OrdererGroupKey].Groups["OrdererOrg"].Values, EndpointsKey) + + c := New(config) + + ordererConf, err := c.Orderer().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + baseOrdererConf.Organizations[0].OrdererEndpoints = nil + gt.Expect(ordererConf).To(Equal(baseOrdererConf)) +} + +func TestOrdererConfigurationFailure(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + ordererType string + configMod func(*cb.Config, *GomegaWithT) + expectedErr string + }{ + { + testName: "When the config contains an unknown consensus type", + ordererType: orderer.ConsensusTypeSolo, + configMod: func(config *cb.Config, gt *GomegaWithT) { + err := setValue(config.ChannelGroup.Groups[OrdererGroupKey], consensusTypeValue("badtype", nil, 0), AdminsPolicyKey) + gt.Expect(err).NotTo(HaveOccurred()) + }, + expectedErr: "config contains unknown consensus type 'badtype'", + }, + { + testName: "Missing Kafka brokers for kafka orderer", + ordererType: orderer.ConsensusTypeKafka, + configMod: func(config *cb.Config, gt *GomegaWithT) { + delete(config.ChannelGroup.Groups[OrdererGroupKey].Values, orderer.KafkaBrokersKey) + }, + expectedErr: "unable to find kafka brokers for kafka orderer", + }, + { + testName: "Failed unmarshaling etcd raft metadata", + ordererType: orderer.ConsensusTypeEtcdRaft, + configMod: func(config *cb.Config, gt *GomegaWithT) { + err := setValue(config.ChannelGroup.Groups[OrdererGroupKey], consensusTypeValue(orderer.ConsensusTypeEtcdRaft, nil, 0), AdminsPolicyKey) + gt.Expect(err).NotTo(HaveOccurred()) + }, + expectedErr: "unmarshaling etcd raft metadata: missing etcdraft metadata 
options in config", + }, + { + testName: "Invalid batch timeout", + ordererType: orderer.ConsensusTypeSolo, + configMod: func(config *cb.Config, gt *GomegaWithT) { + err := setValue(config.ChannelGroup.Groups[OrdererGroupKey], batchTimeoutValue("invalidtime"), AdminsPolicyKey) + gt.Expect(err).NotTo(HaveOccurred()) + }, + expectedErr: "batch timeout configuration 'invalidtime' is not a duration string", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConfig, _ := baseOrdererOfType(t, tt.ordererType) + ordererGroup, err := newOrdererGroup(baseOrdererConfig) + if tt.ordererType != orderer.ConsensusTypeKafka && tt.ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + Values: map[string]*cb.ConfigValue{}, + }, + } + + if tt.configMod != nil { + tt.configMod(config, gt) + } + + c := New(config) + + _, err = c.Orderer().Configuration() + gt.Expect(err).To(MatchError(tt.expectedErr)) + }) + } +} + +func TestSetOrdererOrg(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + orderer1, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(orderer1) + if orderer1.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + msp, _ := baseMSP(t) + org := Organization{ + Name: "OrdererOrg2", + Policies: orgStandardPolicies(), + OrdererEndpoints: []string{ + "localhost:123", + }, + MSP: msp, + } + certBase64, crlBase64 := certCRLBase64(t, org.MSP) + + expectedConfigJSON := fmt.Sprintf(` +{ + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + 
"organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" +} +`, certBase64, crlBase64) + + err = c.Orderer().SetOrganization(org) + gt.Expect(err).NotTo(HaveOccurred()) + + actualOrdererConfigGroup := c.Orderer().Organization("OrdererOrg2").orgGroup + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &ordererext.DynamicOrdererOrgGroup{ConfigGroup: actualOrdererConfigGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(buf.String()).To(MatchJSON(expectedConfigJSON)) +} + +func TestSetOrdererOrgFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + orderer1, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(orderer1) + if orderer1.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + org := Organization{ + Name: "OrdererOrg2", + } + + err = c.Orderer().SetOrganization(org) + gt.Expect(err).To(MatchError("failed to create orderer org OrdererOrg2: no policies defined")) +} + +func TestSetOrdererEndpoint(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: { + Version: 0, + Groups: map[string]*cb.ConfigGroup{ + "Orderer1Org": { + Groups: map[string]*cb.ConfigGroup{}, + Values: map[string]*cb.ConfigValue{ + EndpointsKey: { + ModPolicy: AdminsPolicyKey, + Value: marshalOrPanic(&cb.OrdererAddresses{ + Addresses: []string{"127.0.0.1:8050"}, + }), + }, + }, + Policies: map[string]*cb.ConfigPolicy{}, + }, + }, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{}, + }, + }, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{}, + }, + Sequence: 0, + } + + c := New(config) + + expectedUpdatedConfigJSON := ` +{ + "channel_group": { + "groups": { + "Orderer": { + "groups": { + "Orderer1Org": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "127.0.0.1:8050", + "127.0.0.1:9050" + ] + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + } + }, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + }, + "sequence": "0" +} + ` + expectedUpdatedConfig := &cb.Config{} + err := protolator.DeepUnmarshalJSON(bytes.NewBufferString(expectedUpdatedConfigJSON), expectedUpdatedConfig) + gt.Expect(err).ToNot(HaveOccurred()) + + newOrderer1OrgEndpoint := Address{Host: "127.0.0.1", Port: 9050} + err = c.Orderer().Organization("Orderer1Org").SetEndpoint(newOrderer1OrgEndpoint) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(proto.Equal(c.updated, expectedUpdatedConfig)).To(BeTrue()) +} + +func TestRemoveOrdererEndpoint(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + config 
:= &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: { + Version: 0, + Groups: map[string]*cb.ConfigGroup{ + "OrdererOrg": { + Groups: map[string]*cb.ConfigGroup{}, + Values: map[string]*cb.ConfigValue{ + EndpointsKey: { + ModPolicy: AdminsPolicyKey, + Value: marshalOrPanic(&cb.OrdererAddresses{ + Addresses: []string{ + "127.0.0.1:7050", + "127.0.0.1:8050", + }, + }), + }, + }, + Policies: map[string]*cb.ConfigPolicy{}, + }, + }, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{}, + }, + }, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{}, + }, + Sequence: 0, + } + + c := New(config) + + expectedUpdatedConfigJSON := ` +{ + "channel_group": { + "groups": { + "Orderer": { + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "", + "policies": {}, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "127.0.0.1:7050" + ] + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + } + }, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + }, + "sequence": "0" +} +` + + expectedUpdatedConfig := &cb.Config{} + err := protolator.DeepUnmarshalJSON(bytes.NewBufferString(expectedUpdatedConfigJSON), expectedUpdatedConfig) + gt.Expect(err).ToNot(HaveOccurred()) + + removedEndpoint := Address{Host: "127.0.0.1", Port: 8050} + err = c.Orderer().Organization("OrdererOrg").RemoveEndpoint(removedEndpoint) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(proto.Equal(c.updated, expectedUpdatedConfig)).To(BeTrue()) +} + +func TestRemoveOrdererEndpointFailure(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: { + Version: 0, + Groups: map[string]*cb.ConfigGroup{ + "OrdererOrg": { + Groups: map[string]*cb.ConfigGroup{}, + Values: map[string]*cb.ConfigValue{ + EndpointsKey: { + ModPolicy: AdminsPolicyKey, + Value: []byte("fire time"), + }, + }, + Policies: map[string]*cb.ConfigPolicy{}, + }, + }, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{}, + }, + }, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{}, + }, + Sequence: 0, + } + + c := New(config) + + err := c.Orderer().Organization("OrdererOrg").RemoveEndpoint(Address{Host: "127.0.0.1", Port: 8050}) + gt.Expect(err.Error()).To(ContainSubstring("failed unmarshaling endpoints for orderer org OrdererOrg: proto:")) + gt.Expect(err.Error()).To(ContainSubstring("cannot parse invalid wire-format data")) +} + +func TestGetOrdererOrg(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + ordererChannelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: ordererChannelGroup, + } + + ordererOrgGroup := getOrdererOrg(config, "OrdererOrg") + gt.Expect(ordererOrgGroup).To(Equal(config.ChannelGroup.Groups[OrdererGroupKey].Groups["OrdererOrg"])) +} + +func TestOrdererCapabilities(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if 
baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + ordererCapabilities, err := c.Orderer().Capabilities() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(ordererCapabilities).To(Equal(baseOrdererConf.Capabilities)) + + // Delete the capabilities key and assert retrieval to return nil + delete(c.Orderer().ordererGroup.Values, CapabilitiesKey) + ordererCapabilities, err = c.Orderer().Capabilities() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(ordererCapabilities).To(BeNil()) +} + +func TestAddOrdererCapability(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + ordererOrgMSP := baseOrdererConf.Organizations[0].MSP + orgCertBase64, orgCRLBase64 := certCRLBase64(t, ordererOrgMSP) + + expectedConfigGroupJSON := fmt.Sprintf(`{ + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": 
"Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 100, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {}, + "V3_0": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": null, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": null, + "state": "STATE_NORMAL", + "type": "solo" + }, + "version": "0" + } + }, + "version": "0" +} +`, orgCertBase64, orgCRLBase64) + + capability := "V3_0" + err = c.Orderer().AddCapability(capability) + gt.Expect(err).NotTo(HaveOccurred()) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &ordererext.DynamicOrdererGroup{ConfigGroup: c.Orderer().ordererGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(Equal(expectedConfigGroupJSON)) +} + +func TestAddConsenter(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + baseOrderer func(o Orderer) Orderer + }{ + { + testName: "when adding a fourth consenter", + baseOrderer: func(o Orderer) Orderer { + return o + }, + }, + { + testName: "when adding an existing consenter", + baseOrderer: func(o Orderer) Orderer { + consenter4 := o.EtcdRaft.Consenters[0] + consenter4.Address.Host = "node-4.example.com" + o.EtcdRaft.Consenters = append(o.EtcdRaft.Consenters, consenter4) + return o + }, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseEtcdRaftOrderer(t) + baseOrdererConf = tt.baseOrderer(baseOrdererConf) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + ordererOrgMSP := baseOrdererConf.Organizations[0].MSP + orgCertBase64, orgCRLBase64 := certCRLBase64(t, ordererOrgMSP) + etcdRaftCert := baseOrdererConf.EtcdRaft.Consenters[0].ClientTLSCert + + etcdRaftCertBase64 := base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(etcdRaftCert)) + expectedConfigGroupJSON := fmt.Sprintf(`{ + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + 
"policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 100, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": null, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": { + "consenters": [ + { + "client_tls_cert": "%[3]s", + "host": "node-1.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-2.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-3.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-4.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + } + ], + "options": { + "election_tick": 0, + "heartbeat_tick": 0, + "max_inflight_blocks": 0, + "snapshot_interval_size": 0, + "tick_interval": "" + } + }, + "state": "STATE_NORMAL", + "type": "etcdraft" + }, + "version": "0" + } + }, + "version": "0" +} +`, orgCertBase64, orgCRLBase64, etcdRaftCertBase64) + + consenter 
:= orderer.Consenter{ + Address: orderer.EtcdAddress{ + Host: "node-4.example.com", + Port: 7050, + }, + ClientTLSCert: etcdRaftCert, + ServerTLSCert: etcdRaftCert, + } + + err = c.Orderer().AddConsenter(consenter) + gt.Expect(err).NotTo(HaveOccurred()) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &ordererext.DynamicOrdererGroup{ConfigGroup: c.Orderer().ordererGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(Equal(expectedConfigGroupJSON)) + }) + } +} + +func TestAddConsenterFailures(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + orderer func(o Orderer) Orderer + ordererGroup func(og *cb.ConfigGroup, ord Orderer) + consenter func(c orderer.Consenter) orderer.Consenter + expectedErr string + }{ + { + testName: "when retrieving orderer configuration fails", + orderer: func(o Orderer) Orderer { + return o + }, + ordererGroup: func(og *cb.ConfigGroup, ord Orderer) { + _ = setValue(og, consensusTypeValue("foobar", []byte{}, 1), AdminsPolicyKey) + }, + consenter: func(c orderer.Consenter) orderer.Consenter { + return c + }, + expectedErr: "config contains unknown consensus type 'foobar'", + }, + { + testName: "when consensus type is not etcdraft", + orderer: func(o Orderer) Orderer { + o.OrdererType = orderer.ConsensusTypeSolo + return o + }, + ordererGroup: func(og *cb.ConfigGroup, ord Orderer) { + }, + consenter: func(c orderer.Consenter) orderer.Consenter { + return c + }, + expectedErr: "consensus type solo is not etcdraft", + }, + { + testName: "when marshaling metadata fails", + orderer: func(o Orderer) Orderer { + return o + }, + ordererGroup: func(og *cb.ConfigGroup, ord Orderer) { + }, + consenter: func(c orderer.Consenter) orderer.Consenter { + c.ClientTLSCert = nil + return c + }, + expectedErr: "marshaling etcdraft metadata: client tls cert for consenter node-4.example.com:7050 is required", + }, + { + testName: "when the consensus state is invalid", + orderer: func(o Orderer) Orderer { + return o + }, + ordererGroup: func(og *cb.ConfigGroup, ord Orderer) { + ord.State = "bababa" + met, _ := marshalEtcdRaftMetadata(ord.EtcdRaft) + _ = setValue(og, consensusTypeValue(ord.OrdererType, met, 3), AdminsPolicyKey) + }, + consenter: func(c orderer.Consenter) orderer.Consenter { + return c + }, + expectedErr: "unknown consensus state ''", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseEtcdRaftOrderer(t) + ord := tt.orderer(baseOrdererConf) + + ordererGroup, err := newOrdererGroup(ord) + if ord.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + tt.ordererGroup(ordererGroup, ord) + + etcdRaftCert := baseOrdererConf.EtcdRaft.Consenters[0].ClientTLSCert + + consenter := orderer.Consenter{ + Address: orderer.EtcdAddress{ + Host: "node-4.example.com", + Port: 7050, + }, + ClientTLSCert: etcdRaftCert, + ServerTLSCert: etcdRaftCert, + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + err = c.Orderer().AddConsenter(tt.consenter(consenter)) + gt.Expect(err).To(MatchError(tt.expectedErr)) + }) + } +} + +func TestRemoveConsenter(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseEtcdRaftOrderer(t) + 
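+	// The base etcdraft orderer starts with three consenters (node-1 through
+	// node-3); removing node-1 should leave only node-2 and node-3 in the
+	// marshaled ConsensusType metadata.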
ordererGroup, err := newOrdererGroup(baseOrdererConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + ordererOrgMSP := baseOrdererConf.Organizations[0].MSP + orgCertBase64, orgCRLBase64 := certCRLBase64(t, ordererOrgMSP) + etcdRaftCert := baseOrdererConf.EtcdRaft.Consenters[0].ClientTLSCert + + etcdRaftCertBase64 := base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(etcdRaftCert)) + expectedConfigGroupJSON := fmt.Sprintf(`{ + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 100, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": 
"Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": null, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": { + "consenters": [ + { + "client_tls_cert": "%[3]s", + "host": "node-2.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-3.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + } + ], + "options": { + "election_tick": 0, + "heartbeat_tick": 0, + "max_inflight_blocks": 0, + "snapshot_interval_size": 0, + "tick_interval": "" + } + }, + "state": "STATE_NORMAL", + "type": "etcdraft" + }, + "version": "0" + } + }, + "version": "0" +} +`, orgCertBase64, orgCRLBase64, etcdRaftCertBase64) + + consenter := orderer.Consenter{ + Address: orderer.EtcdAddress{ + Host: "node-1.example.com", + Port: 7050, + }, + ClientTLSCert: etcdRaftCert, + ServerTLSCert: etcdRaftCert, + } + + err = c.Orderer().RemoveConsenter(consenter) + gt.Expect(err).NotTo(HaveOccurred()) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &ordererext.DynamicOrdererGroup{ConfigGroup: c.Orderer().ordererGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(Equal(expectedConfigGroupJSON)) +} + +func TestRemoveConsenterFailures(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + orderer func(o Orderer) Orderer + ordererGroup func(og *cb.ConfigGroup, ord Orderer) + consenter func(c orderer.Consenter) orderer.Consenter + expectedErr string + }{ + { + testName: "when retrieving orderer configuration fails", + orderer: func(o Orderer) Orderer { + return o + }, + ordererGroup: func(og *cb.ConfigGroup, ord Orderer) { + _ = setValue(og, consensusTypeValue("foobar", []byte{}, 1), AdminsPolicyKey) + }, + consenter: func(c orderer.Consenter) orderer.Consenter { + return c + }, + expectedErr: "config contains unknown consensus type 'foobar'", + }, + { + testName: "when consensus type is not etcdraft", + orderer: func(o Orderer) Orderer { + o.OrdererType = orderer.ConsensusTypeSolo + return o + }, + ordererGroup: func(og *cb.ConfigGroup, ord Orderer) { + }, + consenter: func(c orderer.Consenter) orderer.Consenter { + return c + }, + expectedErr: "consensus type solo is not etcdraft", + }, + { + testName: "when the consensus state is invalid", + orderer: func(o Orderer) Orderer { + return o + }, + ordererGroup: func(og *cb.ConfigGroup, ord Orderer) { + ord.State = "bababa" + met, _ := marshalEtcdRaftMetadata(ord.EtcdRaft) + _ = setValue(og, consensusTypeValue(ord.OrdererType, met, 3), AdminsPolicyKey) + }, + consenter: func(c orderer.Consenter) orderer.Consenter { + return c + }, + expectedErr: "unknown consensus state ''", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseEtcdRaftOrderer(t) + ord := tt.orderer(baseOrdererConf) + + ordererGroup, err := newOrdererGroup(ord) + if ord.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + tt.ordererGroup(ordererGroup, ord) + + etcdRaftCert := baseOrdererConf.EtcdRaft.Consenters[0].ClientTLSCert + + consenter := orderer.Consenter{ + Address: orderer.EtcdAddress{ + Host: "node-4.example.com", + Port: 7050, + }, + ClientTLSCert: etcdRaftCert, + ServerTLSCert: 
etcdRaftCert, + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + err = c.Orderer().RemoveConsenter(tt.consenter(consenter)) + gt.Expect(err).To(MatchError(tt.expectedErr)) + }) + } +} + +func TestAddOrdererCapabilityFailures(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + capability string + ordererGroup func(og *cb.ConfigGroup) + expectedErr string + }{ + { + testName: "when retrieving existing capabilities", + capability: "V1_3", + ordererGroup: func(og *cb.ConfigGroup) { + og.Values = map[string]*cb.ConfigValue{ + CapabilitiesKey: { + Value: []byte("foobar"), + }, + } + }, + expectedErr: "retrieving orderer capabilities: unmarshaling capabilities: proto", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + tt.ordererGroup(ordererGroup) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + err = c.Orderer().AddCapability(tt.capability) + gt.Expect(err.Error()).To(ContainSubstring(tt.expectedErr)) + }) + } +} + +func TestRemoveOrdererCapability(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + ordererOrgMSP := baseOrdererConf.Organizations[0].MSP + orgCertBase64, orgCRLBase64 := certCRLBase64(t, ordererOrgMSP) + + expectedConfigGroupJSON := fmt.Sprintf(`{ + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + 
"client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 100, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": {} + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": null, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": null, + "state": "STATE_NORMAL", + "type": "solo" + }, + "version": "0" + } + }, + "version": "0" +} +`, orgCertBase64, orgCRLBase64) + + capability := "V1_3" + err = c.Orderer().RemoveCapability(capability) + gt.Expect(err).NotTo(HaveOccurred()) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &ordererext.DynamicOrdererGroup{ConfigGroup: c.Orderer().ordererGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(Equal(expectedConfigGroupJSON)) +} + +func TestRemoveOrdererCapabilityFailures(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + capability string + ordererGroup func(og *cb.ConfigGroup) + expectedErr string + }{ + { + testName: "when capability does not exist", + capability: "V3_0", + ordererGroup: func(og *cb.ConfigGroup) { + }, + expectedErr: "capability not set", + }, + { + testName: "when retrieving existing capabilities", + capability: "V3_0", + ordererGroup: func(og *cb.ConfigGroup) { + og.Values = map[string]*cb.ConfigValue{ + CapabilitiesKey: { + Value: []byte("foobar"), + }, + } + }, + expectedErr: "retrieving orderer capabilities: unmarshaling capabilities: proto", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + 
gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + tt.ordererGroup(ordererGroup) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + err = c.Orderer().RemoveCapability(tt.capability) + gt.Expect(err.Error()).To(ContainSubstring(tt.expectedErr)) + }) + } +} + +func TestOrdererOrg(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channel, _, _ := baseSystemChannelProfile(t) + channelGroup, err := newSystemChannelGroup(channel) + if channel.Orderer.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + expectedOrg := channel.Orderer.Organizations[0] + + tests := []struct { + name string + orgName string + expectedErr string + }{ + { + name: "success", + orgName: "OrdererOrg", + expectedErr: "", + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + org, err := c.Orderer().Organization(tc.orgName).Configuration() + if tc.expectedErr != "" { + gt.Expect(err).To(MatchError(tc.expectedErr)) + gt.Expect(Organization{}).To(Equal(org)) + } else { + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(expectedOrg).To(Equal(org)) + } + }) + } +} + +func TestRemoveOrdererOrg(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + channel, _, _ := baseSystemChannelProfile(t) + channelGroup, err := newSystemChannelGroup(channel) + if channel.Orderer.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + c.Orderer().RemoveOrganization("OrdererOrg") + gt.Expect(c.Orderer().Organization("OrdererOrg")).To(BeNil()) +} + +func TestSetOrdererModPolicy(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Orderer": ordererGroup, + }, + }, + } + + c := New(config) + + err = c.Orderer().SetModPolicy("TestModPolicy") + gt.Expect(err).NotTo(HaveOccurred()) + + updatedModPolicy := c.Orderer().ordererGroup.GetModPolicy() + gt.Expect(updatedModPolicy).To(Equal("TestModPolicy")) +} + +func TestSetOrdererModPolicyFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Orderer": ordererGroup, + }, + }, + } + + c := New(config) + + err = 
c.Orderer().SetModPolicy("") + gt.Expect(err).To(MatchError("non empty mod policy is required")) +} + +func TestSetOrdererPolicy(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Orderer": ordererGroup, + }, + }, + } + + c := New(config) + + expectedPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + BlockValidationPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy": { + Type: ImplicitMetaPolicyType, + Rule: "ANY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + } + + err = c.Orderer().SetPolicy("TestPolicy", Policy{Type: ImplicitMetaPolicyType, Rule: "ANY Endorsement"}) + gt.Expect(err).NotTo(HaveOccurred()) + + updatedPolicies, err := c.Orderer().Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedPolicies).To(Equal(expectedPolicies)) +} + +func TestSetOrdererPolicyFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Orderer": ordererGroup, + }, + }, + } + + c := New(config) + + err = c.Orderer().SetPolicy("TestPolicy", Policy{}) + gt.Expect(err).To(MatchError("failed to set policy 'TestPolicy': unknown policy type: ")) +} + +func TestSetOrdererPolicies(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + baseOrdererConf.Policies["TestPolicy_Remove"] = baseOrdererConf.Policies[ReadersPolicyKey] + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Orderer": ordererGroup, + }, + }, + } + + c := New(config) + + newPolices := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + BlockValidationPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy_Add1": { + Type: ImplicitMetaPolicyType, + Rule: 
"MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy_Add2": { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + } + + err = c.Orderer().SetPolicies(newPolices) + gt.Expect(err).NotTo(HaveOccurred()) + + updatedPolicies, err := c.Orderer().Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedPolicies).To(Equal(newPolices)) + + originalPolicies := c.original.ChannelGroup.Groups[OrdererGroupKey].Policies + gt.Expect(originalPolicies).To(Equal(ordererGroup.Policies)) +} + +func TestSetOrdererPoliciesFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Orderer": ordererGroup, + }, + }, + } + + c := New(config) + + newPolices := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + BlockValidationPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + "TestPolicy": {}, + } + + err = c.Orderer().SetPolicies(newPolices) + gt.Expect(err).To(MatchError("failed to set policies: unknown policy type: ")) +} + +func TestSetOrdererPoliciesWithoutBlockValidationPolicyFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Orderer": ordererGroup, + }, + }, + } + + c := New(config) + + newPolices := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + } + + err = c.Orderer().SetPolicies(newPolices) + gt.Expect(err).To(MatchError("BlockValidation policy must be defined")) +} + +func TestRemoveOrdererPolicy(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + baseOrdererConf.Policies["TestPolicy"] = baseOrdererConf.Policies[AdminsPolicyKey] + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Orderer": ordererGroup, + }, + }, + } + + c := New(config) + + expectedPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: 
ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + BlockValidationPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + } + + err = c.Orderer().RemovePolicy("TestPolicy") + gt.Expect(err).NotTo(HaveOccurred()) + + updatedPolicies, err := c.Orderer().Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedPolicies).To(Equal(expectedPolicies)) +} + +func TestRemoveOrdererPolicyFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + baseOrdererConf.Policies["TestPolicy"] = baseOrdererConf.Policies[AdminsPolicyKey] + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + tests := []struct { + testName string + ordererGrpMod func(*cb.ConfigGroup) *cb.ConfigGroup + policyName string + expectedErr string + }{ + { + testName: "when removing blockvalidation policy", + ordererGrpMod: func(og *cb.ConfigGroup) *cb.ConfigGroup { + return proto.Clone(og).(*cb.ConfigGroup) + }, + policyName: BlockValidationPolicyKey, + expectedErr: "BlockValidation policy must be defined", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + gt := NewGomegaWithT(t) + + ordererGroup := tt.ordererGrpMod(ordererGroup) + if ordererGroup == nil { + delete(config.ChannelGroup.Groups, OrdererGroupKey) + } else { + config.ChannelGroup.Groups[OrdererGroupKey] = ordererGroup + } + + err = c.Orderer().RemovePolicy(tt.policyName) + gt.Expect(err).To(MatchError(tt.expectedErr)) + }) + } +} + +func TestSetOrdererOrgModPolicy(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Orderer": ordererGroup, + }, + }, + } + + c := New(config) + + ordererOrg := c.Orderer().Organization("OrdererOrg") + err = ordererOrg.SetModPolicy("TestModPolicy") + gt.Expect(err).NotTo(HaveOccurred()) + + updatedModPolicy := ordererOrg.orgGroup.GetModPolicy() + gt.Expect(updatedModPolicy).To(Equal("TestModPolicy")) +} + +func TestSetOrdererOrgModPolicyFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Orderer": ordererGroup, + }, + }, + } + + c := New(config) + + err = 
c.Orderer().Organization("OrdererOrg").SetModPolicy("") + gt.Expect(err).To(MatchError("non empty mod policy is required")) +} + +func TestSetOrdererOrgPolicy(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Orderer": ordererGroup, + }, + }, + } + + c := New(config) + + expectedPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + EndorsementPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy": { + Type: ImplicitMetaPolicyType, + Rule: "ANY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + } + + ordererOrg := c.Orderer().Organization("OrdererOrg") + err = ordererOrg.SetPolicy("TestPolicy", Policy{Type: ImplicitMetaPolicyType, Rule: "ANY Endorsement"}) + gt.Expect(err).NotTo(HaveOccurred()) + + updatedPolicies, err := ordererOrg.Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedPolicies).To(Equal(expectedPolicies)) +} + +func TestSetOrdererOrgPolicyFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Orderer": ordererGroup, + }, + }, + } + + c := New(config) + + err = c.Orderer().Organization("OrdererOrg").SetPolicy("TestPolicy", Policy{}) + gt.Expect(err).To(MatchError("unknown policy type: ")) +} + +func TestSetOrdererOrgPolicies(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + baseOrdererConf.Organizations[0].Policies["TestPolicy_Remove"] = baseOrdererConf.Organizations[0].Policies[ReadersPolicyKey] + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Orderer": ordererGroup, + }, + }, + } + + c := New(config) + + newPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + EndorsementPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: 
"MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy_Add1": { + Type: ImplicitMetaPolicyType, + Rule: "ANY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + "TestPolicy_Add2": { + Type: ImplicitMetaPolicyType, + Rule: "ANY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + } + + ordererOrg := c.Orderer().Organization("OrdererOrg") + err = ordererOrg.SetPolicies(newPolicies) + gt.Expect(err).NotTo(HaveOccurred()) + + updatedPolicies, err := ordererOrg.Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedPolicies).To(Equal(newPolicies)) + + originalPolicies := c.original.ChannelGroup.Groups[OrdererGroupKey].Groups["OrdererOrg"].Policies + gt.Expect(originalPolicies).To(Equal(ordererGroup.Groups["OrdererOrg"].Policies)) +} + +func TestSetOrdererOrgPoliciesFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Orderer": ordererGroup, + }, + }, + } + + c := New(config) + + newPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + }, + EndorsementPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + }, + "TestPolicy": {}, + } + + err = c.Orderer().Organization("OrdererOrg").SetPolicies(newPolicies) + gt.Expect(err).To(MatchError("unknown policy type: ")) +} + +func TestRemoveOrdererOrgPolicy(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + baseOrdererConf.Organizations[0].Policies["TestPolicy"] = baseOrdererConf.Organizations[0].Policies[AdminsPolicyKey] + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Orderer": ordererGroup, + }, + }, + } + + c := New(config) + + expectedPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Readers", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Writers", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Admins", + ModPolicy: AdminsPolicyKey, + }, + EndorsementPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Endorsement", + ModPolicy: AdminsPolicyKey, + }, + } + + err = c.Orderer().Organization("OrdererOrg").RemovePolicy("TestPolicy") + gt.Expect(err).NotTo(HaveOccurred()) + + updatedPolicies, err := c.Orderer().Organization("OrdererOrg").Policies() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(updatedPolicies).To(Equal(expectedPolicies)) +} + +func TestOrdererMSP(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + soloOrderer, _ := baseSoloOrderer(t) + expectedMSP := 
soloOrderer.Organizations[0].MSP + + ordererGroup, err := newOrdererGroup(soloOrderer) + if soloOrderer.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + msp, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msp).To(Equal(expectedMSP)) +} + +func TestUpdateOrdererMSP(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, privKeys, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + + ordererCertBase64, ordererCRLBase64 := certCRLBase64(t, ordererMSP) + + newRootCert, newRootPrivKey := generateCACertAndPrivateKey(t, "anotherca-org1.example.com") + newRootCertBase64 := base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(newRootCert)) + ordererMSP.RootCerts = append(ordererMSP.RootCerts, newRootCert) + + newIntermediateCert, _ := generateIntermediateCACertAndPrivateKey(t, "anotherca-org1.example.com", newRootCert, newRootPrivKey) + newIntermediateCertBase64 := base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(newIntermediateCert)) + ordererMSP.IntermediateCerts = append(ordererMSP.IntermediateCerts, newIntermediateCert) + + cert := ordererMSP.RootCerts[0] + certToRevoke, _ := generateCertAndPrivateKeyFromCACert(t, "org1.example.com", cert, privKeys[0]) + signingIdentity := &SigningIdentity{ + Certificate: cert, + PrivateKey: privKeys[0], + MSPID: "MSPID", + } + newCRL, err := ordererMSP.CreateMSPCRL(signingIdentity, certToRevoke) + gt.Expect(err).NotTo(HaveOccurred()) + pemNewCRL, err := pemEncodeCRL(newCRL) + gt.Expect(err).NotTo(HaveOccurred()) + newCRLBase64 := base64.StdEncoding.EncodeToString(pemNewCRL) + ordererMSP.RevocationList = append(ordererMSP.RevocationList, newCRL) + + err = c.Orderer().Organization("OrdererOrg").SetMSP(ordererMSP) + gt.Expect(err).NotTo(HaveOccurred()) + + expectedConfigJSON := fmt.Sprintf(` +{ + "channel_group": { + "groups": { + "Orderer": { + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": 
{ + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s", + "%[2]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[3]s", + "%[4]s" + ], + "root_certs": [ + "%[1]s", + "%[5]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 100, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": null, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": null, + "state": "STATE_NORMAL", + "type": "solo" + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "", + "policies": {}, + "values": {}, + "version": "0" + }, + "sequence": "0" +}`, ordererCertBase64, newIntermediateCertBase64, ordererCRLBase64, newCRLBase64, newRootCertBase64) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, c.updated) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(MatchJSON(expectedConfigJSON)) +} + +func TestUpdateOrdererMSPFailure(t *testing.T) { + t.Parallel() + + tests := []struct { + spec string + mspMod func(MSP) MSP + orgName string + expectedErr string + }{ + { + spec: "updating msp name", + mspMod: func(msp MSP) MSP { + msp.Name = "thiscantbegood" + return msp + }, + orgName: "OrdererOrg", + expectedErr: "MSP name cannot be changed", + }, + { + spec: "invalid root ca cert keyusage", + mspMod: func(msp MSP) MSP { + msp.RootCerts = []*x509.Certificate{ + { + SerialNumber: big.NewInt(7), + KeyUsage: x509.KeyUsageKeyAgreement, + }, + } + return msp + }, + orgName: "OrdererOrg", + expectedErr: "invalid root cert: KeyUsage must be x509.KeyUsageCertSign. 
serial number: 7", + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.spec, func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + ordererType := orderer.ConsensusTypeSolo + channelGroup, _, err := baseOrdererChannelGroup(t, ordererType) + if ordererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + c := New(config) + + ordererMSP, err := c.Orderer().Organization("OrdererOrg").MSP().Configuration() + gt.Expect(err).NotTo(HaveOccurred()) + + ordererMSP = tc.mspMod(ordererMSP) + err = c.Orderer().Organization(tc.orgName).SetMSP(ordererMSP) + gt.Expect(err).To(MatchError(tc.expectedErr)) + }) + } +} + +func TestRemoveLegacyKafkaBrokers(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + // creates a channel config group that only contains an Orderer group. + channelGroup := newConfigGroup() + ordererConf, _ := baseOrdererOfType(t, orderer.ConsensusTypeKafka) + ordererGroup, err := newOrdererGroupWithOrdererConsensusTypeKafka(ordererConf) + gt.Expect(err).NotTo(HaveOccurred()) + channelGroup.Groups[OrdererGroupKey] = ordererGroup + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: channelGroup, + } + + c := New(config) + + c.Orderer().RemoveLegacyKafkaBrokers() + + expectedConfigValue := map[string]*cb.ConfigValue{ + orderer.ConsensusTypeKey: { + ModPolicy: AdminsPolicyKey, + Value: marshalOrPanic(&ob.ConsensusType{ + Type: orderer.ConsensusTypeKafka, + }), + }, + orderer.ChannelRestrictionsKey: { + ModPolicy: AdminsPolicyKey, + }, + CapabilitiesKey: { + ModPolicy: AdminsPolicyKey, + Value: marshalOrPanic(&cb.Capabilities{ + Capabilities: map[string]*cb.Capability{ + "V1_3": {}, + }, + }), + }, + orderer.BatchTimeoutKey: { + ModPolicy: AdminsPolicyKey, + Value: marshalOrPanic(&ob.BatchTimeout{ + Timeout: "0s", + }), + }, + orderer.BatchSizeKey: { + ModPolicy: AdminsPolicyKey, + Value: marshalOrPanic(&ob.BatchSize{ + MaxMessageCount: 100, + AbsoluteMaxBytes: 100, + PreferredMaxBytes: 100, + }), + }, + } + + gt.Expect(c.Orderer().ordererGroup.Values).To(Equal(expectedConfigValue)) +} + +func TestSetBatchSizeValues(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseEtcdRaftOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + ordererOrgMSP := baseOrdererConf.Organizations[0].MSP + orgCertBase64, orgCRLBase64 := certCRLBase64(t, ordererOrgMSP) + etcdRaftCert := baseOrdererConf.EtcdRaft.Consenters[0].ClientTLSCert + + etcdRaftCertBase64 := base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(etcdRaftCert)) + expectedConfigGroupJSON := fmt.Sprintf(`{ + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": 
"Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 300, + "max_message_count": 200, + "preferred_max_bytes": 500 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": null, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": { + "consenters": [ + { + "client_tls_cert": "%[3]s", + "host": "node-1.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-2.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-3.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + } + ], + "options": { + "election_tick": 0, + "heartbeat_tick": 0, + "max_inflight_blocks": 0, + "snapshot_interval_size": 0, + "tick_interval": "" + } + }, + "state": "STATE_NORMAL", + "type": "etcdraft" + }, + "version": "0" + } + }, + "version": "0" +} +`, orgCertBase64, orgCRLBase64, etcdRaftCertBase64) + + err = c.Orderer().BatchSize().SetMaxMessageCount(200) + gt.Expect(err).NotTo(HaveOccurred()) + + err = c.Orderer().BatchSize().SetAbsoluteMaxBytes(300) + gt.Expect(err).NotTo(HaveOccurred()) + + err 
= c.Orderer().BatchSize().SetPreferredMaxBytes(500) + gt.Expect(err).NotTo(HaveOccurred()) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &ordererext.DynamicOrdererGroup{ConfigGroup: c.Orderer().ordererGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(Equal(expectedConfigGroupJSON)) +} + +func TestSetMaxMessageCountFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + baseOrdererConf, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + ordererGroup.Values[orderer.BatchSizeKey] = &cb.ConfigValue{Value: []byte("{")} + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + err = c.Orderer().BatchSize().SetMaxMessageCount(5) + gt.Expect(err.Error()).To(ContainSubstring("cannot parse invalid wire-format data")) +} + +func TestSetAbsoluteMaxBytesFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + ordererGroup.Values[orderer.BatchSizeKey] = &cb.ConfigValue{Value: []byte("{")} + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + err = c.Orderer().BatchSize().SetAbsoluteMaxBytes(5) + gt.Expect(err.Error()).To(ContainSubstring("cannot parse invalid wire-format data")) +} + +func TestSetPreferredMaxBytesFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + ordererGroup.Values[orderer.BatchSizeKey] = &cb.ConfigValue{Value: []byte("{")} + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + err = c.Orderer().BatchSize().SetPreferredMaxBytes(5) + gt.Expect(err.Error()).To(ContainSubstring("cannot parse invalid wire-format data")) +} + +func TestSetBatchTimeout(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseEtcdRaftOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + ordererOrgMSP := baseOrdererConf.Organizations[0].MSP + orgCertBase64, orgCRLBase64 := certCRLBase64(t, ordererOrgMSP) + etcdRaftCert := baseOrdererConf.EtcdRaft.Consenters[0].ClientTLSCert + + etcdRaftCertBase64 := base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(etcdRaftCert)) + expectedConfigGroupJSON := fmt.Sprintf(`{ + "groups": { + "OrdererOrg": { + 
"groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 100, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "20s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": null, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": { + "consenters": [ + { + "client_tls_cert": "%[3]s", + "host": "node-1.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-2.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-3.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + } + ], + 
"options": { + "election_tick": 0, + "heartbeat_tick": 0, + "max_inflight_blocks": 0, + "snapshot_interval_size": 0, + "tick_interval": "" + } + }, + "state": "STATE_NORMAL", + "type": "etcdraft" + }, + "version": "0" + } + }, + "version": "0" +} +`, orgCertBase64, orgCRLBase64, etcdRaftCertBase64) + + err = c.Orderer().SetBatchTimeout(time.Second * 20) + gt.Expect(err).NotTo(HaveOccurred()) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &ordererext.DynamicOrdererGroup{ConfigGroup: c.Orderer().ordererGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(Equal(expectedConfigGroupJSON)) +} + +func TestSetMaxChannels(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseEtcdRaftOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + ordererOrgMSP := baseOrdererConf.Organizations[0].MSP + orgCertBase64, orgCRLBase64 := certCRLBase64(t, ordererOrgMSP) + etcdRaftCert := baseOrdererConf.EtcdRaft.Consenters[0].ClientTLSCert + + etcdRaftCertBase64 := base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(etcdRaftCert)) + expectedConfigGroupJSON := fmt.Sprintf(`{ + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + 
"mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 100, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": { + "max_count": "100" + }, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": { + "consenters": [ + { + "client_tls_cert": "%[3]s", + "host": "node-1.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-2.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-3.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + } + ], + "options": { + "election_tick": 0, + "heartbeat_tick": 0, + "max_inflight_blocks": 0, + "snapshot_interval_size": 0, + "tick_interval": "" + } + }, + "state": "STATE_NORMAL", + "type": "etcdraft" + }, + "version": "0" + } + }, + "version": "0" +} +`, orgCertBase64, orgCRLBase64, etcdRaftCertBase64) + + err = c.Orderer().SetMaxChannels(100) + gt.Expect(err).NotTo(HaveOccurred()) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &ordererext.DynamicOrdererGroup{ConfigGroup: c.Orderer().ordererGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(Equal(expectedConfigGroupJSON)) +} + +func TestSetConsensusType(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + ordererType string + expectedErr string + }{ + {testName: "when current consensus type is etcdraft", ordererType: orderer.ConsensusTypeEtcdRaft, expectedErr: "config does not contain value for ChannelRestrictions"}, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + gt := NewGomegaWithT(t) + + var etcdRaftCertBase64 string + baseOrdererConf, _ := baseSoloOrderer(t) + if tt.ordererType == orderer.ConsensusTypeEtcdRaft { + baseOrdererConf, _ = baseEtcdRaftOrderer(t) + etcdRaftCert := baseOrdererConf.EtcdRaft.Consenters[0].ClientTLSCert + etcdRaftCertBase64 = base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(etcdRaftCert)) + + } else if tt.ordererType == orderer.ConsensusTypeKafka { + baseOrdererConf, _ = baseKafkaOrderer(t) + } + + ordererGroup, err := newOrdererGroup(baseOrdererConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + ordererOrgMSP := baseOrdererConf.Organizations[0].MSP + orgCertBase64, orgCRLBase64 := certCRLBase64(t, ordererOrgMSP) + expectedConfigGroupJSON := fmt.Sprintf(`{ + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": 
"MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 100, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": null, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": { + "consenters": [ + { + "client_tls_cert": "%[3]s", + "host": "node-1.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-2.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-3.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + } + ], + "options": { + "election_tick": 0, + "heartbeat_tick": 0, + "max_inflight_blocks": 0, + "snapshot_interval_size": 0, + "tick_interval": "" + } + }, + 
"state": "STATE_NORMAL", + "type": "etcdraft" + }, + "version": "0" + } + }, + "version": "0" +} +`, orgCertBase64, orgCRLBase64, etcdRaftCertBase64) + + consensusMetadata := orderer.EtcdRaft{ + Consenters: baseOrdererConf.EtcdRaft.Consenters, + } + err = c.Orderer().SetEtcdRaftConsensusType(consensusMetadata, orderer.ConsensusTypeSolo) + gt.Expect(err).NotTo(HaveOccurred()) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &ordererext.DynamicOrdererGroup{ConfigGroup: c.Orderer().ordererGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(MatchJSON(expectedConfigGroupJSON)) + }) + } +} + +func TestSetConsensusTypeFailures(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + ordererType string + expectedErr string + }{ + {testName: "when consensus type is empty", ordererType: "solo", expectedErr: "marshaling etcdraft metadata: consenters are required"}, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + delete(ordererGroup.Values, orderer.ConsensusTypeKey) + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + err = c.Orderer().SetEtcdRaftConsensusType(orderer.EtcdRaft{}, "") + gt.Expect(err).To(MatchError(tt.expectedErr)) + }) + } +} + +func TestSetConsensusState(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseEtcdRaftOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + ordererOrgMSP := baseOrdererConf.Organizations[0].MSP + orgCertBase64, orgCRLBase64 := certCRLBase64(t, ordererOrgMSP) + etcdRaftCert := baseOrdererConf.EtcdRaft.Consenters[0].ClientTLSCert + + etcdRaftCertBase64 := base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(etcdRaftCert)) + expectedConfigGroupJSON := fmt.Sprintf(`{ + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + 
"admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 100, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": null, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": { + "consenters": [ + { + "client_tls_cert": "%[3]s", + "host": "node-1.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-2.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-3.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + } + ], + "options": { + "election_tick": 0, + "heartbeat_tick": 0, + "max_inflight_blocks": 0, + "snapshot_interval_size": 0, + "tick_interval": "" + } + }, + "state": "STATE_MAINTENANCE", + "type": "etcdraft" + }, + "version": "0" + } + }, + "version": "0" +} +`, orgCertBase64, orgCRLBase64, etcdRaftCertBase64) + + err = c.Orderer().SetConsensusState(orderer.ConsensusStateMaintenance) + gt.Expect(err).NotTo(HaveOccurred()) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &ordererext.DynamicOrdererGroup{ConfigGroup: c.Orderer().ordererGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(Equal(expectedConfigGroupJSON)) +} + +func TestSetConsensusStateFailures(t *testing.T) { + t.Parallel() + + tests := []struct { + testName string + expectedErr string + }{ + {testName: "when retrieving orderer config fails", expectedErr: "config does not contain value for ConsensusType"}, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := 
baseSoloOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + if baseOrdererConf.OrdererType != orderer.ConsensusTypeSolo { + gt.Expect(err).NotTo(HaveOccurred()) + } else { + gt.Expect(err.Error()).To(ContainSubstring("the solo consensus type is no longer supported")) + return + } + + delete(ordererGroup.Values, orderer.ConsensusTypeKey) + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + err = c.Orderer().SetConsensusState("") + gt.Expect(err).To(MatchError(tt.expectedErr)) + }) + } +} + +func TestSetEtcdRaftOptions(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseOrdererConf, _ := baseEtcdRaftOrderer(t) + ordererGroup, err := newOrdererGroup(baseOrdererConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + OrdererGroupKey: ordererGroup, + }, + }, + } + + c := New(config) + + ordererOrgMSP := baseOrdererConf.Organizations[0].MSP + orgCertBase64, orgCRLBase64 := certCRLBase64(t, ordererOrgMSP) + etcdRaftCert := baseOrdererConf.EtcdRaft.Consenters[0].ClientTLSCert + + etcdRaftCertBase64 := base64.StdEncoding.EncodeToString(pemEncodeX509Certificate(etcdRaftCert)) + expectedConfigGroupJSON := fmt.Sprintf(`{ + "groups": { + "OrdererOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": 
"Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 100, + "max_message_count": 100, + "preferred_max_bytes": 100 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "0s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_3": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": null, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": { + "consenters": [ + { + "client_tls_cert": "%[3]s", + "host": "node-1.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-2.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + }, + { + "client_tls_cert": "%[3]s", + "host": "node-3.example.com", + "port": 7050, + "server_tls_cert": "%[3]s" + } + ], + "options": { + "election_tick": 10, + "heartbeat_tick": 20, + "max_inflight_blocks": 5, + "snapshot_interval_size": 25, + "tick_interval": "200" + } + }, + "state": "STATE_NORMAL", + "type": "etcdraft" + }, + "version": "0" + } + }, + "version": "0" +} +`, orgCertBase64, orgCRLBase64, etcdRaftCertBase64) + + err = c.Orderer().EtcdRaftOptions().SetTickInterval("200") + gt.Expect(err).NotTo(HaveOccurred()) + + err = c.Orderer().EtcdRaftOptions().SetElectionInterval(10) + gt.Expect(err).NotTo(HaveOccurred()) + + err = c.Orderer().EtcdRaftOptions().SetHeartbeatTick(20) + gt.Expect(err).NotTo(HaveOccurred()) + + err = c.Orderer().EtcdRaftOptions().SetMaxInflightBlocks(5) + gt.Expect(err).NotTo(HaveOccurred()) + + err = c.Orderer().EtcdRaftOptions().SetSnapshotIntervalSize(25) + gt.Expect(err).NotTo(HaveOccurred()) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &ordererext.DynamicOrdererGroup{ConfigGroup: c.Orderer().ordererGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(Equal(expectedConfigGroupJSON)) +} + +func baseOrdererOfType(t *testing.T, ordererType string) (Orderer, []*ecdsa.PrivateKey) { + switch ordererType { + case orderer.ConsensusTypeKafka: + return baseKafkaOrderer(t) + case orderer.ConsensusTypeEtcdRaft: + return baseEtcdRaftOrderer(t) + default: + return baseSoloOrderer(t) + } +} + +// Deprecated: the solo consensus type is no longer supported +func baseSoloOrderer(t *testing.T) (Orderer, []*ecdsa.PrivateKey) { + baseMSP, privKey := baseMSP(t) + return Orderer{ + Policies: ordererStandardPolicies(), + OrdererType: orderer.ConsensusTypeSolo, + Organizations: []Organization{ + { + Name: "OrdererOrg", + Policies: orgStandardPolicies(), + OrdererEndpoints: []string{ + "localhost:123", + }, + MSP: baseMSP, + }, + }, + Capabilities: []string{"V1_3"}, + BatchSize: orderer.BatchSize{ + MaxMessageCount: 100, + AbsoluteMaxBytes: 100, + PreferredMaxBytes: 100, + }, + State: orderer.ConsensusStateNormal, + ModPolicy: AdminsPolicyKey, + }, []*ecdsa.PrivateKey{privKey} +} + +// Deprecated: the kafka consensus type is no longer supported +func baseKafkaOrderer(t *testing.T) (Orderer, 
[]*ecdsa.PrivateKey) { + soloOrderer, privKeys := baseSoloOrderer(t) + soloOrderer.OrdererType = orderer.ConsensusTypeKafka + soloOrderer.Kafka = orderer.Kafka{ + Brokers: []string{"broker1", "broker2"}, + } + + return soloOrderer, privKeys +} + +func baseEtcdRaftOrderer(t *testing.T) (Orderer, []*ecdsa.PrivateKey) { + caCert, caPrivKey := generateCACertAndPrivateKey(t, "orderer-org") + cert, _ := generateCertAndPrivateKeyFromCACert(t, "orderer-org", caCert, caPrivKey) + + soloOrderer, privKeys := baseSoloOrderer(t) + soloOrderer.OrdererType = orderer.ConsensusTypeEtcdRaft + soloOrderer.EtcdRaft = orderer.EtcdRaft{ + Consenters: []orderer.Consenter{ + { + Address: orderer.EtcdAddress{ + Host: "node-1.example.com", + Port: 7050, + }, + ClientTLSCert: cert, + ServerTLSCert: cert, + }, + { + Address: orderer.EtcdAddress{ + Host: "node-2.example.com", + Port: 7050, + }, + ClientTLSCert: cert, + ServerTLSCert: cert, + }, + { + Address: orderer.EtcdAddress{ + Host: "node-3.example.com", + Port: 7050, + }, + ClientTLSCert: cert, + ServerTLSCert: cert, + }, + }, + Options: orderer.EtcdRaftOptions{}, + } + + return soloOrderer, privKeys +} + +// baseOrdererChannelGroup creates a channel config group +// that only contains an Orderer group. +func baseOrdererChannelGroup(t *testing.T, ordererType string) (*cb.ConfigGroup, []*ecdsa.PrivateKey, error) { + channelGroup := newConfigGroup() + + ordererConf, privKeys := baseOrdererOfType(t, ordererType) + ordererGroup, err := newOrdererGroup(ordererConf) + if err != nil { + return nil, nil, err + } + channelGroup.Groups[OrdererGroupKey] = ordererGroup + + return channelGroup, privKeys, nil +} + +// marshalOrPanic is a helper for proto marshal. +func marshalOrPanic(pb proto.Message) []byte { + data, err := proto.Marshal(pb) + if err != nil { + panic(err) + } + + return data +} + +func newOrdererGroupWithOrdererConsensusTypeKafka(orderer Orderer) (*cb.ConfigGroup, error) { + ordererGroup := newConfigGroup() + ordererGroup.ModPolicy = AdminsPolicyKey + + if orderer.ModPolicy != "" { + ordererGroup.ModPolicy = orderer.ModPolicy + } + + if err := setOrdererPolicies(ordererGroup, orderer.Policies, AdminsPolicyKey); err != nil { + return nil, err + } + + // add orderer values + err := addOrdererValuesWithOrdererConsensusTypeKafka(ordererGroup, orderer) + if err != nil { + return nil, err + } + + // add orderer groups + for _, org := range orderer.Organizations { + // As of fabric v1.4 we expect new system channels to contain orderer endpoints at the org level + if len(org.OrdererEndpoints) == 0 { + return nil, fmt.Errorf("orderer endpoints are not defined for org %s", org.Name) + } + + ordererGroup.Groups[org.Name], err = newOrdererOrgConfigGroup(org) + if err != nil { + return nil, fmt.Errorf("org group '%s': %v", org.Name, err) + } + } + + return ordererGroup, nil +} + +// addOrdererValues adds configuration specified in Orderer to an orderer +// *cb.ConfigGroup's Values map. 
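+// It sets the batch size, batch timeout, channel restriction, and (when
+// present) capability values, always adds the KafkaBrokers value, and records
+// the ConsensusType value with empty consensus metadata.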
+func addOrdererValuesWithOrdererConsensusTypeKafka(ordererGroup *cb.ConfigGroup, o Orderer) error { + err := setValue(ordererGroup, batchSizeValue( + o.BatchSize.MaxMessageCount, + o.BatchSize.AbsoluteMaxBytes, + o.BatchSize.PreferredMaxBytes, + ), AdminsPolicyKey) + if err != nil { + return err + } + + err = setValue(ordererGroup, batchTimeoutValue(o.BatchTimeout.String()), AdminsPolicyKey) + if err != nil { + return err + } + + err = setValue(ordererGroup, channelRestrictionsValue(o.MaxChannels), AdminsPolicyKey) + if err != nil { + return err + } + + if len(o.Capabilities) > 0 { + err = setValue(ordererGroup, capabilitiesValue(o.Capabilities), AdminsPolicyKey) + if err != nil { + return err + } + } + + var consensusMetadata []byte + + err = setValue(ordererGroup, kafkaBrokersValue(o.Kafka.Brokers), AdminsPolicyKey) + if err != nil { + return err + } + + consensusState, ok := ob.ConsensusType_State_value[string(o.State)] + if !ok { + return fmt.Errorf("unknown consensus state '%s'", o.State) + } + + err = setValue(ordererGroup, consensusTypeValue(o.OrdererType, consensusMetadata, consensusState), AdminsPolicyKey) + if err != nil { + return err + } + + return nil +} diff --git a/v2/configtx/organization.go b/v2/configtx/organization.go new file mode 100644 index 0000000..ea5eeb4 --- /dev/null +++ b/v2/configtx/organization.go @@ -0,0 +1,136 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "fmt" + + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + mb "github.com/hyperledger/fabric-protos-go-apiv2/msp" + pb "github.com/hyperledger/fabric-protos-go-apiv2/peer" + "google.golang.org/protobuf/proto" +) + +// newOrgConfigGroup returns an config group for an organization. +// It defines the crypto material for the organization (its MSP). +// It sets the mod_policy of all elements to "Admins". 
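+// If org.ModPolicy is non-empty, it overrides the "Admins" default on the
+// group itself. The organization's MSP is marshaled and stored as an
+// MSPConfig value under the MSP key.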
+func newOrgConfigGroup(org Organization) (*cb.ConfigGroup, error) { + orgGroup := newConfigGroup() + orgGroup.ModPolicy = AdminsPolicyKey + + if org.ModPolicy != "" { + orgGroup.ModPolicy = org.ModPolicy + } + + if err := setPolicies(orgGroup, org.Policies); err != nil { + return nil, err + } + + fabricMSPConfig, err := org.MSP.toProto() + if err != nil { + return nil, fmt.Errorf("converting fabric msp config to proto: %v", err) + } + + conf, err := proto.Marshal(fabricMSPConfig) + if err != nil { + return nil, fmt.Errorf("marshaling msp config: %v", err) + } + + // mspConfig defaults type to FABRIC which implements an X.509 based provider + mspConfig := &mb.MSPConfig{ + Config: conf, + } + + err = setValue(orgGroup, mspValue(mspConfig), AdminsPolicyKey) + if err != nil { + return nil, err + } + + return orgGroup, nil +} + +func newOrdererOrgConfigGroup(org Organization) (*cb.ConfigGroup, error) { + orgGroup, err := newOrgConfigGroup(org) + if err != nil { + return nil, err + } + + // OrdererEndpoints are orderer org specific and are only added when specified for orderer orgs + if len(org.OrdererEndpoints) > 0 { + err := setValue(orgGroup, endpointsValue(org.OrdererEndpoints), AdminsPolicyKey) + if err != nil { + return nil, err + } + } + + return orgGroup, nil +} + +func newApplicationOrgConfigGroup(org Organization) (*cb.ConfigGroup, error) { + orgGroup, err := newOrgConfigGroup(org) + if err != nil { + return nil, err + } + + // AnchorPeers are application org specific and are only added when specified for application orgs + anchorProtos := make([]*pb.AnchorPeer, len(org.AnchorPeers)) + for i, anchorPeer := range org.AnchorPeers { + anchorProtos[i] = &pb.AnchorPeer{ + Host: anchorPeer.Host, + Port: int32(anchorPeer.Port), + } + } + + // Avoid adding an unnecessary anchor peers element when one is not required + // This helps prevent a delta from the orderer system channel when computing + // more complex channel creation transactions + if len(anchorProtos) > 0 { + err := setValue(orgGroup, anchorPeersValue(anchorProtos), AdminsPolicyKey) + if err != nil { + return nil, fmt.Errorf("failed to add anchor peers value: %v", err) + } + } + + return orgGroup, nil +} + +// getOrganization returns a basic Organization struct from org config group. +func getOrganization(orgGroup *cb.ConfigGroup, orgName string) (Organization, error) { + policies, err := getPolicies(orgGroup.Policies) + if err != nil { + return Organization{}, err + } + + msp, err := getMSPConfig(orgGroup) + if err != nil { + return Organization{}, err + } + + var anchorPeers []Address + _, ok := orgGroup.Values[AnchorPeersKey] + if ok { + anchorProtos := &pb.AnchorPeers{} + err = unmarshalConfigValueAtKey(orgGroup, AnchorPeersKey, anchorProtos) + if err != nil { + return Organization{}, err + } + + for _, anchorProto := range anchorProtos.AnchorPeers { + anchorPeers = append(anchorPeers, Address{ + Host: anchorProto.Host, + Port: int(anchorProto.Port), + }) + } + } + + return Organization{ + Name: orgName, + Policies: policies, + MSP: msp, + AnchorPeers: anchorPeers, + }, nil +} diff --git a/v2/configtx/organization_test.go b/v2/configtx/organization_test.go new file mode 100644 index 0000000..63d45d4 --- /dev/null +++ b/v2/configtx/organization_test.go @@ -0,0 +1,204 @@ +/* +Copyright IBM Corp All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "bytes" + "fmt" + "testing" + + "github.com/hyperledger/fabric-config/v2/protolator" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/ordererext" + . "github.com/onsi/gomega" +) + +func TestOrganization(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + expectedOrg := baseApplicationOrg(t) + expectedOrg.AnchorPeers = nil + orgGroup, err := newOrgConfigGroup(expectedOrg) + gt.Expect(err).NotTo(HaveOccurred()) + + org, err := getOrganization(orgGroup, "Org1") + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(expectedOrg).To(Equal(org)) +} + +func TestNewOrgConfigGroup(t *testing.T) { + t.Parallel() + + t.Run("success", func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + baseSystemChannelProfile, _, _ := baseSystemChannelProfile(t) + org := baseSystemChannelProfile.Orderer.Organizations[0] + configGroup, err := newOrdererOrgConfigGroup(org) + gt.Expect(err).NotTo(HaveOccurred()) + + certBase64, crlBase64 := certCRLBase64(t, org.MSP) + + // The organization is from network.BasicSolo Profile + // configtxgen -printOrg Org1 + expectedPrintOrg := fmt.Sprintf(` +{ + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Endorsement" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "localhost:123" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "%[1]s" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA3" + }, + "fabric_node_ous": { + "admin_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "client_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "enable": false, + "orderer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + }, + "peer_ou_identifier": { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + }, + "intermediate_certs": [ + "%[1]s" + ], + "name": "MSPID", + "organizational_unit_identifiers": [ + { + "certificate": "%[1]s", + "organizational_unit_identifier": "OUID" + } + ], + "revocation_list": [ + "%[2]s" + ], + "root_certs": [ + "%[1]s" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "%[1]s" + ], + "tls_root_certs": [ + "%[1]s" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" +} +`, certBase64, crlBase64) + + buf := bytes.Buffer{} + err = protolator.DeepMarshalJSON(&buf, &ordererext.DynamicOrdererOrgGroup{ConfigGroup: configGroup}) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(buf.String()).To(MatchJSON(expectedPrintOrg)) + }) +} + +func TestNewOrgConfigGroupFailure(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + baseSystemChannelProfile, _, _ := baseSystemChannelProfile(t) + baseOrg := 
baseSystemChannelProfile.Orderer.Organizations[0] + baseOrg.Policies = nil + + configGroup, err := newOrgConfigGroup(baseOrg) + gt.Expect(configGroup).To(BeNil()) + gt.Expect(err).To(MatchError("no policies defined")) +} + +func baseApplicationOrg(t *testing.T) Organization { + msp, _ := baseMSP(t) + return Organization{ + Name: "Org1", + Policies: standardPolicies(), + MSP: msp, + AnchorPeers: []Address{ + {Host: "host3", Port: 123}, + }, + } +} diff --git a/v2/configtx/policies.go b/v2/configtx/policies.go new file mode 100644 index 0000000..4dbc61c --- /dev/null +++ b/v2/configtx/policies.go @@ -0,0 +1,277 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "github.com/hyperledger/fabric-config/v2/configtx/internal/policydsl" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + mb "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "google.golang.org/protobuf/proto" +) + +// getPolicies returns a map of Policy from given map of ConfigPolicy in organization config group. +func getPolicies(policies map[string]*cb.ConfigPolicy) (map[string]Policy, error) { + p := map[string]Policy{} + + for name, policy := range policies { + switch cb.Policy_PolicyType(policy.Policy.Type) { + case cb.Policy_IMPLICIT_META: + imp := &cb.ImplicitMetaPolicy{} + err := proto.Unmarshal(policy.Policy.Value, imp) + if err != nil { + return nil, err + } + + rule, err := implicitMetaToString(imp) + if err != nil { + return nil, err + } + + p[name] = Policy{ + Type: ImplicitMetaPolicyType, + Rule: rule, + ModPolicy: policy.GetModPolicy(), + } + case cb.Policy_SIGNATURE: + sp := &cb.SignaturePolicyEnvelope{} + err := proto.Unmarshal(policy.Policy.Value, sp) + if err != nil { + return nil, err + } + + rule, err := signatureMetaToString(sp) + if err != nil { + return nil, err + } + + p[name] = Policy{ + Type: SignaturePolicyType, + Rule: rule, + ModPolicy: policy.GetModPolicy(), + } + default: + return nil, fmt.Errorf("unknown policy type: %v", policy.Policy.Type) + } + } + + return p, nil +} + +// implicitMetaToString converts a *cb.ImplicitMetaPolicy to a string representation. +func implicitMetaToString(imp *cb.ImplicitMetaPolicy) (string, error) { + var args string + + switch imp.Rule { + case cb.ImplicitMetaPolicy_ANY: + args += cb.ImplicitMetaPolicy_ANY.String() + case cb.ImplicitMetaPolicy_ALL: + args += cb.ImplicitMetaPolicy_ALL.String() + case cb.ImplicitMetaPolicy_MAJORITY: + args += cb.ImplicitMetaPolicy_MAJORITY.String() + default: + return "", fmt.Errorf("unknown implicit meta policy rule type %v", imp.Rule) + } + + args = args + " " + imp.SubPolicy + + return args, nil +} + +// signatureMetaToString converts a *cb.SignaturePolicyEnvelope to a string representation. +func signatureMetaToString(sig *cb.SignaturePolicyEnvelope) (string, error) { + var roles []string + + for _, id := range sig.Identities { + role, err := mspPrincipalToString(id) + if err != nil { + return "", err + } + + roles = append(roles, role) + } + + return signaturePolicyToString(sig.Rule, roles) +} + +// mspPrincipalToString converts a *mb.MSPPrincipal to a string representation. 
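+// Only the ROLE classification is currently converted; the other
+// classifications return an empty string until conversion is implemented
+// (see the TODO below).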
+func mspPrincipalToString(principal *mb.MSPPrincipal) (string, error) { + switch principal.PrincipalClassification { + case mb.MSPPrincipal_ROLE: + var res strings.Builder + + role := &mb.MSPRole{} + + err := proto.Unmarshal(principal.Principal, role) + if err != nil { + return "", err + } + + res.WriteString("'") + res.WriteString(role.MspIdentifier) + res.WriteString(".") + res.WriteString(strings.ToLower(role.Role.String())) + res.WriteString("'") + + return res.String(), nil + // TODO: currently fabric only supports string conversion for principals of + // type ROLE. Implement MSPPrincipal-to-string conversion for types ORGANIZATION_UNIT, + // IDENTITY, ANONYMITY, and COMBINED once we have support from fabric. + case mb.MSPPrincipal_ORGANIZATION_UNIT: + return "", nil + case mb.MSPPrincipal_IDENTITY: + return "", nil + case mb.MSPPrincipal_ANONYMITY: + return "", nil + case mb.MSPPrincipal_COMBINED: + return "", nil + default: + return "", fmt.Errorf("unknown MSP principal classiciation %v", principal.PrincipalClassification) + } +} + +// signaturePolicyToString recursively converts a *cb.SignaturePolicy to a +// string representation. +func signaturePolicyToString(sig *cb.SignaturePolicy, IDs []string) (string, error) { + switch sig.Type.(type) { + case *cb.SignaturePolicy_NOutOf_: + nOutOf := sig.GetNOutOf() + + var policies []string + + var res strings.Builder + + // get gate values + gate := policydsl.GateOutOf + if nOutOf.N == 1 { + gate = policydsl.GateOr + } + + if nOutOf.N == int32(len(nOutOf.Rules)) { + gate = policydsl.GateAnd + } + + if gate == policydsl.GateOutOf { + policies = append(policies, strconv.Itoa(int(nOutOf.N))) + } + + // get subpolicies recursively + for _, rule := range nOutOf.Rules { + subPolicy, err := signaturePolicyToString(rule, IDs) + if err != nil { + return "", err + } + + policies = append(policies, subPolicy) + } + + res.WriteString(strings.ToUpper(gate)) + res.WriteString("(") + res.WriteString(strings.Join(policies, ", ")) + res.WriteString(")") + + return res.String(), nil + case *cb.SignaturePolicy_SignedBy: + return IDs[sig.GetSignedBy()], nil + default: + return "", fmt.Errorf("unknown signature policy type %v", sig.Type) + } +} + +func setPolicies(cg *cb.ConfigGroup, policyMap map[string]Policy) error { + if policyMap == nil { + return errors.New("no policies defined") + } + + if _, ok := policyMap[AdminsPolicyKey]; !ok { + return errors.New("no Admins policy defined") + } + + if _, ok := policyMap[ReadersPolicyKey]; !ok { + return errors.New("no Readers policy defined") + } + + if _, ok := policyMap[WritersPolicyKey]; !ok { + return errors.New("no Writers policy defined") + } + + cg.Policies = make(map[string]*cb.ConfigPolicy) + for policyName, policy := range policyMap { + err := setPolicy(cg, policyName, policy) + if err != nil { + return err + } + } + + return nil +} + +func setPolicy(cg *cb.ConfigGroup, policyName string, policy Policy) error { + if cg.Policies == nil { + cg.Policies = make(map[string]*cb.ConfigPolicy) + } + + switch policy.Type { + case ImplicitMetaPolicyType: + imp, err := implicitMetaFromString(policy.Rule) + if err != nil { + return fmt.Errorf("invalid implicit meta policy rule: '%s': %v", policy.Rule, err) + } + + implicitMetaPolicy, err := proto.Marshal(imp) + if err != nil { + return fmt.Errorf("marshaling implicit meta policy: %v", err) + } + + if policy.ModPolicy == "" { + policy.ModPolicy = AdminsPolicyKey + } + + cg.Policies[policyName] = &cb.ConfigPolicy{ + ModPolicy: policy.ModPolicy, + Policy: &cb.Policy{ + Type:
int32(cb.Policy_IMPLICIT_META), + Value: implicitMetaPolicy, + }, + } + case SignaturePolicyType: + sp, err := policydsl.FromString(policy.Rule) + if err != nil { + return fmt.Errorf("invalid signature policy rule: '%s': %v", policy.Rule, err) + } + + signaturePolicy, err := proto.Marshal(sp) + if err != nil { + return fmt.Errorf("marshaling signature policy: %v", err) + } + + if policy.ModPolicy == "" { + policy.ModPolicy = AdminsPolicyKey + } + + cg.Policies[policyName] = &cb.ConfigPolicy{ + ModPolicy: policy.ModPolicy, + Policy: &cb.Policy{ + Type: int32(cb.Policy_SIGNATURE), + Value: signaturePolicy, + }, + } + default: + return fmt.Errorf("unknown policy type: %s", policy.Type) + } + + return nil +} + +// removePolicy removes an existing policy from an group key organization. +func removePolicy(configGroup *cb.ConfigGroup, policyName string, policies map[string]Policy) { + delete(configGroup.Policies, policyName) +} diff --git a/v2/configtx/policies_test.go b/v2/configtx/policies_test.go new file mode 100644 index 0000000..4280063 --- /dev/null +++ b/v2/configtx/policies_test.go @@ -0,0 +1,135 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "testing" + + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + . "github.com/onsi/gomega" + "google.golang.org/protobuf/proto" +) + +func TestPolicies(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + expectedPolicies := map[string]Policy{ + ReadersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ALL Member", + ModPolicy: AdminsPolicyKey, + }, + WritersPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "ANY Member", + ModPolicy: AdminsPolicyKey, + }, + AdminsPolicyKey: { + Type: ImplicitMetaPolicyType, + Rule: "MAJORITY Member", + ModPolicy: AdminsPolicyKey, + }, + "SignaturePolicy": { + Type: SignaturePolicyType, + Rule: "AND('Org1.member', 'Org2.client', OR('Org3.peer', 'Org3.admin'), OUTOF(2, 'Org4.member', 'Org4.peer', 'Org4.admin'))", + ModPolicy: AdminsPolicyKey, + }, + } + orgGroup := newConfigGroup() + err := setPolicies(orgGroup, expectedPolicies) + gt.Expect(err).NotTo(HaveOccurred()) + + policies, err := getPolicies(orgGroup.Policies) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(expectedPolicies).To(Equal(policies)) + + policies, err = getPolicies(nil) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(map[string]Policy{}).To(Equal(policies)) +} + +func TestSetConsortiumChannelCreationPolicy(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ConsortiumsGroupKey: consortiumsGroup, + }, + }, + } + + c := New(config) + + updatedPolicy := Policy{Type: ImplicitMetaPolicyType, Rule: "MAJORITY Admins"} + + consortium1 := c.Consortium("Consortium1") + err = consortium1.SetChannelCreationPolicy(updatedPolicy) + gt.Expect(err).NotTo(HaveOccurred()) + + creationPolicy := consortium1.consortiumGroup.Values[ChannelCreationPolicyKey] + policy := &cb.Policy{} + err = proto.Unmarshal(creationPolicy.Value, policy) + gt.Expect(err).NotTo(HaveOccurred()) + imp := &cb.ImplicitMetaPolicy{} + err = proto.Unmarshal(policy.Value, imp) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(imp.Rule).To(Equal(cb.ImplicitMetaPolicy_MAJORITY)) + gt.Expect(imp.SubPolicy).To(Equal("Admins")) +} + +func 
TestSetConsortiumChannelCreationPolicyFailures(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + consortiums, _ := baseConsortiums(t) + + consortiumsGroup, err := newConsortiumsGroup(consortiums) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ConsortiumsGroupKey: consortiumsGroup, + }, + }, + } + + c := New(config) + + tests := []struct { + name string + consortiumName string + updatedpolicy Policy + expectedErr string + }{ + { + name: "when policy is invalid", + consortiumName: "Consortium1", + updatedpolicy: Policy{Type: ImplicitMetaPolicyType, Rule: "Bad Admins"}, + expectedErr: "invalid implicit meta policy rule 'Bad Admins': unknown rule type 'Bad', expected ALL, ANY, or MAJORITY", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + gt := NewGomegaWithT(t) + err := c.Consortium(tt.consortiumName).SetChannelCreationPolicy(tt.updatedpolicy) + gt.Expect(err).To(MatchError(tt.expectedErr)) + }) + } +} diff --git a/v2/configtx/signer.go b/v2/configtx/signer.go new file mode 100644 index 0000000..9aa0c8a --- /dev/null +++ b/v2/configtx/signer.go @@ -0,0 +1,190 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "encoding/pem" + "fmt" + "io" + "math/big" + + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + mb "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "google.golang.org/protobuf/proto" +) + +// SigningIdentity is an MSP Identity that can be used to sign configuration +// updates. +type SigningIdentity struct { + Certificate *x509.Certificate + PrivateKey crypto.PrivateKey + MSPID string +} + +type ecdsaSignature struct { + R, S *big.Int +} + +// Public returns the public key associated with this signing +// identity's certificate. +func (s *SigningIdentity) Public() crypto.PublicKey { + return s.Certificate.PublicKey +} + +// Sign performs an ECDSA signature with this signing identity's private key on the +// given message hashed using SHA-256. It ensures signatures are created with +// Low S values since Fabric normalizes all signatures to Low S. +// See https://github.com/bitcoin/bips/blob/master/bip-0146.mediawiki#low_s +// for more detail. +func (s *SigningIdentity) Sign(reader io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) { + switch pk := s.PrivateKey.(type) { + case *ecdsa.PrivateKey: + hasher := sha256.New() + hasher.Write(msg) + digest := hasher.Sum(nil) + + rr, ss, err := ecdsa.Sign(reader, pk, digest) + if err != nil { + return nil, err + } + + // ensure Low S signatures + sig := toLowS( + pk.PublicKey, + ecdsaSignature{ + R: rr, + S: ss, + }, + ) + + return asn1.Marshal(sig) + default: + return nil, fmt.Errorf("signing with private key of type %T not supported", pk) + } +} + +// toLowS normalizes all signatures to a canonical form where s is at most +// half the order of the curve. By doing so, it complies with what Fabric +// expects and protects against signature malleability attacks.
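+// If S is already at or below half the curve order, the signature is
+// returned unchanged; otherwise S is replaced with N - S.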
+func toLowS(key ecdsa.PublicKey, sig ecdsaSignature) ecdsaSignature { + // calculate half order of the curve + halfOrder := new(big.Int).Div(key.Curve.Params().N, big.NewInt(2)) + // check if s is greater than half order of curve + if sig.S.Cmp(halfOrder) == 1 { + // Set s to N - s so that s will be less than or equal to half order + sig.S.Sub(key.Params().N, sig.S) + } + + return sig +} + +// CreateConfigSignature creates a config signature for the the given configuration +// update using the specified signing identity. +func (s *SigningIdentity) CreateConfigSignature(marshaledUpdate []byte) (*cb.ConfigSignature, error) { + signatureHeader, err := s.signatureHeader() + if err != nil { + return nil, fmt.Errorf("creating signature header: %v", err) + } + + header, err := proto.Marshal(signatureHeader) + if err != nil { + return nil, fmt.Errorf("marshaling signature header: %v", err) + } + + configSignature := &cb.ConfigSignature{ + SignatureHeader: header, + } + + configSignature.Signature, err = s.Sign( + rand.Reader, + concatenateBytes(configSignature.SignatureHeader, marshaledUpdate), + nil, + ) + if err != nil { + return nil, fmt.Errorf("signing config update: %v", err) + } + + return configSignature, nil +} + +// SignEnvelope signs an envelope using the SigningIdentity. +func (s *SigningIdentity) SignEnvelope(e *cb.Envelope) error { + signatureHeader, err := s.signatureHeader() + if err != nil { + return fmt.Errorf("creating signature header: %v", err) + } + + sHeader, err := proto.Marshal(signatureHeader) + if err != nil { + return fmt.Errorf("marshaling signature header: %v", err) + } + + payload := &cb.Payload{} + err = proto.Unmarshal(e.Payload, payload) + if err != nil { + return fmt.Errorf("unmarshaling envelope payload: %v", err) + } + payload.Header.SignatureHeader = sHeader + + payloadBytes, err := proto.Marshal(payload) + if err != nil { + return fmt.Errorf("marshaling payload: %v", err) + } + + sig, err := s.Sign(rand.Reader, payloadBytes, nil) + if err != nil { + return fmt.Errorf("signing envelope payload: %v", err) + } + + e.Payload = payloadBytes + e.Signature = sig + + return nil +} + +func (s *SigningIdentity) signatureHeader() (*cb.SignatureHeader, error) { + pemBytes := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: s.Certificate.Raw, + }) + + idBytes, err := proto.Marshal(&mb.SerializedIdentity{ + Mspid: s.MSPID, + IdBytes: pemBytes, + }) + if err != nil { + return nil, fmt.Errorf("marshaling serialized identity: %v", err) + } + + nonce, err := newNonce() + if err != nil { + return nil, err + } + + return &cb.SignatureHeader{ + Creator: idBytes, + Nonce: nonce, + }, nil +} + +// newNonce generates a 24-byte nonce using the crypto/rand package. +func newNonce() ([]byte, error) { + nonce := make([]byte, 24) + + _, err := rand.Read(nonce) + if err != nil { + return nil, fmt.Errorf("failed to get random bytes: %v", err) + } + + return nonce, nil +} diff --git a/v2/configtx/signer_test.go b/v2/configtx/signer_test.go new file mode 100644 index 0000000..5dba867 --- /dev/null +++ b/v2/configtx/signer_test.go @@ -0,0 +1,425 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "io" + "math/big" + "testing" + "time" + + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + . 
"github.com/onsi/gomega" + "google.golang.org/protobuf/proto" +) + +func TestSign(t *testing.T) { + t.Parallel() + + cert, privateKey := generateCACertAndPrivateKey(t, "org1.example.com") + + tests := []struct { + spec string + privateKey crypto.PrivateKey + reader io.Reader + msg []byte + expectedErr string + }{ + { + spec: "success", + privateKey: privateKey, + reader: rand.Reader, + msg: []byte("banana"), + expectedErr: "", + }, + { + spec: "unsupported rsa private key", + privateKey: &rsa.PrivateKey{}, + reader: rand.Reader, + msg: []byte("banana"), + expectedErr: "signing with private key of type *rsa.PrivateKey not supported", + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.spec, func(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + signingIdentity := &SigningIdentity{ + Certificate: cert, + PrivateKey: tc.privateKey, + MSPID: "test-msp", + } + + signature, err := signingIdentity.Sign(tc.reader, tc.msg, nil) + if tc.expectedErr == "" { + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(signature).NotTo(BeNil()) + sig := &ecdsaSignature{} + _, err := asn1.Unmarshal(signature, sig) + gt.Expect(err).NotTo(HaveOccurred()) + hash := sha256.New() + hash.Write(tc.msg) + digest := hash.Sum(nil) + valid := ecdsa.Verify(cert.PublicKey.(*ecdsa.PublicKey), digest, sig.R, sig.S) + gt.Expect(valid).To(BeTrue()) + } else { + gt.Expect(err).To(MatchError(tc.expectedErr)) + gt.Expect(signature).To(BeNil()) + } + }) + } +} + +func TestPublic(t *testing.T) { + gt := NewGomegaWithT(t) + + cert, privateKey := generateCACertAndPrivateKey(t, "org1.example.com") + signingIdentity := &SigningIdentity{ + Certificate: cert, + PrivateKey: privateKey, + } + gt.Expect(signingIdentity.Public()).To(Equal(cert.PublicKey)) +} + +func TestCreateSignature(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + + cert, privateKey := generateCACertAndPrivateKey(t, "org1.example.com") + signingIdentity := SigningIdentity{ + Certificate: cert, + PrivateKey: privateKey, + MSPID: "test-msp", + } + + configSignature, err := signingIdentity.CreateConfigSignature([]byte("config")) + gt.Expect(err).NotTo(HaveOccurred()) + + sh, err := signingIdentity.signatureHeader() + gt.Expect(err).NotTo(HaveOccurred()) + expectedCreator := sh.Creator + signatureHeader := &cb.SignatureHeader{} + err = proto.Unmarshal(configSignature.SignatureHeader, signatureHeader) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(signatureHeader.Creator).To(Equal(expectedCreator)) +} + +func TestSignEnvelope(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + // create signingIdentity + cert, privateKey := generateCACertAndPrivateKey(t, "org1.example.com") + signingIdentity := SigningIdentity{ + Certificate: cert, + PrivateKey: privateKey, + MSPID: "test-msp", + } + + // create detached config signature + configUpdate := &cb.ConfigUpdate{ + ChannelId: "testchannel", + } + marshaledUpdate, err := proto.Marshal(configUpdate) + gt.Expect(err).NotTo(HaveOccurred()) + configSignature, err := signingIdentity.CreateConfigSignature(marshaledUpdate) + gt.Expect(err).NotTo(HaveOccurred()) + + // create signed config envelope + env, err := NewEnvelope(marshaledUpdate, configSignature) + gt.Expect(err).NotTo(HaveOccurred()) + err = signingIdentity.SignEnvelope(env) + gt.Expect(err).NotTo(HaveOccurred()) + + payload := &cb.Payload{} + err = proto.Unmarshal(env.Payload, payload) + gt.Expect(err).NotTo(HaveOccurred()) + // check header channel ID equal + channelHeader := &cb.ChannelHeader{} + err = 
proto.Unmarshal(payload.GetHeader().GetChannelHeader(), channelHeader) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(channelHeader.ChannelId).To(Equal(configUpdate.ChannelId)) + // check config update envelope signatures are equal + configEnv := &cb.ConfigUpdateEnvelope{} + err = proto.Unmarshal(payload.Data, configEnv) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(len(configEnv.Signatures)).To(Equal(1)) + expectedSignatures := configEnv.Signatures[0] + gt.Expect(expectedSignatures.SignatureHeader).To(Equal(configSignature.SignatureHeader)) + gt.Expect(expectedSignatures.Signature).To(Equal(configSignature.Signature)) +} + +func TestSignEnvelopeWithAnchorPeers(t *testing.T) { + t.Parallel() + gt := NewGomegaWithT(t) + + baseApplicationConf, _ := baseApplication(t) + + applicationGroup, err := newApplicationGroup(baseApplicationConf) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.Config{ + ChannelGroup: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + ApplicationGroupKey: applicationGroup, + }, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{}, + }, + } + + c := New(config) + + newOrg1AnchorPeer := Address{ + Host: "host3", + Port: 123, + } + + newOrg2AnchorPeer := Address{ + Host: "host4", + Port: 123, + } + + err = c.Application().Organization("Org1").AddAnchorPeer(newOrg1AnchorPeer) + gt.Expect(err).NotTo(HaveOccurred()) + + err = c.Application().Organization("Org2").AddAnchorPeer(newOrg2AnchorPeer) + gt.Expect(err).NotTo(HaveOccurred()) + + // create signingIdentity + cert, privateKey := generateCACertAndPrivateKey(t, "org1.example.com") + signingIdentity := SigningIdentity{ + Certificate: cert, + PrivateKey: privateKey, + MSPID: "test-msp", + } + + configUpdate, err := c.ComputeMarshaledUpdate("fake-channel") + gt.Expect(err).NotTo(HaveOccurred()) + + configSignature, err := signingIdentity.CreateConfigSignature(configUpdate) + gt.Expect(err).NotTo(HaveOccurred()) + + // create signed config envelope + env, err := NewEnvelope(configUpdate, configSignature) + gt.Expect(err).NotTo(HaveOccurred()) + err = signingIdentity.SignEnvelope(env) + gt.Expect(err).NotTo(HaveOccurred()) + + // check envelope signature is valid + // env.Signature + sig := &ecdsaSignature{} + _, err = asn1.Unmarshal(env.Signature, sig) + gt.Expect(err).NotTo(HaveOccurred()) + hash := sha256.New() + hash.Write(env.Payload) + digest := hash.Sum(nil) + valid := ecdsa.Verify(cert.PublicKey.(*ecdsa.PublicKey), digest, sig.R, sig.S) + gt.Expect(valid).To(BeTrue()) + + payload := &cb.Payload{} + err = proto.Unmarshal(env.Payload, payload) + gt.Expect(err).NotTo(HaveOccurred()) + + configUpdateEnvelope := &cb.ConfigUpdateEnvelope{} + err = proto.Unmarshal(payload.Data, configUpdateEnvelope) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(configUpdateEnvelope.Signatures).To(HaveLen(1)) + + sig = &ecdsaSignature{} + configSig := configUpdateEnvelope.Signatures[0] + _, err = asn1.Unmarshal(configSig.Signature, sig) + gt.Expect(err).NotTo(HaveOccurred()) + hash = sha256.New() + hash.Write(concatenateBytes(configSig.SignatureHeader, configUpdateEnvelope.ConfigUpdate)) + digest = hash.Sum(nil) + valid = ecdsa.Verify(cert.PublicKey.(*ecdsa.PublicKey), digest, sig.R, sig.S) + gt.Expect(valid).To(BeTrue()) +} + +func TestToLowS(t *testing.T) { + t.Parallel() + + curve := elliptic.P256() + halfOrder := new(big.Int).Div(curve.Params().N, big.NewInt(2)) + + for _, test := range []struct { + name string + sig ecdsaSignature + expectedSig ecdsaSignature + }{ + { + name: 
"HighS", + sig: ecdsaSignature{ + R: big.NewInt(1), + // set S to halfOrder + 1 + S: new(big.Int).Add(halfOrder, big.NewInt(1)), + }, + // expected signature should be (sig.R, -sig.S mod N) + expectedSig: ecdsaSignature{ + R: big.NewInt(1), + S: new(big.Int).Mod(new(big.Int).Neg(new(big.Int).Add(halfOrder, big.NewInt(1))), curve.Params().N), + }, + }, + { + name: "LowS", + sig: ecdsaSignature{ + R: big.NewInt(1), + // set S to halfOrder - 1 + S: new(big.Int).Sub(halfOrder, big.NewInt(1)), + }, + // expected signature should be sig + expectedSig: ecdsaSignature{ + R: big.NewInt(1), + S: new(big.Int).Sub(halfOrder, big.NewInt(1)), + }, + }, + { + name: "HalfOrder", + sig: ecdsaSignature{ + R: big.NewInt(1), + // set S to halfOrder + S: halfOrder, + }, + // expected signature should be sig + expectedSig: ecdsaSignature{ + R: big.NewInt(1), + S: halfOrder, + }, + }, + } { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + gt := NewGomegaWithT(t) + curve := elliptic.P256() + key := ecdsa.PublicKey{ + Curve: curve, + } + gt.Expect(toLowS(key, test.sig), test.expectedSig) + }) + } +} + +// generateCACertAndPrivateKey returns CA cert and private key. +func generateCACertAndPrivateKey(t *testing.T, orgName string) (*x509.Certificate, *ecdsa.PrivateKey) { + serialNumber := generateSerialNumber(t) + template := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: "ca." + orgName, + Organization: []string{orgName}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * 24 * time.Hour), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + IsCA: true, + } + return generateCertAndPrivateKey(t, template, template, nil) +} + +func generateIntermediateCACertAndPrivateKey(t *testing.T, orgName string, rootCACert *x509.Certificate, rootPrivKey *ecdsa.PrivateKey) (*x509.Certificate, *ecdsa.PrivateKey) { + serialNumber := generateSerialNumber(t) + template := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: "intermediateca." + orgName, + Organization: []string{orgName}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * 24 * time.Hour), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + IsCA: true, + } + return generateCertAndPrivateKey(t, template, rootCACert, rootPrivKey) +} + +// generateCertAndPrivateKeyFromCACert returns a cert and private key signed by the given CACert. +func generateCertAndPrivateKeyFromCACert(t *testing.T, orgName string, caCert *x509.Certificate, privateKey *ecdsa.PrivateKey) (*x509.Certificate, *ecdsa.PrivateKey) { + serialNumber := generateSerialNumber(t) + template := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: "user." 
+ orgName, + Organization: []string{orgName}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * 24 * time.Hour), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + return generateCertAndPrivateKey(t, template, caCert, privateKey) +} + +func generateCertAndPrivateKey(t *testing.T, template, parent *x509.Certificate, parentPriv *ecdsa.PrivateKey) (*x509.Certificate, *ecdsa.PrivateKey) { + gt := NewGomegaWithT(t) + + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + gt.Expect(err).NotTo(HaveOccurred()) + + if parentPriv == nil { + // create self-signed cert + parentPriv = priv + } + derBytes, err := x509.CreateCertificate(rand.Reader, template, parent, &priv.PublicKey, parentPriv) + gt.Expect(err).NotTo(HaveOccurred()) + + cert, err := x509.ParseCertificate(derBytes) + gt.Expect(err).NotTo(HaveOccurred()) + + return cert, priv +} + +// generateSerialNumber returns a random serialNumber +func generateSerialNumber(t *testing.T) *big.Int { + gt := NewGomegaWithT(t) + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + gt.Expect(err).NotTo(HaveOccurred()) + + return serialNumber +} + +// generateCert returns cert. +func generateCert(t *testing.T, orgName string) *x509.Certificate { + serialNumber := generateSerialNumber(t) + template := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: orgName, + Organization: []string{orgName}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * 24 * time.Hour), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + IsCA: false, + } + cert, _ := generateCertAndPrivateKey(t, template, template, nil) + return cert +} diff --git a/v2/configtx/update.go b/v2/configtx/update.go new file mode 100644 index 0000000..c80b079 --- /dev/null +++ b/v2/configtx/update.go @@ -0,0 +1,242 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "bytes" + "fmt" + + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + "google.golang.org/protobuf/proto" +) + +// Compute computes the difference between two *cb.Configs and returns the +// ReadSet and WriteSet diff as a *cb.ConfigUpdate +func computeConfigUpdate(original, updated *cb.Config) (*cb.ConfigUpdate, error) { + if original.ChannelGroup == nil { + return nil, fmt.Errorf("no channel group included for original config") + } + + if updated.ChannelGroup == nil { + return nil, fmt.Errorf("no channel group included for updated config") + } + + readSet, writeSet, groupUpdated := computeGroupUpdate(original.ChannelGroup, updated.ChannelGroup) + if !groupUpdated { + return nil, fmt.Errorf("no differences detected between original and updated config") + } + + updated.Sequence = original.Sequence + 1 + + return &cb.ConfigUpdate{ + ReadSet: readSet, + WriteSet: writeSet, + }, nil +} + +func computePoliciesMapUpdate(original, updated map[string]*cb.ConfigPolicy) (readSet, writeSet, sameSet map[string]*cb.ConfigPolicy, updatedMembers bool) { + readSet = make(map[string]*cb.ConfigPolicy) + writeSet = make(map[string]*cb.ConfigPolicy) + + // All modified config goes into the read/write sets, but in case the map membership changes, we retain the + // config which was the same to add to the read/write sets + sameSet = make(map[string]*cb.ConfigPolicy) + + for policyName, originalPolicy := range original { + updatedPolicy, ok := updated[policyName] + if !ok { + updatedMembers = true + continue + } + + if originalPolicy.ModPolicy == updatedPolicy.ModPolicy && proto.Equal(originalPolicy.Policy, updatedPolicy.Policy) { + sameSet[policyName] = &cb.ConfigPolicy{ + Version: originalPolicy.Version, + } + continue + } + + updatedPolicy.Version = originalPolicy.Version + 1 + writeSet[policyName] = &cb.ConfigPolicy{ + Version: originalPolicy.Version + 1, + ModPolicy: updatedPolicy.ModPolicy, + Policy: updatedPolicy.Policy, + } + } + + for policyName, updatedPolicy := range updated { + if _, ok := original[policyName]; ok { + // If the updatedPolicy is in the original set of policies, it was already handled + continue + } + updatedMembers = true + writeSet[policyName] = &cb.ConfigPolicy{ + Version: 0, + ModPolicy: updatedPolicy.ModPolicy, + Policy: updatedPolicy.Policy, + } + } + + return +} + +func computeValuesMapUpdate(original, updated map[string]*cb.ConfigValue) (readSet, writeSet, sameSet map[string]*cb.ConfigValue, updatedMembers bool) { + readSet = make(map[string]*cb.ConfigValue) + writeSet = make(map[string]*cb.ConfigValue) + + // All modified config goes into the read/write sets, but in case the map membership changes, we retain the + // config which was the same to add to the read/write sets + sameSet = make(map[string]*cb.ConfigValue) + + for valueName, originalValue := range original { + updatedValue, ok := updated[valueName] + if !ok { + updatedMembers = true + continue + } + + if originalValue.ModPolicy == updatedValue.ModPolicy && bytes.Equal(originalValue.Value, updatedValue.Value) { + sameSet[valueName] = &cb.ConfigValue{ + Version: originalValue.Version, + } + continue + } + + updatedValue.Version = originalValue.Version + 1 + writeSet[valueName] = &cb.ConfigValue{ + Version: originalValue.Version + 1, + ModPolicy: updatedValue.ModPolicy, + Value: updatedValue.Value, + } + } + + for valueName, updatedValue := range updated { + if _, ok := original[valueName]; ok { + // If the updatedValue is in the 
original set of values, it was already handled + continue + } + updatedMembers = true + writeSet[valueName] = &cb.ConfigValue{ + Version: 0, + ModPolicy: updatedValue.ModPolicy, + Value: updatedValue.Value, + } + } + + return +} + +func computeGroupsMapUpdate(original, updated map[string]*cb.ConfigGroup) (readSet, writeSet, sameSet map[string]*cb.ConfigGroup, updatedMembers bool) { + readSet = make(map[string]*cb.ConfigGroup) + writeSet = make(map[string]*cb.ConfigGroup) + + // All modified config goes into the read/write sets, but in case the map membership changes, we retain the + // config which was the same to add to the read/write sets + sameSet = make(map[string]*cb.ConfigGroup) + + for groupName, originalGroup := range original { + updatedGroup, ok := updated[groupName] + if !ok { + updatedMembers = true + continue + } + + groupReadSet, groupWriteSet, groupUpdated := computeGroupUpdate(originalGroup, updatedGroup) + if !groupUpdated { + sameSet[groupName] = groupReadSet + continue + } + + readSet[groupName] = groupReadSet + writeSet[groupName] = groupWriteSet + + } + + for groupName, updatedGroup := range updated { + if _, ok := original[groupName]; ok { + // If the updatedGroup is in the original set of groups, it was already handled + continue + } + updatedMembers = true + _, groupWriteSet, _ := computeGroupUpdate(newConfigGroup(), updatedGroup) + writeSet[groupName] = &cb.ConfigGroup{ + Version: 0, + ModPolicy: updatedGroup.ModPolicy, + Policies: groupWriteSet.Policies, + Values: groupWriteSet.Values, + Groups: groupWriteSet.Groups, + } + } + + return +} + +func computeGroupUpdate(original, updated *cb.ConfigGroup) (readSet, writeSet *cb.ConfigGroup, updatedGroup bool) { + readSetPolicies, writeSetPolicies, sameSetPolicies, policiesMembersUpdated := computePoliciesMapUpdate(original.Policies, updated.Policies) + readSetValues, writeSetValues, sameSetValues, valuesMembersUpdated := computeValuesMapUpdate(original.Values, updated.Values) + readSetGroups, writeSetGroups, sameSetGroups, groupsMembersUpdated := computeGroupsMapUpdate(original.Groups, updated.Groups) + + // If the updated group is 'Equal' to the updated group (none of the members nor the mod policy changed) + if !(policiesMembersUpdated || valuesMembersUpdated || groupsMembersUpdated || original.ModPolicy != updated.ModPolicy) { + + // If there were no modified entries in any of the policies/values/groups maps + if len(readSetPolicies) == 0 && + len(writeSetPolicies) == 0 && + len(readSetValues) == 0 && + len(writeSetValues) == 0 && + len(readSetGroups) == 0 && + len(writeSetGroups) == 0 { + return &cb.ConfigGroup{ + Version: original.Version, + }, &cb.ConfigGroup{ + Version: original.Version, + }, false + } + + return &cb.ConfigGroup{ + Version: original.Version, + Policies: readSetPolicies, + Values: readSetValues, + Groups: readSetGroups, + }, &cb.ConfigGroup{ + Version: original.Version, + Policies: writeSetPolicies, + Values: writeSetValues, + Groups: writeSetGroups, + }, true + } + + for k, samePolicy := range sameSetPolicies { + readSetPolicies[k] = samePolicy + writeSetPolicies[k] = samePolicy + } + + for k, sameValue := range sameSetValues { + readSetValues[k] = sameValue + writeSetValues[k] = sameValue + } + + for k, sameGroup := range sameSetGroups { + readSetGroups[k] = sameGroup + writeSetGroups[k] = sameGroup + } + + updated.Version = original.Version + 1 + + return &cb.ConfigGroup{ + Version: original.Version, + Policies: readSetPolicies, + Values: readSetValues, + Groups: readSetGroups, + }, 
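+		// The write set below echoes the unchanged ("same set") members merged in just
+		// above, advances the group version by one, and carries the mod policy from the
+		// updated group.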
&cb.ConfigGroup{ + Version: original.Version + 1, + Policies: writeSetPolicies, + Values: writeSetValues, + Groups: writeSetGroups, + ModPolicy: updated.ModPolicy, + }, true +} diff --git a/v2/configtx/update_test.go b/v2/configtx/update_test.go new file mode 100644 index 0000000..0e9d31f --- /dev/null +++ b/v2/configtx/update_test.go @@ -0,0 +1,594 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package configtx + +import ( + "testing" + + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + . "github.com/onsi/gomega" + "google.golang.org/protobuf/proto" +) + +func TestNoUpdate(t *testing.T) { + gt := NewGomegaWithT(t) + original := &cb.ConfigGroup{ + Version: 7, + } + updated := &cb.ConfigGroup{} + + _, err := computeConfigUpdate(&cb.Config{ + ChannelGroup: original, + }, &cb.Config{ + ChannelGroup: updated, + }) + + gt.Expect(err).To(HaveOccurred()) +} + +func TestMissingGroup(t *testing.T) { + gt := NewGomegaWithT(t) + group := &cb.ConfigGroup{} + t.Run("MissingOriginal", func(t *testing.T) { + _, err := computeConfigUpdate(&cb.Config{}, &cb.Config{ChannelGroup: group}) + + gt.Expect(err).To(HaveOccurred()) + gt.Expect(err).To(MatchError("no channel group included for original config")) + }) + t.Run("MissingOriginal", func(t *testing.T) { + _, err := computeConfigUpdate(&cb.Config{ChannelGroup: group}, &cb.Config{}) + + gt.Expect(err).To(HaveOccurred()) + gt.Expect(err).To(MatchError("no channel group included for updated config")) + }) +} + +func TestGroupModPolicyUpdate(t *testing.T) { + gt := NewGomegaWithT(t) + original := &cb.ConfigGroup{ + Version: 7, + ModPolicy: "foo", + } + updated := &cb.ConfigGroup{ + ModPolicy: "bar", + } + + cu, err := computeConfigUpdate(&cb.Config{ + ChannelGroup: original, + }, &cb.Config{ + ChannelGroup: updated, + }) + + gt.Expect(err).NotTo(HaveOccurred()) + + expectedReadSet := &cb.ConfigGroup{ + Version: original.Version, + Groups: map[string]*cb.ConfigGroup{}, + Policies: map[string]*cb.ConfigPolicy{}, + Values: map[string]*cb.ConfigValue{}, + } + + gt.Expect(expectedReadSet).To(Equal(cu.ReadSet), "Mismatched read set") + + expectedWriteSet := &cb.ConfigGroup{ + Version: original.Version + 1, + Groups: map[string]*cb.ConfigGroup{}, + Policies: map[string]*cb.ConfigPolicy{}, + Values: map[string]*cb.ConfigValue{}, + ModPolicy: updated.ModPolicy, + } + + gt.Expect(expectedWriteSet).To(Equal(cu.WriteSet), "Mismatched write set") +} + +func TestGroupPolicyModification(t *testing.T) { + gt := NewGomegaWithT(t) + policy1Name := "foo" + policy2Name := "bar" + original := &cb.ConfigGroup{ + Version: 4, + Policies: map[string]*cb.ConfigPolicy{ + policy1Name: { + Version: 2, + Policy: &cb.Policy{ + Type: 3, + }, + }, + policy2Name: { + Version: 1, + Policy: &cb.Policy{ + Type: 5, + }, + }, + }, + } + updated := &cb.ConfigGroup{ + Policies: map[string]*cb.ConfigPolicy{ + policy1Name: original.Policies[policy1Name], + policy2Name: { + Policy: &cb.Policy{ + Type: 9, + }, + }, + }, + } + + cu, err := computeConfigUpdate(&cb.Config{ + ChannelGroup: original, + }, &cb.Config{ + ChannelGroup: updated, + }) + + gt.Expect(err).NotTo(HaveOccurred()) + + expectedReadSet := &cb.ConfigGroup{ + Version: original.Version, + Policies: map[string]*cb.ConfigPolicy{}, + Values: map[string]*cb.ConfigValue{}, + Groups: map[string]*cb.ConfigGroup{}, + } + + gt.Expect(expectedReadSet).To(Equal(cu.ReadSet), "Mismatched read set") + + expectedWriteSet := &cb.ConfigGroup{ + Version: original.Version, + Policies: 
map[string]*cb.ConfigPolicy{ + policy2Name: { + Policy: &cb.Policy{ + Type: updated.Policies[policy2Name].Policy.Type, + }, + Version: original.Policies[policy2Name].Version + 1, + }, + }, + Values: map[string]*cb.ConfigValue{}, + Groups: map[string]*cb.ConfigGroup{}, + } + + gt.Expect(proto.Equal(expectedWriteSet, cu.WriteSet)).To(BeTrue(), "Mismatched write set") +} + +func TestGroupValueModification(t *testing.T) { + gt := NewGomegaWithT(t) + value1Name := "foo" + value2Name := "bar" + original := &cb.ConfigGroup{ + Version: 7, + Values: map[string]*cb.ConfigValue{ + value1Name: { + Version: 3, + Value: []byte("value1value"), + }, + value2Name: { + Version: 6, + Value: []byte("value2value"), + }, + }, + } + updated := &cb.ConfigGroup{ + Values: map[string]*cb.ConfigValue{ + value1Name: original.Values[value1Name], + value2Name: { + Value: []byte("updatedValued2Value"), + }, + }, + } + + cu, err := computeConfigUpdate(&cb.Config{ + ChannelGroup: original, + }, &cb.Config{ + ChannelGroup: updated, + }) + + gt.Expect(err).NotTo(HaveOccurred()) + + expectedReadSet := &cb.ConfigGroup{ + Version: original.Version, + Values: map[string]*cb.ConfigValue{}, + Policies: map[string]*cb.ConfigPolicy{}, + Groups: map[string]*cb.ConfigGroup{}, + } + + gt.Expect(expectedReadSet).To(Equal(cu.ReadSet), "Mismatched read set") + + expectedWriteSet := &cb.ConfigGroup{ + Version: original.Version, + Values: map[string]*cb.ConfigValue{ + value2Name: { + Value: updated.Values[value2Name].Value, + Version: original.Values[value2Name].Version + 1, + }, + }, + Policies: map[string]*cb.ConfigPolicy{}, + Groups: map[string]*cb.ConfigGroup{}, + } + + gt.Expect(expectedWriteSet).To(Equal(cu.WriteSet), "Mismatched write set") +} + +func TestGroupGroupsModification(t *testing.T) { + gt := NewGomegaWithT(t) + subGroupName := "foo" + original := &cb.ConfigGroup{ + Version: 7, + Groups: map[string]*cb.ConfigGroup{ + subGroupName: { + Version: 3, + Values: map[string]*cb.ConfigValue{ + "testValue": { + Version: 3, + }, + }, + }, + }, + } + updated := &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + subGroupName: {}, + }, + } + + cu, err := computeConfigUpdate(&cb.Config{ + ChannelGroup: original, + }, &cb.Config{ + ChannelGroup: updated, + }) + + gt.Expect(err).NotTo(HaveOccurred()) + + expectedReadSet := &cb.ConfigGroup{ + Version: original.Version, + Groups: map[string]*cb.ConfigGroup{ + subGroupName: { + Version: original.Groups[subGroupName].Version, + Policies: map[string]*cb.ConfigPolicy{}, + Values: map[string]*cb.ConfigValue{}, + Groups: map[string]*cb.ConfigGroup{}, + }, + }, + Policies: map[string]*cb.ConfigPolicy{}, + Values: map[string]*cb.ConfigValue{}, + } + + gt.Expect(expectedReadSet).To(Equal(cu.ReadSet), "Mismatched read set") + + expectedWriteSet := &cb.ConfigGroup{ + Version: original.Version, + Groups: map[string]*cb.ConfigGroup{ + subGroupName: { + Version: original.Groups[subGroupName].Version + 1, + Groups: map[string]*cb.ConfigGroup{}, + Policies: map[string]*cb.ConfigPolicy{}, + Values: map[string]*cb.ConfigValue{}, + }, + }, + Policies: map[string]*cb.ConfigPolicy{}, + Values: map[string]*cb.ConfigValue{}, + } + + gt.Expect(expectedWriteSet).To(Equal(cu.WriteSet), "Mismatched write set") +} + +func TestGroupValueAddition(t *testing.T) { + gt := NewGomegaWithT(t) + value1Name := "foo" + value2Name := "bar" + original := &cb.ConfigGroup{ + Version: 7, + Values: map[string]*cb.ConfigValue{ + value1Name: { + Version: 3, + Value: []byte("value1value"), + }, + }, + } + updated := 
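+	// Note: value2Name is declared with Version 9 here, but computeConfigUpdate assigns
+	// added members Version 0 in the write set, as expectedWriteSet below asserts.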
&cb.ConfigGroup{ + Values: map[string]*cb.ConfigValue{ + value1Name: original.Values[value1Name], + value2Name: { + Version: 9, + Value: []byte("newValue2"), + }, + }, + } + + cu, err := computeConfigUpdate(&cb.Config{ + ChannelGroup: original, + }, &cb.Config{ + ChannelGroup: updated, + }) + + gt.Expect(err).NotTo(HaveOccurred()) + + expectedReadSet := &cb.ConfigGroup{ + Version: original.Version, + Values: map[string]*cb.ConfigValue{ + value1Name: { + Version: original.Values[value1Name].Version, + }, + }, + Policies: map[string]*cb.ConfigPolicy{}, + Groups: map[string]*cb.ConfigGroup{}, + } + + gt.Expect(expectedReadSet).To(Equal(cu.ReadSet), "Mismatched read set") + + expectedWriteSet := &cb.ConfigGroup{ + Version: original.Version + 1, + Values: map[string]*cb.ConfigValue{ + value1Name: { + Version: original.Values[value1Name].Version, + }, + value2Name: { + Value: updated.Values[value2Name].Value, + Version: 0, + }, + }, + Policies: map[string]*cb.ConfigPolicy{}, + Groups: map[string]*cb.ConfigGroup{}, + } + + gt.Expect(expectedWriteSet).To(Equal(cu.WriteSet), "Mismatched write set") +} + +func TestGroupPolicySwap(t *testing.T) { + gt := NewGomegaWithT(t) + policy1Name := "foo" + policy2Name := "bar" + original := &cb.ConfigGroup{ + Version: 4, + Policies: map[string]*cb.ConfigPolicy{ + policy1Name: { + Version: 2, + Policy: &cb.Policy{ + Type: 3, + }, + }, + }, + } + updated := &cb.ConfigGroup{ + Policies: map[string]*cb.ConfigPolicy{ + policy2Name: { + Version: 1, + Policy: &cb.Policy{ + Type: 5, + }, + }, + }, + } + + cu, err := computeConfigUpdate(&cb.Config{ + ChannelGroup: original, + }, &cb.Config{ + ChannelGroup: updated, + }) + + gt.Expect(err).NotTo(HaveOccurred()) + + expectedReadSet := &cb.ConfigGroup{ + Version: original.Version, + Policies: map[string]*cb.ConfigPolicy{}, + Values: map[string]*cb.ConfigValue{}, + Groups: map[string]*cb.ConfigGroup{}, + } + + gt.Expect(expectedReadSet).To(Equal(cu.ReadSet), "Mismatched read set") + + expectedWriteSet := &cb.ConfigGroup{ + Version: original.Version + 1, + Policies: map[string]*cb.ConfigPolicy{ + policy2Name: { + Policy: &cb.Policy{ + Type: updated.Policies[policy2Name].Policy.Type, + }, + Version: 0, + }, + }, + Values: map[string]*cb.ConfigValue{}, + Groups: map[string]*cb.ConfigGroup{}, + } + + gt.Expect(expectedWriteSet).To(Equal(cu.WriteSet), "Mismatched write set") +} + +func TestComplex(t *testing.T) { + gt := NewGomegaWithT(t) + existingGroup1Name := "existingGroup1" + existingGroup2Name := "existingGroup2" + existingPolicyName := "existingPolicy" + original := &cb.ConfigGroup{ + Version: 4, + Groups: map[string]*cb.ConfigGroup{ + existingGroup1Name: { + Version: 2, + }, + existingGroup2Name: { + Version: 2, + }, + }, + Policies: map[string]*cb.ConfigPolicy{ + existingPolicyName: { + Version: 8, + Policy: &cb.Policy{ + Type: 5, + }, + }, + }, + } + + newGroupName := "newGroup" + newPolicyName := "newPolicy" + newValueName := "newValue" + updated := &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + existingGroup1Name: {}, + newGroupName: { + Values: map[string]*cb.ConfigValue{ + newValueName: {}, + }, + }, + }, + Policies: map[string]*cb.ConfigPolicy{ + existingPolicyName: { + Policy: &cb.Policy{ + Type: 5, + }, + }, + newPolicyName: { + Version: 6, + Policy: &cb.Policy{ + Type: 5, + }, + }, + }, + } + + cu, err := computeConfigUpdate(&cb.Config{ + ChannelGroup: original, + }, &cb.Config{ + ChannelGroup: updated, + }) + + gt.Expect(err).NotTo(HaveOccurred()) + + expectedReadSet := &cb.ConfigGroup{ + Version: 
original.Version, + Policies: map[string]*cb.ConfigPolicy{ + existingPolicyName: { + Version: original.Policies[existingPolicyName].Version, + }, + }, + Values: map[string]*cb.ConfigValue{}, + Groups: map[string]*cb.ConfigGroup{ + existingGroup1Name: { + Version: original.Groups[existingGroup1Name].Version, + }, + }, + } + + gt.Expect(expectedReadSet).To(Equal(cu.ReadSet), "Mismatched read set") + + expectedWriteSet := &cb.ConfigGroup{ + Version: original.Version + 1, + Policies: map[string]*cb.ConfigPolicy{ + existingPolicyName: { + Version: original.Policies[existingPolicyName].Version, + }, + newPolicyName: { + Version: 0, + Policy: &cb.Policy{ + Type: 5, + }, + }, + }, + Groups: map[string]*cb.ConfigGroup{ + existingGroup1Name: { + Version: original.Groups[existingGroup1Name].Version, + }, + newGroupName: { + Version: 0, + Values: map[string]*cb.ConfigValue{ + newValueName: {}, + }, + Policies: map[string]*cb.ConfigPolicy{}, + Groups: map[string]*cb.ConfigGroup{}, + }, + }, + Values: map[string]*cb.ConfigValue{}, + } + + gt.Expect(expectedWriteSet).To(Equal(cu.WriteSet), "Mismatched write set") +} + +func TestTwiceNestedModification(t *testing.T) { + gt := NewGomegaWithT(t) + subGroupName := "foo" + subSubGroupName := "bar" + valueName := "testValue" + original := &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + subGroupName: { + Groups: map[string]*cb.ConfigGroup{ + subSubGroupName: { + Values: map[string]*cb.ConfigValue{ + valueName: {}, + }, + }, + }, + }, + }, + } + updated := &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + subGroupName: { + Groups: map[string]*cb.ConfigGroup{ + subSubGroupName: { + Values: map[string]*cb.ConfigValue{ + valueName: { + ModPolicy: "new", + }, + }, + }, + }, + }, + }, + } + + cu, err := computeConfigUpdate(&cb.Config{ + ChannelGroup: original, + }, &cb.Config{ + ChannelGroup: updated, + }) + + gt.Expect(err).NotTo(HaveOccurred()) + + expectedReadSet := &cb.ConfigGroup{ + Version: original.Version, + Groups: map[string]*cb.ConfigGroup{ + subGroupName: { + Groups: map[string]*cb.ConfigGroup{ + subSubGroupName: { + Policies: map[string]*cb.ConfigPolicy{}, + Values: map[string]*cb.ConfigValue{}, + Groups: map[string]*cb.ConfigGroup{}, + }, + }, + Policies: map[string]*cb.ConfigPolicy{}, + Values: map[string]*cb.ConfigValue{}, + }, + }, + Policies: map[string]*cb.ConfigPolicy{}, + Values: map[string]*cb.ConfigValue{}, + } + + gt.Expect(expectedReadSet).To(Equal(cu.ReadSet), "Mismatched read set") + + expectedWriteSet := &cb.ConfigGroup{ + Version: original.Version, + Groups: map[string]*cb.ConfigGroup{ + subGroupName: { + Groups: map[string]*cb.ConfigGroup{ + subSubGroupName: { + Values: map[string]*cb.ConfigValue{ + valueName: { + Version: original.Groups[subGroupName].Groups[subSubGroupName].Values[valueName].Version + 1, + ModPolicy: updated.Groups[subGroupName].Groups[subSubGroupName].Values[valueName].ModPolicy, + }, + }, + Policies: map[string]*cb.ConfigPolicy{}, + Groups: map[string]*cb.ConfigGroup{}, + }, + }, + Policies: map[string]*cb.ConfigPolicy{}, + Values: map[string]*cb.ConfigValue{}, + }, + }, + Policies: map[string]*cb.ConfigPolicy{}, + Values: map[string]*cb.ConfigValue{}, + } + + gt.Expect(expectedWriteSet).To(Equal(cu.WriteSet), "Mismatched write set") +} diff --git a/v2/go.mod b/v2/go.mod new file mode 100644 index 0000000..3644860 --- /dev/null +++ b/v2/go.mod @@ -0,0 +1,21 @@ +module github.com/hyperledger/fabric-config/v2 + +go 1.20 + +require ( + github.com/Knetic/govaluate v3.0.0+incompatible + 
github.com/hyperledger/fabric-protos-go-apiv2 v0.3.3 + github.com/onsi/gomega v1.9.0 + google.golang.org/protobuf v1.33.0 +) + +require ( + github.com/golang/protobuf v1.5.3 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/grpc v1.57.0 // indirect + gopkg.in/yaml.v2 v2.2.4 // indirect +) diff --git a/v2/go.sum b/v2/go.sum new file mode 100644 index 0000000..b785551 --- /dev/null +++ b/v2/go.sum @@ -0,0 +1,52 @@ +github.com/Knetic/govaluate v3.0.0+incompatible h1:7o6+MAPhYTCF0+fdvoz1xDedhRb4f6s9Tn1Tt7/WTEg= +github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hyperledger/fabric-protos-go v0.3.3 h1:0nssqz8QWJNVNBVQz+IIfAd2j1ku7QPKFSM/1anKizI= +github.com/hyperledger/fabric-protos-go v0.3.3/go.mod h1:BPXse9gIOQwyAePQrwQVUcc44bTW4bB5V3tujuvyArk= +github.com/hyperledger/fabric-protos-go-apiv2 v0.3.3/go.mod h1:2pq0ui6ZWA0cC8J+eCErgnMDCS1kPOEYVY+06ZAK0qE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go.mod h1:zBEcrKX2ZOcEkHWxBPAIvYUWOKKMIhYcmNiUIu2ji3I= +google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/v2/protolator/api.go b/v2/protolator/api.go new file mode 100644 index 0000000..c6b03c4 --- /dev/null +++ b/v2/protolator/api.go @@ -0,0 +1,147 @@ +/* +Copyright IBM Corp. 2017 All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package protolator + +import ( + "google.golang.org/protobuf/proto" +) + +// ///////////////////////////////////////////////////////////////////////////////////////////////// +// +// This set of interfaces and methods is designed to allow protos to have Go methods attached +// to them, so that they may be automatically marshaled to human readable JSON (where the +// opaque byte fields are represented as their expanded proto contents) and back once again +// to standard proto messages. +// +// There are currently three different types of interfaces available for protos to implement: +// +// 1. StaticallyOpaque*FieldProto: These interfaces should be implemented by protos which have +// opaque byte fields whose marshaled type is known at compile time. This is mostly true +// for the signature oriented fields like the Envelope.Payload, or Header.ChannelHeader +// +// 2. VariablyOpaque*FieldProto: These interfaces are identical to the StaticallyOpaque*FieldProto +// definitions, with the exception that they are guaranteed to be evaluated after the +// StaticallyOpaque*FieldProto definitions. In particular, this allows for the type selection of +// a VariablyOpaque*FieldProto to depend on data populated by the StaticallyOpaque*FieldProtos. 
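+// A sketch of a VariablyOpaqueFieldProto whose type selection works this way (the
+// PayloadWrapper type, the common package alias, and the field names below are
+// illustrative only, not part of this package):
+//
+//	func (p *PayloadWrapper) VariablyOpaqueFields() []string { return []string{"data"} }
+//
+//	func (p *PayloadWrapper) VariablyOpaqueFieldProto(name string) (proto.Message, error) {
+//		if name != "data" {
+//			return nil, fmt.Errorf("not a marshaled field: %s", name)
+//		}
+//		// Header.ChannelHeader is along a statically marshaled path, so its bytes are
+//		// guaranteed to be populated before this hook is evaluated.
+//		ch := &common.ChannelHeader{}
+//		if err := proto.Unmarshal(p.Header.ChannelHeader, ch); err != nil {
+//			return nil, err
+//		}
+//		switch common.HeaderType(ch.Type) {
+//		case common.HeaderType_CONFIG:
+//			return &common.ConfigEnvelope{}, nil
+//		default:
+//			return nil, fmt.Errorf("unsupported header type: %d", ch.Type)
+//		}
+//	}
+//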
+// For example, the Payload.data field depends upon the Payload.Header.ChannelHeader.type field, +// which is along a statically marshaled path. +// +// 3. Dynamic*FieldProto: These interfaces are for messages which contain other messages whose +// attributes cannot be determined at compile time. For example, a ConfigValue message may evaluate +// the map field values["MSP"] successfully in an organization context, but not at all in a channel +// context. Because go is not a dynamic language, this dynamic behavior must be simulated by +// wrapping the underlying proto message in another type which can be configured at runtime with +// different contextual behavior. (See tests for examples) +// +// ///////////////////////////////////////////////////////////////////////////////////////////////// + +// StaticallyOpaqueFieldProto should be implemented by protos which have bytes fields which +// are the marshaled value of a fixed type +type StaticallyOpaqueFieldProto interface { + // StaticallyOpaqueFields returns the field names which contain opaque data + StaticallyOpaqueFields() []string + + // StaticallyOpaqueFieldProto returns a newly allocated proto message of the correct + // type for the field name. + StaticallyOpaqueFieldProto(name string) (proto.Message, error) +} + +// StaticallyOpaqueMapFieldProto should be implemented by protos which have maps to bytes fields +// which are the marshaled value of a fixed type +type StaticallyOpaqueMapFieldProto interface { + // StaticallyOpaqueMapFields returns the field names which contain opaque data + StaticallyOpaqueMapFields() []string + + // StaticallyOpaqueMapFieldProto returns a newly allocated proto message of the correct + // type for the field name. + StaticallyOpaqueMapFieldProto(name string, key string) (proto.Message, error) +} + +// StaticallyOpaqueSliceFieldProto should be implemented by protos which have maps to bytes fields +// which are the marshaled value of a fixed type +type StaticallyOpaqueSliceFieldProto interface { + // StaticallyOpaqueSliceFields returns the field names which contain opaque data + StaticallyOpaqueSliceFields() []string + + // StaticallyOpaqueSliceFieldProto returns a newly allocated proto message of the correct + // type for the field name. + StaticallyOpaqueSliceFieldProto(name string, index int) (proto.Message, error) +} + +// VariablyOpaqueFieldProto should be implemented by protos which have bytes fields which +// are the marshaled value depends upon the other contents of the proto +type VariablyOpaqueFieldProto interface { + // VariablyOpaqueFields returns the field names which contain opaque data + VariablyOpaqueFields() []string + + // VariablyOpaqueFieldProto returns a newly allocated proto message of the correct + // type for the field name. + VariablyOpaqueFieldProto(name string) (proto.Message, error) +} + +// VariablyOpaqueMapFieldProto should be implemented by protos which have maps to bytes fields +// which are the marshaled value of a a message type determined by the other contents of the proto +type VariablyOpaqueMapFieldProto interface { + // VariablyOpaqueMapFields returns the field names which contain opaque data + VariablyOpaqueMapFields() []string + + // VariablyOpaqueMapFieldProto returns a newly allocated proto message of the correct + // type for the field name. 
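+	// The key argument identifies which map entry's opaque bytes are being resolved.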
+	VariablyOpaqueMapFieldProto(name string, key string) (proto.Message, error)
+}
+
+// VariablyOpaqueSliceFieldProto should be implemented by protos which have slices of bytes fields
+// whose marshaled value is of a message type determined by the other contents of the proto
+type VariablyOpaqueSliceFieldProto interface {
+	// VariablyOpaqueSliceFields returns the field names which contain opaque data
+	VariablyOpaqueSliceFields() []string
+
+	// VariablyOpaqueSliceFieldProto returns a newly allocated proto message of the correct
+	// type for the field name.
+	VariablyOpaqueSliceFieldProto(name string, index int) (proto.Message, error)
+}
+
+// DynamicFieldProto should be implemented by protos which have nested fields whose attributes
+// (such as their opaque types) cannot be determined until runtime
+type DynamicFieldProto interface {
+	// DynamicFields returns the field names which are dynamic
+	DynamicFields() []string
+
+	// DynamicFieldProto returns a newly allocated dynamic message, decorating an underlying
+	// proto message with the runtime determined function
+	DynamicFieldProto(name string, underlying proto.Message) (proto.Message, error)
+}
+
+// DynamicMapFieldProto should be implemented by protos which have maps to messages whose attributes
+// (such as their opaque types) cannot be determined until runtime
+type DynamicMapFieldProto interface {
+	// DynamicMapFields returns the field names which are dynamic
+	DynamicMapFields() []string
+
+	// DynamicMapFieldProto returns a newly allocated dynamic message, decorating an underlying
+	// proto message with the runtime determined function
+	DynamicMapFieldProto(name string, key string, underlying proto.Message) (proto.Message, error)
+}
+
+// DynamicSliceFieldProto should be implemented by protos which have slices of messages whose attributes
+// (such as their opaque types) cannot be determined until runtime
+type DynamicSliceFieldProto interface {
+	// DynamicSliceFields returns the field names which are dynamic
+	DynamicSliceFields() []string
+
+	// DynamicSliceFieldProto returns a newly allocated dynamic message, decorating an underlying
+	// proto message with the runtime determined function
+	DynamicSliceFieldProto(name string, index int, underlying proto.Message) (proto.Message, error)
+}
+
+// DecoratedProto should be implemented by the dynamic wrappers applied by the Dynamic*FieldProto interfaces.
+// This is necessary for the proto system to unmarshal, because it discovers the proto message type by reflection
+// (rather than by interface definition as it probably should; see https://google.golang.org/protobuf/issues/291).
+type DecoratedProto interface {
+	// Underlying returns the underlying proto message which is being dynamically decorated
+	Underlying() proto.Message
+}
diff --git a/v2/protolator/dynamic.go b/v2/protolator/dynamic.go
new file mode 100644
index 0000000..ce95884
--- /dev/null
+++ b/v2/protolator/dynamic.go
@@ -0,0 +1,149 @@
+/*
+Copyright IBM Corp. 2017 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package protolator + +import ( + "reflect" + + "google.golang.org/protobuf/proto" +) + +func dynamicFrom(dynamicMsg func(underlying proto.Message) (proto.Message, error), value interface{}, destType reflect.Type) (reflect.Value, error) { + tree := value.(map[string]interface{}) // Safe, already checked + uMsg := reflect.New(destType.Elem()) + nMsg, err := dynamicMsg(uMsg.Interface().(proto.Message)) // Safe, already checked + if err != nil { + return reflect.Value{}, err + } + if err := recursivelyPopulateMessageFromTree(tree, nMsg); err != nil { + return reflect.Value{}, err + } + return uMsg, nil +} + +func dynamicTo(dynamicMsg func(underlying proto.Message) (proto.Message, error), value reflect.Value) (interface{}, error) { + nMsg, err := dynamicMsg(value.Interface().(proto.Message)) // Safe, already checked + if err != nil { + return nil, err + } + return recursivelyCreateTreeFromMessage(nMsg) +} + +type dynamicFieldFactory struct{} + +func (dff dynamicFieldFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + dynamicProto, ok := msg.(DynamicFieldProto) + if !ok { + return false + } + + return stringInSlice(fieldName, dynamicProto.DynamicFields()) +} + +func (dff dynamicFieldFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + dynamicProto, _ := msg.(DynamicFieldProto) // Type checked in Handles + + return &plainField{ + baseField: baseField{ + msg: msg, + name: fieldName, + fType: mapStringInterfaceType, + vType: fieldType, + value: fieldValue, + }, + populateFrom: func(v interface{}, dT reflect.Type) (reflect.Value, error) { + return dynamicFrom(func(underlying proto.Message) (proto.Message, error) { + return dynamicProto.DynamicFieldProto(fieldName, underlying) + }, v, dT) + }, + populateTo: func(v reflect.Value) (interface{}, error) { + return dynamicTo(func(underlying proto.Message) (proto.Message, error) { + return dynamicProto.DynamicFieldProto(fieldName, underlying) + }, v) + }, + }, nil +} + +type dynamicMapFieldFactory struct{} + +func (dmff dynamicMapFieldFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + dynamicProto, ok := msg.(DynamicMapFieldProto) + if !ok { + return false + } + + return stringInSlice(fieldName, dynamicProto.DynamicMapFields()) +} + +func (dmff dynamicMapFieldFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + dynamicProto := msg.(DynamicMapFieldProto) // Type checked by Handles + + return &mapField{ + baseField: baseField{ + msg: msg, + name: fieldName, + fType: mapStringInterfaceType, + vType: fieldType, + value: fieldValue, + }, + populateFrom: func(k string, v interface{}, dT reflect.Type) (reflect.Value, error) { + return dynamicFrom(func(underlying proto.Message) (proto.Message, error) { + return dynamicProto.DynamicMapFieldProto(fieldName, k, underlying) + }, v, dT) + }, + populateTo: func(k string, v reflect.Value) (interface{}, error) { + return dynamicTo(func(underlying proto.Message) (proto.Message, error) { + return dynamicProto.DynamicMapFieldProto(fieldName, k, underlying) + }, v) + }, + }, nil +} + +type dynamicSliceFieldFactory struct{} + +func (dmff dynamicSliceFieldFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + dynamicProto, ok := msg.(DynamicSliceFieldProto) + if !ok { + return false + 
} + + return stringInSlice(fieldName, dynamicProto.DynamicSliceFields()) +} + +func (dmff dynamicSliceFieldFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + dynamicProto := msg.(DynamicSliceFieldProto) // Type checked by Handles + + return &sliceField{ + baseField: baseField{ + msg: msg, + name: fieldName, + fType: mapStringInterfaceType, + vType: fieldType, + value: fieldValue, + }, + populateFrom: func(i int, v interface{}, dT reflect.Type) (reflect.Value, error) { + return dynamicFrom(func(underlying proto.Message) (proto.Message, error) { + return dynamicProto.DynamicSliceFieldProto(fieldName, i, underlying) + }, v, dT) + }, + populateTo: func(i int, v reflect.Value) (interface{}, error) { + return dynamicTo(func(underlying proto.Message) (proto.Message, error) { + return dynamicProto.DynamicSliceFieldProto(fieldName, i, underlying) + }, v) + }, + }, nil +} diff --git a/v2/protolator/dynamic_test.go b/v2/protolator/dynamic_test.go new file mode 100644 index 0000000..2561418 --- /dev/null +++ b/v2/protolator/dynamic_test.go @@ -0,0 +1,140 @@ +/* +Copyright IBM Corp. 2017 All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package protolator + +import ( + "bytes" + "testing" + + "github.com/hyperledger/fabric-config/v2/protolator/testprotos" + . "github.com/onsi/gomega" +) + +func TestPlainDynamicMsg(t *testing.T) { + gt := NewGomegaWithT(t) + + fromPrefix := "from" + toPrefix := "to" + tppff := &testProtoPlainFieldFactory{ + fromPrefix: fromPrefix, + toPrefix: toPrefix, + } + + fieldFactories = []protoFieldFactory{tppff, variablyOpaqueFieldFactory{}} + + pfValue := "foo" + startMsg := &testprotos.DynamicMsg{ + DynamicType: "SimpleMsg", + PlainDynamicField: &testprotos.ContextlessMsg{ + OpaqueField: protoMarshalOrPanic(&testprotos.SimpleMsg{ + PlainField: pfValue, + }), + }, + } + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + newMsg := &testprotos.DynamicMsg{} + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractSimpleMsgPlainField(newMsg.PlainDynamicField.OpaqueField)).NotTo(Equal(fromPrefix + toPrefix + extractSimpleMsgPlainField(startMsg.PlainDynamicField.OpaqueField))) + + fieldFactories = []protoFieldFactory{tppff, variablyOpaqueFieldFactory{}, dynamicFieldFactory{}} + + buffer.Reset() + err = DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractSimpleMsgPlainField(newMsg.PlainDynamicField.OpaqueField)).To(Equal(fromPrefix + toPrefix + extractSimpleMsgPlainField(startMsg.PlainDynamicField.OpaqueField))) +} + +func TestMapDynamicMsg(t *testing.T) { + gt := NewGomegaWithT(t) + + fromPrefix := "from" + toPrefix := "to" + tppff := &testProtoPlainFieldFactory{ + fromPrefix: fromPrefix, + toPrefix: toPrefix, + } + + fieldFactories = []protoFieldFactory{tppff, variablyOpaqueFieldFactory{}} + + pfValue := "foo" + mapKey := "bar" + startMsg := &testprotos.DynamicMsg{ + DynamicType: "SimpleMsg", + MapDynamicField: map[string]*testprotos.ContextlessMsg{ + mapKey: { + OpaqueField: protoMarshalOrPanic(&testprotos.SimpleMsg{ + PlainField: pfValue, + }), + }, + }, + } + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + newMsg := &testprotos.DynamicMsg{} + err = 
DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractSimpleMsgPlainField(newMsg.MapDynamicField[mapKey].OpaqueField)).NotTo(Equal(fromPrefix + toPrefix + extractSimpleMsgPlainField(startMsg.MapDynamicField[mapKey].OpaqueField))) + + fieldFactories = []protoFieldFactory{tppff, variablyOpaqueFieldFactory{}, dynamicMapFieldFactory{}} + + buffer.Reset() + err = DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractSimpleMsgPlainField(newMsg.MapDynamicField[mapKey].OpaqueField)).To(Equal(fromPrefix + toPrefix + extractSimpleMsgPlainField(startMsg.MapDynamicField[mapKey].OpaqueField))) +} + +func TestSliceDynamicMsg(t *testing.T) { + gt := NewGomegaWithT(t) + + fromPrefix := "from" + toPrefix := "to" + tppff := &testProtoPlainFieldFactory{ + fromPrefix: fromPrefix, + toPrefix: toPrefix, + } + + fieldFactories = []protoFieldFactory{tppff, variablyOpaqueFieldFactory{}} + + pfValue := "foo" + startMsg := &testprotos.DynamicMsg{ + DynamicType: "SimpleMsg", + SliceDynamicField: []*testprotos.ContextlessMsg{ + { + OpaqueField: protoMarshalOrPanic(&testprotos.SimpleMsg{ + PlainField: pfValue, + }), + }, + }, + } + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + newMsg := &testprotos.DynamicMsg{} + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractSimpleMsgPlainField(newMsg.SliceDynamicField[0].OpaqueField)).NotTo(Equal(fromPrefix + toPrefix + extractSimpleMsgPlainField(startMsg.SliceDynamicField[0].OpaqueField))) + + fieldFactories = []protoFieldFactory{tppff, variablyOpaqueFieldFactory{}, dynamicSliceFieldFactory{}} + + buffer.Reset() + err = DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractSimpleMsgPlainField(newMsg.SliceDynamicField[0].OpaqueField)).To(Equal(fromPrefix + toPrefix + extractSimpleMsgPlainField(startMsg.SliceDynamicField[0].OpaqueField))) +} diff --git a/v2/protolator/integration/integration_test.go b/v2/protolator/integration/integration_test.go new file mode 100644 index 0000000..dcaa2f8 --- /dev/null +++ b/v2/protolator/integration/integration_test.go @@ -0,0 +1,327 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package integration + +import ( + "bytes" + "os" + "testing" + + "github.com/hyperledger/fabric-config/v2/protolator" + cb "github.com/hyperledger/fabric-protos-go-apiv2/common" + mb "github.com/hyperledger/fabric-protos-go-apiv2/msp" + pb "github.com/hyperledger/fabric-protos-go-apiv2/peer" + . 
"github.com/onsi/gomega" + "google.golang.org/protobuf/proto" +) + +func bidirectionalMarshal(t *testing.T, doc proto.Message) { + gt := NewGomegaWithT(t) + + var buffer bytes.Buffer + + err := protolator.DeepMarshalJSON(&buffer, doc) + gt.Expect(err).NotTo(HaveOccurred()) + + newRoot := proto.Clone(doc) + proto.Reset(newRoot) + err = protolator.DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newRoot) + gt.Expect(err).NotTo(HaveOccurred()) + + // Note, we cannot do an equality check between newRoot and sampleDoc + // because of the nondeterministic nature of binary proto marshaling + // So instead we re-marshal to JSON which is a deterministic marshaling + // and compare equality there instead + + var remarshaled bytes.Buffer + err = protolator.DeepMarshalJSON(&remarshaled, newRoot) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(remarshaled.String()).To(MatchJSON(buffer.String())) +} + +func TestConfigUpdate(t *testing.T) { + gt := NewGomegaWithT(t) + + blockBin, err := os.ReadFile("testdata/block.pb") + gt.Expect(err).NotTo(HaveOccurred()) + + block := &cb.Block{} + err = proto.Unmarshal(blockBin, block) + gt.Expect(err).NotTo(HaveOccurred()) + + envelope := &cb.Envelope{} + err = proto.Unmarshal(block.Data.Data[0], envelope) + gt.Expect(err).NotTo(HaveOccurred()) + + blockDataPayload := &cb.Payload{} + err = proto.Unmarshal(envelope.Payload, blockDataPayload) + gt.Expect(err).NotTo(HaveOccurred()) + + config := &cb.ConfigEnvelope{} + err = proto.Unmarshal(blockDataPayload.Data, config) + gt.Expect(err).NotTo(HaveOccurred()) + + bidirectionalMarshal(t, &cb.ConfigUpdateEnvelope{ + ConfigUpdate: protoMarshalOrPanic(&cb.ConfigUpdate{ + ReadSet: config.Config.ChannelGroup, + WriteSet: config.Config.ChannelGroup, + }), + }) +} + +func TestIdemix(t *testing.T) { + bidirectionalMarshal(t, &mb.MSPConfig{ + Type: 1, + Config: protoMarshalOrPanic(&mb.IdemixMSPConfig{ + Name: "fooo", + }), + }) +} + +func TestBlock(t *testing.T) { + gt := NewGomegaWithT(t) + + blockBin, err := os.ReadFile("testdata/block.pb") + gt.Expect(err).NotTo(HaveOccurred()) + + block := &cb.Block{} + err = proto.Unmarshal(blockBin, block) + gt.Expect(err).NotTo(HaveOccurred()) + + bidirectionalMarshal(t, block) +} + +func TestEmitDefaultsBug(t *testing.T) { + gt := NewGomegaWithT(t) + + block := &cb.Block{ + Header: &cb.BlockHeader{ + PreviousHash: []byte("foo"), + }, + Data: &cb.BlockData{ + Data: [][]byte{ + protoMarshalOrPanic(&cb.Envelope{ + Payload: protoMarshalOrPanic(&cb.Payload{ + Header: &cb.Header{ + ChannelHeader: protoMarshalOrPanic(&cb.ChannelHeader{ + Type: int32(cb.HeaderType_CONFIG), + }), + }, + }), + Signature: []byte("bar"), + }), + }, + }, + } + + buf := &bytes.Buffer{} + err := protolator.DeepMarshalJSON(buf, block) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(buf.String()).To(MatchJSON(` +{ + "data": { + "data": [ + { + "payload": { + "data": null, + "header": { + "channel_header": { + "channel_id": "", + "epoch": "0", + "extension": null, + "timestamp": null, + "tls_cert_hash": "", + "tx_id": "", + "type": 1, + "version": 0 + }, + "signature_header": null + } + }, + "signature": "YmFy" + } + ] + }, + "header": { + "data_hash": "", + "number": "0", + "previous_hash": "Zm9v" + }, + "metadata": null +} +`)) +} + +func TestProposalResponsePayload(t *testing.T) { + gt := NewGomegaWithT(t) + + prp := &pb.ProposalResponsePayload{} + err := protolator.DeepUnmarshalJSON(bytes.NewReader([]byte(`{ + "extension": { + "chaincode_id": { + "name": "test", + "path": "", + "version": "1.0" + }, + "events": 
{ + "chaincode_id": "test" + }, + "response": { + "message": "", + "payload": null, + "status": 200 + }, + "results": { + "data_model": "KV", + "ns_rwset": [ + { + "collection_hashed_rwset": [], + "namespace": "lscc", + "rwset": { + "metadata_writes": [], + "range_queries_info": [], + "reads": [ + { + "key": "cc1", + "version": { + "block_num": "3", + "tx_num": "0" + } + }, + { + "key": "cc2", + "version": { + "block_num": "4", + "tx_num": "0" + } + } + ], + "writes": [] + } + }, + { + "collection_hashed_rwset": [], + "namespace": "cc1", + "rwset": { + "metadata_writes": [], + "range_queries_info": [], + "reads": [ + { + "key": "key1", + "version": { + "block_num": "8", + "tx_num": "0" + } + } + ], + "writes": [ + { + "is_delete": false, + "key": "key2" + } + ] + } + }, + { + "collection_hashed_rwset": [], + "namespace": "cc2", + "rwset": { + "metadata_writes": [], + "range_queries_info": [], + "reads": [ + { + "key": "key1", + "version": { + "block_num": "9", + "tx_num": "0" + } + }, + { + "key": "key2", + "version": { + "block_num": "10", + "tx_num": "0" + } + } + ], + "writes": [ + { + "is_delete": false, + "key": "key1" + }, + { + "is_delete": true, + "key": "key2" + } + ] + } + } + ] + } + } + }`)), prp) + gt.Expect(err).NotTo(HaveOccurred()) + bidirectionalMarshal(t, prp) +} + +func TestChannelCreationPolicy(t *testing.T) { + cu := &cb.ConfigUpdate{ + WriteSet: &cb.ConfigGroup{ + Groups: map[string]*cb.ConfigGroup{ + "Consortiums": { + Groups: map[string]*cb.ConfigGroup{ + "SampleConsortium": { + Values: map[string]*cb.ConfigValue{ + "ChannelCreationPolicy": { + Version: 0, + }, + }, + }, + }, + }, + }, + }, + } + + bidirectionalMarshal(t, cu) +} + +func TestStaticMarshal(t *testing.T) { + gt := NewGomegaWithT(t) + + // To generate artifacts: + // e.g. 
+ // FABRICPATH=$GOPATH/src/github.com/hyperledger/fabric + // configtxgen -channelID test -outputBlock block.pb -profile SampleSingleMSPSolo -configPath FABRICPATH/sampleconfig + // configtxgen -configPath FABRICPATH/sampleconfig -inspectBlock block.pb > block.json + + blockBin, err := os.ReadFile("testdata/block.pb") + gt.Expect(err).NotTo(HaveOccurred()) + + block := &cb.Block{} + err = proto.Unmarshal(blockBin, block) + gt.Expect(err).NotTo(HaveOccurred()) + + jsonBin, err := os.ReadFile("testdata/block.json") + gt.Expect(err).NotTo(HaveOccurred()) + + buf := &bytes.Buffer{} + err = protolator.DeepMarshalJSON(buf, block) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(buf).To(MatchJSON(jsonBin)) +} + +// protoMarshalOrPanic serializes a protobuf message and panics if this +// operation fails +func protoMarshalOrPanic(pb proto.Message) []byte { + data, err := proto.Marshal(pb) + if err != nil { + panic(err) + } + + return data +} diff --git a/v2/protolator/integration/testdata/block.json b/v2/protolator/integration/testdata/block.json new file mode 100644 index 0000000..be326fa --- /dev/null +++ b/v2/protolator/integration/testdata/block.json @@ -0,0 +1,591 @@ +{ + "data": { + "data": [ + { + "payload": { + "data": { + "config": { + "channel_group": { + "groups": { + "Consortiums": { + "groups": { + "SampleConsortium": { + "groups": { + "SampleOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 1, + "value": { + "identities": [ + { + "principal": { + "msp_identifier": "SampleOrg", + "role": "ADMIN" + }, + "principal_classification": "ROLE" + } + ], + "rule": { + "n_out_of": { + "n": 1, + "rules": [ + { + "signed_by": 0 + } + ] + } + }, + "version": 0 + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 1, + "value": { + "identities": [ + { + "principal": { + "msp_identifier": "SampleOrg", + "role": "MEMBER" + }, + "principal_classification": "ROLE" + } + ], + "rule": { + "n_out_of": { + "n": 1, + "rules": [ + { + "signed_by": 0 + } + ] + } + }, + "version": 0 + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 1, + "value": { + "identities": [ + { + "principal": { + "msp_identifier": "SampleOrg", + "role": "MEMBER" + }, + "principal_classification": "ROLE" + } + ], + "rule": { + "n_out_of": { + "n": 1, + "rules": [ + { + "signed_by": 0 + } + ] + } + }, + "version": 0 + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 1, + "value": { + "identities": [ + { + "principal": { + "msp_identifier": "SampleOrg", + "role": "MEMBER" + }, + "principal_classification": "ROLE" + } + ], + "rule": { + "n_out_of": { + "n": 1, + "rules": [ + { + "signed_by": 0 + } + ] + } + }, + "version": 0 + } + }, + "version": "0" + } + }, + "values": { + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNOakNDQWQyZ0F3SUJBZ0lSQU1uZjkvZG1WOVJ2Q0NWdzlwWlFVZlV3Q2dZSUtvWkl6ajBFQXdJd2dZRXgKQ3pBSkJnTlZCQVlUQWxWVE1STXdFUVlEVlFRSUV3cERZV3hwWm05eWJtbGhNUll3RkFZRFZRUUhFdzFUWVc0ZwpSbkpoYm1OcGMyTnZNUmt3RndZRFZRUUtFeEJ2Y21jeExtVjRZVzF3YkdVdVkyOXRNUXd3Q2dZRFZRUUxFd05EClQxQXhIREFhQmdOVkJBTVRFMk5oTG05eVp6RXVaWGhoYlhCc1pTNWpiMjB3SGhjTk1UY3hNVEV5TVRNME1URXgKV2hjTk1qY3hNVEV3TVRNME1URXhXakJwTVFzd0NRWURWUVFHRXdKVlV6RVRNQkVHQTFVRUNCTUtRMkZzYVdadgpjbTVwWVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVNTUFvR0ExVUVDeE1EUTA5UU1SOHdIUVlEClZRUURFeFp3WldWeU1DNXZjbWN4TG1WNFlXMXdiR1V1WTI5dE1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMEQKQVFjRFFnQUVaOFM0VjcxT0JKcHlNSVZaZHdZZEZYQWNrSXRycHZTckNmMEhRZzQwV1c5WFNvT09PNzZJK1VtZgpFa21UbElKWFA3L0F5UlJTUlUzOG9JOEl2dHU0TTZOTk1Fc3dEZ1lEVlIwUEFRSC9CQVFEQWdlQU1Bd0dBMVVkCkV3RUIvd1FDTUFBd0t3WURWUjBqQkNRd0lvQWdpbk9SSWhuUEVGWlVoWG02ZVdCa203SzdaYzhSNC96N0xXNEgKb3NzRGxDc3dDZ1lJS29aSXpqMEVBd0lEUndBd1JBSWdWaWtJVVp6Z2Z1RnNHTFFIV0pVVkpDVTdwRGFFVGthegpQekZnc0NpTHhVQUNJQ2d6SllsVzdudlp4UDdiNnRiZXUzdDhtcmhNWFFzOTU2bUQ0K0JvS3VOSQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA2" + }, + "fabric_node_ous": null, + "intermediate_certs": [], + "name": "SampleOrg", + "organizational_unit_identifiers": [ + { + "certificate": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNZakNDQWdpZ0F3SUJBZ0lSQUwxZkVBbno1enA0bW9KOE1kU2IvbFl3Q2dZSUtvWkl6ajBFQXdJd2dZRXgKQ3pBSkJnTlZCQVlUQWxWVE1STXdFUVlEVlFRSUV3cERZV3hwWm05eWJtbGhNUll3RkFZRFZRUUhFdzFUWVc0ZwpSbkpoYm1OcGMyTnZNUmt3RndZRFZRUUtFeEJ2Y21jeExtVjRZVzF3YkdVdVkyOXRNUXd3Q2dZRFZRUUxFd05EClQxQXhIREFhQmdOVkJBTVRFMk5oTG05eVp6RXVaWGhoYlhCc1pTNWpiMjB3SGhjTk1UY3hNVEV5TVRNME1URXgKV2hjTk1qY3hNVEV3TVRNME1URXhXakNCZ1RFTE1Ba0dBMVVFQmhNQ1ZWTXhFekFSQmdOVkJBZ1RDa05oYkdsbQpiM0p1YVdFeEZqQVVCZ05WQkFjVERWTmhiaUJHY21GdVkybHpZMjh4R1RBWEJnTlZCQW9URUc5eVp6RXVaWGhoCmJYQnNaUzVqYjIweEREQUtCZ05WQkFzVEEwTlBVREVjTUJvR0ExVUVBeE1UWTJFdWIzSm5NUzVsZUdGdGNHeGwKTG1OdmJUQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJHcnNRNm9KcGs2aERXZjYzSFUzT1NOZApib3U5S053L1ZJZWUxSW5nUERJNFlKVTdPK1hhL1hMSnV3bkZ2N0JwUjhZdGwzZituakM4aS9SWlAyL3N2TytqClh6QmRNQTRHQTFVZER3RUIvd1FFQXdJQnBqQVBCZ05WSFNVRUNEQUdCZ1JWSFNVQU1BOEdBMVVkRXdFQi93UUYKTUFNQkFmOHdLUVlEVlIwT0JDSUVJSXB6a1NJWnp4QldWSVY1dW5sZ1pKdXl1MlhQRWVQOCt5MXVCNkxMQTVRcgpNQW9HQ0NxR1NNNDlCQU1DQTBnQU1FVUNJUURVaC8rQ0MyZEFJQ25ZdEFDWHNwd1VhYUViaXlaeFlJeCtYRHZXCm84VlZjZ0lnR3o1UzRpQzUreGt4Z2VhSVNQZnhLVFRWeTZ5elRkWUd6Q3cxdlBwcGp6bz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", + "organizational_unit_identifier": "COP" + } + ], + "revocation_list": [], + "root_certs": [ + 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNZakNDQWdpZ0F3SUJBZ0lSQUwxZkVBbno1enA0bW9KOE1kU2IvbFl3Q2dZSUtvWkl6ajBFQXdJd2dZRXgKQ3pBSkJnTlZCQVlUQWxWVE1STXdFUVlEVlFRSUV3cERZV3hwWm05eWJtbGhNUll3RkFZRFZRUUhFdzFUWVc0ZwpSbkpoYm1OcGMyTnZNUmt3RndZRFZRUUtFeEJ2Y21jeExtVjRZVzF3YkdVdVkyOXRNUXd3Q2dZRFZRUUxFd05EClQxQXhIREFhQmdOVkJBTVRFMk5oTG05eVp6RXVaWGhoYlhCc1pTNWpiMjB3SGhjTk1UY3hNVEV5TVRNME1URXgKV2hjTk1qY3hNVEV3TVRNME1URXhXakNCZ1RFTE1Ba0dBMVVFQmhNQ1ZWTXhFekFSQmdOVkJBZ1RDa05oYkdsbQpiM0p1YVdFeEZqQVVCZ05WQkFjVERWTmhiaUJHY21GdVkybHpZMjh4R1RBWEJnTlZCQW9URUc5eVp6RXVaWGhoCmJYQnNaUzVqYjIweEREQUtCZ05WQkFzVEEwTlBVREVjTUJvR0ExVUVBeE1UWTJFdWIzSm5NUzVsZUdGdGNHeGwKTG1OdmJUQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJHcnNRNm9KcGs2aERXZjYzSFUzT1NOZApib3U5S053L1ZJZWUxSW5nUERJNFlKVTdPK1hhL1hMSnV3bkZ2N0JwUjhZdGwzZituakM4aS9SWlAyL3N2TytqClh6QmRNQTRHQTFVZER3RUIvd1FFQXdJQnBqQVBCZ05WSFNVRUNEQUdCZ1JWSFNVQU1BOEdBMVVkRXdFQi93UUYKTUFNQkFmOHdLUVlEVlIwT0JDSUVJSXB6a1NJWnp4QldWSVY1dW5sZ1pKdXl1MlhQRWVQOCt5MXVCNkxMQTVRcgpNQW9HQ0NxR1NNNDlCQU1DQTBnQU1FVUNJUURVaC8rQ0MyZEFJQ25ZdEFDWHNwd1VhYUViaXlaeFlJeCtYRHZXCm84VlZjZ0lnR3o1UzRpQzUreGt4Z2VhSVNQZnhLVFRWeTZ5elRkWUd6Q3cxdlBwcGp6bz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNFVENDQWJhZ0F3SUJBZ0lRTnBnb0FTRTlmaTBvb1pWS2Nud25aekFLQmdncWhrak9QUVFEQWpCWU1Rc3cKQ1FZRFZRUUdFd0pWVXpFVE1CRUdBMVVFQ0JNS1EyRnNhV1p2Y201cFlURVdNQlFHQTFVRUJ4TU5VMkZ1SUVaeQpZVzVqYVhOamJ6RU5NQXNHQTFVRUNoTUVUM0puTWpFTk1Bc0dBMVVFQXhNRVQzSm5NakFlRncweE56QTFNRGd3Ck9UTXdNelJhRncweU56QTFNRFl3T1RNd016UmFNR1l4Q3pBSkJnTlZCQVlUQWxWVE1STXdFUVlEVlFRSUV3cEQKWVd4cFptOXlibWxoTVJZd0ZBWURWUVFIRXcxVFlXNGdSbkpoYm1OcGMyTnZNUlF3RWdZRFZRUUtFd3RQY21jeQpMV05vYVd4a01URVVNQklHQTFVRUF4TUxUM0puTWkxamFHbHNaREV3V1RBVEJnY3Foa2pPUFFJQkJnZ3Foa2pPClBRTUJCd05DQUFSVEJKOC9vMXRwSFB3dWl4WURnUndjcnpBcnUwY1dKSmhFNktXSEFhMHZCQ0c0bmwwempqUlMKb2craUF1VWNZNFovZ0pvSG9sNmRLU0hrOWg1anJxdEVvMVF3VWpBT0JnTlZIUThCQWY4RUJBTUNBYVl3RHdZRApWUjBsQkFnd0JnWUVWUjBsQURBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUEwR0ExVWREZ1FHQkFRQkFnTUVNQThHCkExVWRJd1FJTUFhQUJBRUNBd1F3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUlrUHprN09SVi9XaGZHN1FZLzYKL09KZzQrK2Z0ejJTWmM0NE5JdW9nTUFyQWlFQXFibnBubW1IbnpvMlFjNmdubGlDZWdwR25KMThSVVQvalpsagoxcVhIY3ZnPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + ], + "tls_root_certs": [ + "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUI4akNDQVppZ0F3SUJBZ0lSQU54ZDREM3NZMDY1Nk5xT2g4UmhhMEF3Q2dZSUtvWkl6ajBFQXdJd1dERUwKTUFrR0ExVUVCaE1DVlZNeEV6QVJCZ05WQkFnVENrTmhiR2xtYjNKdWFXRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhEVEFMQmdOVkJBb1RCRTl5WnpJeERUQUxCZ05WQkFNVEJFOXlaekl3SGhjTk1UY3dOVEE0Ck1Ea3pNRE0wV2hjTk1qY3dOVEEyTURrek1ETTBXakJZTVFzd0NRWURWUVFHRXdKVlV6RVRNQkVHQTFVRUNCTUsKUTJGc2FXWnZjbTVwWVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVOTUFzR0ExVUVDaE1FVDNKbgpNakVOTUFzR0ExVUVBeE1FVDNKbk1qQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJEWXkrcXpTCkovOENNZmhwQkZoVWhoeis3dXA0K2x3akJXRFNTMDFrb3N6Tmg4Y2FtSFRBOHZTNFpzTitEWjJEUnNTbVJaZ3MKdEcyb29nTExJZGg2WjFDalF6QkJNQTRHQTFVZER3RUIvd1FFQXdJQnBqQVBCZ05WSFNVRUNEQUdCZ1JWSFNVQQpNQThHQTFVZEV3RUIvd1FGTUFNQkFmOHdEUVlEVlIwT0JBWUVCQUVDQXdRd0NnWUlLb1pJemowRUF3SURTQUF3ClJRSWdXbk1tSDB5eEFqdWIzcWZ6eFFpb0hLUTgrV3ZVakFYbTBlaklkOVErckRJQ0lRRHIzMFVDUGorU1h6T2IKQ3U0cHNNTUJmTHVqS29pQk5kTEUxS0VwdDhsTjFnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "/Channel/Orderer/Admins", + "policies": {}, + "values": { + "ChannelCreationPolicy": { + "mod_policy": "/Channel/Orderer/Admins", + "value": { + 
"type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Admins" + } + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "/Channel/Orderer/Admins", + "policies": { + "Admins": { + "mod_policy": "/Channel/Orderer/Admins", + "policy": { + "type": 1, + "value": { + "identities": [], + "rule": { + "n_out_of": { + "n": 0, + "rules": [] + } + }, + "version": 0 + } + }, + "version": "0" + } + }, + "values": {}, + "version": "0" + }, + "Orderer": { + "groups": { + "SampleOrg": { + "groups": {}, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 1, + "value": { + "identities": [ + { + "principal": { + "msp_identifier": "SampleOrg", + "role": "ADMIN" + }, + "principal_classification": "ROLE" + } + ], + "rule": { + "n_out_of": { + "n": 1, + "rules": [ + { + "signed_by": 0 + } + ] + } + }, + "version": 0 + } + }, + "version": "0" + }, + "Endorsement": { + "mod_policy": "Admins", + "policy": { + "type": 1, + "value": { + "identities": [ + { + "principal": { + "msp_identifier": "SampleOrg", + "role": "MEMBER" + }, + "principal_classification": "ROLE" + } + ], + "rule": { + "n_out_of": { + "n": 1, + "rules": [ + { + "signed_by": 0 + } + ] + } + }, + "version": 0 + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 1, + "value": { + "identities": [ + { + "principal": { + "msp_identifier": "SampleOrg", + "role": "MEMBER" + }, + "principal_classification": "ROLE" + } + ], + "rule": { + "n_out_of": { + "n": 1, + "rules": [ + { + "signed_by": 0 + } + ] + } + }, + "version": 0 + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 1, + "value": { + "identities": [ + { + "principal": { + "msp_identifier": "SampleOrg", + "role": "MEMBER" + }, + "principal_classification": "ROLE" + } + ], + "rule": { + "n_out_of": { + "n": 1, + "rules": [ + { + "signed_by": 0 + } + ] + } + }, + "version": 0 + } + }, + "version": "0" + } + }, + "values": { + "Endpoints": { + "mod_policy": "Admins", + "value": { + "addresses": [ + "127.0.0.1:7050" + ] + }, + "version": "0" + }, + "MSP": { + "mod_policy": "Admins", + "value": { + "config": { + "admins": [ + "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNOakNDQWQyZ0F3SUJBZ0lSQU1uZjkvZG1WOVJ2Q0NWdzlwWlFVZlV3Q2dZSUtvWkl6ajBFQXdJd2dZRXgKQ3pBSkJnTlZCQVlUQWxWVE1STXdFUVlEVlFRSUV3cERZV3hwWm05eWJtbGhNUll3RkFZRFZRUUhFdzFUWVc0ZwpSbkpoYm1OcGMyTnZNUmt3RndZRFZRUUtFeEJ2Y21jeExtVjRZVzF3YkdVdVkyOXRNUXd3Q2dZRFZRUUxFd05EClQxQXhIREFhQmdOVkJBTVRFMk5oTG05eVp6RXVaWGhoYlhCc1pTNWpiMjB3SGhjTk1UY3hNVEV5TVRNME1URXgKV2hjTk1qY3hNVEV3TVRNME1URXhXakJwTVFzd0NRWURWUVFHRXdKVlV6RVRNQkVHQTFVRUNCTUtRMkZzYVdadgpjbTVwWVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVNTUFvR0ExVUVDeE1EUTA5UU1SOHdIUVlEClZRUURFeFp3WldWeU1DNXZjbWN4TG1WNFlXMXdiR1V1WTI5dE1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMEQKQVFjRFFnQUVaOFM0VjcxT0JKcHlNSVZaZHdZZEZYQWNrSXRycHZTckNmMEhRZzQwV1c5WFNvT09PNzZJK1VtZgpFa21UbElKWFA3L0F5UlJTUlUzOG9JOEl2dHU0TTZOTk1Fc3dEZ1lEVlIwUEFRSC9CQVFEQWdlQU1Bd0dBMVVkCkV3RUIvd1FDTUFBd0t3WURWUjBqQkNRd0lvQWdpbk9SSWhuUEVGWlVoWG02ZVdCa203SzdaYzhSNC96N0xXNEgKb3NzRGxDc3dDZ1lJS29aSXpqMEVBd0lEUndBd1JBSWdWaWtJVVp6Z2Z1RnNHTFFIV0pVVkpDVTdwRGFFVGthegpQekZnc0NpTHhVQUNJQ2d6SllsVzdudlp4UDdiNnRiZXUzdDhtcmhNWFFzOTU2bUQ0K0JvS3VOSQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + ], + "crypto_config": { + "identity_identifier_hash_function": "SHA256", + "signature_hash_family": "SHA2" + }, + "fabric_node_ous": null, + "intermediate_certs": [], + "name": "SampleOrg", + "organizational_unit_identifiers": [ + { + 
"certificate": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNZakNDQWdpZ0F3SUJBZ0lSQUwxZkVBbno1enA0bW9KOE1kU2IvbFl3Q2dZSUtvWkl6ajBFQXdJd2dZRXgKQ3pBSkJnTlZCQVlUQWxWVE1STXdFUVlEVlFRSUV3cERZV3hwWm05eWJtbGhNUll3RkFZRFZRUUhFdzFUWVc0ZwpSbkpoYm1OcGMyTnZNUmt3RndZRFZRUUtFeEJ2Y21jeExtVjRZVzF3YkdVdVkyOXRNUXd3Q2dZRFZRUUxFd05EClQxQXhIREFhQmdOVkJBTVRFMk5oTG05eVp6RXVaWGhoYlhCc1pTNWpiMjB3SGhjTk1UY3hNVEV5TVRNME1URXgKV2hjTk1qY3hNVEV3TVRNME1URXhXakNCZ1RFTE1Ba0dBMVVFQmhNQ1ZWTXhFekFSQmdOVkJBZ1RDa05oYkdsbQpiM0p1YVdFeEZqQVVCZ05WQkFjVERWTmhiaUJHY21GdVkybHpZMjh4R1RBWEJnTlZCQW9URUc5eVp6RXVaWGhoCmJYQnNaUzVqYjIweEREQUtCZ05WQkFzVEEwTlBVREVjTUJvR0ExVUVBeE1UWTJFdWIzSm5NUzVsZUdGdGNHeGwKTG1OdmJUQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJHcnNRNm9KcGs2aERXZjYzSFUzT1NOZApib3U5S053L1ZJZWUxSW5nUERJNFlKVTdPK1hhL1hMSnV3bkZ2N0JwUjhZdGwzZituakM4aS9SWlAyL3N2TytqClh6QmRNQTRHQTFVZER3RUIvd1FFQXdJQnBqQVBCZ05WSFNVRUNEQUdCZ1JWSFNVQU1BOEdBMVVkRXdFQi93UUYKTUFNQkFmOHdLUVlEVlIwT0JDSUVJSXB6a1NJWnp4QldWSVY1dW5sZ1pKdXl1MlhQRWVQOCt5MXVCNkxMQTVRcgpNQW9HQ0NxR1NNNDlCQU1DQTBnQU1FVUNJUURVaC8rQ0MyZEFJQ25ZdEFDWHNwd1VhYUViaXlaeFlJeCtYRHZXCm84VlZjZ0lnR3o1UzRpQzUreGt4Z2VhSVNQZnhLVFRWeTZ5elRkWUd6Q3cxdlBwcGp6bz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", + "organizational_unit_identifier": "COP" + } + ], + "revocation_list": [], + "root_certs": [ + "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNZakNDQWdpZ0F3SUJBZ0lSQUwxZkVBbno1enA0bW9KOE1kU2IvbFl3Q2dZSUtvWkl6ajBFQXdJd2dZRXgKQ3pBSkJnTlZCQVlUQWxWVE1STXdFUVlEVlFRSUV3cERZV3hwWm05eWJtbGhNUll3RkFZRFZRUUhFdzFUWVc0ZwpSbkpoYm1OcGMyTnZNUmt3RndZRFZRUUtFeEJ2Y21jeExtVjRZVzF3YkdVdVkyOXRNUXd3Q2dZRFZRUUxFd05EClQxQXhIREFhQmdOVkJBTVRFMk5oTG05eVp6RXVaWGhoYlhCc1pTNWpiMjB3SGhjTk1UY3hNVEV5TVRNME1URXgKV2hjTk1qY3hNVEV3TVRNME1URXhXakNCZ1RFTE1Ba0dBMVVFQmhNQ1ZWTXhFekFSQmdOVkJBZ1RDa05oYkdsbQpiM0p1YVdFeEZqQVVCZ05WQkFjVERWTmhiaUJHY21GdVkybHpZMjh4R1RBWEJnTlZCQW9URUc5eVp6RXVaWGhoCmJYQnNaUzVqYjIweEREQUtCZ05WQkFzVEEwTlBVREVjTUJvR0ExVUVBeE1UWTJFdWIzSm5NUzVsZUdGdGNHeGwKTG1OdmJUQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJHcnNRNm9KcGs2aERXZjYzSFUzT1NOZApib3U5S053L1ZJZWUxSW5nUERJNFlKVTdPK1hhL1hMSnV3bkZ2N0JwUjhZdGwzZituakM4aS9SWlAyL3N2TytqClh6QmRNQTRHQTFVZER3RUIvd1FFQXdJQnBqQVBCZ05WSFNVRUNEQUdCZ1JWSFNVQU1BOEdBMVVkRXdFQi93UUYKTUFNQkFmOHdLUVlEVlIwT0JDSUVJSXB6a1NJWnp4QldWSVY1dW5sZ1pKdXl1MlhQRWVQOCt5MXVCNkxMQTVRcgpNQW9HQ0NxR1NNNDlCQU1DQTBnQU1FVUNJUURVaC8rQ0MyZEFJQ25ZdEFDWHNwd1VhYUViaXlaeFlJeCtYRHZXCm84VlZjZ0lnR3o1UzRpQzUreGt4Z2VhSVNQZnhLVFRWeTZ5elRkWUd6Q3cxdlBwcGp6bz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + ], + "signing_identity": null, + "tls_intermediate_certs": [ + 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNFVENDQWJhZ0F3SUJBZ0lRTnBnb0FTRTlmaTBvb1pWS2Nud25aekFLQmdncWhrak9QUVFEQWpCWU1Rc3cKQ1FZRFZRUUdFd0pWVXpFVE1CRUdBMVVFQ0JNS1EyRnNhV1p2Y201cFlURVdNQlFHQTFVRUJ4TU5VMkZ1SUVaeQpZVzVqYVhOamJ6RU5NQXNHQTFVRUNoTUVUM0puTWpFTk1Bc0dBMVVFQXhNRVQzSm5NakFlRncweE56QTFNRGd3Ck9UTXdNelJhRncweU56QTFNRFl3T1RNd016UmFNR1l4Q3pBSkJnTlZCQVlUQWxWVE1STXdFUVlEVlFRSUV3cEQKWVd4cFptOXlibWxoTVJZd0ZBWURWUVFIRXcxVFlXNGdSbkpoYm1OcGMyTnZNUlF3RWdZRFZRUUtFd3RQY21jeQpMV05vYVd4a01URVVNQklHQTFVRUF4TUxUM0puTWkxamFHbHNaREV3V1RBVEJnY3Foa2pPUFFJQkJnZ3Foa2pPClBRTUJCd05DQUFSVEJKOC9vMXRwSFB3dWl4WURnUndjcnpBcnUwY1dKSmhFNktXSEFhMHZCQ0c0bmwwempqUlMKb2craUF1VWNZNFovZ0pvSG9sNmRLU0hrOWg1anJxdEVvMVF3VWpBT0JnTlZIUThCQWY4RUJBTUNBYVl3RHdZRApWUjBsQkFnd0JnWUVWUjBsQURBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUEwR0ExVWREZ1FHQkFRQkFnTUVNQThHCkExVWRJd1FJTUFhQUJBRUNBd1F3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUlrUHprN09SVi9XaGZHN1FZLzYKL09KZzQrK2Z0ejJTWmM0NE5JdW9nTUFyQWlFQXFibnBubW1IbnpvMlFjNmdubGlDZWdwR25KMThSVVQvalpsagoxcVhIY3ZnPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + ], + "tls_root_certs": [ + "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUI4akNDQVppZ0F3SUJBZ0lSQU54ZDREM3NZMDY1Nk5xT2g4UmhhMEF3Q2dZSUtvWkl6ajBFQXdJd1dERUwKTUFrR0ExVUVCaE1DVlZNeEV6QVJCZ05WQkFnVENrTmhiR2xtYjNKdWFXRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhEVEFMQmdOVkJBb1RCRTl5WnpJeERUQUxCZ05WQkFNVEJFOXlaekl3SGhjTk1UY3dOVEE0Ck1Ea3pNRE0wV2hjTk1qY3dOVEEyTURrek1ETTBXakJZTVFzd0NRWURWUVFHRXdKVlV6RVRNQkVHQTFVRUNCTUsKUTJGc2FXWnZjbTVwWVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVOTUFzR0ExVUVDaE1FVDNKbgpNakVOTUFzR0ExVUVBeE1FVDNKbk1qQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJEWXkrcXpTCkovOENNZmhwQkZoVWhoeis3dXA0K2x3akJXRFNTMDFrb3N6Tmg4Y2FtSFRBOHZTNFpzTitEWjJEUnNTbVJaZ3MKdEcyb29nTExJZGg2WjFDalF6QkJNQTRHQTFVZER3RUIvd1FFQXdJQnBqQVBCZ05WSFNVRUNEQUdCZ1JWSFNVQQpNQThHQTFVZEV3RUIvd1FGTUFNQkFmOHdEUVlEVlIwT0JBWUVCQUVDQXdRd0NnWUlLb1pJemowRUF3SURTQUF3ClJRSWdXbk1tSDB5eEFqdWIzcWZ6eFFpb0hLUTgrV3ZVakFYbTBlaklkOVErckRJQ0lRRHIzMFVDUGorU1h6T2IKQ3U0cHNNTUJmTHVqS29pQk5kTEUxS0VwdDhsTjFnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + ] + }, + "type": 0 + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + "Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "BlockValidation": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BatchSize": { + "mod_policy": "Admins", + "value": { + "absolute_max_bytes": 10485760, + "max_message_count": 500, + "preferred_max_bytes": 2097152 + }, + "version": "0" + }, + "BatchTimeout": { + "mod_policy": "Admins", + "value": { + "timeout": "2s" + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V1_1": {} + } + }, + "version": "0" + }, + "ChannelRestrictions": { + "mod_policy": "Admins", + "value": null, + "version": "0" + }, + "ConsensusType": { + "mod_policy": "Admins", + "value": { + "metadata": null, + "state": "STATE_NORMAL", + "type": "solo" + }, + "version": "0" + } + }, + "version": "0" + } + }, + "mod_policy": "Admins", + "policies": { + 
"Admins": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "MAJORITY", + "sub_policy": "Admins" + } + }, + "version": "0" + }, + "Readers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Readers" + } + }, + "version": "0" + }, + "Writers": { + "mod_policy": "Admins", + "policy": { + "type": 3, + "value": { + "rule": "ANY", + "sub_policy": "Writers" + } + }, + "version": "0" + } + }, + "values": { + "BlockDataHashingStructure": { + "mod_policy": "Admins", + "value": { + "width": 4294967295 + }, + "version": "0" + }, + "Capabilities": { + "mod_policy": "Admins", + "value": { + "capabilities": { + "V2_0": {} + } + }, + "version": "0" + }, + "HashingAlgorithm": { + "mod_policy": "Admins", + "value": { + "name": "SHA256" + }, + "version": "0" + }, + "OrdererAddresses": { + "mod_policy": "/Channel/Orderer/Admins", + "value": { + "addresses": [ + "127.0.0.1:7050" + ] + }, + "version": "0" + } + }, + "version": "0" + }, + "sequence": "0" + }, + "last_update": null + }, + "header": { + "channel_header": { + "channel_id": "test", + "epoch": "0", + "extension": null, + "timestamp": "2019-04-21T19:35:42Z", + "tls_cert_hash": "", + "tx_id": "7c179b7100bb0c0b33dad5114f0294e2509e9be9fbfa0ac9d38b43703ea57138", + "type": 1, + "version": 1 + }, + "signature_header": { + "creator": null, + "nonce": "1gQF896QI8wgNZSwJ73vaPUcuOugIvB/" + } + } + }, + "signature": "" + } + ] + }, + "header": { + "data_hash": "idkpZn9B0jX9nL0kASTuPnA1PJDeXToBquHOas7f3Kk=", + "number": "0", + "previous_hash": "" + }, + "metadata": { + "metadata": [ + "", + "", + "", + "" + ] + } +} diff --git a/v2/protolator/integration/testdata/block.pb b/v2/protolator/integration/testdata/block.pb new file mode 100644 index 0000000000000000000000000000000000000000..f585ce22a461179c0851646cd994821ae3c3551e GIT binary patch literal 9940 zcmeHNU996q74H7_?zFqTGN87XpDuT|RBcq^F#zV=mR8? 
zV(ZMAnd6yr=6vUK!12-bFaP@K!AtNL*?)fX^_%Bz{^|2>_H(bi^~LATeedlztv7%7 z+wT&uVel#j-@;%D#DjB(=Z-EPeC@06{Nd8^#hLESo_Qg!rSipIK1Gtf9;uPNlas#M z&!$qD0huml^mLXi>cyU39P|b%scOakNuifH$&)9#n$4$93d9ld*sm^LdgrZIZv5h(XHfB(b}{`l(gUtS`1;Tkc*K`(59;9zr0TTKx&m!MtWGQf!Npabmj7==aehIGHD#%ZXvXn7^ zU{}rFgmbktx4^>4FZnBl8Vaa|HqnCmq?3`Ql;112=W@C@!@M6`5>HS^KF0tt1%nC$ z)wLB^L}|{fM>8rRnk!w?>~$$mX=SZmn)EBC#$izlBCRDBF&SM0AjLv!CG@vK$)YJk zM0E^DWteIs%EWZ1g98*oVLg!{(j(67mB$m%JE_i92?Zq!w%3iC$aJE{Ayw8UrO51Y zD5ncSS%lqnyCb6VEepXJ69a~UwY8EbLXvB?8Khy#Sy=%i!{s!ZM-$q(l^yHl(o8D{ zV^E)Pi=Id+n8Iahd0KAaOc5HWOw*ftA5|d9LaIFV_?%O9M>&&`2Dy_;`=rt0`k?2` zi#5(qc353cv9{4<*o<6l=NpNxn(WrAbKfp4@{}tSS>!hn)GY$rRn$A(g!FX!oQ6Y0%DVwsv#xq2OD`DrwOvwL0&Vj7YL#HdZP zJkvIl37Ss#Axqox4ANcC_1mh7dc&m>$ZU}4G7AYfg-%B^Sfd@0K3Q0&X)koHrHrt4av^E_CE5#pGC53S=L-ZS#JIP&`GTakzDK{45%bc(`) zraOMoRrvOx{eKpDl#6-Kr`Lx_8Tr*tJ47OmPOvxC&7<%2JlHmfGd4UWjn)8>`L%W_Ys5W1-+zq6t8BiZCcpd`0RkF`a$?6Q?xt zkuO&^XCDpu-8skr$ZHI5K%^8}nNB{{psMZ?vmK@H%l%RpY9n?wbr-EEJs>N*ks&3i z*ljtDMkAkN6Ya?WpwUDev(;`hpM*<6XbJ6;g2NWr#cZCzIgZ1~^O^X(6UZjyD@h9S z3^ep8fc#YeflUO_7(%}mAD#kfQ8e$fPIRc-4S_Z7CMqdyvpdP@5;dCSYk5U02$^J< zuS=Nmv3Cic=ia>x_L*y+ zxr%U~xjyvFMO>g8P3juB*s8!ZaXJ3Y=BjUBTIISs14*h7r!(aYkmQ1XhB@(MGMCM9 zrwy|pm?{b1d(ik2gX&TGzBkgk8_VR0kd;z6GiVFqAk z6k-OG8$1{b(>s|($@j`iV4IdcU{>NV^wwF)#7K-F9l#da2_vkx5@PEEz&b`=CQic^ zs3r?E9+)mwGTWvZCh~JPlNkFJB{8iQnHo7>$e9IAom50vShO;V$0Zmg%?Ms=A}EFj zX63Zw81*{aH*-pgw)l{u?g!RI+ju*!+rm0#3+teaD2Qn2^Y8TorWIu{An>dq*?3YR zmjSdQsCzmH1AgdKYJ4FfE!r0BPDtHi`$ax6W!MOm^LK-6!dLDI*^n6dcu(CzHlA}02ewdgFeDvE>C`max0Nu4 z@W!b*vKq|@V8D&@fDeLc?&dt;45ryR3cc~3_jewsmwYndLYTsg;e&>VeH;pEBwVhA zvcHw#vK-v2oQo{Ddp&o1Imi2GU8>cPKWj$i+!E9!&QYac6qR0WOtGr~hs2LK+X zEVVrL6bAW{2t~@!HjBto+XV$p9#fRh(GUtERV^f))Xc3k{rNDE8AI^3X$YrtQj@Ax z6Xj}B1*+tNqRSb3Ooo;vw18tIhH&22RwWFg#7GiBH4MoWL}LuT9Kp7z z3r2B#fVfCveV)Zog?9`FK0;pxYeII^3`hBf&`C<>pq%IBWDX=7RU?x~3}#`vrD&N9 z$IcxC!zmmhc-pgFdoromp_AsdoMDfLv~IX%yP7HpZ82#nV+*8CyA^F=+$|W6F2nw0 zXnV&w@DQ^5&eYQ2P*meNMo}QsQaiiO8gnpE2zb;_4aGVs2;sP|s|xM=yYn(WYwK`k0>T>TKxPO=mpR zmXXO1A6y^~wqqYXn%w9m8`J2S^l5T4(w!QPpWXH)ZV(5NT^EV-!~^#~4)OCR!GoJQ zi8r1k{(2kx^ZPyfvuA(y?9ZP4`Cs8(_w3J}{n@iW`}Y>>cNly2XV3m@-?Q!6pF6zN z+p|A=_Gi!j?Af3H2K#gL6u1(xKdv*hXI}JAmP2qYmCk>bjJ~Po@?@6WGAhRr-SeSP zHyck;& z8C>+7v9nXT39eA;Ofy@{AsH2XD{S~dXn(P8`Ww%Q^-Q!z4 zU2M=wXG7eb)!A)n z?nJH~-DyM8FOp|A0;F%{qD(Jib~kMxCjf4z1aA quFdAt)g~Ui8+{(xsvk!y_Mcddc$_Umx?9!vto&}p4!{HJ@4o>11pnLs literal 0 HcmV?d00001 diff --git a/v2/protolator/json.go b/v2/protolator/json.go new file mode 100644 index 0000000..5753a2b --- /dev/null +++ b/v2/protolator/json.go @@ -0,0 +1,479 @@ +/* +Copyright IBM Corp. 2017 All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package protolator + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "strings" + + "github.com/hyperledger/fabric-config/v2/protolator/protoext" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +// MostlyDeterministicMarshal is _NOT_ the function you are looking for. +// It causes protobuf serialization consistent within a single build. It +// does not guarantee that the serialization is deterministic across proto +// versions or proto implementations. It is useful for situations where +// the same process wants to compare binary messages for equality without +// needing to unmarshal first, but should not be used generally. 
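A minimal usage sketch for the helper declared just below (illustrative only, not part of the diff), using the SimpleMsg test proto added elsewhere in this patch: serializing the same map-bearing message twice yields byte-identical output within a single build, which is the only property the function promises.

package main

import (
	"bytes"
	"fmt"

	"github.com/hyperledger/fabric-config/v2/protolator"
	"github.com/hyperledger/fabric-config/v2/protolator/testprotos"
)

func main() {
	msg := &testprotos.SimpleMsg{
		MapField: map[string]string{"a": "b", "c": "d", "e": "f"},
	}

	first, err := protolator.MostlyDeterministicMarshal(msg)
	if err != nil {
		panic(err)
	}
	second, err := protolator.MostlyDeterministicMarshal(msg)
	if err != nil {
		panic(err)
	}

	// Within one build of one binary the two serializations match byte for
	// byte, so messages can be compared without unmarshaling them first.
	fmt.Println(bytes.Equal(first, second)) // true
}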
+func MostlyDeterministicMarshal(msg proto.Message) ([]byte, error) { + return proto.MarshalOptions{ + Deterministic: true, + }.Marshal(msg) +} + +type protoFieldFactory interface { + // Handles should return whether or not this particular protoFieldFactory instance + // is responsible for the given proto's field + Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool + + // NewProtoField should create a backing protoField implementor + // Note that the fieldValue may represent nil, so the fieldType is also + // included (as reflecting the type of a nil value causes a panic) + NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) +} + +type protoField interface { + // Name returns the proto name of the field + Name() string + + // PopulateFrom mutates the underlying object, by taking the intermediate JSON representation + // and converting it into the proto representation, then assigning it to the backing value + // via reflection + PopulateFrom(source interface{}) error + + // PopulateTo does not mutate the underlying object, but instead converts it + // into the intermediate JSON representation (ie a struct -> map[string]interface{} + // or a slice of structs to []map[string]interface{} + PopulateTo() (interface{}, error) +} + +var ( + protoMsgType = reflect.TypeOf((*proto.Message)(nil)).Elem() + mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) + bytesType = reflect.TypeOf([]byte{}) +) + +type baseField struct { + msg proto.Message + name string + fType reflect.Type + vType reflect.Type + value reflect.Value +} + +func (bf *baseField) Name() string { + return bf.name +} + +type plainField struct { + baseField + populateFrom func(source interface{}, destType reflect.Type) (reflect.Value, error) + populateTo func(source reflect.Value) (interface{}, error) +} + +func (pf *plainField) PopulateFrom(source interface{}) error { + if source == nil { + return nil + } + + if !reflect.TypeOf(source).AssignableTo(pf.fType) { + return fmt.Errorf("expected field %s for message %T to be assignable from %v but was not. Is %T", pf.name, pf.msg, pf.fType, source) + } + value, err := pf.populateFrom(source, pf.vType) + if err != nil { + return fmt.Errorf("error in PopulateFrom for field %s for message %T: %s", pf.name, pf.msg, err) + } + pf.value.Set(value) + return nil +} + +func (pf *plainField) PopulateTo() (interface{}, error) { + if !pf.value.Type().AssignableTo(pf.vType) { + return nil, fmt.Errorf("expected field %s for message %T to be assignable to %v but was not. Got %T.", pf.name, pf.msg, pf.fType, pf.value) + } + + kind := pf.value.Type().Kind() + // Do not try to deeply encode nil fields, as without correct type info etc. 
they + // may return errors + if (kind == reflect.Ptr || kind == reflect.Slice || kind == reflect.Map) && pf.value.IsNil() { + return nil, nil + } + + value, err := pf.populateTo(pf.value) + if err != nil { + return nil, fmt.Errorf("error in PopulateTo for field %s for message %T: %s", pf.name, pf.msg, err) + } + return value, nil +} + +type mapField struct { + baseField + populateFrom func(key string, value interface{}, destType reflect.Type) (reflect.Value, error) + populateTo func(key string, value reflect.Value) (interface{}, error) +} + +func (mf *mapField) PopulateFrom(source interface{}) error { + tree, ok := source.(map[string]interface{}) + if !ok { + return fmt.Errorf("expected map field %s for message %T to be assignable from map[string]interface{} but was not. Got %T", mf.name, mf.msg, source) + } + + result := reflect.MakeMap(mf.vType) + + for k, v := range tree { + if !reflect.TypeOf(v).AssignableTo(mf.fType) { + return fmt.Errorf("expected map field %s value for %s for message %T to be assignable from %v but was not. Is %T", mf.name, k, mf.msg, mf.fType, v) + } + newValue, err := mf.populateFrom(k, v, mf.vType.Elem()) + if err != nil { + return fmt.Errorf("error in PopulateFrom for map field %s with key %s for message %T: %s", mf.name, k, mf.msg, err) + } + result.SetMapIndex(reflect.ValueOf(k), newValue) + } + + mf.value.Set(result) + return nil +} + +func (mf *mapField) PopulateTo() (interface{}, error) { + result := make(map[string]interface{}) + keys := mf.value.MapKeys() + for _, key := range keys { + k, ok := key.Interface().(string) + if !ok { + return nil, fmt.Errorf("expected map field %s for message %T to have string keys, but did not.", mf.name, mf.msg) + } + + subValue := mf.value.MapIndex(key) + kind := subValue.Type().Kind() + if (kind == reflect.Ptr || kind == reflect.Slice || kind == reflect.Map) && subValue.IsNil() { + continue + } + + if !subValue.Type().AssignableTo(mf.vType.Elem()) { + return nil, fmt.Errorf("expected map field %s with key %s for message %T to be assignable to %v but was not. Got %v.", mf.name, k, mf.msg, mf.vType.Elem(), subValue.Type()) + } + + value, err := mf.populateTo(k, subValue) + if err != nil { + return nil, fmt.Errorf("error in PopulateTo for map field %s and key %s for message %T: %s", mf.name, k, mf.msg, err) + } + result[k] = value + } + + return result, nil +} + +type sliceField struct { + baseField + populateTo func(i int, source reflect.Value) (interface{}, error) + populateFrom func(i int, source interface{}, destType reflect.Type) (reflect.Value, error) +} + +func (sf *sliceField) PopulateFrom(source interface{}) error { + slice, ok := source.([]interface{}) + if !ok { + return fmt.Errorf("expected slice field %s for message %T to be assignable from []interface{} but was not. Got %T", sf.name, sf.msg, source) + } + + result := reflect.MakeSlice(sf.vType, len(slice), len(slice)) + + for i, v := range slice { + if !reflect.TypeOf(v).AssignableTo(sf.fType) { + return fmt.Errorf("expected slice field %s value at index %d for message %T to be assignable from %v but was not. 
Is %T", sf.name, i, sf.msg, sf.fType, v) + } + subValue, err := sf.populateFrom(i, v, sf.vType.Elem()) + if err != nil { + return fmt.Errorf("error in PopulateFrom for slice field %s at index %d for message %T: %s", sf.name, i, sf.msg, err) + } + result.Index(i).Set(subValue) + } + + sf.value.Set(result) + return nil +} + +func (sf *sliceField) PopulateTo() (interface{}, error) { + result := make([]interface{}, sf.value.Len()) + for i := range result { + subValue := sf.value.Index(i) + kind := subValue.Type().Kind() + if (kind == reflect.Ptr || kind == reflect.Slice || kind == reflect.Map) && subValue.IsNil() { + continue + } + + if !subValue.Type().AssignableTo(sf.vType.Elem()) { + return nil, fmt.Errorf("expected slice field %s at index %d for message %T to be assignable to %v but was not. Got %v.", sf.name, i, sf.msg, sf.vType.Elem(), subValue.Type()) + } + + value, err := sf.populateTo(i, subValue) + if err != nil { + return nil, fmt.Errorf("error in PopulateTo for slice field %s at index %d for message %T: %s", sf.name, i, sf.msg, err) + } + result[i] = value + } + + return result, nil +} + +func stringInSlice(target string, slice []string) bool { + for _, name := range slice { + if name == target { + return true + } + } + return false +} + +// protoToJSON is a simple shortcut wrapper around the proto JSON marshaler +func protoToJSON(msg proto.Message) ([]byte, error) { + if reflect.ValueOf(msg).IsNil() { + panic("We're nil here") + } + m := protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: true, + Indent: " ", + UseProtoNames: true, + } + b, err := m.Marshal(msg) + if err != nil { + return nil, err + } + return b, nil +} + +func mapToProto(tree map[string]interface{}, msg proto.Message) error { + jsonOut, err := json.Marshal(tree) + if err != nil { + return err + } + + return protojson.Unmarshal(jsonOut, msg) +} + +// jsonToMap allocates a map[string]interface{}, unmarshals a JSON document into it +// and returns it, or error +func jsonToMap(marshaled []byte) (map[string]interface{}, error) { + tree := make(map[string]interface{}) + d := json.NewDecoder(bytes.NewReader(marshaled)) + d.UseNumber() + err := d.Decode(&tree) + if err != nil { + return nil, fmt.Errorf("error unmarshaling intermediate JSON: %s", err) + } + return tree, nil +} + +// The factory implementations, listed in order of most greedy to least. +// Factories listed lower, may depend on factories listed higher being +// evaluated first. 
+var fieldFactories = []protoFieldFactory{ + dynamicSliceFieldFactory{}, + dynamicMapFieldFactory{}, + dynamicFieldFactory{}, + variablyOpaqueSliceFieldFactory{}, + variablyOpaqueMapFieldFactory{}, + variablyOpaqueFieldFactory{}, + staticallyOpaqueSliceFieldFactory{}, + staticallyOpaqueMapFieldFactory{}, + staticallyOpaqueFieldFactory{}, + nestedSliceFieldFactory{}, + nestedMapFieldFactory{}, + nestedFieldFactory{}, +} + +func protoFields(msg proto.Message, uMsg proto.Message) ([]protoField, error) { + var result []protoField + + pmVal := reflect.ValueOf(uMsg) + if pmVal.Kind() != reflect.Ptr { + return nil, fmt.Errorf("expected proto.Message %T to be pointer kind", uMsg) + } + + if pmVal.IsNil() { + return nil, nil + } + + mVal := pmVal.Elem() + if mVal.Kind() != reflect.Struct { + return nil, fmt.Errorf("expected proto.Message %T ptr value to be struct, was %v", uMsg, mVal.Kind()) + } + + iResult := make([][]protoField, len(fieldFactories)) + t := mVal.Type() + // TODO, this will skip oneof fields, this should be handled + // correctly at some point + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + fieldName := f.Name + tagField := f.Tag.Get("protobuf") + for _, s := range strings.Split(tagField, ",") { + if strings.HasPrefix(s, "name=") { + fieldName = s[len("name="):] + break + } + } + + fieldValue := mVal.FieldByName(f.Name) + fieldTypeStruct, ok := mVal.Type().FieldByName(f.Name) + if !ok { + return nil, fmt.Errorf("programming error: proto does not have field advertised by proto package") + } + fieldType := fieldTypeStruct.Type + + for j, factory := range fieldFactories { + if !factory.Handles(msg, fieldName, fieldType, fieldValue) { + continue + } + + field, err := factory.NewProtoField(msg, fieldName, fieldType, fieldValue) + if err != nil { + return nil, err + } + iResult[j] = append(iResult[j], field) + break + } + } + + // Loop over the collected fields in reverse order to collect them in + // correct dependency order as specified in fieldFactories + for i := len(iResult) - 1; i >= 0; i-- { + result = append(result, iResult[i]...) + } + + return result, nil +} + +func recursivelyCreateTreeFromMessage(msg proto.Message) (tree map[string]interface{}, err error) { + defer func() { + // Because this function is recursive, it's difficult to determine which level + // of the proto the error originated from, this wrapper leaves breadcrumbs for debugging + if err != nil { + err = fmt.Errorf("%T: %s", msg, err) + } + }() + + msg = protoext.Decorate(msg) + uMsg := msg + if decorated, ok := msg.(DecoratedProto); ok { + uMsg = decorated.Underlying() + } + + fields, err := protoFields(msg, uMsg) + if err != nil { + return nil, err + } + + jsonBytes, err := protoToJSON(uMsg) + if err != nil { + return nil, err + } + + tree, err = jsonToMap(jsonBytes) + if err != nil { + return nil, err + } + + for _, field := range fields { + if _, ok := tree[field.Name()]; !ok { + continue + } + delete(tree, field.Name()) + tree[field.Name()], err = field.PopulateTo() + if err != nil { + return nil, err + } + } + + return tree, nil +} + +// DeepMarshalJSON marshals msg to w as JSON, but instead of marshaling bytes fields which contain nested +// marshaled messages as base64 (like the standard proto encoding), these nested messages are remarshaled +// as the JSON representation of those messages. This is done so that the JSON representation is as non-binary +// and human readable as possible. 
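A round-trip sketch for the pair of entry points defined below (illustrative only, not part of the diff): DeepMarshalJSON expands the nested marshaled fields of a config block into readable JSON, and DeepUnmarshalJSON folds that JSON back into the binary proto form. The block.pb path matches the test data added in this patch.

package main

import (
	"bytes"
	"os"

	"github.com/hyperledger/fabric-config/v2/protolator"
	cb "github.com/hyperledger/fabric-protos-go-apiv2/common"
	"google.golang.org/protobuf/proto"
)

func main() {
	raw, err := os.ReadFile("testdata/block.pb")
	if err != nil {
		panic(err)
	}
	block := &cb.Block{}
	if err := proto.Unmarshal(raw, block); err != nil {
		panic(err)
	}

	// Expand opaque bytes fields into nested JSON objects.
	var buf bytes.Buffer
	if err := protolator.DeepMarshalJSON(&buf, block); err != nil {
		panic(err)
	}

	// Fold the expanded JSON back into binary form.
	decoded := &cb.Block{}
	if err := protolator.DeepUnmarshalJSON(&buf, decoded); err != nil {
		panic(err)
	}
}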
+func DeepMarshalJSON(w io.Writer, msg proto.Message) error { + root, err := recursivelyCreateTreeFromMessage(msg) + if err != nil { + return err + } + + encoder := json.NewEncoder(w) + encoder.SetIndent("", "\t") + return encoder.Encode(root) +} + +func recursivelyPopulateMessageFromTree(tree map[string]interface{}, msg proto.Message) (err error) { + defer func() { + // Because this function is recursive, it's difficult to determine which level + // of the proto the error orginated from, this wrapper leaves breadcrumbs for debugging + if err != nil { + err = fmt.Errorf("%T: %s", msg, err) + } + }() + + msg = protoext.Decorate(msg) + uMsg := msg + if decorated, ok := msg.(DecoratedProto); ok { + uMsg = decorated.Underlying() + } + + fields, err := protoFields(msg, uMsg) + if err != nil { + return err + } + + specialFieldsMap := make(map[string]interface{}) + + for _, field := range fields { + specialField, ok := tree[field.Name()] + if !ok { + continue + } + specialFieldsMap[field.Name()] = specialField + delete(tree, field.Name()) + } + + if err = mapToProto(tree, uMsg); err != nil { + return err + } + + for _, field := range fields { + specialField, ok := specialFieldsMap[field.Name()] + if !ok { + continue + } + if err = field.PopulateFrom(specialField); err != nil { + return err + } + } + + return nil +} + +// DeepUnmarshalJSON takes JSON output as generated by DeepMarshalJSON and decodes it into msg +// This includes re-marshaling the expanded nested elements to binary form +func DeepUnmarshalJSON(r io.Reader, msg proto.Message) error { + b, err := io.ReadAll(r) + if err != nil { + return err + } + + root, err := jsonToMap(b) + if err != nil { + return err + } + + return recursivelyPopulateMessageFromTree(root, msg) +} diff --git a/v2/protolator/json_test.go b/v2/protolator/json_test.go new file mode 100644 index 0000000..d0f877b --- /dev/null +++ b/v2/protolator/json_test.go @@ -0,0 +1,311 @@ +/* +Copyright IBM Corp. 2017 All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protolator + +import ( + "bytes" + "encoding/json" + "fmt" + "math" + "reflect" + "testing" + + "github.com/hyperledger/fabric-config/v2/protolator/testprotos" + . 
"github.com/onsi/gomega" + "google.golang.org/protobuf/proto" +) + +type testProtoPlainFieldFactory struct { + fromPrefix string + toPrefix string + fromError error + toError error +} + +func (tpff *testProtoPlainFieldFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + return fieldName == "plain_field" +} + +func (tpff *testProtoPlainFieldFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + return &plainField{ + baseField: baseField{ + msg: msg, + name: fieldName, + fType: reflect.TypeOf(""), + vType: fieldType, + value: fieldValue, + }, + populateFrom: func(source interface{}, destType reflect.Type) (reflect.Value, error) { + sourceAsString := source.(string) + return reflect.ValueOf(tpff.fromPrefix + sourceAsString), tpff.fromError + }, + populateTo: func(source reflect.Value) (interface{}, error) { + return tpff.toPrefix + source.Interface().(string), tpff.toError + }, + }, nil +} + +func TestSimpleMsgPlainField(t *testing.T) { + gt := NewGomegaWithT(t) + + fromPrefix := "from" + toPrefix := "to" + tppff := &testProtoPlainFieldFactory{ + fromPrefix: fromPrefix, + toPrefix: toPrefix, + } + + fieldFactories = []protoFieldFactory{tppff} + + pfValue := "foo" + startMsg := &testprotos.SimpleMsg{ + PlainField: pfValue, + MapField: map[string]string{"1": "2"}, + SliceField: []string{"a", "b"}, + } + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + + newMsg := &testprotos.SimpleMsg{} + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(newMsg.MapField).To(Equal(startMsg.MapField)) + gt.Expect(newMsg.SliceField).To(Equal(startMsg.SliceField)) + gt.Expect(newMsg.PlainField).To(Equal(fromPrefix + toPrefix + startMsg.PlainField)) + + tppff.fromError = fmt.Errorf("Failing from intentionally") + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).To(MatchError("*testprotos.SimpleMsg: error in PopulateFrom for field plain_field for message *testprotos.SimpleMsg: Failing from intentionally")) + + tppff.toError = fmt.Errorf("Failing to intentionally") + err = DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).To(MatchError("*testprotos.SimpleMsg: error in PopulateTo for field plain_field for message *testprotos.SimpleMsg: Failing to intentionally")) +} + +type testProtoMapFieldFactory struct { + fromPrefix string + toPrefix string + fromError error + toError error +} + +func (tpff *testProtoMapFieldFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + return fieldName == "map_field" +} + +func (tpff *testProtoMapFieldFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + return &mapField{ + baseField: baseField{ + msg: msg, + name: fieldName, + fType: reflect.TypeOf(""), + vType: fieldType, + value: fieldValue, + }, + populateFrom: func(key string, source interface{}, destType reflect.Type) (reflect.Value, error) { + sourceAsString := source.(string) + return reflect.ValueOf(tpff.fromPrefix + key + sourceAsString), tpff.fromError + }, + populateTo: func(key string, source reflect.Value) (interface{}, error) { + return tpff.toPrefix + key + source.Interface().(string), tpff.toError + }, + }, nil +} + +func TestSimpleMsgMapField(t *testing.T) { + gt := NewGomegaWithT(t) + + 
fromPrefix := "from" + toPrefix := "to" + tpmff := &testProtoMapFieldFactory{ + fromPrefix: fromPrefix, + toPrefix: toPrefix, + } + fieldFactories = []protoFieldFactory{tpmff} + + key := "foo" + value := "bar" + startMsg := &testprotos.SimpleMsg{ + PlainField: "1", + MapField: map[string]string{key: value}, + SliceField: []string{"a", "b"}, + } + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + + newMsg := &testprotos.SimpleMsg{} + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(newMsg.PlainField).To(Equal(startMsg.PlainField)) + gt.Expect(newMsg.SliceField).To(Equal(startMsg.SliceField)) + gt.Expect(newMsg.MapField[key]).To(Equal(fromPrefix + key + toPrefix + key + startMsg.MapField[key])) + + tpmff.fromError = fmt.Errorf("Failing from intentionally") + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).To(MatchError("*testprotos.SimpleMsg: error in PopulateFrom for map field map_field with key foo for message *testprotos.SimpleMsg: Failing from intentionally")) + + tpmff.toError = fmt.Errorf("Failing to intentionally") + err = DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).To(MatchError("*testprotos.SimpleMsg: error in PopulateTo for map field map_field and key foo for message *testprotos.SimpleMsg: Failing to intentionally")) +} + +type testProtoSliceFieldFactory struct { + fromPrefix string + toPrefix string + fromError error + toError error +} + +func (tpff *testProtoSliceFieldFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + return fieldName == "slice_field" +} + +func (tpff *testProtoSliceFieldFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + return &sliceField{ + baseField: baseField{ + msg: msg, + name: fieldName, + fType: reflect.TypeOf(""), + vType: fieldType, + value: fieldValue, + }, + populateFrom: func(index int, source interface{}, destType reflect.Type) (reflect.Value, error) { + sourceAsString := source.(string) + return reflect.ValueOf(tpff.fromPrefix + fmt.Sprintf("%d", index) + sourceAsString), tpff.fromError + }, + populateTo: func(index int, source reflect.Value) (interface{}, error) { + return tpff.toPrefix + fmt.Sprintf("%d", index) + source.Interface().(string), tpff.toError + }, + }, nil +} + +func TestSimpleMsgSliceField(t *testing.T) { + gt := NewGomegaWithT(t) + + fromPrefix := "from" + toPrefix := "to" + tpsff := &testProtoSliceFieldFactory{ + fromPrefix: fromPrefix, + toPrefix: toPrefix, + } + fieldFactories = []protoFieldFactory{tpsff} + + value := "foo" + startMsg := &testprotos.SimpleMsg{ + PlainField: "1", + MapField: map[string]string{"a": "b"}, + SliceField: []string{value}, + } + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + + newMsg := &testprotos.SimpleMsg{} + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + + gt.Expect(newMsg.PlainField).To(Equal(startMsg.PlainField)) + gt.Expect(newMsg.MapField).To(Equal(startMsg.MapField)) + gt.Expect(newMsg.SliceField[0]).To(Equal(fromPrefix + "0" + toPrefix + "0" + startMsg.SliceField[0])) + + tpsff.fromError = fmt.Errorf("Failing from intentionally") + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).To(MatchError("*testprotos.SimpleMsg: error in 
PopulateFrom for slice field slice_field at index 0 for message *testprotos.SimpleMsg: Failing from intentionally")) + + tpsff.toError = fmt.Errorf("Failing to intentionally") + err = DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).To(MatchError("*testprotos.SimpleMsg: error in PopulateTo for slice field slice_field at index 0 for message *testprotos.SimpleMsg: Failing to intentionally")) +} + +type testProtoFailFactory struct{} + +func (tpff testProtoFailFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + return true +} + +func (tpff testProtoFailFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + return nil, fmt.Errorf("Intentionally failing") +} + +func TestFailFactory(t *testing.T) { + gt := NewGomegaWithT(t) + + fieldFactories = []protoFieldFactory{&testProtoFailFactory{}} + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, &testprotos.SimpleMsg{}) + gt.Expect(err).To(MatchError("*testprotos.SimpleMsg: Intentionally failing")) +} + +func TestJSONUnmarshalMaxUint32(t *testing.T) { + gt := NewGomegaWithT(t) + + fieldName := "numField" + jsonString := fmt.Sprintf("{\"%s\":%d}", fieldName, math.MaxUint32) + m, err := jsonToMap([]byte(jsonString)) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(m[fieldName]).To(BeAssignableToTypeOf(json.Number(""))) +} + +func TestMostlyDeterministicMarshal(t *testing.T) { + gt := NewGomegaWithT(t) + + multiKeyMap := &testprotos.SimpleMsg{ + MapField: map[string]string{ + "a": "b", + "c": "d", + "e": "f", + "g": "h", + "i": "j", + "k": "l", + "m": "n", + "o": "p", + "q": "r", + "s": "t", + "u": "v", + "w": "x", + "y": "z", + }, + } + + result, err := MostlyDeterministicMarshal(multiKeyMap) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(result).NotTo(BeNil()) + + // Golang map marshaling is non-deterministic by default, by marshaling + // the same message with an embedded map multiple times, we should + // detect a mismatch if the default behavior persists. Even with 3 map + // elements, there is usually a mismatch within 2-3 iterations, so 13 + // entries and 10 iterations seems like a reasonable check. + for i := 0; i < 10; i++ { + newResult, err := MostlyDeterministicMarshal(multiKeyMap) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(newResult).To(Equal(result)) + } + + unmarshaled := &testprotos.SimpleMsg{} + err = proto.Unmarshal(result, unmarshaled) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(proto.Equal(unmarshaled, multiKeyMap)).To(BeTrue()) +} diff --git a/v2/protolator/nested.go b/v2/protolator/nested.go new file mode 100644 index 0000000..babd035 --- /dev/null +++ b/v2/protolator/nested.go @@ -0,0 +1,111 @@ +/* +Copyright IBM Corp. 2017 All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package protolator + +import ( + "reflect" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func nestedFrom(value interface{}, destType reflect.Type) (reflect.Value, error) { + tree := value.(map[string]interface{}) // Safe, already checked + result := reflect.New(destType.Elem()) + nMsg := result.Interface().(proto.Message) // Safe, already checked + if err := recursivelyPopulateMessageFromTree(tree, nMsg); err != nil { + return reflect.Value{}, err + } + return result, nil +} + +func nestedTo(value reflect.Value) (interface{}, error) { + nMsg := value.Interface().(proto.Message) // Safe, already checked + return recursivelyCreateTreeFromMessage(nMsg) +} + +var timestampType = reflect.TypeOf(×tamppb.Timestamp{}) + +type nestedFieldFactory struct{} + +func (nff nestedFieldFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + // Note, we skip recursing into the field if it is a proto native timestamp, because there is other custom marshaling this conflicts with + // this should probably be revisited more generally to prevent custom marshaling of 'well known messages' + return fieldType.Kind() == reflect.Ptr && fieldType.AssignableTo(protoMsgType) && !fieldType.AssignableTo(timestampType) +} + +func (nff nestedFieldFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + return &plainField{ + baseField: baseField{ + msg: msg, + name: fieldName, + fType: mapStringInterfaceType, + vType: fieldType, + value: fieldValue, + }, + populateFrom: nestedFrom, + populateTo: nestedTo, + }, nil +} + +type nestedMapFieldFactory struct{} + +func (nmff nestedMapFieldFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + return fieldType.Kind() == reflect.Map && fieldType.Elem().AssignableTo(protoMsgType) && !fieldType.Elem().AssignableTo(timestampType) && fieldType.Key().Kind() == reflect.String +} + +func (nmff nestedMapFieldFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + return &mapField{ + baseField: baseField{ + msg: msg, + name: fieldName, + fType: mapStringInterfaceType, + vType: fieldType, + value: fieldValue, + }, + populateFrom: func(k string, v interface{}, dT reflect.Type) (reflect.Value, error) { + return nestedFrom(v, dT) + }, + populateTo: func(k string, v reflect.Value) (interface{}, error) { + return nestedTo(v) + }, + }, nil +} + +type nestedSliceFieldFactory struct{} + +func (nmff nestedSliceFieldFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + return fieldType.Kind() == reflect.Slice && fieldType.Elem().AssignableTo(protoMsgType) && !fieldType.Elem().AssignableTo(timestampType) +} + +func (nmff nestedSliceFieldFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + return &sliceField{ + baseField: baseField{ + msg: msg, + name: fieldName, + fType: mapStringInterfaceType, + vType: fieldType, + value: fieldValue, + }, + populateFrom: func(i int, v interface{}, dT reflect.Type) (reflect.Value, error) { + return nestedFrom(v, dT) + }, + populateTo: func(i int, v reflect.Value) (interface{}, error) { + return nestedTo(v) + }, + }, nil +} diff --git a/v2/protolator/nested_test.go b/v2/protolator/nested_test.go new file mode 
100644 index 0000000..ba23c46 --- /dev/null +++ b/v2/protolator/nested_test.go @@ -0,0 +1,141 @@ +/* +Copyright IBM Corp. 2017 All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protolator + +import ( + "bytes" + "testing" + + "github.com/hyperledger/fabric-config/v2/protolator/testprotos" + . "github.com/onsi/gomega" +) + +func TestPlainNestedMsg(t *testing.T) { + gt := NewGomegaWithT(t) + + fromPrefix := "from" + toPrefix := "to" + tppff := &testProtoPlainFieldFactory{ + fromPrefix: fromPrefix, + toPrefix: toPrefix, + } + + fieldFactories = []protoFieldFactory{tppff} + + pfValue := "foo" + startMsg := &testprotos.NestedMsg{ + PlainNestedField: &testprotos.SimpleMsg{ + PlainField: pfValue, + }, + } + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + newMsg := &testprotos.NestedMsg{} + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(newMsg.PlainNestedField.PlainField).NotTo(Equal(fromPrefix + toPrefix + startMsg.PlainNestedField.PlainField)) + + fieldFactories = []protoFieldFactory{tppff, nestedFieldFactory{}} + + buffer.Reset() + err = DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(newMsg.PlainNestedField.PlainField).To(Equal(fromPrefix + toPrefix + startMsg.PlainNestedField.PlainField)) +} + +func TestMapNestedMsg(t *testing.T) { + gt := NewGomegaWithT(t) + + fromPrefix := "from" + toPrefix := "to" + tppff := &testProtoPlainFieldFactory{ + fromPrefix: fromPrefix, + toPrefix: toPrefix, + } + + fieldFactories = []protoFieldFactory{tppff} + + pfValue := "foo" + mapKey := "bar" + startMsg := &testprotos.NestedMsg{ + MapNestedField: map[string]*testprotos.SimpleMsg{ + mapKey: { + PlainField: pfValue, + }, + }, + } + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + newMsg := &testprotos.NestedMsg{} + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(newMsg.MapNestedField[mapKey].PlainField).NotTo(Equal(fromPrefix + toPrefix + startMsg.MapNestedField[mapKey].PlainField)) + + fieldFactories = []protoFieldFactory{tppff, nestedMapFieldFactory{}} + + buffer.Reset() + err = DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(newMsg.MapNestedField[mapKey].PlainField).To(Equal(fromPrefix + toPrefix + startMsg.MapNestedField[mapKey].PlainField)) +} + +func TestSliceNestedMsg(t *testing.T) { + gt := NewGomegaWithT(t) + + fromPrefix := "from" + toPrefix := "to" + tppff := &testProtoPlainFieldFactory{ + fromPrefix: fromPrefix, + toPrefix: toPrefix, + } + + fieldFactories = []protoFieldFactory{tppff} + + pfValue := "foo" + startMsg := &testprotos.NestedMsg{ + 
SliceNestedField: []*testprotos.SimpleMsg{ + { + PlainField: pfValue, + }, + }, + } + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + newMsg := &testprotos.NestedMsg{} + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(newMsg.SliceNestedField[0].PlainField).NotTo(Equal(fromPrefix + toPrefix + startMsg.SliceNestedField[0].PlainField)) + + fieldFactories = []protoFieldFactory{tppff, nestedSliceFieldFactory{}} + + buffer.Reset() + err = DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(newMsg.SliceNestedField[0].PlainField).To(Equal(fromPrefix + toPrefix + startMsg.SliceNestedField[0].PlainField)) +} diff --git a/v2/protolator/protoext/commonext/common.go b/v2/protolator/protoext/commonext/common.go new file mode 100644 index 0000000..c14750b --- /dev/null +++ b/v2/protolator/protoext/commonext/common.go @@ -0,0 +1,153 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package commonext + +import ( + "fmt" + + "github.com/hyperledger/fabric-protos-go-apiv2/common" + "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "github.com/hyperledger/fabric-protos-go-apiv2/peer" + "google.golang.org/protobuf/proto" +) + +type Envelope struct{ *common.Envelope } + +func (e *Envelope) Underlying() proto.Message { + return e.Envelope +} + +func (e *Envelope) StaticallyOpaqueFields() []string { + return []string{"payload"} +} + +func (e *Envelope) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + if name != e.StaticallyOpaqueFields()[0] { + return nil, fmt.Errorf("not a marshaled field: %s", name) + } + return &common.Payload{}, nil +} + +type Payload struct{ *common.Payload } + +func (p *Payload) Underlying() proto.Message { + return p.Payload +} + +func (p *Payload) VariablyOpaqueFields() []string { + return []string{"data"} +} + +func (p *Payload) VariablyOpaqueFieldProto(name string) (proto.Message, error) { + if name != p.VariablyOpaqueFields()[0] { + return nil, fmt.Errorf("not a marshaled field: %s", name) + } + if p.Header == nil { + return nil, fmt.Errorf("cannot determine payload type when header is missing") + } + ch := &common.ChannelHeader{} + if err := proto.Unmarshal(p.Header.ChannelHeader, ch); err != nil { + return nil, fmt.Errorf("corrupt channel header: %s", err) + } + + switch ch.Type { + case int32(common.HeaderType_CONFIG): + return &common.ConfigEnvelope{}, nil + case int32(common.HeaderType_ORDERER_TRANSACTION): + return &common.Envelope{}, nil + case int32(common.HeaderType_CONFIG_UPDATE): + return &common.ConfigUpdateEnvelope{}, nil + case int32(common.HeaderType_MESSAGE): + // Only used by broadcast_msg sample client + return &common.ConfigValue{}, nil + case int32(common.HeaderType_ENDORSER_TRANSACTION): + return &peer.Transaction{}, nil + default: + return nil, fmt.Errorf("decoding type %v is unimplemented", ch.Type) + } +} + +type ChannelHeader struct{ *common.ChannelHeader } + +func (ch *ChannelHeader) Underlying() proto.Message { + return ch.ChannelHeader +} + +func (ch *ChannelHeader) VariablyOpaqueFields() []string { + return []string{"extension"} +} + +func (ch *ChannelHeader) VariablyOpaqueFieldProto(name string) (proto.Message, error) { + if name != "extension" { + return nil, fmt.Errorf("not an opaque field") + } + + switch ch.Type { + case 
int32(common.HeaderType_ENDORSER_TRANSACTION): + return &peer.ChaincodeHeaderExtension{}, nil + default: + return nil, fmt.Errorf("channel header extension only valid for endorser transactions") + } +} + +type Header struct{ *common.Header } + +func (h *Header) Underlying() proto.Message { + return h.Header +} + +func (h *Header) StaticallyOpaqueFields() []string { + return []string{"channel_header", "signature_header"} +} + +func (h *Header) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + switch name { + case h.StaticallyOpaqueFields()[0]: // channel_header + return &common.ChannelHeader{}, nil + case h.StaticallyOpaqueFields()[1]: // signature_header + return &common.SignatureHeader{}, nil + default: + return nil, fmt.Errorf("unknown header field: %s", name) + } +} + +type SignatureHeader struct{ *common.SignatureHeader } + +func (sh *SignatureHeader) Underlying() proto.Message { + return sh.SignatureHeader +} + +func (sh *SignatureHeader) StaticallyOpaqueFields() []string { + return []string{"creator"} +} + +func (sh *SignatureHeader) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + switch name { + case sh.StaticallyOpaqueFields()[0]: // creator + return &msp.SerializedIdentity{}, nil + default: + return nil, fmt.Errorf("unknown header field: %s", name) + } +} + +type BlockData struct{ *common.BlockData } + +func (bd *BlockData) Underlying() proto.Message { + return bd.BlockData +} + +func (bd *BlockData) StaticallyOpaqueSliceFields() []string { + return []string{"data"} +} + +func (bd *BlockData) StaticallyOpaqueSliceFieldProto(fieldName string, index int) (proto.Message, error) { + if fieldName != bd.StaticallyOpaqueSliceFields()[0] { + return nil, fmt.Errorf("not an opaque slice field: %s", fieldName) + } + + return &common.Envelope{}, nil +} diff --git a/v2/protolator/protoext/commonext/common_test.go b/v2/protolator/protoext/commonext/common_test.go new file mode 100644 index 0000000..02b3f3b --- /dev/null +++ b/v2/protolator/protoext/commonext/common_test.go @@ -0,0 +1,124 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package commonext + +import ( + "testing" + + "github.com/hyperledger/fabric-protos-go-apiv2/common" + . 
"github.com/onsi/gomega" + "google.golang.org/protobuf/proto" +) + +func TestCommonProtolator(t *testing.T) { + gt := NewGomegaWithT(t) + + // Envelope + env := &Envelope{Envelope: &common.Envelope{}} + gt.Expect(env.StaticallyOpaqueFields()).To(Equal([]string{"payload"})) + msg, err := env.StaticallyOpaqueFieldProto("badproto") + gt.Expect(msg).To(BeNil()) + gt.Expect(err).To(MatchError("not a marshaled field: badproto")) + msg, err = env.StaticallyOpaqueFieldProto("payload") + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(msg).To(Equal(&common.Payload{})) + + // Payload + payload := &Payload{Payload: &common.Payload{}} + gt.Expect(payload.VariablyOpaqueFields()).To(Equal([]string{"data"})) + msg, err = payload.VariablyOpaqueFieldProto("badproto") + gt.Expect(msg).To(BeNil()) + gt.Expect(err).To(MatchError("not a marshaled field: badproto")) + msg, err = payload.VariablyOpaqueFieldProto("data") + gt.Expect(msg).To(BeNil()) + gt.Expect(err).To(MatchError("cannot determine payload type when header is missing")) + + payload = &Payload{ + Payload: &common.Payload{ + Header: &common.Header{ + ChannelHeader: []byte("badbytes"), + }, + }, + } + msg, err = payload.VariablyOpaqueFieldProto("data") + gt.Expect(msg).To(BeNil()) + gt.Expect(err.Error()).To(ContainSubstring("corrupt channel header: proto:")) + gt.Expect(err.Error()).To(ContainSubstring("cannot parse invalid wire-format data")) + + ch := &common.ChannelHeader{ + Type: int32(common.HeaderType_CONFIG), + } + chbytes, _ := proto.Marshal(ch) + payload = &Payload{ + Payload: &common.Payload{ + Header: &common.Header{ + ChannelHeader: chbytes, + }, + }, + } + msg, err = payload.VariablyOpaqueFieldProto("data") + gt.Expect(msg).To(Equal(&common.ConfigEnvelope{})) + gt.Expect(err).NotTo(HaveOccurred()) + + ch = &common.ChannelHeader{ + Type: int32(common.HeaderType_CONFIG_UPDATE), + } + chbytes, _ = proto.Marshal(ch) + payload = &Payload{ + Payload: &common.Payload{ + Header: &common.Header{ + ChannelHeader: chbytes, + }, + }, + } + msg, err = payload.VariablyOpaqueFieldProto("data") + gt.Expect(msg).To(Equal(&common.ConfigUpdateEnvelope{})) + gt.Expect(err).NotTo(HaveOccurred()) + + ch = &common.ChannelHeader{ + Type: int32(common.HeaderType_CHAINCODE_PACKAGE), + } + chbytes, _ = proto.Marshal(ch) + payload = &Payload{ + Payload: &common.Payload{ + Header: &common.Header{ + ChannelHeader: chbytes, + }, + }, + } + msg, err = payload.VariablyOpaqueFieldProto("data") + gt.Expect(msg).To(BeNil()) + gt.Expect(err).To(MatchError("decoding type 6 is unimplemented")) + + // Header + var header *Header + gt.Expect(header.StaticallyOpaqueFields()).To(Equal( + []string{"channel_header", "signature_header"})) + + msg, err = header.StaticallyOpaqueFieldProto("badproto") + gt.Expect(msg).To(BeNil()) + gt.Expect(err).To(MatchError("unknown header field: badproto")) + + msg, err = header.StaticallyOpaqueFieldProto("channel_header") + gt.Expect(msg).To(Equal(&common.ChannelHeader{})) + gt.Expect(err).NotTo(HaveOccurred()) + + msg, err = header.StaticallyOpaqueFieldProto("signature_header") + gt.Expect(msg).To(Equal(&common.SignatureHeader{})) + gt.Expect(err).NotTo(HaveOccurred()) + + // BlockData + var bd *BlockData + gt.Expect(bd.StaticallyOpaqueSliceFields()).To(Equal([]string{"data"})) + + msg, err = bd.StaticallyOpaqueSliceFieldProto("badslice", 0) + gt.Expect(msg).To(BeNil()) + gt.Expect(err).To(MatchError("not an opaque slice field: badslice")) + msg, err = bd.StaticallyOpaqueSliceFieldProto("data", 0) + gt.Expect(msg).To(Equal(&common.Envelope{})) 
+ gt.Expect(err).NotTo(HaveOccurred()) +} diff --git a/v2/protolator/protoext/commonext/commonext_test.go b/v2/protolator/protoext/commonext/commonext_test.go new file mode 100644 index 0000000..0245a7d --- /dev/null +++ b/v2/protolator/protoext/commonext/commonext_test.go @@ -0,0 +1,50 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package commonext_test + +import ( + "github.com/hyperledger/fabric-config/v2/protolator" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/commonext" +) + +// ensure structs implement expected interfaces +var ( + _ protolator.StaticallyOpaqueFieldProto = &commonext.Envelope{} + _ protolator.DecoratedProto = &commonext.Envelope{} + _ protolator.VariablyOpaqueFieldProto = &commonext.Payload{} + _ protolator.DecoratedProto = &commonext.Payload{} + _ protolator.StaticallyOpaqueFieldProto = &commonext.Header{} + _ protolator.DecoratedProto = &commonext.Header{} + _ protolator.StaticallyOpaqueFieldProto = &commonext.SignatureHeader{} + _ protolator.DecoratedProto = &commonext.SignatureHeader{} + _ protolator.StaticallyOpaqueSliceFieldProto = &commonext.BlockData{} + _ protolator.DecoratedProto = &commonext.BlockData{} + + _ protolator.StaticallyOpaqueFieldProto = &commonext.ConfigUpdateEnvelope{} + _ protolator.DecoratedProto = &commonext.ConfigUpdateEnvelope{} + _ protolator.StaticallyOpaqueFieldProto = &commonext.ConfigSignature{} + _ protolator.DecoratedProto = &commonext.ConfigSignature{} + _ protolator.DynamicFieldProto = &commonext.Config{} + _ protolator.DecoratedProto = &commonext.Config{} + _ protolator.StaticallyOpaqueMapFieldProto = &commonext.ConfigUpdate{} + _ protolator.DecoratedProto = &commonext.ConfigUpdate{} + + _ protolator.DynamicMapFieldProto = &commonext.DynamicChannelGroup{} + _ protolator.DecoratedProto = &commonext.DynamicChannelGroup{} + _ protolator.StaticallyOpaqueFieldProto = &commonext.DynamicChannelConfigValue{} + _ protolator.DecoratedProto = &commonext.DynamicChannelConfigValue{} + _ protolator.DynamicMapFieldProto = &commonext.DynamicConsortiumsGroup{} + _ protolator.DecoratedProto = &commonext.DynamicConsortiumsGroup{} + _ protolator.DynamicMapFieldProto = &commonext.DynamicConsortiumGroup{} + _ protolator.DecoratedProto = &commonext.DynamicConsortiumGroup{} + _ protolator.VariablyOpaqueFieldProto = &commonext.DynamicConsortiumConfigValue{} + _ protolator.DecoratedProto = &commonext.DynamicConsortiumConfigValue{} + _ protolator.DynamicMapFieldProto = &commonext.DynamicConsortiumOrgGroup{} + _ protolator.DecoratedProto = &commonext.DynamicConsortiumOrgGroup{} + _ protolator.StaticallyOpaqueFieldProto = &commonext.DynamicConsortiumOrgConfigValue{} + _ protolator.DecoratedProto = &commonext.DynamicConsortiumOrgConfigValue{} +) diff --git a/v2/protolator/protoext/commonext/configtx.go b/v2/protolator/protoext/commonext/configtx.go new file mode 100644 index 0000000..6745eea --- /dev/null +++ b/v2/protolator/protoext/commonext/configtx.go @@ -0,0 +1,115 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package commonext + +import ( + "fmt" + + "github.com/hyperledger/fabric-protos-go-apiv2/common" + "google.golang.org/protobuf/proto" +) + +type ConfigUpdateEnvelope struct{ *common.ConfigUpdateEnvelope } + +func (cue *ConfigUpdateEnvelope) Underlying() proto.Message { + return cue.ConfigUpdateEnvelope +} + +func (cue *ConfigUpdateEnvelope) StaticallyOpaqueFields() []string { + return []string{"config_update"} +} + +func (cue *ConfigUpdateEnvelope) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + if name != cue.StaticallyOpaqueFields()[0] { + return nil, fmt.Errorf("Not a marshaled field: %s", name) + } + return &common.ConfigUpdate{}, nil +} + +type ConfigSignature struct{ *common.ConfigSignature } + +func (cs *ConfigSignature) Underlying() proto.Message { + return cs.ConfigSignature +} + +func (cs *ConfigSignature) StaticallyOpaqueFields() []string { + return []string{"signature_header"} +} + +func (cs *ConfigSignature) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + if name != cs.StaticallyOpaqueFields()[0] { + return nil, fmt.Errorf("Not a marshaled field: %s", name) + } + return &common.SignatureHeader{}, nil +} + +type Config struct{ *common.Config } + +func (c *Config) Underlying() proto.Message { + return c.Config +} + +func (c *Config) DynamicFields() []string { + return []string{"channel_group"} +} + +func (c *Config) DynamicFieldProto(name string, base proto.Message) (proto.Message, error) { + if name != c.DynamicFields()[0] { + return nil, fmt.Errorf("Not a dynamic field: %s", name) + } + + cg, ok := base.(*common.ConfigGroup) + if !ok { + return nil, fmt.Errorf("Config must embed a config group as its dynamic field") + } + + return &DynamicChannelGroup{ConfigGroup: cg}, nil +} + +// ConfigUpdateIsolatedDataTypes allows other proto packages to register types for +// the isolated_data field. This is necessary to break import cycles. +var ConfigUpdateIsolatedDataTypes = map[string]func(string) proto.Message{} + +type ConfigUpdate struct{ *common.ConfigUpdate } + +func (c *ConfigUpdate) Underlying() proto.Message { + return c.ConfigUpdate +} + +func (c *ConfigUpdate) StaticallyOpaqueMapFields() []string { + return []string{"isolated_data"} +} + +func (c *ConfigUpdate) StaticallyOpaqueMapFieldProto(name string, key string) (proto.Message, error) { + if name != c.StaticallyOpaqueMapFields()[0] { + return nil, fmt.Errorf("Not a statically opaque map field: %s", name) + } + + mf, ok := ConfigUpdateIsolatedDataTypes[key] + if !ok { + return nil, fmt.Errorf("Unknown map key: %s", key) + } + + return mf(key), nil +} + +func (c *ConfigUpdate) DynamicFields() []string { + return []string{"read_set", "write_set"} +} + +func (c *ConfigUpdate) DynamicFieldProto(name string, base proto.Message) (proto.Message, error) { + if name != c.DynamicFields()[0] && name != c.DynamicFields()[1] { + return nil, fmt.Errorf("Not a dynamic field: %s", name) + } + + cg, ok := base.(*common.ConfigGroup) + if !ok { + return nil, fmt.Errorf("Expected base to be *ConfigGroup, got %T", base) + } + + return &DynamicChannelGroup{ConfigGroup: cg}, nil +} diff --git a/v2/protolator/protoext/commonext/configuration.go b/v2/protolator/protoext/commonext/configuration.go new file mode 100644 index 0000000..e5df014 --- /dev/null +++ b/v2/protolator/protoext/commonext/configuration.go @@ -0,0 +1,242 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package commonext + +import ( + "fmt" + + "github.com/hyperledger/fabric-config/v2/protolator/protoext/ordererext" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/peerext" + "github.com/hyperledger/fabric-protos-go-apiv2/common" + "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "google.golang.org/protobuf/proto" +) + +type DynamicChannelGroup struct { + *common.ConfigGroup +} + +func (dcg *DynamicChannelGroup) DynamicMapFields() []string { + return []string{"values", "groups"} +} + +func (dcg *DynamicChannelGroup) Underlying() proto.Message { + return dcg.ConfigGroup +} + +func (dcg *DynamicChannelGroup) DynamicMapFieldProto(name string, key string, base proto.Message) (proto.Message, error) { + switch name { + case "groups": + cg, ok := base.(*common.ConfigGroup) + if !ok { + return nil, fmt.Errorf("ConfigGroup groups can only contain ConfigGroup messages") + } + + switch key { + case "Consortiums": + return &DynamicConsortiumsGroup{ConfigGroup: cg}, nil + case "Orderer": + return &ordererext.DynamicOrdererGroup{ConfigGroup: cg}, nil + case "Application": + return &peerext.DynamicApplicationGroup{ConfigGroup: cg}, nil + default: + return nil, fmt.Errorf("unknown channel group sub-group '%s'", key) + } + case "values": + cv, ok := base.(*common.ConfigValue) + if !ok { + return nil, fmt.Errorf("ConfigGroup values can only contain ConfigValue messages") + } + return &DynamicChannelConfigValue{ + ConfigValue: cv, + name: key, + }, nil + default: + return nil, fmt.Errorf("ConfigGroup does not have a dynamic field: %s", name) + } +} + +type DynamicChannelConfigValue struct { + *common.ConfigValue + name string +} + +func (dccv *DynamicChannelConfigValue) StaticallyOpaqueFields() []string { + return []string{"value"} +} + +func (dccv *DynamicChannelConfigValue) Underlying() proto.Message { + return dccv.ConfigValue +} + +func (dccv *DynamicChannelConfigValue) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + if name != "value" { + return nil, fmt.Errorf("not a marshaled field: %s", name) + } + switch dccv.name { + case "HashingAlgorithm": + return &common.HashingAlgorithm{}, nil + case "BlockDataHashingStructure": + return &common.BlockDataHashingStructure{}, nil + case "OrdererAddresses": + return &common.OrdererAddresses{}, nil + case "Consortium": + return &common.Consortium{}, nil + case "Capabilities": + return &common.Capabilities{}, nil + default: + return nil, fmt.Errorf("unknown Channel ConfigValue name: %s", dccv.name) + } +} + +type DynamicConsortiumsGroup struct { + *common.ConfigGroup +} + +func (dcg *DynamicConsortiumsGroup) Underlying() proto.Message { + return dcg.ConfigGroup +} + +func (dcg *DynamicConsortiumsGroup) DynamicMapFields() []string { + return []string{"values", "groups"} +} + +func (dcg *DynamicConsortiumsGroup) DynamicMapFieldProto(name string, key string, base proto.Message) (proto.Message, error) { + switch name { + case "groups": + cg, ok := base.(*common.ConfigGroup) + if !ok { + return nil, fmt.Errorf("ConfigGroup groups can only contain ConfigGroup messages") + } + + return &DynamicConsortiumGroup{ + ConfigGroup: cg, + }, nil + case "values": + return nil, fmt.Errorf("Consortiums currently support no config values") + default: + return nil, fmt.Errorf("ConfigGroup does not have a dynamic field: %s", name) + } +} + +type DynamicConsortiumGroup struct { + *common.ConfigGroup +} + +func (dcg *DynamicConsortiumGroup) Underlying() proto.Message { + return dcg.ConfigGroup +} + +func 
(dcg *DynamicConsortiumGroup) DynamicMapFields() []string { + return []string{"values", "groups"} +} + +func (dcg *DynamicConsortiumGroup) DynamicMapFieldProto(name string, key string, base proto.Message) (proto.Message, error) { + switch name { + case "groups": + cg, ok := base.(*common.ConfigGroup) + if !ok { + return nil, fmt.Errorf("ConfigGroup groups can only contain ConfigGroup messages") + } + return &DynamicConsortiumOrgGroup{ + ConfigGroup: cg, + }, nil + case "values": + cv, ok := base.(*common.ConfigValue) + if !ok { + return nil, fmt.Errorf("ConfigGroup values can only contain ConfigValue messages") + } + + return &DynamicConsortiumConfigValue{ + ConfigValue: cv, + name: key, + }, nil + default: + return nil, fmt.Errorf("not a dynamic orderer map field: %s", name) + } +} + +type DynamicConsortiumConfigValue struct { + *common.ConfigValue + name string +} + +func (dccv *DynamicConsortiumConfigValue) Underlying() proto.Message { + return dccv.ConfigValue +} + +func (dccv *DynamicConsortiumConfigValue) VariablyOpaqueFields() []string { + return []string{"value"} +} + +func (dccv *DynamicConsortiumConfigValue) VariablyOpaqueFieldProto(name string) (proto.Message, error) { + if name != "value" { + return nil, fmt.Errorf("not a marshaled field: %s", name) + } + switch dccv.name { + case "ChannelCreationPolicy": + return &common.Policy{}, nil + default: + return nil, fmt.Errorf("unknown Consortium ConfigValue name: %s", dccv.name) + } +} + +type DynamicConsortiumOrgGroup struct { + *common.ConfigGroup +} + +func (dcg *DynamicConsortiumOrgGroup) Underlying() proto.Message { + return dcg.ConfigGroup +} + +func (dcg *DynamicConsortiumOrgGroup) DynamicMapFields() []string { + return []string{"groups", "values"} +} + +func (dcg *DynamicConsortiumOrgGroup) DynamicMapFieldProto(name string, key string, base proto.Message) (proto.Message, error) { + switch name { + case "groups": + return nil, fmt.Errorf("ConsortiumOrg groups do not support sub groups") + case "values": + cv, ok := base.(*common.ConfigValue) + if !ok { + return nil, fmt.Errorf("ConfigGroup values can only contain ConfigValue messages") + } + + return &DynamicConsortiumOrgConfigValue{ + ConfigValue: cv, + name: key, + }, nil + default: + return nil, fmt.Errorf("not a dynamic orderer map field: %s", name) + } +} + +type DynamicConsortiumOrgConfigValue struct { + *common.ConfigValue + name string +} + +func (dcocv *DynamicConsortiumOrgConfigValue) Underlying() proto.Message { + return dcocv.ConfigValue +} + +func (dcocv *DynamicConsortiumOrgConfigValue) StaticallyOpaqueFields() []string { + return []string{"value"} +} + +func (dcocv *DynamicConsortiumOrgConfigValue) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + if name != "value" { + return nil, fmt.Errorf("not a marshaled field: %s", name) + } + switch dcocv.name { + case "MSP": + return &msp.MSPConfig{}, nil + default: + return nil, fmt.Errorf("unknown Consortium Org ConfigValue name: %s", dcocv.name) + } +} diff --git a/v2/protolator/protoext/commonext/policies.go b/v2/protolator/protoext/commonext/policies.go new file mode 100644 index 0000000..6ff1984 --- /dev/null +++ b/v2/protolator/protoext/commonext/policies.go @@ -0,0 +1,38 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package commonext + +import ( + "fmt" + + "github.com/hyperledger/fabric-protos-go-apiv2/common" + "google.golang.org/protobuf/proto" +) + +type Policy struct{ *common.Policy } + +func (p *Policy) Underlying() proto.Message { + return p.Policy +} + +func (p *Policy) VariablyOpaqueFields() []string { + return []string{"value"} +} + +func (p *Policy) VariablyOpaqueFieldProto(name string) (proto.Message, error) { + if name != p.VariablyOpaqueFields()[0] { + return nil, fmt.Errorf("not a marshaled field: %s", name) + } + switch p.Type { + case int32(common.Policy_SIGNATURE): + return &common.SignaturePolicyEnvelope{}, nil + case int32(common.Policy_IMPLICIT_META): + return &common.ImplicitMetaPolicy{}, nil + default: + return nil, fmt.Errorf("unable to decode policy type: %v", p.Type) + } +} diff --git a/v2/protolator/protoext/decorate.go b/v2/protolator/protoext/decorate.go new file mode 100644 index 0000000..f8f55db --- /dev/null +++ b/v2/protolator/protoext/decorate.go @@ -0,0 +1,77 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package protoext + +import ( + "github.com/hyperledger/fabric-config/v2/protolator/protoext/commonext" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/ledger/rwsetext" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/mspext" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/ordererext" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/peerext" + "github.com/hyperledger/fabric-protos-go-apiv2/common" + "github.com/hyperledger/fabric-protos-go-apiv2/ledger/rwset" + "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "github.com/hyperledger/fabric-protos-go-apiv2/orderer" + "github.com/hyperledger/fabric-protos-go-apiv2/peer" + "google.golang.org/protobuf/proto" +) + +// Decorate adds additional capabilities to some protobuf messages that +// enable proper JSON marshalling and unmarshalling in protolator.
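+//
+// A minimal usage sketch, not an API guarantee: payloadBytes below is an
+// assumed variable holding a marshaled common.Payload, and the usual
+// bytes/common/protolator/protoext imports are taken as given. A caller wraps
+// the message with Decorate before handing it to protolator's deep JSON marshaler:
+//
+//	var buf bytes.Buffer
+//	env := &common.Envelope{Payload: payloadBytes}
+//	if err := protolator.DeepMarshalJSON(&buf, protoext.Decorate(env)); err != nil {
+//		// handle the error; on success buf holds the deeply decoded JSON
+//	}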
+func Decorate(msg proto.Message) proto.Message { + switch m := msg.(type) { + case *common.BlockData: + return &commonext.BlockData{BlockData: m} + case *common.Config: + return &commonext.Config{Config: m} + case *common.ConfigSignature: + return &commonext.ConfigSignature{ConfigSignature: m} + case *common.ConfigUpdate: + return &commonext.ConfigUpdate{ConfigUpdate: m} + case *common.ConfigUpdateEnvelope: + return &commonext.ConfigUpdateEnvelope{ConfigUpdateEnvelope: m} + case *common.Envelope: + return &commonext.Envelope{Envelope: m} + case *common.Header: + return &commonext.Header{Header: m} + case *common.ChannelHeader: + return &commonext.ChannelHeader{ChannelHeader: m} + case *common.SignatureHeader: + return &commonext.SignatureHeader{SignatureHeader: m} + case *common.Payload: + return &commonext.Payload{Payload: m} + case *common.Policy: + return &commonext.Policy{Policy: m} + + case *msp.MSPConfig: + return &mspext.MSPConfig{MSPConfig: m} + case *msp.MSPPrincipal: + return &mspext.MSPPrincipal{MSPPrincipal: m} + + case *orderer.ConsensusType: + return &ordererext.ConsensusType{ConsensusType: m} + + case *peer.ChaincodeAction: + return &peerext.ChaincodeAction{ChaincodeAction: m} + case *peer.ChaincodeActionPayload: + return &peerext.ChaincodeActionPayload{ChaincodeActionPayload: m} + case *peer.ChaincodeEndorsedAction: + return &peerext.ChaincodeEndorsedAction{ChaincodeEndorsedAction: m} + case *peer.ChaincodeProposalPayload: + return &peerext.ChaincodeProposalPayload{ChaincodeProposalPayload: m} + case *peer.ProposalResponsePayload: + return &peerext.ProposalResponsePayload{ProposalResponsePayload: m} + case *peer.TransactionAction: + return &peerext.TransactionAction{TransactionAction: m} + + case *rwset.TxReadWriteSet: + return &rwsetext.TxReadWriteSet{TxReadWriteSet: m} + + default: + return msg + } +} diff --git a/v2/protolator/protoext/decorate_test.go b/v2/protolator/protoext/decorate_test.go new file mode 100644 index 0000000..2a2b135 --- /dev/null +++ b/v2/protolator/protoext/decorate_test.go @@ -0,0 +1,311 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package protoext + +import ( + "testing" + + "google.golang.org/protobuf/protoadapt" + + "github.com/hyperledger/fabric-config/v2/protolator/protoext/commonext" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/ledger/rwsetext" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/mspext" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/ordererext" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/peerext" + "github.com/hyperledger/fabric-protos-go-apiv2/common" + "github.com/hyperledger/fabric-protos-go-apiv2/ledger/rwset" + "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "github.com/hyperledger/fabric-protos-go-apiv2/orderer" + "github.com/hyperledger/fabric-protos-go-apiv2/peer" + . 
"github.com/onsi/gomega" + "google.golang.org/protobuf/proto" +) + +type GenericProtoMessage struct { + GenericField string +} + +func (g *GenericProtoMessage) Reset() { + panic("not implemented") +} + +func (g *GenericProtoMessage) String() string { + return "not implemented" +} + +func (g *GenericProtoMessage) ProtoMessage() { + panic("not implemented") +} + +func TestDecorate(t *testing.T) { + tests := []struct { + testSpec string + msg proto.Message + expectedReturn proto.Message + }{ + { + testSpec: "common.BlockData", + msg: &common.BlockData{ + Data: [][]byte{ + []byte("data-bytes"), + }, + }, + expectedReturn: &commonext.BlockData{ + BlockData: &common.BlockData{ + Data: [][]byte{ + []byte("data-bytes"), + }, + }, + }, + }, + { + testSpec: "common.Config", + msg: &common.Config{ + Sequence: 5, + }, + expectedReturn: &commonext.Config{ + Config: &common.Config{ + Sequence: 5, + }, + }, + }, + { + testSpec: "common.ConfigSignature", + msg: &common.ConfigSignature{ + SignatureHeader: []byte("signature-header-bytes"), + }, + expectedReturn: &commonext.ConfigSignature{ + ConfigSignature: &common.ConfigSignature{ + SignatureHeader: []byte("signature-header-bytes"), + }, + }, + }, + { + testSpec: "common.ConfigUpdate", + msg: &common.ConfigUpdate{ + ChannelId: "testchannel", + }, + expectedReturn: &commonext.ConfigUpdate{ + ConfigUpdate: &common.ConfigUpdate{ + ChannelId: "testchannel", + }, + }, + }, + { + testSpec: "common.ConfigUpdateEnvelope", + msg: &common.ConfigUpdateEnvelope{ + ConfigUpdate: []byte("config-update-bytes"), + }, + expectedReturn: &commonext.ConfigUpdateEnvelope{ + ConfigUpdateEnvelope: &common.ConfigUpdateEnvelope{ + ConfigUpdate: []byte("config-update-bytes"), + }, + }, + }, + { + testSpec: "common.Envelope", + msg: &common.Envelope{ + Payload: []byte("payload-bytes"), + }, + expectedReturn: &commonext.Envelope{ + Envelope: &common.Envelope{ + Payload: []byte("payload-bytes"), + }, + }, + }, + { + testSpec: "common.Header", + msg: &common.Header{ + ChannelHeader: []byte("channel-header-bytes"), + }, + expectedReturn: &commonext.Header{ + Header: &common.Header{ + ChannelHeader: []byte("channel-header-bytes"), + }, + }, + }, + { + testSpec: "common.ChannelHeader", + msg: &common.ChannelHeader{ + Type: 5, + }, + expectedReturn: &commonext.ChannelHeader{ + ChannelHeader: &common.ChannelHeader{ + Type: 5, + }, + }, + }, + { + testSpec: "common.SignatureHeader", + msg: &common.SignatureHeader{ + Creator: []byte("creator-bytes"), + }, + expectedReturn: &commonext.SignatureHeader{ + SignatureHeader: &common.SignatureHeader{ + Creator: []byte("creator-bytes"), + }, + }, + }, + { + testSpec: "common.Payload", + msg: &common.Payload{ + Header: &common.Header{ChannelHeader: []byte("channel-header-bytes")}, + }, + expectedReturn: &commonext.Payload{ + Payload: &common.Payload{ + Header: &common.Header{ChannelHeader: []byte("channel-header-bytes")}, + }, + }, + }, + { + testSpec: "common.Policy", + msg: &common.Policy{ + Type: 5, + }, + expectedReturn: &commonext.Policy{ + Policy: &common.Policy{ + Type: 5, + }, + }, + }, + { + testSpec: "msp.MSPConfig", + msg: &msp.MSPConfig{ + Type: 5, + }, + expectedReturn: &mspext.MSPConfig{ + MSPConfig: &msp.MSPConfig{ + Type: 5, + }, + }, + }, + { + testSpec: "msp.MSPPrincipal", + msg: &msp.MSPPrincipal{ + Principal: []byte("principal-bytes"), + }, + expectedReturn: &mspext.MSPPrincipal{ + MSPPrincipal: &msp.MSPPrincipal{ + Principal: []byte("principal-bytes"), + }, + }, + }, + { + testSpec: "orderer.ConsensusType", + msg: 
&orderer.ConsensusType{ + Type: "etcdraft", + }, + expectedReturn: &ordererext.ConsensusType{ + ConsensusType: &orderer.ConsensusType{ + Type: "etcdraft", + }, + }, + }, + { + testSpec: "peer.ChaincodeAction", + msg: &peer.ChaincodeAction{ + Results: []byte("results-bytes"), + }, + expectedReturn: &peerext.ChaincodeAction{ + ChaincodeAction: &peer.ChaincodeAction{ + Results: []byte("results-bytes"), + }, + }, + }, + { + testSpec: "peer.ChaincodeActionPayload", + msg: &peer.ChaincodeActionPayload{ + ChaincodeProposalPayload: []byte("chaincode-proposal-payload-bytes"), + }, + expectedReturn: &peerext.ChaincodeActionPayload{ + ChaincodeActionPayload: &peer.ChaincodeActionPayload{ + ChaincodeProposalPayload: []byte("chaincode-proposal-payload-bytes"), + }, + }, + }, + { + testSpec: "peer.ChaincodeEndorsedAction", + msg: &peer.ChaincodeEndorsedAction{ + ProposalResponsePayload: []byte("proposal-response-payload-bytes"), + }, + expectedReturn: &peerext.ChaincodeEndorsedAction{ + ChaincodeEndorsedAction: &peer.ChaincodeEndorsedAction{ + ProposalResponsePayload: []byte("proposal-response-payload-bytes"), + }, + }, + }, + { + testSpec: "peer.ChaincodeProposalPayload", + msg: &peer.ChaincodeProposalPayload{ + Input: []byte("input-bytes"), + }, + expectedReturn: &peerext.ChaincodeProposalPayload{ + ChaincodeProposalPayload: &peer.ChaincodeProposalPayload{ + Input: []byte("input-bytes"), + }, + }, + }, + { + testSpec: "peer.ProposalResponsePayload", + msg: &peer.ProposalResponsePayload{ + ProposalHash: []byte("proposal-hash-bytes"), + }, + expectedReturn: &peerext.ProposalResponsePayload{ + ProposalResponsePayload: &peer.ProposalResponsePayload{ + ProposalHash: []byte("proposal-hash-bytes"), + }, + }, + }, + { + testSpec: "peer.TransactionAction", + msg: &peer.TransactionAction{ + Header: []byte("header-bytes"), + }, + expectedReturn: &peerext.TransactionAction{ + TransactionAction: &peer.TransactionAction{ + Header: []byte("header-bytes"), + }, + }, + }, + { + testSpec: "rwset.TxReadWriteSet", + msg: &rwset.TxReadWriteSet{ + NsRwset: []*rwset.NsReadWriteSet{ + { + Namespace: "namespace", + }, + }, + }, + expectedReturn: &rwsetext.TxReadWriteSet{ + TxReadWriteSet: &rwset.TxReadWriteSet{ + NsRwset: []*rwset.NsReadWriteSet{ + { + Namespace: "namespace", + }, + }, + }, + }, + }, + { + testSpec: "default", + msg: protoadapt.MessageV2Of(&GenericProtoMessage{ + GenericField: "test", + }), + expectedReturn: protoadapt.MessageV2Of(&GenericProtoMessage{ + GenericField: "test", + }), + }, + } + + for _, tt := range tests { + t.Run(tt.testSpec, func(t *testing.T) { + gt := NewGomegaWithT(t) + decoratedMsg := Decorate(tt.msg) + gt.Expect(proto.Equal(decoratedMsg, tt.expectedReturn)).To(BeTrue()) + }) + } +} diff --git a/v2/protolator/protoext/ledger/rwsetext/rwset.go b/v2/protolator/protoext/ledger/rwsetext/rwset.go new file mode 100644 index 0000000..f70f2c0 --- /dev/null +++ b/v2/protolator/protoext/ledger/rwsetext/rwset.go @@ -0,0 +1,125 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package rwsetext + +import ( + "fmt" + + "github.com/hyperledger/fabric-protos-go-apiv2/ledger/rwset" + "github.com/hyperledger/fabric-protos-go-apiv2/ledger/rwset/kvrwset" + "google.golang.org/protobuf/proto" +) + +type TxReadWriteSet struct{ *rwset.TxReadWriteSet } + +func (txrws *TxReadWriteSet) Underlying() proto.Message { + return txrws.TxReadWriteSet +} + +func (txrws *TxReadWriteSet) DynamicSliceFields() []string { + if txrws.DataModel != rwset.TxReadWriteSet_KV { + // We only know how to handle TxReadWriteSet_KV types + return []string{} + } + + return []string{"ns_rwset"} +} + +func (txrws *TxReadWriteSet) DynamicSliceFieldProto(name string, index int, base proto.Message) (proto.Message, error) { + if name != txrws.DynamicSliceFields()[0] { + return nil, fmt.Errorf("Not a dynamic field: %s", name) + } + + nsrw, ok := base.(*rwset.NsReadWriteSet) + if !ok { + return nil, fmt.Errorf("TxReadWriteSet must embed a NsReadWriteSet as its dynamic field") + } + + return &DynamicNsReadWriteSet{ + NsReadWriteSet: nsrw, + DataModel: txrws.DataModel, + }, nil +} + +type DynamicNsReadWriteSet struct { + *rwset.NsReadWriteSet + DataModel rwset.TxReadWriteSet_DataModel +} + +func (dnrws *DynamicNsReadWriteSet) Underlying() proto.Message { + return dnrws.NsReadWriteSet +} + +func (dnrws *DynamicNsReadWriteSet) StaticallyOpaqueFields() []string { + return []string{"rwset"} +} + +func (dnrws *DynamicNsReadWriteSet) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + switch name { + case "rwset": + switch dnrws.DataModel { + case rwset.TxReadWriteSet_KV: + return &kvrwset.KVRWSet{}, nil + default: + return nil, fmt.Errorf("unknown data model type: %v", dnrws.DataModel) + } + default: + return nil, fmt.Errorf("not a marshaled field: %s", name) + } +} + +func (dnrws *DynamicNsReadWriteSet) DynamicSliceFields() []string { + if dnrws.DataModel != rwset.TxReadWriteSet_KV { + // We only know how to handle TxReadWriteSet_KV types + return []string{} + } + + return []string{"collection_hashed_rwset"} +} + +func (dnrws *DynamicNsReadWriteSet) DynamicSliceFieldProto(name string, index int, base proto.Message) (proto.Message, error) { + if name != dnrws.DynamicSliceFields()[0] { + return nil, fmt.Errorf("Not a dynamic field: %s", name) + } + + chrws, ok := base.(*rwset.CollectionHashedReadWriteSet) + if !ok { + return nil, fmt.Errorf("NsReadWriteSet must embed a *CollectionHashedReadWriteSet as its dynamic field") + } + + return &DynamicCollectionHashedReadWriteSet{ + CollectionHashedReadWriteSet: chrws, + DataModel: dnrws.DataModel, + }, nil +} + +type DynamicCollectionHashedReadWriteSet struct { + *rwset.CollectionHashedReadWriteSet + DataModel rwset.TxReadWriteSet_DataModel +} + +func (dchrws *DynamicCollectionHashedReadWriteSet) Underlying() proto.Message { + return dchrws.CollectionHashedReadWriteSet +} + +func (dchrws *DynamicCollectionHashedReadWriteSet) StaticallyOpaqueFields() []string { + return []string{"rwset"} +} + +func (dchrws *DynamicCollectionHashedReadWriteSet) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + switch name { + case "rwset": + switch dchrws.DataModel { + case rwset.TxReadWriteSet_KV: + return &kvrwset.HashedRWSet{}, nil + default: + return nil, fmt.Errorf("unknown data model type: %v", dchrws.DataModel) + } + default: + return nil, fmt.Errorf("not a marshaled field: %s", name) + } +} diff --git a/v2/protolator/protoext/ledger/rwsetext/rwsetext_test.go b/v2/protolator/protoext/ledger/rwsetext/rwsetext_test.go
new file mode 100644 index 0000000..2c5aed9 --- /dev/null +++ b/v2/protolator/protoext/ledger/rwsetext/rwsetext_test.go @@ -0,0 +1,22 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package rwsetext_test + +import ( + "github.com/hyperledger/fabric-config/v2/protolator" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/ledger/rwsetext" +) + +// ensure structs implement expected interfaces +var ( + _ protolator.DynamicSliceFieldProto = &rwsetext.TxReadWriteSet{} + _ protolator.DecoratedProto = &rwsetext.TxReadWriteSet{} + _ protolator.StaticallyOpaqueFieldProto = &rwsetext.DynamicNsReadWriteSet{} + _ protolator.DecoratedProto = &rwsetext.DynamicNsReadWriteSet{} + _ protolator.StaticallyOpaqueFieldProto = &rwsetext.DynamicCollectionHashedReadWriteSet{} + _ protolator.DecoratedProto = &rwsetext.DynamicCollectionHashedReadWriteSet{} +) diff --git a/v2/protolator/protoext/mspext/msp_config.go b/v2/protolator/protoext/mspext/msp_config.go new file mode 100644 index 0000000..2bbe661 --- /dev/null +++ b/v2/protolator/protoext/mspext/msp_config.go @@ -0,0 +1,38 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package mspext + +import ( + "fmt" + + "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "google.golang.org/protobuf/proto" +) + +type MSPConfig struct{ *msp.MSPConfig } + +func (mc *MSPConfig) Underlying() proto.Message { + return mc.MSPConfig +} + +func (mc *MSPConfig) VariablyOpaqueFields() []string { + return []string{"config"} +} + +func (mc *MSPConfig) VariablyOpaqueFieldProto(name string) (proto.Message, error) { + if name != mc.VariablyOpaqueFields()[0] { + return nil, fmt.Errorf("not a marshaled field: %s", name) + } + switch mc.Type { + case 0: + return &msp.FabricMSPConfig{}, nil + case 1: + return &msp.IdemixMSPConfig{}, nil + default: + return nil, fmt.Errorf("unable to decode MSP type: %v", mc.Type) + } +} diff --git a/v2/protolator/protoext/mspext/msp_principal.go b/v2/protolator/protoext/mspext/msp_principal.go new file mode 100644 index 0000000..43cb1d2 --- /dev/null +++ b/v2/protolator/protoext/mspext/msp_principal.go @@ -0,0 +1,40 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package mspext + +import ( + "fmt" + + "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "google.golang.org/protobuf/proto" +) + +type MSPPrincipal struct{ *msp.MSPPrincipal } + +func (mp *MSPPrincipal) Underlying() proto.Message { + return mp.MSPPrincipal +} + +func (mp *MSPPrincipal) VariablyOpaqueFields() []string { + return []string{"principal"} +} + +func (mp *MSPPrincipal) VariablyOpaqueFieldProto(name string) (proto.Message, error) { + if name != mp.VariablyOpaqueFields()[0] { + return nil, fmt.Errorf("not a marshaled field: %s", name) + } + switch mp.PrincipalClassification { + case msp.MSPPrincipal_ROLE: + return &msp.MSPRole{}, nil + case msp.MSPPrincipal_ORGANIZATION_UNIT: + return &msp.OrganizationUnit{}, nil + case msp.MSPPrincipal_IDENTITY: + return &msp.SerializedIdentity{}, nil + default: + return nil, fmt.Errorf("unable to decode MSP type: %v", mp.PrincipalClassification) + } +} diff --git a/v2/protolator/protoext/mspext/mspext_test.go b/v2/protolator/protoext/mspext/mspext_test.go new file mode 100644 index 0000000..aab1261 --- /dev/null +++ b/v2/protolator/protoext/mspext/mspext_test.go @@ -0,0 +1,21 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package mspext_test + +import ( + "github.com/hyperledger/fabric-config/v2/protolator" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/mspext" +) + +// ensure structs implement expected interfaces +var ( + _ protolator.VariablyOpaqueFieldProto = &mspext.MSPConfig{} + _ protolator.DecoratedProto = &mspext.MSPConfig{} + + _ protolator.VariablyOpaqueFieldProto = &mspext.MSPPrincipal{} + _ protolator.DecoratedProto = &mspext.MSPPrincipal{} +) diff --git a/v2/protolator/protoext/ordererext/configuration.go b/v2/protolator/protoext/ordererext/configuration.go new file mode 100644 index 0000000..4aa21d3 --- /dev/null +++ b/v2/protolator/protoext/ordererext/configuration.go @@ -0,0 +1,184 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package ordererext + +import ( + "fmt" + + "github.com/hyperledger/fabric-protos-go-apiv2/common" + "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "github.com/hyperledger/fabric-protos-go-apiv2/orderer" + "github.com/hyperledger/fabric-protos-go-apiv2/orderer/etcdraft" + "github.com/hyperledger/fabric-protos-go-apiv2/orderer/smartbft" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/emptypb" +) + +type DynamicOrdererGroup struct { + *common.ConfigGroup +} + +func (dcg *DynamicOrdererGroup) Underlying() proto.Message { + return dcg.ConfigGroup +} + +func (dcg *DynamicOrdererGroup) DynamicMapFields() []string { + return []string{"values", "groups"} +} + +func (dcg *DynamicOrdererGroup) DynamicMapFieldProto(name string, key string, base proto.Message) (proto.Message, error) { + switch name { + case "groups": + cg, ok := base.(*common.ConfigGroup) + if !ok { + return nil, fmt.Errorf("ConfigGroup groups can only contain ConfigGroup messages") + } + + return &DynamicOrdererOrgGroup{ + ConfigGroup: cg, + }, nil + case "values": + cv, ok := base.(*common.ConfigValue) + if !ok { + return nil, fmt.Errorf("ConfigGroup values can only contain ConfigValue messages") + } + return &DynamicOrdererConfigValue{ + ConfigValue: cv, + name: key, + }, nil + default: + return nil, fmt.Errorf("ConfigGroup does not have a dynamic field: %s", name) + } +} + +type ConsensusTypeMetadataFactory interface { + NewMessage() proto.Message +} + +// ConsensusTypeMetadataMap should have consensus implementations register their metadata message factories
var ConsensusTypeMetadataMap = map[string]ConsensusTypeMetadataFactory{} + +type ConsensusType struct { + *orderer.ConsensusType +} + +func (ct *ConsensusType) Underlying() proto.Message { + return ct.ConsensusType +} + +func (ct *ConsensusType) VariablyOpaqueFields() []string { + return []string{"metadata"} +} + +func (ct *ConsensusType) VariablyOpaqueFieldProto(name string) (proto.Message, error) { + if name != "metadata" { + return nil, fmt.Errorf("not a valid opaque field: %s", name) + } + switch ct.Type { + case "etcdraft": + return &etcdraft.ConfigMetadata{}, nil + case "BFT": + return &smartbft.Options{}, nil + default: + return &emptypb.Empty{}, nil + } +} + +type DynamicOrdererOrgGroup struct { + *common.ConfigGroup +} + +func (dcg *DynamicOrdererOrgGroup) Underlying() proto.Message { + return dcg.ConfigGroup +} + +func (dcg *DynamicOrdererOrgGroup) DynamicMapFields() []string { + return []string{"groups", "values"} +} + +func (dcg *DynamicOrdererOrgGroup) DynamicMapFieldProto(name string, key string, base proto.Message) (proto.Message, error) { + switch name { + case "groups": + return nil,
fmt.Errorf("the orderer orgs do not support sub-groups") + case "values": + cv, ok := base.(*common.ConfigValue) + if !ok { + return nil, fmt.Errorf("ConfigGroup values can only contain ConfigValue messages") + } + + return &DynamicOrdererOrgConfigValue{ + ConfigValue: cv, + name: key, + }, nil + default: + return nil, fmt.Errorf("not a dynamic orderer map field: %s", name) + } +} + +type DynamicOrdererConfigValue struct { + *common.ConfigValue + name string +} + +func (docv *DynamicOrdererConfigValue) Underlying() proto.Message { + return docv.ConfigValue +} + +func (docv *DynamicOrdererConfigValue) StaticallyOpaqueFields() []string { + return []string{"value"} +} + +func (docv *DynamicOrdererConfigValue) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + if name != "value" { + return nil, fmt.Errorf("not a marshaled field: %s", name) + } + switch docv.name { + case "ConsensusType": + return &orderer.ConsensusType{}, nil + case "BatchSize": + return &orderer.BatchSize{}, nil + case "BatchTimeout": + return &orderer.BatchTimeout{}, nil + case "KafkaBrokers": + return &orderer.KafkaBrokers{}, nil + case "ChannelRestrictions": + return &orderer.ChannelRestrictions{}, nil + case "Capabilities": + return &common.Capabilities{}, nil + case "Orderers": + return &common.Orderers{}, nil + default: + return nil, fmt.Errorf("unknown Orderer ConfigValue name: %s", docv.name) + } +} + +type DynamicOrdererOrgConfigValue struct { + *common.ConfigValue + name string +} + +func (doocv *DynamicOrdererOrgConfigValue) Underlying() proto.Message { + return doocv.ConfigValue +} + +func (doocv *DynamicOrdererOrgConfigValue) StaticallyOpaqueFields() []string { + return []string{"value"} +} + +func (doocv *DynamicOrdererOrgConfigValue) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + if name != "value" { + return nil, fmt.Errorf("not a marshaled field: %s", name) + } + switch doocv.name { + case "MSP": + return &msp.MSPConfig{}, nil + case "Endpoints": + return &common.OrdererAddresses{}, nil + default: + return nil, fmt.Errorf("unknown Orderer Org ConfigValue name: %s", doocv.name) + } +} diff --git a/v2/protolator/protoext/ordererext/ordererext_test.go b/v2/protolator/protoext/ordererext/ordererext_test.go new file mode 100644 index 0000000..2e27971 --- /dev/null +++ b/v2/protolator/protoext/ordererext/ordererext_test.go @@ -0,0 +1,26 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package ordererext_test + +import ( + "github.com/hyperledger/fabric-config/v2/protolator" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/ordererext" +) + +// ensure structs implement expected interfaces +var ( + _ protolator.DynamicMapFieldProto = &ordererext.DynamicOrdererGroup{} + _ protolator.DecoratedProto = &ordererext.DynamicOrdererGroup{} + _ protolator.VariablyOpaqueFieldProto = &ordererext.ConsensusType{} + _ protolator.DecoratedProto = &ordererext.ConsensusType{} + _ protolator.DynamicMapFieldProto = &ordererext.DynamicOrdererOrgGroup{} + _ protolator.DecoratedProto = &ordererext.DynamicOrdererOrgGroup{} + _ protolator.StaticallyOpaqueFieldProto = &ordererext.DynamicOrdererConfigValue{} + _ protolator.DecoratedProto = &ordererext.DynamicOrdererConfigValue{} + _ protolator.StaticallyOpaqueFieldProto = &ordererext.DynamicOrdererOrgConfigValue{} + _ protolator.DecoratedProto = &ordererext.DynamicOrdererOrgConfigValue{} +) diff --git a/v2/protolator/protoext/peerext/configuration.go b/v2/protolator/protoext/peerext/configuration.go new file mode 100644 index 0000000..a7b09e1 --- /dev/null +++ b/v2/protolator/protoext/peerext/configuration.go @@ -0,0 +1,138 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package peerext + +import ( + "fmt" + + "github.com/hyperledger/fabric-protos-go-apiv2/common" + "github.com/hyperledger/fabric-protos-go-apiv2/msp" + "github.com/hyperledger/fabric-protos-go-apiv2/peer" + "google.golang.org/protobuf/proto" +) + +type DynamicApplicationGroup struct { + *common.ConfigGroup +} + +func (dag *DynamicApplicationGroup) Underlying() proto.Message { + return dag.ConfigGroup +} + +func (dag *DynamicApplicationGroup) DynamicMapFields() []string { + return []string{"groups", "values"} +} + +func (dag *DynamicApplicationGroup) DynamicMapFieldProto(name string, key string, base proto.Message) (proto.Message, error) { + switch name { + case "groups": + cg, ok := base.(*common.ConfigGroup) + if !ok { + return nil, fmt.Errorf("ConfigGroup groups can only contain ConfigGroup messages") + } + + return &DynamicApplicationOrgGroup{ + ConfigGroup: cg, + }, nil + case "values": + cv, ok := base.(*common.ConfigValue) + if !ok { + return nil, fmt.Errorf("ConfigGroup values can only contain ConfigValue messages") + } + return &DynamicApplicationConfigValue{ + ConfigValue: cv, + name: key, + }, nil + default: + return nil, fmt.Errorf("ConfigGroup does not have a dynamic field: %s", name) + } +} + +type DynamicApplicationOrgGroup struct { + *common.ConfigGroup +} + +func (dag *DynamicApplicationOrgGroup) Underlying() proto.Message { + return dag.ConfigGroup +} + +func (dag *DynamicApplicationOrgGroup) DynamicMapFields() []string { + return []string{"groups", "values"} +} + +func (dag *DynamicApplicationOrgGroup) DynamicMapFieldProto(name string, key string, base proto.Message) (proto.Message, error) { + switch name { + case "groups": + return nil, fmt.Errorf("The application orgs do not support sub-groups") + case "values": + cv, ok := base.(*common.ConfigValue) + if !ok { + return nil, fmt.Errorf("ConfigGroup values can only contain ConfigValue messages") + } + + return &DynamicApplicationOrgConfigValue{ + ConfigValue: cv, + name: key, + }, nil + default: + return nil, fmt.Errorf("Not a dynamic application map field: %s", name) + } +} + +type DynamicApplicationConfigValue struct { + *common.ConfigValue + name string +} + +func (ccv *DynamicApplicationConfigValue) 
Underlying() proto.Message { + return ccv.ConfigValue +} + +func (ccv *DynamicApplicationConfigValue) StaticallyOpaqueFields() []string { + return []string{"value"} +} + +func (ccv *DynamicApplicationConfigValue) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + if name != "value" { + return nil, fmt.Errorf("Not a marshaled field: %s", name) + } + switch ccv.name { + case "Capabilities": + return &common.Capabilities{}, nil + case "ACLs": + return &peer.ACLs{}, nil + default: + return nil, fmt.Errorf("Unknown Application ConfigValue name: %s", ccv.name) + } +} + +type DynamicApplicationOrgConfigValue struct { + *common.ConfigValue + name string +} + +func (daocv *DynamicApplicationOrgConfigValue) Underlying() proto.Message { + return daocv.ConfigValue +} + +func (daocv *DynamicApplicationOrgConfigValue) StaticallyOpaqueFields() []string { + return []string{"value"} +} + +func (daocv *DynamicApplicationOrgConfigValue) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + if name != "value" { + return nil, fmt.Errorf("Not a marshaled field: %s", name) + } + switch daocv.name { + case "MSP": + return &msp.MSPConfig{}, nil + case "AnchorPeers": + return &peer.AnchorPeers{}, nil + default: + return nil, fmt.Errorf("Unknown Application Org ConfigValue name: %s", daocv.name) + } +} diff --git a/v2/protolator/protoext/peerext/peerext_test.go b/v2/protolator/protoext/peerext/peerext_test.go new file mode 100644 index 0000000..00a02cc --- /dev/null +++ b/v2/protolator/protoext/peerext/peerext_test.go @@ -0,0 +1,39 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package peerext_test + +import ( + "github.com/hyperledger/fabric-config/v2/protolator" + "github.com/hyperledger/fabric-config/v2/protolator/protoext/peerext" +) + +// ensure structs implement expected interfaces +var ( + _ protolator.DynamicMapFieldProto = &peerext.DynamicApplicationGroup{} + _ protolator.DecoratedProto = &peerext.DynamicApplicationGroup{} + _ protolator.DynamicMapFieldProto = &peerext.DynamicApplicationOrgGroup{} + _ protolator.DecoratedProto = &peerext.DynamicApplicationOrgGroup{} + _ protolator.StaticallyOpaqueFieldProto = &peerext.DynamicApplicationConfigValue{} + _ protolator.DecoratedProto = &peerext.DynamicApplicationConfigValue{} + _ protolator.StaticallyOpaqueFieldProto = &peerext.DynamicApplicationOrgConfigValue{} + _ protolator.DecoratedProto = &peerext.DynamicApplicationOrgConfigValue{} + + _ protolator.StaticallyOpaqueFieldProto = &peerext.ChaincodeProposalPayload{} + _ protolator.DecoratedProto = &peerext.ChaincodeProposalPayload{} + _ protolator.StaticallyOpaqueFieldProto = &peerext.ChaincodeAction{} + _ protolator.DecoratedProto = &peerext.ChaincodeAction{} + + _ protolator.StaticallyOpaqueFieldProto = &peerext.ProposalResponsePayload{} + _ protolator.DecoratedProto = &peerext.ProposalResponsePayload{} + + _ protolator.StaticallyOpaqueFieldProto = &peerext.TransactionAction{} + _ protolator.DecoratedProto = &peerext.TransactionAction{} + _ protolator.StaticallyOpaqueFieldProto = &peerext.ChaincodeActionPayload{} + _ protolator.DecoratedProto = &peerext.ChaincodeActionPayload{} + _ protolator.StaticallyOpaqueFieldProto = &peerext.ChaincodeEndorsedAction{} + _ protolator.DecoratedProto = &peerext.ChaincodeEndorsedAction{} +) diff --git a/v2/protolator/protoext/peerext/proposal.go b/v2/protolator/protoext/peerext/proposal.go new file mode 100644 index 0000000..e9ca481 --- /dev/null +++ b/v2/protolator/protoext/peerext/proposal.go @@ 
-0,0 +1,57 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package peerext + +import ( + "fmt" + + "github.com/hyperledger/fabric-protos-go-apiv2/ledger/rwset" + "github.com/hyperledger/fabric-protos-go-apiv2/peer" + "google.golang.org/protobuf/proto" +) + +type ChaincodeProposalPayload struct { + *peer.ChaincodeProposalPayload +} + +func (cpp *ChaincodeProposalPayload) Underlying() proto.Message { + return cpp.ChaincodeProposalPayload +} + +func (cpp *ChaincodeProposalPayload) StaticallyOpaqueFields() []string { + return []string{"input"} +} + +func (cpp *ChaincodeProposalPayload) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + if name != cpp.StaticallyOpaqueFields()[0] { + return nil, fmt.Errorf("not a marshaled field: %s", name) + } + return &peer.ChaincodeInvocationSpec{}, nil +} + +type ChaincodeAction struct { + *peer.ChaincodeAction +} + +func (ca *ChaincodeAction) Underlying() proto.Message { + return ca.ChaincodeAction +} + +func (ca *ChaincodeAction) StaticallyOpaqueFields() []string { + return []string{"results", "events"} +} + +func (ca *ChaincodeAction) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + switch name { + case "results": + return &rwset.TxReadWriteSet{}, nil + case "events": + return &peer.ChaincodeEvent{}, nil + default: + return nil, fmt.Errorf("not a marshaled field: %s", name) + } +} diff --git a/v2/protolator/protoext/peerext/proposal_response.go b/v2/protolator/protoext/peerext/proposal_response.go new file mode 100644 index 0000000..0eb859a --- /dev/null +++ b/v2/protolator/protoext/peerext/proposal_response.go @@ -0,0 +1,33 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package peerext + +import ( + "fmt" + + "github.com/hyperledger/fabric-protos-go-apiv2/peer" + "google.golang.org/protobuf/proto" +) + +type ProposalResponsePayload struct { + *peer.ProposalResponsePayload +} + +func (ppr *ProposalResponsePayload) Underlying() proto.Message { + return ppr.ProposalResponsePayload +} + +func (ppr *ProposalResponsePayload) StaticallyOpaqueFields() []string { + return []string{"extension"} +} + +func (ppr *ProposalResponsePayload) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + if name != ppr.StaticallyOpaqueFields()[0] { + return nil, fmt.Errorf("not a marshaled field: %s", name) + } + return &peer.ChaincodeAction{}, nil +} diff --git a/v2/protolator/protoext/peerext/transaction.go b/v2/protolator/protoext/peerext/transaction.go new file mode 100644 index 0000000..3870811 --- /dev/null +++ b/v2/protolator/protoext/peerext/transaction.go @@ -0,0 +1,76 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package peerext + +import ( + "fmt" + + "github.com/hyperledger/fabric-protos-go-apiv2/common" + "github.com/hyperledger/fabric-protos-go-apiv2/peer" + "google.golang.org/protobuf/proto" +) + +type TransactionAction struct { // nothing was testing this + *peer.TransactionAction +} + +func (ta *TransactionAction) Underlying() proto.Message { + return ta.TransactionAction +} + +func (ta *TransactionAction) StaticallyOpaqueFields() []string { + return []string{"header", "payload"} +} + +func (ta *TransactionAction) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + switch name { + case ta.StaticallyOpaqueFields()[0]: + return &common.SignatureHeader{}, nil + case ta.StaticallyOpaqueFields()[1]: + return &peer.ChaincodeActionPayload{}, nil + default: + return nil, fmt.Errorf("not a marshaled field: %s", name) + } +} + +type ChaincodeActionPayload struct { + *peer.ChaincodeActionPayload +} + +func (cap *ChaincodeActionPayload) Underlying() proto.Message { + return cap.ChaincodeActionPayload +} + +func (cap *ChaincodeActionPayload) StaticallyOpaqueFields() []string { + return []string{"chaincode_proposal_payload"} +} + +func (cap *ChaincodeActionPayload) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + if name != cap.StaticallyOpaqueFields()[0] { + return nil, fmt.Errorf("not a marshaled field: %s", name) + } + return &peer.ChaincodeProposalPayload{}, nil +} + +type ChaincodeEndorsedAction struct { + *peer.ChaincodeEndorsedAction +} + +func (cae *ChaincodeEndorsedAction) Underlying() proto.Message { + return cae.ChaincodeEndorsedAction +} + +func (cae *ChaincodeEndorsedAction) StaticallyOpaqueFields() []string { + return []string{"proposal_response_payload"} +} + +func (cae *ChaincodeEndorsedAction) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + if name != cae.StaticallyOpaqueFields()[0] { + return nil, fmt.Errorf("not a marshaled field: %s", name) + } + return &peer.ProposalResponsePayload{}, nil +} diff --git a/v2/protolator/statically_opaque.go b/v2/protolator/statically_opaque.go new file mode 100644 index 0000000..b5b2e5c --- /dev/null +++ b/v2/protolator/statically_opaque.go @@ -0,0 +1,152 @@ +/* +Copyright IBM Corp. 2017 All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package protolator + +import ( + "reflect" + + "google.golang.org/protobuf/proto" +) + +func opaqueFrom(opaqueType func() (proto.Message, error), value interface{}, destType reflect.Type) (reflect.Value, error) { + tree := value.(map[string]interface{}) // Safe, already checked + nMsg, err := opaqueType() + if err != nil { + return reflect.Value{}, err + } + if err := recursivelyPopulateMessageFromTree(tree, nMsg); err != nil { + return reflect.Value{}, err + } + mMsg, err := MostlyDeterministicMarshal(nMsg) + if err != nil { + return reflect.Value{}, err + } + return reflect.ValueOf(mMsg), nil +} + +func opaqueTo(opaqueType func() (proto.Message, error), value reflect.Value) (interface{}, error) { + nMsg, err := opaqueType() + if err != nil { + return nil, err + } + mMsg := value.Interface().([]byte) // Safe, already checked + if err = proto.Unmarshal(mMsg, nMsg); err != nil { + return nil, err + } + return recursivelyCreateTreeFromMessage(nMsg) +} + +type staticallyOpaqueFieldFactory struct{} + +func (soff staticallyOpaqueFieldFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + opaqueProto, ok := msg.(StaticallyOpaqueFieldProto) + if !ok { + return false + } + + return stringInSlice(fieldName, opaqueProto.StaticallyOpaqueFields()) +} + +func (soff staticallyOpaqueFieldFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + opaqueProto := msg.(StaticallyOpaqueFieldProto) // Type checked in Handles + + return &plainField{ + baseField: baseField{ + msg: msg, + name: fieldName, + fType: mapStringInterfaceType, + vType: bytesType, + value: fieldValue, + }, + populateFrom: func(v interface{}, dT reflect.Type) (reflect.Value, error) { + return opaqueFrom(func() (proto.Message, error) { return opaqueProto.StaticallyOpaqueFieldProto(fieldName) }, v, dT) + }, + populateTo: func(v reflect.Value) (interface{}, error) { + return opaqueTo(func() (proto.Message, error) { return opaqueProto.StaticallyOpaqueFieldProto(fieldName) }, v) + }, + }, nil +} + +type staticallyOpaqueMapFieldFactory struct{} + +func (soff staticallyOpaqueMapFieldFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + opaqueProto, ok := msg.(StaticallyOpaqueMapFieldProto) + if !ok { + return false + } + + return stringInSlice(fieldName, opaqueProto.StaticallyOpaqueMapFields()) +} + +func (soff staticallyOpaqueMapFieldFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + opaqueProto := msg.(StaticallyOpaqueMapFieldProto) // Type checked in Handles + + return &mapField{ + baseField: baseField{ + msg: msg, + name: fieldName, + fType: mapStringInterfaceType, + vType: fieldType, + value: fieldValue, + }, + populateFrom: func(key string, v interface{}, dT reflect.Type) (reflect.Value, error) { + return opaqueFrom(func() (proto.Message, error) { + return opaqueProto.StaticallyOpaqueMapFieldProto(fieldName, key) + }, v, dT) + }, + populateTo: func(key string, v reflect.Value) (interface{}, error) { + return opaqueTo(func() (proto.Message, error) { + return opaqueProto.StaticallyOpaqueMapFieldProto(fieldName, key) + }, v) + }, + }, nil +} + +type staticallyOpaqueSliceFieldFactory struct{} + +func (soff staticallyOpaqueSliceFieldFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + opaqueProto, ok := 
msg.(StaticallyOpaqueSliceFieldProto) + if !ok { + return false + } + + return stringInSlice(fieldName, opaqueProto.StaticallyOpaqueSliceFields()) +} + +func (soff staticallyOpaqueSliceFieldFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + opaqueProto := msg.(StaticallyOpaqueSliceFieldProto) // Type checked in Handles + + return &sliceField{ + baseField: baseField{ + msg: msg, + name: fieldName, + fType: mapStringInterfaceType, + vType: fieldType, + value: fieldValue, + }, + populateFrom: func(index int, v interface{}, dT reflect.Type) (reflect.Value, error) { + return opaqueFrom(func() (proto.Message, error) { + return opaqueProto.StaticallyOpaqueSliceFieldProto(fieldName, index) + }, v, dT) + }, + populateTo: func(index int, v reflect.Value) (interface{}, error) { + return opaqueTo(func() (proto.Message, error) { + return opaqueProto.StaticallyOpaqueSliceFieldProto(fieldName, index) + }, v) + }, + }, nil +} diff --git a/v2/protolator/statically_opaque_test.go b/v2/protolator/statically_opaque_test.go new file mode 100644 index 0000000..92d1f5f --- /dev/null +++ b/v2/protolator/statically_opaque_test.go @@ -0,0 +1,181 @@ +/* +Copyright IBM Corp. 2017 All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package protolator + +import ( + "bytes" + "testing" + + "github.com/hyperledger/fabric-config/v2/protolator/testprotos" + . "github.com/onsi/gomega" + "google.golang.org/protobuf/proto" +) + +func extractSimpleMsgPlainField(source []byte) string { + result := &testprotos.SimpleMsg{} + err := proto.Unmarshal(source, result) + if err != nil { + panic(err) + } + return result.PlainField +} + +func TestPlainStaticallyOpaqueMsg(t *testing.T) { + gt := NewGomegaWithT(t) + + fromPrefix := "from" + toPrefix := "to" + tppff := &testProtoPlainFieldFactory{ + fromPrefix: fromPrefix, + toPrefix: toPrefix, + } + + fieldFactories = []protoFieldFactory{tppff} + + pfValue := "foo" + startMsg := &testprotos.StaticallyOpaqueMsg{ + PlainOpaqueField: protoMarshalOrPanic(&testprotos.SimpleMsg{ + PlainField: pfValue, + }), + } + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + newMsg := &testprotos.StaticallyOpaqueMsg{} + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractSimpleMsgPlainField(newMsg.PlainOpaqueField)).NotTo(Equal(fromPrefix + toPrefix + extractSimpleMsgPlainField(startMsg.PlainOpaqueField))) + + fieldFactories = []protoFieldFactory{tppff, staticallyOpaqueFieldFactory{}} + + buffer.Reset() + err = DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractSimpleMsgPlainField(newMsg.PlainOpaqueField)).To(Equal(fromPrefix + toPrefix + extractSimpleMsgPlainField(startMsg.PlainOpaqueField))) +} + +func TestMapStaticallyOpaqueMsg(t *testing.T) { + gt := NewGomegaWithT(t) + + fromPrefix := "from" + toPrefix := "to" + tppff := &testProtoPlainFieldFactory{ + fromPrefix: fromPrefix, + toPrefix: toPrefix, + } + + fieldFactories = []protoFieldFactory{tppff} + + pfValue := "foo" + mapKey := "bar" + startMsg := &testprotos.StaticallyOpaqueMsg{ + MapOpaqueField: map[string][]byte{ + mapKey: protoMarshalOrPanic(&testprotos.SimpleMsg{ + PlainField: pfValue, + }), + }, + } + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, startMsg) + 
gt.Expect(err).NotTo(HaveOccurred()) + newMsg := &testprotos.StaticallyOpaqueMsg{} + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractSimpleMsgPlainField(newMsg.MapOpaqueField[mapKey])).NotTo(Equal(fromPrefix + toPrefix + extractSimpleMsgPlainField(startMsg.MapOpaqueField[mapKey]))) + + fieldFactories = []protoFieldFactory{tppff, staticallyOpaqueMapFieldFactory{}} + + buffer.Reset() + err = DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractSimpleMsgPlainField(newMsg.MapOpaqueField[mapKey])).To(Equal(fromPrefix + toPrefix + extractSimpleMsgPlainField(startMsg.MapOpaqueField[mapKey]))) +} + +func TestSliceStaticallyOpaqueMsg(t *testing.T) { + gt := NewGomegaWithT(t) + + fromPrefix := "from" + toPrefix := "to" + tppff := &testProtoPlainFieldFactory{ + fromPrefix: fromPrefix, + toPrefix: toPrefix, + } + + fieldFactories = []protoFieldFactory{tppff} + + pfValue := "foo" + startMsg := &testprotos.StaticallyOpaqueMsg{ + SliceOpaqueField: [][]byte{ + protoMarshalOrPanic(&testprotos.SimpleMsg{ + PlainField: pfValue, + }), + }, + } + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + newMsg := &testprotos.StaticallyOpaqueMsg{} + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractSimpleMsgPlainField(newMsg.SliceOpaqueField[0])).NotTo(Equal(fromPrefix + toPrefix + extractSimpleMsgPlainField(startMsg.SliceOpaqueField[0]))) + + fieldFactories = []protoFieldFactory{tppff, staticallyOpaqueSliceFieldFactory{}} + + buffer.Reset() + err = DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractSimpleMsgPlainField(newMsg.SliceOpaqueField[0])).To(Equal(fromPrefix + toPrefix + extractSimpleMsgPlainField(startMsg.SliceOpaqueField[0]))) +} + +func TestIgnoredNilFields(t *testing.T) { + gt := NewGomegaWithT(t) + + _ = StaticallyOpaqueFieldProto(&testprotos.UnmarshalableDeepFields{}) + _ = StaticallyOpaqueMapFieldProto(&testprotos.UnmarshalableDeepFields{}) + _ = StaticallyOpaqueSliceFieldProto(&testprotos.UnmarshalableDeepFields{}) + + fieldFactories = []protoFieldFactory{ + staticallyOpaqueFieldFactory{}, + staticallyOpaqueMapFieldFactory{}, + staticallyOpaqueSliceFieldFactory{}, + } + + err := DeepMarshalJSON(&bytes.Buffer{}, &testprotos.UnmarshalableDeepFields{ + PlainOpaqueField: []byte("fake"), + }) + gt.Expect(err).To(MatchError("*testprotos.UnmarshalableDeepFields: error in PopulateTo for field plain_opaque_field for message *testprotos.UnmarshalableDeepFields: intentional error")) + err = DeepMarshalJSON(&bytes.Buffer{}, &testprotos.UnmarshalableDeepFields{ + MapOpaqueField: map[string][]byte{"foo": []byte("bar")}, + }) + gt.Expect(err).To(MatchError("*testprotos.UnmarshalableDeepFields: error in PopulateTo for map field map_opaque_field and key foo for message *testprotos.UnmarshalableDeepFields: intentional error")) + err = DeepMarshalJSON(&bytes.Buffer{}, &testprotos.UnmarshalableDeepFields{ + SliceOpaqueField: [][]byte{[]byte("bar")}, + }) + gt.Expect(err).To(MatchError("*testprotos.UnmarshalableDeepFields: error in PopulateTo for slice field slice_opaque_field at index 0 for message 
*testprotos.UnmarshalableDeepFields: intentional error")) + err = DeepMarshalJSON(&bytes.Buffer{}, &testprotos.UnmarshalableDeepFields{}) + gt.Expect(err).NotTo(HaveOccurred()) +} + +// protoMarshalOrPanic serializes a protobuf message and panics if this +// operation fails +func protoMarshalOrPanic(pb proto.Message) []byte { + data, err := proto.Marshal(pb) + if err != nil { + panic(err) + } + + return data +} diff --git a/v2/protolator/testprotos/sample.go b/v2/protolator/testprotos/sample.go new file mode 100644 index 0000000..8c208ec --- /dev/null +++ b/v2/protolator/testprotos/sample.go @@ -0,0 +1,203 @@ +/* +Copyright IBM Corp. 2017 All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testprotos + +import ( + "fmt" + + "google.golang.org/protobuf/proto" +) + +func (som *StaticallyOpaqueMsg) StaticallyOpaqueFields() []string { + return []string{"plain_opaque_field"} +} + +func (som *StaticallyOpaqueMsg) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + if name != som.StaticallyOpaqueFields()[0] { + return nil, fmt.Errorf("not a statically opaque field: %s", name) + } + + return &SimpleMsg{}, nil +} + +func (som *StaticallyOpaqueMsg) StaticallyOpaqueMapFields() []string { + return []string{"map_opaque_field"} +} + +func (som *StaticallyOpaqueMsg) StaticallyOpaqueMapFieldProto(name string, key string) (proto.Message, error) { + if name != som.StaticallyOpaqueMapFields()[0] { + return nil, fmt.Errorf("not a statically opaque field: %s", name) + } + + return &SimpleMsg{}, nil +} + +func (som *StaticallyOpaqueMsg) StaticallyOpaqueSliceFields() []string { + return []string{"slice_opaque_field"} +} + +func (som *StaticallyOpaqueMsg) StaticallyOpaqueSliceFieldProto(name string, index int) (proto.Message, error) { + if name != som.StaticallyOpaqueSliceFields()[0] { + return nil, fmt.Errorf("not a statically opaque field: %s", name) + } + + return &SimpleMsg{}, nil +} + +func typeSwitch(typeName string) (proto.Message, error) { + switch typeName { + case "SimpleMsg": + return &SimpleMsg{}, nil + case "NestedMsg": + return &NestedMsg{}, nil + case "StaticallyOpaqueMsg": + return &StaticallyOpaqueMsg{}, nil + case "VariablyOpaqueMsg": + return &VariablyOpaqueMsg{}, nil + default: + return nil, fmt.Errorf("unknown message type: %s", typeName) + } +} + +func (vom *VariablyOpaqueMsg) VariablyOpaqueFields() []string { + return []string{"plain_opaque_field"} +} + +func (vom *VariablyOpaqueMsg) VariablyOpaqueFieldProto(name string) (proto.Message, error) { + if name != vom.VariablyOpaqueFields()[0] { + return nil, fmt.Errorf("not a statically opaque field: %s", name) + } + + return typeSwitch(vom.OpaqueType) +} + +func (vom *VariablyOpaqueMsg) VariablyOpaqueMapFields() []string { + return []string{"map_opaque_field"} +} + +func (vom *VariablyOpaqueMsg) VariablyOpaqueMapFieldProto(name string, key string) (proto.Message, error) { + if name != vom.VariablyOpaqueMapFields()[0] { + return nil, fmt.Errorf("not a statically opaque field: %s", name) + } + + return 
typeSwitch(vom.OpaqueType) +} + +func (vom *VariablyOpaqueMsg) VariablyOpaqueSliceFields() []string { + return []string{"slice_opaque_field"} +} + +func (vom *VariablyOpaqueMsg) VariablyOpaqueSliceFieldProto(name string, index int) (proto.Message, error) { + if name != vom.VariablyOpaqueSliceFields()[0] { + return nil, fmt.Errorf("not a statically opaque field: %s", name) + } + + return typeSwitch(vom.OpaqueType) +} + +func (cm *ContextlessMsg) VariablyOpaqueFields() []string { + return []string{"opaque_field"} +} + +type DynamicMessageWrapper struct { + *ContextlessMsg + typeName string +} + +func (dmw *DynamicMessageWrapper) VariablyOpaqueFieldProto(name string) (proto.Message, error) { + if name != dmw.ContextlessMsg.VariablyOpaqueFields()[0] { + return nil, fmt.Errorf("not a statically opaque field: %s", name) + } + + return typeSwitch(dmw.typeName) +} + +func (dmw *DynamicMessageWrapper) Underlying() proto.Message { + return dmw.ContextlessMsg +} + +func wrapContextless(underlying proto.Message, typeName string) (*DynamicMessageWrapper, error) { + cm, ok := underlying.(*ContextlessMsg) + if !ok { + return nil, fmt.Errorf("unknown dynamic message to wrap (%T) requires *ContextlessMsg", underlying) + } + + return &DynamicMessageWrapper{ + ContextlessMsg: cm, + typeName: typeName, + }, nil +} + +func (vom *DynamicMsg) DynamicFields() []string { + return []string{"plain_dynamic_field"} +} + +func (vom *DynamicMsg) DynamicFieldProto(name string, underlying proto.Message) (proto.Message, error) { + if name != vom.DynamicFields()[0] { + return nil, fmt.Errorf("not a dynamic field: %s", name) + } + + return wrapContextless(underlying, vom.DynamicType) +} + +func (vom *DynamicMsg) DynamicMapFields() []string { + return []string{"map_dynamic_field"} +} + +func (vom *DynamicMsg) DynamicMapFieldProto(name string, key string, underlying proto.Message) (proto.Message, error) { + if name != vom.DynamicMapFields()[0] { + return nil, fmt.Errorf("not a dynamic map field: %s", name) + } + + return wrapContextless(underlying, vom.DynamicType) +} + +func (vom *DynamicMsg) DynamicSliceFields() []string { + return []string{"slice_dynamic_field"} +} + +func (vom *DynamicMsg) DynamicSliceFieldProto(name string, index int, underlying proto.Message) (proto.Message, error) { + if name != vom.DynamicSliceFields()[0] { + return nil, fmt.Errorf("not a dynamic slice field: %s", name) + } + + return wrapContextless(underlying, vom.DynamicType) +} + +func (udf *UnmarshalableDeepFields) StaticallyOpaqueFields() []string { + return []string{"plain_opaque_field"} +} + +func (udf *UnmarshalableDeepFields) StaticallyOpaqueFieldProto(name string) (proto.Message, error) { + return nil, fmt.Errorf("intentional error") +} + +func (udf *UnmarshalableDeepFields) StaticallyOpaqueMapFields() []string { + return []string{"map_opaque_field"} +} + +func (udf *UnmarshalableDeepFields) StaticallyOpaqueMapFieldProto(name, key string) (proto.Message, error) { + return nil, fmt.Errorf("intentional error") +} + +func (udf *UnmarshalableDeepFields) StaticallyOpaqueSliceFields() []string { + return []string{"slice_opaque_field"} +} + +func (udf *UnmarshalableDeepFields) StaticallyOpaqueSliceFieldProto(name string, index int) (proto.Message, error) { + return nil, fmt.Errorf("intentional error") +} diff --git a/v2/protolator/testprotos/sample.pb.go b/v2/protolator/testprotos/sample.pb.go new file mode 100644 index 0000000..7ae8044 --- /dev/null +++ b/v2/protolator/testprotos/sample.pb.go @@ -0,0 +1,779 @@ +// +//Copyright IBM Corp. 
2017 All Rights Reserved. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.3 +// source: sample.proto + +package testprotos + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// SimpleMsg is designed to test that all three types of message fields, plain, map, +// and slice are handled by the protolator tool +type SimpleMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PlainField string `protobuf:"bytes,1,opt,name=plain_field,json=plainField,proto3" json:"plain_field,omitempty"` + MapField map[string]string `protobuf:"bytes,2,rep,name=map_field,json=mapField,proto3" json:"map_field,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SliceField []string `protobuf:"bytes,3,rep,name=slice_field,json=sliceField,proto3" json:"slice_field,omitempty"` +} + +func (x *SimpleMsg) Reset() { + *x = SimpleMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_sample_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SimpleMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SimpleMsg) ProtoMessage() {} + +func (x *SimpleMsg) ProtoReflect() protoreflect.Message { + mi := &file_sample_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SimpleMsg.ProtoReflect.Descriptor instead. 
+func (*SimpleMsg) Descriptor() ([]byte, []int) { + return file_sample_proto_rawDescGZIP(), []int{0} +} + +func (x *SimpleMsg) GetPlainField() string { + if x != nil { + return x.PlainField + } + return "" +} + +func (x *SimpleMsg) GetMapField() map[string]string { + if x != nil { + return x.MapField + } + return nil +} + +func (x *SimpleMsg) GetSliceField() []string { + if x != nil { + return x.SliceField + } + return nil +} + +// NestedMsg is designed to test the nested message component +type NestedMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PlainNestedField *SimpleMsg `protobuf:"bytes,1,opt,name=plain_nested_field,json=plainNestedField,proto3" json:"plain_nested_field,omitempty"` + MapNestedField map[string]*SimpleMsg `protobuf:"bytes,2,rep,name=map_nested_field,json=mapNestedField,proto3" json:"map_nested_field,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SliceNestedField []*SimpleMsg `protobuf:"bytes,3,rep,name=slice_nested_field,json=sliceNestedField,proto3" json:"slice_nested_field,omitempty"` +} + +func (x *NestedMsg) Reset() { + *x = NestedMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_sample_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NestedMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NestedMsg) ProtoMessage() {} + +func (x *NestedMsg) ProtoReflect() protoreflect.Message { + mi := &file_sample_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NestedMsg.ProtoReflect.Descriptor instead. 
+func (*NestedMsg) Descriptor() ([]byte, []int) { + return file_sample_proto_rawDescGZIP(), []int{1} +} + +func (x *NestedMsg) GetPlainNestedField() *SimpleMsg { + if x != nil { + return x.PlainNestedField + } + return nil +} + +func (x *NestedMsg) GetMapNestedField() map[string]*SimpleMsg { + if x != nil { + return x.MapNestedField + } + return nil +} + +func (x *NestedMsg) GetSliceNestedField() []*SimpleMsg { + if x != nil { + return x.SliceNestedField + } + return nil +} + +// StaticallyOpaqueMsg is designed to test the statically opaque message component +// All fields are statically marshaled to the NestedMsg type +type StaticallyOpaqueMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PlainOpaqueField []byte `protobuf:"bytes,1,opt,name=plain_opaque_field,json=plainOpaqueField,proto3" json:"plain_opaque_field,omitempty"` + MapOpaqueField map[string][]byte `protobuf:"bytes,2,rep,name=map_opaque_field,json=mapOpaqueField,proto3" json:"map_opaque_field,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SliceOpaqueField [][]byte `protobuf:"bytes,3,rep,name=slice_opaque_field,json=sliceOpaqueField,proto3" json:"slice_opaque_field,omitempty"` +} + +func (x *StaticallyOpaqueMsg) Reset() { + *x = StaticallyOpaqueMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_sample_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StaticallyOpaqueMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StaticallyOpaqueMsg) ProtoMessage() {} + +func (x *StaticallyOpaqueMsg) ProtoReflect() protoreflect.Message { + mi := &file_sample_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StaticallyOpaqueMsg.ProtoReflect.Descriptor instead. 
+func (*StaticallyOpaqueMsg) Descriptor() ([]byte, []int) { + return file_sample_proto_rawDescGZIP(), []int{2} +} + +func (x *StaticallyOpaqueMsg) GetPlainOpaqueField() []byte { + if x != nil { + return x.PlainOpaqueField + } + return nil +} + +func (x *StaticallyOpaqueMsg) GetMapOpaqueField() map[string][]byte { + if x != nil { + return x.MapOpaqueField + } + return nil +} + +func (x *StaticallyOpaqueMsg) GetSliceOpaqueField() [][]byte { + if x != nil { + return x.SliceOpaqueField + } + return nil +} + +// VariablyOpaqueMsg is designed to test the staticaly opaque message component +// The opaque type is determined by opaque_type +type VariablyOpaqueMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OpaqueType string `protobuf:"bytes,1,opt,name=opaque_type,json=opaqueType,proto3" json:"opaque_type,omitempty"` + PlainOpaqueField []byte `protobuf:"bytes,2,opt,name=plain_opaque_field,json=plainOpaqueField,proto3" json:"plain_opaque_field,omitempty"` + MapOpaqueField map[string][]byte `protobuf:"bytes,3,rep,name=map_opaque_field,json=mapOpaqueField,proto3" json:"map_opaque_field,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SliceOpaqueField [][]byte `protobuf:"bytes,4,rep,name=slice_opaque_field,json=sliceOpaqueField,proto3" json:"slice_opaque_field,omitempty"` +} + +func (x *VariablyOpaqueMsg) Reset() { + *x = VariablyOpaqueMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_sample_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VariablyOpaqueMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VariablyOpaqueMsg) ProtoMessage() {} + +func (x *VariablyOpaqueMsg) ProtoReflect() protoreflect.Message { + mi := &file_sample_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VariablyOpaqueMsg.ProtoReflect.Descriptor instead. 
+func (*VariablyOpaqueMsg) Descriptor() ([]byte, []int) { + return file_sample_proto_rawDescGZIP(), []int{3} +} + +func (x *VariablyOpaqueMsg) GetOpaqueType() string { + if x != nil { + return x.OpaqueType + } + return "" +} + +func (x *VariablyOpaqueMsg) GetPlainOpaqueField() []byte { + if x != nil { + return x.PlainOpaqueField + } + return nil +} + +func (x *VariablyOpaqueMsg) GetMapOpaqueField() map[string][]byte { + if x != nil { + return x.MapOpaqueField + } + return nil +} + +func (x *VariablyOpaqueMsg) GetSliceOpaqueField() [][]byte { + if x != nil { + return x.SliceOpaqueField + } + return nil +} + +// DynamicMsg is designed to test the dynamic message component +// The dynamic wrapper applied to ContextlessMsg is determined by +// dynamic_type +type DynamicMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DynamicType string `protobuf:"bytes,1,opt,name=dynamic_type,json=dynamicType,proto3" json:"dynamic_type,omitempty"` + PlainDynamicField *ContextlessMsg `protobuf:"bytes,2,opt,name=plain_dynamic_field,json=plainDynamicField,proto3" json:"plain_dynamic_field,omitempty"` + MapDynamicField map[string]*ContextlessMsg `protobuf:"bytes,3,rep,name=map_dynamic_field,json=mapDynamicField,proto3" json:"map_dynamic_field,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SliceDynamicField []*ContextlessMsg `protobuf:"bytes,4,rep,name=slice_dynamic_field,json=sliceDynamicField,proto3" json:"slice_dynamic_field,omitempty"` +} + +func (x *DynamicMsg) Reset() { + *x = DynamicMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_sample_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicMsg) ProtoMessage() {} + +func (x *DynamicMsg) ProtoReflect() protoreflect.Message { + mi := &file_sample_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicMsg.ProtoReflect.Descriptor instead. 
+func (*DynamicMsg) Descriptor() ([]byte, []int) { + return file_sample_proto_rawDescGZIP(), []int{4} +} + +func (x *DynamicMsg) GetDynamicType() string { + if x != nil { + return x.DynamicType + } + return "" +} + +func (x *DynamicMsg) GetPlainDynamicField() *ContextlessMsg { + if x != nil { + return x.PlainDynamicField + } + return nil +} + +func (x *DynamicMsg) GetMapDynamicField() map[string]*ContextlessMsg { + if x != nil { + return x.MapDynamicField + } + return nil +} + +func (x *DynamicMsg) GetSliceDynamicField() []*ContextlessMsg { + if x != nil { + return x.SliceDynamicField + } + return nil +} + +// ContextlessMsg is designed to carry a message of completely arbitrary type +// Because there is no context for the type embedded in the message, the opaque +// type must be dynamically added at runtime +type ContextlessMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OpaqueField []byte `protobuf:"bytes,1,opt,name=opaque_field,json=opaqueField,proto3" json:"opaque_field,omitempty"` +} + +func (x *ContextlessMsg) Reset() { + *x = ContextlessMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_sample_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContextlessMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContextlessMsg) ProtoMessage() {} + +func (x *ContextlessMsg) ProtoReflect() protoreflect.Message { + mi := &file_sample_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContextlessMsg.ProtoReflect.Descriptor instead. +func (*ContextlessMsg) Descriptor() ([]byte, []int) { + return file_sample_proto_rawDescGZIP(), []int{5} +} + +func (x *ContextlessMsg) GetOpaqueField() []byte { + if x != nil { + return x.OpaqueField + } + return nil +} + +// UnmarshalableDeepFields contains fields which are defined to be opaque, but will +// return an error if they are asked to be deserialized. 
+type UnmarshalableDeepFields struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PlainOpaqueField []byte `protobuf:"bytes,1,opt,name=plain_opaque_field,json=plainOpaqueField,proto3" json:"plain_opaque_field,omitempty"` + MapOpaqueField map[string][]byte `protobuf:"bytes,2,rep,name=map_opaque_field,json=mapOpaqueField,proto3" json:"map_opaque_field,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SliceOpaqueField [][]byte `protobuf:"bytes,3,rep,name=slice_opaque_field,json=sliceOpaqueField,proto3" json:"slice_opaque_field,omitempty"` +} + +func (x *UnmarshalableDeepFields) Reset() { + *x = UnmarshalableDeepFields{} + if protoimpl.UnsafeEnabled { + mi := &file_sample_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnmarshalableDeepFields) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnmarshalableDeepFields) ProtoMessage() {} + +func (x *UnmarshalableDeepFields) ProtoReflect() protoreflect.Message { + mi := &file_sample_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnmarshalableDeepFields.ProtoReflect.Descriptor instead. +func (*UnmarshalableDeepFields) Descriptor() ([]byte, []int) { + return file_sample_proto_rawDescGZIP(), []int{6} +} + +func (x *UnmarshalableDeepFields) GetPlainOpaqueField() []byte { + if x != nil { + return x.PlainOpaqueField + } + return nil +} + +func (x *UnmarshalableDeepFields) GetMapOpaqueField() map[string][]byte { + if x != nil { + return x.MapOpaqueField + } + return nil +} + +func (x *UnmarshalableDeepFields) GetSliceOpaqueField() [][]byte { + if x != nil { + return x.SliceOpaqueField + } + return nil +} + +var File_sample_proto protoreflect.FileDescriptor + +var file_sample_proto_rawDesc = []byte{ + 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, + 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x09, 0x53, + 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x6c, 0x61, 0x69, + 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x40, 0x0a, 0x09, 0x6d, 0x61, 0x70, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, + 0x4d, 0x73, 0x67, 0x2e, 0x4d, 0x61, 0x70, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x6c, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0a, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x1a, 0x3b, 0x0a, 0x0d, + 0x4d, 0x61, 0x70, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc4, 0x02, 0x0a, 0x09, 0x4e, 0x65, + 0x73, 
0x74, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x12, 0x43, 0x0a, 0x12, 0x70, 0x6c, 0x61, 0x69, 0x6e, + 0x5f, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, + 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x73, 0x67, 0x52, 0x10, 0x70, 0x6c, 0x61, 0x69, + 0x6e, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x53, 0x0a, 0x10, + 0x6d, 0x61, 0x70, 0x5f, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x73, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x2e, 0x4d, 0x61, + 0x70, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0e, 0x6d, 0x61, 0x70, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x12, 0x43, 0x0a, 0x12, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, + 0x65, 0x4d, 0x73, 0x67, 0x52, 0x10, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x4e, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x1a, 0x58, 0x0a, 0x13, 0x4d, 0x61, 0x70, 0x4e, 0x65, 0x73, + 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x53, 0x69, 0x6d, 0x70, + 0x6c, 0x65, 0x4d, 0x73, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x93, 0x02, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x6c, 0x79, 0x4f, + 0x70, 0x61, 0x71, 0x75, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6c, 0x61, 0x69, + 0x6e, 0x5f, 0x6f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x4f, 0x70, 0x61, 0x71, 0x75, + 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x5d, 0x0a, 0x10, 0x6d, 0x61, 0x70, 0x5f, 0x6f, 0x70, + 0x61, 0x71, 0x75, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x33, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x6c, 0x79, 0x4f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x4d, 0x73, + 0x67, 0x2e, 0x4d, 0x61, 0x70, 0x4f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x6d, 0x61, 0x70, 0x4f, 0x70, 0x61, 0x71, 0x75, 0x65, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x5f, 0x6f, + 0x70, 0x61, 0x71, 0x75, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x10, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x1a, 0x41, 0x0a, 0x13, 0x4d, 0x61, 0x70, 0x4f, 0x70, 0x61, 0x71, 0x75, 0x65, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb0, 0x02, 0x0a, 0x11, 0x56, 0x61, 0x72, 0x69, 0x61, + 0x62, 0x6c, 0x79, 0x4f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x1f, 0x0a, 0x0b, + 0x6f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x5f, 0x6f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x5f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, + 0x4f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x5b, 0x0a, 0x10, 0x6d, + 0x61, 0x70, 0x5f, 0x6f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x73, 0x2e, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x79, 0x4f, 0x70, 0x61, 0x71, 0x75, + 0x65, 0x4d, 0x73, 0x67, 0x2e, 0x4d, 0x61, 0x70, 0x4f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x6d, 0x61, 0x70, 0x4f, 0x70, 0x61, + 0x71, 0x75, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x6c, 0x69, 0x63, + 0x65, 0x5f, 0x6f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x61, 0x71, 0x75, + 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x1a, 0x41, 0x0a, 0x13, 0x4d, 0x61, 0x70, 0x4f, 0x70, 0x61, + 0x71, 0x75, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x80, 0x03, 0x0a, 0x0a, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x73, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4a, 0x0a, 0x13, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x6c, 0x65, 0x73, + 0x73, 0x4d, 0x73, 0x67, 0x52, 0x11, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x57, 0x0a, 0x11, 0x6d, 0x61, 0x70, 0x5f, 0x64, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x73, 0x67, 0x2e, 0x4d, 0x61, 0x70, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0f, 0x6d, 0x61, 0x70, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x6c, 0x65, 0x73, 0x73, 0x4d, 0x73, 0x67, 0x52, 0x11, 0x73, 0x6c, 0x69, 0x63, 0x65, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x1a, 0x5e, 0x0a, 0x14, + 0x4d, 0x61, 0x70, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x6c, 0x65, 0x73, 0x73, 0x4d, 0x73, + 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x33, 0x0a, 0x0e, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x6c, 0x65, 0x73, 0x73, 0x4d, 0x73, 0x67, 0x12, 0x21, + 0x0a, 0x0c, 0x6f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x6f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x22, 0x9b, 0x02, 0x0a, 0x17, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x61, + 0x62, 0x6c, 0x65, 0x44, 0x65, 0x65, 0x70, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x2c, 0x0a, + 0x12, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x5f, 0x6f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x5f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, + 0x4f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x61, 0x0a, 0x10, 0x6d, + 0x61, 0x70, 0x5f, 0x6f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x73, 0x2e, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x61, 0x62, 0x6c, 0x65, + 0x44, 0x65, 0x65, 0x70, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x2e, 0x4d, 0x61, 0x70, 0x4f, 0x70, + 0x61, 0x71, 0x75, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, + 0x6d, 0x61, 0x70, 0x4f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x2c, + 0x0a, 0x12, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x5f, 0x6f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x5f, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x73, 0x6c, 0x69, 0x63, + 0x65, 0x4f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x1a, 0x41, 0x0a, 0x13, + 0x4d, 0x61, 0x70, 0x4f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, + 0x3c, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x79, + 0x70, 0x65, 0x72, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x72, 0x2f, 0x66, 0x61, 0x62, 0x72, 0x69, 0x63, + 0x2d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x6c, 0x61, 0x74, + 0x6f, 0x72, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sample_proto_rawDescOnce sync.Once + file_sample_proto_rawDescData = file_sample_proto_rawDesc +) + +func file_sample_proto_rawDescGZIP() []byte { + file_sample_proto_rawDescOnce.Do(func() { + file_sample_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_sample_proto_rawDescData) + }) + return file_sample_proto_rawDescData +} + +var file_sample_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_sample_proto_goTypes = []any{ + (*SimpleMsg)(nil), // 0: testprotos.SimpleMsg + (*NestedMsg)(nil), // 1: testprotos.NestedMsg + (*StaticallyOpaqueMsg)(nil), // 2: testprotos.StaticallyOpaqueMsg + (*VariablyOpaqueMsg)(nil), // 3: testprotos.VariablyOpaqueMsg + (*DynamicMsg)(nil), // 4: testprotos.DynamicMsg + (*ContextlessMsg)(nil), // 5: testprotos.ContextlessMsg + (*UnmarshalableDeepFields)(nil), // 6: testprotos.UnmarshalableDeepFields + nil, // 7: testprotos.SimpleMsg.MapFieldEntry + nil, // 8: testprotos.NestedMsg.MapNestedFieldEntry + nil, // 9: testprotos.StaticallyOpaqueMsg.MapOpaqueFieldEntry + nil, // 10: testprotos.VariablyOpaqueMsg.MapOpaqueFieldEntry + nil, // 11: testprotos.DynamicMsg.MapDynamicFieldEntry + nil, // 12: testprotos.UnmarshalableDeepFields.MapOpaqueFieldEntry +} +var file_sample_proto_depIdxs = []int32{ + 7, // 0: testprotos.SimpleMsg.map_field:type_name -> testprotos.SimpleMsg.MapFieldEntry + 0, // 1: testprotos.NestedMsg.plain_nested_field:type_name -> testprotos.SimpleMsg + 8, // 2: testprotos.NestedMsg.map_nested_field:type_name -> testprotos.NestedMsg.MapNestedFieldEntry + 0, // 3: testprotos.NestedMsg.slice_nested_field:type_name -> testprotos.SimpleMsg + 9, // 4: testprotos.StaticallyOpaqueMsg.map_opaque_field:type_name -> testprotos.StaticallyOpaqueMsg.MapOpaqueFieldEntry + 10, // 5: testprotos.VariablyOpaqueMsg.map_opaque_field:type_name -> testprotos.VariablyOpaqueMsg.MapOpaqueFieldEntry + 5, // 6: testprotos.DynamicMsg.plain_dynamic_field:type_name -> testprotos.ContextlessMsg + 11, // 7: testprotos.DynamicMsg.map_dynamic_field:type_name -> testprotos.DynamicMsg.MapDynamicFieldEntry + 5, // 8: testprotos.DynamicMsg.slice_dynamic_field:type_name -> testprotos.ContextlessMsg + 12, // 9: testprotos.UnmarshalableDeepFields.map_opaque_field:type_name -> testprotos.UnmarshalableDeepFields.MapOpaqueFieldEntry + 0, // 10: testprotos.NestedMsg.MapNestedFieldEntry.value:type_name -> testprotos.SimpleMsg + 5, // 11: testprotos.DynamicMsg.MapDynamicFieldEntry.value:type_name -> testprotos.ContextlessMsg + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_sample_proto_init() } +func file_sample_proto_init() { + if File_sample_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sample_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*SimpleMsg); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sample_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*NestedMsg); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sample_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*StaticallyOpaqueMsg); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sample_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*VariablyOpaqueMsg); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + 
case 2: + return &v.unknownFields + default: + return nil + } + } + file_sample_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*DynamicMsg); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sample_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*ContextlessMsg); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sample_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*UnmarshalableDeepFields); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sample_proto_rawDesc, + NumEnums: 0, + NumMessages: 13, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sample_proto_goTypes, + DependencyIndexes: file_sample_proto_depIdxs, + MessageInfos: file_sample_proto_msgTypes, + }.Build() + File_sample_proto = out.File + file_sample_proto_rawDesc = nil + file_sample_proto_goTypes = nil + file_sample_proto_depIdxs = nil +} diff --git a/v2/protolator/testprotos/sample.proto b/v2/protolator/testprotos/sample.proto new file mode 100644 index 0000000..8097a55 --- /dev/null +++ b/v2/protolator/testprotos/sample.proto @@ -0,0 +1,78 @@ +/* +Copyright IBM Corp. 2017 All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+syntax = "proto3";
+
+option go_package = "github.com/hyperledger/fabric-config/protolator/testprotos";
+
+package testprotos;
+
+// SimpleMsg is designed to test that all three types of message fields, plain, map,
+// and slice are handled by the protolator tool
+message SimpleMsg {
+    string plain_field = 1;
+    map<string, string> map_field = 2;
+    repeated string slice_field = 3;
+}
+
+// NestedMsg is designed to test the nested message component
+message NestedMsg {
+    SimpleMsg plain_nested_field = 1;
+    map<string, SimpleMsg> map_nested_field = 2;
+    repeated SimpleMsg slice_nested_field = 3;
+}
+
+// StaticallyOpaqueMsg is designed to test the statically opaque message component
+// All fields are statically marshaled to the NestedMsg type
+message StaticallyOpaqueMsg {
+    bytes plain_opaque_field = 1;
+    map<string, bytes> map_opaque_field = 2;
+    repeated bytes slice_opaque_field = 3;
+}
+
+// VariablyOpaqueMsg is designed to test the variably opaque message component
+// The opaque type is determined by opaque_type
+message VariablyOpaqueMsg {
+    string opaque_type = 1;
+    bytes plain_opaque_field = 2;
+    map<string, bytes> map_opaque_field = 3;
+    repeated bytes slice_opaque_field = 4;
+}
+
+// DynamicMsg is designed to test the dynamic message component
+// The dynamic wrapper applied to ContextlessMsg is determined by
+// dynamic_type
+message DynamicMsg {
+    string dynamic_type = 1;
+    ContextlessMsg plain_dynamic_field = 2;
+    map<string, ContextlessMsg> map_dynamic_field = 3;
+    repeated ContextlessMsg slice_dynamic_field = 4;
+}
+
+// ContextlessMsg is designed to carry a message of completely arbitrary type
+// Because there is no context for the type embedded in the message, the opaque
+// type must be dynamically added at runtime
+message ContextlessMsg {
+    bytes opaque_field = 1;
+}
+
+// UnmarshalableDeepFields contains fields which are defined to be opaque, but will
+// return an error if they are asked to be deserialized.
+message UnmarshalableDeepFields {
+    bytes plain_opaque_field = 1;
+    map<string, bytes> map_opaque_field = 2;
+    repeated bytes slice_opaque_field = 3;
+}
diff --git a/v2/protolator/variably_opaque.go b/v2/protolator/variably_opaque.go
new file mode 100644
index 0000000..49e3889
--- /dev/null
+++ b/v2/protolator/variably_opaque.go
@@ -0,0 +1,124 @@
+/*
+Copyright IBM Corp. 2017 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package protolator + +import ( + "reflect" + + "google.golang.org/protobuf/proto" +) + +type variablyOpaqueFieldFactory struct{} + +func (soff variablyOpaqueFieldFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + opaqueProto, ok := msg.(VariablyOpaqueFieldProto) + if !ok { + return false + } + + return stringInSlice(fieldName, opaqueProto.VariablyOpaqueFields()) +} + +func (soff variablyOpaqueFieldFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + opaqueProto := msg.(VariablyOpaqueFieldProto) // Type checked in Handles + + return &plainField{ + baseField: baseField{ + msg: msg, + name: fieldName, + fType: mapStringInterfaceType, + vType: bytesType, + value: fieldValue, + }, + populateFrom: func(v interface{}, dT reflect.Type) (reflect.Value, error) { + return opaqueFrom(func() (proto.Message, error) { return opaqueProto.VariablyOpaqueFieldProto(fieldName) }, v, dT) + }, + populateTo: func(v reflect.Value) (interface{}, error) { + return opaqueTo(func() (proto.Message, error) { return opaqueProto.VariablyOpaqueFieldProto(fieldName) }, v) + }, + }, nil +} + +type variablyOpaqueMapFieldFactory struct{} + +func (soff variablyOpaqueMapFieldFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + opaqueProto, ok := msg.(VariablyOpaqueMapFieldProto) + if !ok { + return false + } + + return stringInSlice(fieldName, opaqueProto.VariablyOpaqueMapFields()) +} + +func (soff variablyOpaqueMapFieldFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + opaqueProto := msg.(VariablyOpaqueMapFieldProto) // Type checked in Handles + + return &mapField{ + baseField: baseField{ + msg: msg, + name: fieldName, + fType: mapStringInterfaceType, + vType: fieldType, + value: fieldValue, + }, + populateFrom: func(key string, v interface{}, dT reflect.Type) (reflect.Value, error) { + return opaqueFrom(func() (proto.Message, error) { + return opaqueProto.VariablyOpaqueMapFieldProto(fieldName, key) + }, v, dT) + }, + populateTo: func(key string, v reflect.Value) (interface{}, error) { + return opaqueTo(func() (proto.Message, error) { + return opaqueProto.VariablyOpaqueMapFieldProto(fieldName, key) + }, v) + }, + }, nil +} + +type variablyOpaqueSliceFieldFactory struct{} + +func (soff variablyOpaqueSliceFieldFactory) Handles(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) bool { + opaqueProto, ok := msg.(VariablyOpaqueSliceFieldProto) + if !ok { + return false + } + + return stringInSlice(fieldName, opaqueProto.VariablyOpaqueSliceFields()) +} + +func (soff variablyOpaqueSliceFieldFactory) NewProtoField(msg proto.Message, fieldName string, fieldType reflect.Type, fieldValue reflect.Value) (protoField, error) { + opaqueProto := msg.(VariablyOpaqueSliceFieldProto) // Type checked in Handles + + return &sliceField{ + baseField: baseField{ + msg: msg, + name: fieldName, + fType: mapStringInterfaceType, + vType: fieldType, + value: fieldValue, + }, + populateFrom: func(index int, v interface{}, dT reflect.Type) (reflect.Value, error) { + return opaqueFrom(func() (proto.Message, error) { + return opaqueProto.VariablyOpaqueSliceFieldProto(fieldName, index) + }, v, dT) + }, + populateTo: func(index int, v reflect.Value) (interface{}, error) { + return opaqueTo(func() (proto.Message, error) { + return 
opaqueProto.VariablyOpaqueSliceFieldProto(fieldName, index) + }, v) + }, + }, nil +} diff --git a/v2/protolator/variably_opaque_test.go b/v2/protolator/variably_opaque_test.go new file mode 100644 index 0000000..79c6bb0 --- /dev/null +++ b/v2/protolator/variably_opaque_test.go @@ -0,0 +1,150 @@ +/* +Copyright IBM Corp. 2017 All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package protolator + +import ( + "bytes" + "testing" + + "github.com/hyperledger/fabric-config/v2/protolator/testprotos" + . "github.com/onsi/gomega" + "google.golang.org/protobuf/proto" +) + +func extractNestedMsgPlainField(source []byte) string { + result := &testprotos.NestedMsg{} + err := proto.Unmarshal(source, result) + if err != nil { + panic(err) + } + return result.PlainNestedField.PlainField +} + +func TestPlainVariablyOpaqueMsg(t *testing.T) { + gt := NewGomegaWithT(t) + + fromPrefix := "from" + toPrefix := "to" + tppff := &testProtoPlainFieldFactory{ + fromPrefix: fromPrefix, + toPrefix: toPrefix, + } + + fieldFactories = []protoFieldFactory{tppff} + + pfValue := "foo" + startMsg := &testprotos.VariablyOpaqueMsg{ + OpaqueType: "NestedMsg", + PlainOpaqueField: protoMarshalOrPanic(&testprotos.NestedMsg{ + PlainNestedField: &testprotos.SimpleMsg{ + PlainField: pfValue, + }, + }), + } + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + newMsg := &testprotos.VariablyOpaqueMsg{} + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractNestedMsgPlainField(newMsg.PlainOpaqueField)).NotTo(Equal(fromPrefix + toPrefix + extractNestedMsgPlainField(startMsg.PlainOpaqueField))) + + fieldFactories = []protoFieldFactory{tppff, nestedFieldFactory{}, variablyOpaqueFieldFactory{}} + + buffer.Reset() + err = DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractNestedMsgPlainField(newMsg.PlainOpaqueField)).To(Equal(fromPrefix + toPrefix + extractNestedMsgPlainField(startMsg.PlainOpaqueField))) +} + +func TestMapVariablyOpaqueMsg(t *testing.T) { + gt := NewGomegaWithT(t) + + fromPrefix := "from" + toPrefix := "to" + tppff := &testProtoPlainFieldFactory{ + fromPrefix: fromPrefix, + toPrefix: toPrefix, + } + + fieldFactories = []protoFieldFactory{tppff} + + pfValue := "foo" + mapKey := "bar" + startMsg := &testprotos.VariablyOpaqueMsg{ + OpaqueType: "NestedMsg", + MapOpaqueField: map[string][]byte{ + mapKey: protoMarshalOrPanic(&testprotos.NestedMsg{ + PlainNestedField: &testprotos.SimpleMsg{ + PlainField: pfValue, + }, + }), + }, + } + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + newMsg := &testprotos.VariablyOpaqueMsg{} + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractNestedMsgPlainField(newMsg.MapOpaqueField[mapKey])).NotTo(Equal(fromPrefix + toPrefix + extractNestedMsgPlainField(startMsg.MapOpaqueField[mapKey]))) + + fieldFactories = []protoFieldFactory{tppff, nestedFieldFactory{}, variablyOpaqueMapFieldFactory{}} + + buffer.Reset() + err = DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + 
gt.Expect(extractNestedMsgPlainField(newMsg.MapOpaqueField[mapKey])).To(Equal(fromPrefix + toPrefix + extractNestedMsgPlainField(startMsg.MapOpaqueField[mapKey]))) +} + +func TestSliceVariablyOpaqueMsg(t *testing.T) { + gt := NewGomegaWithT(t) + + fromPrefix := "from" + toPrefix := "to" + tppff := &testProtoPlainFieldFactory{ + fromPrefix: fromPrefix, + toPrefix: toPrefix, + } + + fieldFactories = []protoFieldFactory{tppff} + + pfValue := "foo" + startMsg := &testprotos.VariablyOpaqueMsg{ + OpaqueType: "NestedMsg", + SliceOpaqueField: [][]byte{ + protoMarshalOrPanic(&testprotos.NestedMsg{ + PlainNestedField: &testprotos.SimpleMsg{ + PlainField: pfValue, + }, + }), + }, + } + + var buffer bytes.Buffer + err := DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + newMsg := &testprotos.VariablyOpaqueMsg{} + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractNestedMsgPlainField(newMsg.SliceOpaqueField[0])).NotTo(Equal(fromPrefix + toPrefix + extractNestedMsgPlainField(startMsg.SliceOpaqueField[0]))) + + fieldFactories = []protoFieldFactory{tppff, nestedFieldFactory{}, variablyOpaqueSliceFieldFactory{}} + + buffer.Reset() + err = DeepMarshalJSON(&buffer, startMsg) + gt.Expect(err).NotTo(HaveOccurred()) + err = DeepUnmarshalJSON(bytes.NewReader(buffer.Bytes()), newMsg) + gt.Expect(err).NotTo(HaveOccurred()) + gt.Expect(extractNestedMsgPlainField(newMsg.SliceOpaqueField[0])).To(Equal(fromPrefix + toPrefix + extractNestedMsgPlainField(startMsg.SliceOpaqueField[0]))) +}
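
The tests above exercise the opaque-field factories directly against the test protos; a minimal sketch of how a consumer might drive the v2 protolator and protoext packages end to end follows. It assumes that protoext.Decorate in v2 keeps the v1 signature (proto.Message in, proto.Message out) and that the block.pb input path and main wrapper are illustrative only; neither is taken from this patch.

package main

import (
	"os"

	"github.com/hyperledger/fabric-config/v2/protolator"
	"github.com/hyperledger/fabric-config/v2/protolator/protoext"
	"github.com/hyperledger/fabric-protos-go-apiv2/common"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Read a serialized common.Block; the file name is illustrative.
	raw, err := os.ReadFile("block.pb")
	if err != nil {
		panic(err)
	}

	block := &common.Block{}
	if err := proto.Unmarshal(raw, block); err != nil {
		panic(err)
	}

	// Decorate wraps the block with the protoext decorators so that
	// statically and variably opaque bytes fields (payloads, headers,
	// config values) are expanded into nested JSON rather than emitted
	// as base64 strings.
	if err := protolator.DeepMarshalJSON(os.Stdout, protoext.Decorate(block)); err != nil {
		panic(err)
	}
}

The same pattern runs in reverse with DeepUnmarshalJSON followed by proto.Marshal, which is the round trip the statically and variably opaque tests above verify.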