From bb668ecd90aab6170780adacf574e1b0b0d1bfbc Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Wed, 14 Apr 2021 14:08:24 -0500
Subject: [PATCH 1/8] Moved parseProducers from Cluster to Node, made it a member function, and added a getParticipantNum method.

---
 tests/Cluster.py                          | 29 +++++++++--------------
 tests/Node.py                             | 21 ++++++++++++++++
 tests/nodeos_forked_chain_test.py         |  2 +-
 tests/nodeos_high_transaction_test.py     |  2 +-
 tests/nodeos_short_fork_take_over_test.py |  2 +-
 tests/nodeos_voting_test.py               |  2 +-
 6 files changed, 36 insertions(+), 22 deletions(-)

diff --git a/tests/Cluster.py b/tests/Cluster.py
index fb879ab2688..8e0f6a07e9b 100644
--- a/tests/Cluster.py
+++ b/tests/Cluster.py
@@ -538,6 +538,8 @@ def connectGroup(group, producerNodes, bridgeNodes) :
         if onlyBios:
             self.nodes=[biosNode]
 
+        self.totalNodes = totalNodes
+
         # ensure cluster node are inter-connected by ensuring everyone has block 1
         Utils.Print("Cluster viability smoke test. Validate every cluster node has block 1. ")
         if not self.waitOnClusterBlockNumSync(1):
@@ -1032,24 +1034,6 @@ def parseProducerKeys(configFile, nodeName):
 
         return producerKeys
 
-    @staticmethod
-    def parseProducers(nodeNum):
-        """Parse node config file for producers."""
-
-        configFile=Utils.getNodeConfigDir(nodeNum, "config.ini")
-        if Utils.Debug: Utils.Print("Parsing config file %s" % configFile)
-        configStr=None
-        with open(configFile, 'r') as f:
-            configStr=f.read()
-
-        pattern=r"^\s*producer-name\s*=\W*(\w+)\W*$"
-        producerMatches=re.findall(pattern, configStr, re.MULTILINE)
-        if producerMatches is None:
-            if Utils.Debug: Utils.Print("Failed to find producers.")
-            return None
-
-        return producerMatches
-
     @staticmethod
     def parseClusterKeys(totalNodes):
         """Parse cluster config file. Updates producer keys data members."""
@@ -1877,3 +1861,12 @@ def verifyInSync(self, sourceNodeNum=0, specificNodes=None):
         if error:
             self.reportInfo()
             Utils.errorExit(error)
+
+    def getParticipantNum(self, nodeToIdentify):
+        num = 0
+        for node in self.nodes:
+            if node == nodeToIdentify:
+                return num
+            num += 1
+        assert nodeToIdentify == self.biosNode
+        return self.totalNodes
diff --git a/tests/Node.py b/tests/Node.py
index 61c69991f36..7a1cbc3bc04 100644
--- a/tests/Node.py
+++ b/tests/Node.py
@@ -1687,3 +1687,24 @@ def waitForIrreversibleBlockProducedBy(self, producer, startBlockNum=0, retry=10
             retry = retry - 1
             startBlockNum = latestBlockNum + 1
         return False
+
+    @staticmethod
+    def parseProducers(nodeNum):
+        """Parse node config file for producers."""
+
+        configFile=Utils.getNodeConfigDir(nodeNum, "config.ini")
+        if Utils.Debug: Utils.Print("Parsing config file %s" % configFile)
+        configStr=None
+        with open(configFile, 'r') as f:
+            configStr=f.read()
+
+        pattern=r"^\s*producer-name\s*=\W*(\w+)\W*$"
+        producerMatches=re.findall(pattern, configStr, re.MULTILINE)
+        if producerMatches is None:
+            if Utils.Debug: Utils.Print("Failed to find producers.")
+            return None
+
+        return producerMatches
+
+    def getProducers(self):
+        return Node.parseProducers(self.nodeId)
\ No newline at end of file
diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py
index 06ae59646a9..9baa3b1a7e5 100755
--- a/tests/nodeos_forked_chain_test.py
+++ b/tests/nodeos_forked_chain_test.py
@@ -207,7 +207,7 @@ def getMinHeadAndLib(prodNodes):
 producers=[]
 for i in range(0, totalNodes):
     node=cluster.getNode(i)
-    node.producers=Cluster.parseProducers(i)
+    node.producers=node.getProducers()
     numProducers=len(node.producers)
     Print("node has producers=%s" % (node.producers))
     if numProducers==0:
diff --git a/tests/nodeos_high_transaction_test.py b/tests/nodeos_high_transaction_test.py
index 6108af9ffa0..8107e973ce5 100755
--- a/tests/nodeos_high_transaction_test.py
+++ b/tests/nodeos_high_transaction_test.py
@@ -116,7 +116,7 @@
 allNodes=cluster.getNodes()
 for i in range(0, totalNodes):
     node=allNodes[i]
-    nodeProducers=Cluster.parseProducers(i)
+    nodeProducers=node.getProducers()
     numProducers=len(nodeProducers)
     Print("node has producers=%s" % (nodeProducers))
     if numProducers==0:
diff --git a/tests/nodeos_short_fork_take_over_test.py b/tests/nodeos_short_fork_take_over_test.py
index 29aa223aee2..f09b860fb74 100755
--- a/tests/nodeos_short_fork_take_over_test.py
+++ b/tests/nodeos_short_fork_take_over_test.py
@@ -170,7 +170,7 @@ def getMinHeadAndLib(prodNodes):
 producers=[]
 for i in range(0, totalNodes):
     node=cluster.getNode(i)
-    node.producers=Cluster.parseProducers(i)
+    node.producers=node.getProducers()
     numProducers=len(node.producers)
     Print("node has producers=%s" % (node.producers))
     if numProducers==0:
diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py
index a3c157e8027..ae951ef096b 100755
--- a/tests/nodeos_voting_test.py
+++ b/tests/nodeos_voting_test.py
@@ -202,7 +202,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds):
 
 for i in range(0, totalNodes):
     node=cluster.getNode(i)
-    node.producers=Cluster.parseProducers(i)
+    node.producers=node.getProducers()
     for prod in node.producers:
         trans=node.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, waitForTransBlock=False, exitOnError=True)
 

From 21ca57efc64c30a630d763777a4f9c3e012a5433 Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Wed, 14 Apr 2021 15:04:32 -0500
Subject: [PATCH 2/8] Added more scenarios to test.

---
 tests/privacy_simple_network.py | 135 +++++++++++++++++++++++---------
 1 file changed, 97 insertions(+), 38 deletions(-)

diff --git a/tests/privacy_simple_network.py b/tests/privacy_simple_network.py
index b55b5f96b2c..1295d5a7e66 100755
--- a/tests/privacy_simple_network.py
+++ b/tests/privacy_simple_network.py
@@ -80,7 +80,6 @@
     Print("Validating system accounts after bootstrap")
     cluster.validateAccounts(None)
 
-    Utils.Print("\n\n\n\n\nCheck after KILL:")
     Utils.Print("\n\n\n\n\nNext Round of Info:")
     cluster.reportInfo()
 
@@ -88,29 +87,33 @@
     apiNodes = [cluster.getNode(x) for x in range(pnodes, totalNodes)]
     apiNodes.append(cluster.biosNode)
 
-    blockProducer = producers[0].getHeadOrLib()["producer"]
+    featureProdNum = 0
+    blockProducer = producers[featureProdNum].getHeadOrLib()["producer"]
+    while blockProducer not in producers[featureProdNum].getProducers():
+        featureProdNum += 1
+        assert featureProdNum < pnodes, "Checked nodes {} through {} but could not find producer: {}".format(0, featureProdNum - 1, blockProducer)
 
     cluster.verifyInSync()
 
-    featureDict = producers[0].getSupportedProtocolFeatureDict()
+    featureDict = producers[featureProdNum].getSupportedProtocolFeatureDict()
     Utils.Print("feature dict: {}".format(json.dumps(featureDict, indent=4, sort_keys=True)))
 
     cluster.reportInfo()
 
     Utils.Print("Activating SECURITY_GROUP Feature")
-    #Utils.Print("act feature dict: {}".format(json.dumps(producers[0].getActivatedProtocolFeatures(), indent=4, sort_keys=True)))
+    Utils.Print("act feature dict: {}".format(json.dumps(producers[featureProdNum].getActivatedProtocolFeatures(), indent=4, sort_keys=True)))
 
     timeout = ( pnodes * 12 / 2 ) * 2   # (number of producers * blocks produced / 0.5 blocks per second) * 2 rounds
-    for producer in producers:
-        producers[0].waitUntilBeginningOfProdTurn(blockProducer, timeout=timeout)
+    for tryNum in range(3):   # try 3 times to set the security group feature
+        producers[featureProdNum].waitUntilBeginningOfProdTurn(blockProducer, timeout=timeout)
         feature = "SECURITY_GROUP"
-        producers[0].activateFeatures([feature])
-        if producers[0].containsFeatures([feature]):
+        producers[featureProdNum].activateFeatures([feature])
+        if producers[featureProdNum].containsFeatures([feature]):
             break
 
     Utils.Print("SECURITY_GROUP Feature activated")
     cluster.reportInfo()
 
-    assert producers[0].containsFeatures([feature]), "{} feature was not activated".format(feature)
+    assert producers[featureProdNum].containsFeatures([feature]), "{} feature was not activated".format(feature)
 
     def publishContract(account, file, waitForTransBlock=False):
         Print("Publish contract")
@@ -122,19 +125,32 @@ def publishContract(account, file, waitForTransBlock=False):
     participants = [x for x in producers]
     nonParticipants = [x for x in apiNodes]
 
-    def security_group(nodeNums):
-        action = None
-        for nodeNum in nodeNums:
-            if action is None:
-                action = '[['
-            else:
-                action += ','
-            action += '"{}"'.format(Node.participantName(nodeNum))
-        action += ']]'
-
-        Utils.Print("adding {} to the security group".format(action))
-        trans = producers[0].pushMessage(cluster.eosioAccount.name, "add", action, "--permission eosio@active")
-        Utils.Print("add trans: {}".format(json.dumps(trans, indent=4, sort_keys=True)))
+    def security_group(addNodeNums=[], removeNodeNums=[]):
+        def createAction(nodeNums):
+            action = None
+            for nodeNum in nodeNums:
+                if action is None:
+                    action = '[['
+                else:
+                    action += ','
+                action += '"{}"'.format(Node.participantName(nodeNum))
+            if action:
+                action += ']]'
+            return action
+
+        addAction = createAction(addNodeNums)
+        removeAction = createAction(removeNodeNums)
+
+        if addAction:
+            Utils.Print("adding {} to the security group".format(addAction))
+            trans = producers[0].pushMessage(cluster.eosioAccount.name, "add", addAction, "--permission eosio@active")
+            Utils.Print("add trans: {}".format(json.dumps(trans, indent=4, sort_keys=True)))
+
+        if removeAction:
+            Utils.Print("removing {} from the security group".format(removeAction))
+            trans = producers[0].pushMessage(cluster.eosioAccount.name, "remove", removeAction, "--permission eosio@active")
+            Utils.Print("remove trans: {}".format(json.dumps(trans, indent=4, sort_keys=True)))
+
         trans = producers[0].pushMessage(cluster.eosioAccount.name, "publish", "[0]", "--permission eosio@active")
         Utils.Print("publish action trans: {}".format(json.dumps(trans, indent=4, sort_keys=True)))
         return trans
 
@@ -160,33 +176,76 @@ def verifyNonParticipants(transId):
             nonParticipantHead = nonParticipant.getBlockNum()
             assert nonParticipantHead < producerHead, "Participants (that are not producers themselves) should not advance head to {}, but it has advanced to {}".format(producerHead, nonParticipantHead)
 
+    def verifySecurityGroup(publishTransPair):
+        publishTransId = Node.getTransId(publishTransPair[1])
+        verifyParticipantsTransactionFinalized(publishTransId)
+        verifyNonParticipants(publishTransId)
+
     Utils.Print("Add all producers to security group")
     publishTrans = security_group([x for x in range(pnodes)])
-    publishTransId = Node.getTransId(publishTrans[1])
-    verifyParticipantsTransactionFinalized(publishTransId)
-    verifyNonParticipants(publishTransId)
+    verifySecurityGroup(publishTrans)
+
+    cluster.reportInfo()
 
+    # one by one add each nonParticipant to the security group
     while len(nonParticipants) > 0:
         toAdd = nonParticipants[0]
         participants.append(toAdd)
         del nonParticipants[0]
         Utils.Print("Take a non-participant and make a participant. Now there are {} participants and {} non-participants".format(len(participants), len(nonParticipants)))
 
-        toAddNum = None
-        num = 0
-        for node in cluster.getNodes():
-            if node == toAdd:
-                toAddNum = num
-                break
-            num += 1
-        if toAddNum is None:
-            assert toAdd == cluster.biosNode
-            toAddNum = totalNodes
+        toAddNum = cluster.getParticipantNum(toAdd)
 
         publishTrans = security_group([toAddNum])
-        publishTransId = Node.getTransId(publishTrans[1])
-        verifyParticipantsTransactionFinalized(publishTransId)
-        verifyNonParticipants(publishTransId)
+        verifySecurityGroup(publishTrans)
+        cluster.reportInfo()
+
+
+    # one by one remove each (original) nonParticipant from the security group
+    while len(participants) > pnodes:
+        toRemove = participants[-1]
+        # popping off back of participants and need to push on the front of nonParticipants
+        nonParticipants.insert(0, toRemove)
+        del participants[-1]
+        Utils.Print("Take a participant and make a non-participant. Now there are {} participants and {} non-participants".format(len(participants), len(nonParticipants)))
+
+        toRemoveNum = cluster.getParticipantNum(toRemove)
+        publishTrans = security_group(removeNodeNums=[toRemoveNum])
+        verifySecurityGroup(publishTrans)
+        cluster.reportInfo()
+
+
+    # if we have more than 1 api node, we will add and remove all those nodes in bulk, if not it is just a repeat of the above test
+    if totalNodes > pnodes + 1:
+        # add all the api nodes to security group at once
+        toAdd = []
+        for apiNode in nonParticipants:
+            participantNum = cluster.getParticipantNum(apiNode)
+            toAdd.append(participantNum)
+        participants.extend(nonParticipants)
+        nonParticipants = []
+
+        Utils.Print("Add all api nodes to security group")
+        publishTrans = security_group(addNodeNums=toAdd)
+        verifySecurityGroup(publishTrans)
+
+        cluster.reportInfo()
+
+
+        # remove all the api nodes from the security group at once
+        toRemove = []
+        # index pnodes and following are moving to nonParticipants, so participants has everything before that
+        nonParticipants = participants[pnodes:]
+        participants = participants[:pnodes]
+        for apiNode in nonParticipants:
+            participantNum = cluster.getParticipantNum(apiNode)
+            toRemove.append(participantNum)
+
+        Utils.Print("Remove all api nodes from security group")
+        publishTrans = security_group(removeNodeNums=toRemove)
+        verifySecurityGroup(publishTrans)
+
+        cluster.reportInfo()
 
     testSuccessful=True
 finally:

From e8a4e3042bc7a3b44d641f008fe334497f443ac8 Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Thu, 15 Apr 2021 13:17:12 -0500
Subject: [PATCH 3/8] Cleaned up unused parameter.

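The diff below drops the unused waitForBlock argument from Node.getBlockProducer. As a quick orientation, a minimal usage sketch of the remaining signature (the node variable is whatever Node instance a test already holds; these call sites are illustrative, not taken from this series):

    # producer of the current head block; defaults are timeout=None, exitOnError=True, blockType=BlockType.head
    headProducer = node.getBlockProducer()

    # same lookup, but tolerate a failed block fetch instead of exiting the test
    maybeProducer = node.getBlockProducer(exitOnError=False)
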
---
 tests/Node.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/Node.py b/tests/Node.py
index 7a1cbc3bc04..9e94dda08db 100644
--- a/tests/Node.py
+++ b/tests/Node.py
@@ -1200,7 +1200,7 @@ def getBlockProducerByNum(self, blockNum, timeout=None, waitForBlock=True, exitO
         block=self.getBlock(blockNum, exitOnError=exitOnError)
         return Node.getBlockAttribute(block, "producer", blockNum, exitOnError=exitOnError)
 
-    def getBlockProducer(self, timeout=None, waitForBlock=True, exitOnError=True, blockType=BlockType.head):
+    def getBlockProducer(self, timeout=None, exitOnError=True, blockType=BlockType.head):
         blockNum=self.getBlockNum(blockType=blockType)
         block=self.getBlock(blockNum, exitOnError=exitOnError, blockType=blockType)
         return Node.getBlockAttribute(block, "producer", blockNum, exitOnError=exitOnError)

From 23f8510dfed89f0d205fe4106587c1a609f9527d Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Thu, 15 Apr 2021 13:40:51 -0500
Subject: [PATCH 4/8] Added method for identifying the node for the provided producer.

---
 tests/Cluster.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/tests/Cluster.py b/tests/Cluster.py
index 8e0f6a07e9b..0536f1b057f 100644
--- a/tests/Cluster.py
+++ b/tests/Cluster.py
@@ -1870,3 +1870,14 @@ def getParticipantNum(self, nodeToIdentify):
             num += 1
         assert nodeToIdentify == self.biosNode
         return self.totalNodes
+
+    def getProducingNodeIndex(self, blockProducer):
+        featureProdNum = 0
+        while featureProdNum < pnodes:
+            if blockProducer in self.nodes[featureProdNum].getProducers():
+                return featureProdNum
+
+            featureProdNum += 1
+
+        assert blockProducer in self.biosNode.getProducers(), "Checked all nodes but could not find producer: {}".format(blockProducer)
+        return "bios"

From 27433a9696ab567b7653387a34ba64323a4b7e68 Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Fri, 16 Apr 2021 10:43:52 -0500
Subject: [PATCH 5/8] Improved feature activation and added more control over the block header state methods.

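A rough sketch of how the reworked helpers below are meant to be used together, based only on the signatures in this diff (the node index and feature name are illustrative):

    node = cluster.getNode(0)
    # schedule activation, then let activateAndVerifyFeatures walk blocks until the digest appears
    node.activateAndVerifyFeatures(["SECURITY_GROUP"])
    # header state can now be fetched for any reversible block, not just the latest one
    state = node.getBlockHeaderState(node.getBlockNum())
    assert node.containsFeatures(["SECURITY_GROUP"], blockHeaderState=state)
    Utils.Print("activated: {}".format(node.getActivatedProtocolFeatures(blockHeaderState=state)))
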
---
 tests/Node.py | 71 ++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 53 insertions(+), 18 deletions(-)

diff --git a/tests/Node.py b/tests/Node.py
index 9e94dda08db..dc716e1b368 100644
--- a/tests/Node.py
+++ b/tests/Node.py
@@ -1453,7 +1453,8 @@ def isDesiredProdTurn():
         return beginningOfProdTurnHead
 
     # Require producer_api_plugin
-    def activateFeatures(self, features):
+    def activateFeatures(self, features, blocksToAdvance=2):
+        assert blocksToAdvance >= 0
         featureDigests = []
         for feature in features:
             protocolFeatureDigestDict = self.getSupportedProtocolFeatureDict()
@@ -1465,16 +1466,44 @@ def activateFeatures(self, features):
         self.scheduleProtocolFeatureActivations(featureDigests)
 
         # Wait for the next block to be produced so the scheduled protocol feature is activated
-        assert self.waitForHeadToAdvance(blocksToAdvance=2), print("ERROR: TIMEOUT WAITING FOR activating features: {}".format(",".join(features)))
+        assert self.waitForHeadToAdvance(blocksToAdvance=blocksToAdvance), print("ERROR: TIMEOUT WAITING FOR activating features: {}".format(",".join(features)))
+
+    def activateAndVerifyFeatures(self, features):
+        self.activateFeatures(features, blocksToAdvance=0)
+        headBlockNum = self.getBlockNum()
+        blockNum = headBlockNum
+        producers = {}
+        lastProducer = None
+        while True:
+            block = self.getBlock(blockNum)
+            blockHeaderState = self.getBlockHeaderState(blockNum)
+            if self.containsFeatures(features, blockHeaderState):
+                return
+
+            producer = block["producer"]
+            producers[producer] = producers.get(producer, 0) + 1
+            assert lastProducer != producer or producers[producer] == 1, \
+                "We have already cycled through a complete cycle, so feature should have been set by now. \ Initial block num: {}, looking at block num: {}".format(headBlockNum, blockNum)
+
+            # feature should be in block for this node's producers, if it is at least 2 blocks after we sent the activate
+            minBlocksForGuarantee = 2
+            assert producer not in self.getProducers() or blockNum - headBlockNum < minBlocksForGuarantee, \
+                "It is {} blocks past the block when we activated the features and block num: {} was produced by this \ node, so features should have been set."
+            self.waitForBlock(blockNum + 1)
+            blockNum = self.getBlockNum()
+
+
     # Require producer_api_plugin
     def activatePreactivateFeature(self):
         return self.activateFeatures(["PREACTIVATE_FEATURE"])
 
-    def containsFeatures(self, features):
+    def containsFeatures(self, features, blockHeaderState=None):
         protocolFeatureDict = self.getSupportedProtocolFeatureDict()
-        blockHeaderState = self.getLatestBlockHeaderState()
-        assert blockHeaderState, "blockHeaderState should not be empty"
+        if blockHeaderState is None:
+            blockHeaderState = self.getLatestBlockHeaderState()
         for feature in features:
             featureDigest = protocolFeatureDict[feature]["feature_digest"]
             assert featureDigest, "{}'s Digest should not be empty".format(feature)
@@ -1520,20 +1549,26 @@ def preactivateAllBuiltinProtocolFeature(self):
 
     def getLatestBlockHeaderState(self):
         headBlockNum = self.getHeadBlockNum()
-        for i in range(10):
-            cmdDesc = "get block {} --header-state".format(headBlockNum)
-            latestBlockHeaderState = self.processCleosCmd(cmdDesc, cmdDesc)
-            Utils.Print("block num: {}, block state: {}, head: {}".format(headBlockNum, latestBlockHeaderState, self.getHeadBlockNum()))
-            if latestBlockHeaderState:
-                return latestBlockHeaderState
-            time.sleep(1)
-        return None
-
-    def getActivatedProtocolFeatures(self):
-        latestBlockHeaderState = self.getLatestBlockHeaderState()
-        if "activated_protocol_features" not in latestBlockHeaderState or "protocol_features" not in latestBlockHeaderState["activated_protocol_features"]:
+        return self.getBlockHeaderState(headBlockNum)
+
+    def getBlockHeaderState(self, blockNum, errorOnNone=True):
+        cmdDesc = "get block {} --header-state".format(blockNum)
+        blockHeaderState = self.processCleosCmd(cmdDesc, cmdDesc)
+        if blockHeaderState is None and errorOnNone:
+            info = self.getInfo()
+            lib = info["last_irreversible_block_num"]
+            head = info["head_block_num"]
+            assert head == lib + 1, "getLatestBlockHeaderState failed to retrieve the latest block. This should be investigated."
+            Utils.errorExit("Called getLatestBlockHeaderState, which can only retrieve blocks in reversible database, but the test setup only has one producer so there" +
+                            " is only 1 block in the reversible database. Test should be redesigned to acquire this information via another interface.")
+        return blockHeaderState
+
+    def getActivatedProtocolFeatures(self, blockHeaderState=None):
+        if blockHeaderState is None:
+            blockHeaderState = self.getLatestBlockHeaderState()
+        if "activated_protocol_features" not in blockHeaderState or "protocol_features" not in blockHeaderState["activated_protocol_features"]:
             Utils.errorExit("getLatestBlockHeaderState did not return expected output, should contain [\"activated_protocol_features\"][\"protocol_features\"]: {}".format(latestBlockHeaderState))
-        return latestBlockHeaderState["activated_protocol_features"]["protocol_features"]
+        return blockHeaderState["activated_protocol_features"]["protocol_features"]
 
     def modifyBuiltinPFSubjRestrictions(self, featureCodename, subjectiveRestriction={}):
         jsonPath = os.path.join(Utils.getNodeConfigDir(self.nodeId),

From b9d27b8a0407e9dd8afc40fb9595227314190431 Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Fri, 16 Apr 2021 11:32:40 -0500
Subject: [PATCH 6/8] Added scenarios for adding and removing participants; covers scenario #2.

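For orientation, each security-group adjustment in this test boils down to pushing add/remove actions followed by a publish, roughly like the following (the participant names and the publish counter value are placeholders; the real names come from Node.participantName and the counter from publishProcessNum in the diff below):

    # add two participants and remove one, then publish a new security group version
    producers[0].pushMessage(cluster.eosioAccount.name, "add",     '[["node1","node2"]]', "--permission eosio@active")
    producers[0].pushMessage(cluster.eosioAccount.name, "remove",  '[["node3"]]',         "--permission eosio@active")
    producers[0].pushMessage(cluster.eosioAccount.name, "publish", "[21]",                "--permission eosio@active")
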
---
 tests/privacy_simple_network.py | 107 ++++++++++++++++++++------------
 1 file changed, 69 insertions(+), 38 deletions(-)

diff --git a/tests/privacy_simple_network.py b/tests/privacy_simple_network.py
index 1295d5a7e66..72c0c0b4fab 100755
--- a/tests/privacy_simple_network.py
+++ b/tests/privacy_simple_network.py
@@ -87,33 +87,16 @@
     apiNodes = [cluster.getNode(x) for x in range(pnodes, totalNodes)]
     apiNodes.append(cluster.biosNode)
 
-    featureProdNum = 0
-    blockProducer = producers[featureProdNum].getHeadOrLib()["producer"]
-    while blockProducer not in producers[featureProdNum].getProducers():
-        featureProdNum += 1
-        assert featureProdNum < pnodes, "Checked nodes {} through {} but could not find producer: {}".format(0, featureProdNum - 1, blockProducer)
-
+    feature = "SECURITY_GROUP"
+    Utils.Print("Activating {} Feature".format(feature))
+    producers[0].activateAndVerifyFeatures({feature})
     cluster.verifyInSync()
 
-    featureDict = producers[featureProdNum].getSupportedProtocolFeatureDict()
+    featureDict = producers[0].getSupportedProtocolFeatureDict()
     Utils.Print("feature dict: {}".format(json.dumps(featureDict, indent=4, sort_keys=True)))
+    Utils.Print("{} Feature activated".format(feature))
 
     cluster.reportInfo()
 
-    Utils.Print("Activating SECURITY_GROUP Feature")
-
-    Utils.Print("act feature dict: {}".format(json.dumps(producers[featureProdNum].getActivatedProtocolFeatures(), indent=4, sort_keys=True)))
-
-    timeout = ( pnodes * 12 / 2 ) * 2   # (number of producers * blocks produced / 0.5 blocks per second) * 2 rounds
-    for tryNum in range(3):   # try 3 times to set the security group feature
-        producers[featureProdNum].waitUntilBeginningOfProdTurn(blockProducer, timeout=timeout)
-        feature = "SECURITY_GROUP"
-        producers[featureProdNum].activateFeatures([feature])
-        if producers[featureProdNum].containsFeatures([feature]):
-            break
-
-    Utils.Print("SECURITY_GROUP Feature activated")
-    cluster.reportInfo()
-
-    assert producers[featureProdNum].containsFeatures([feature]), "{} feature was not activated".format(feature)
 
     def publishContract(account, file, waitForTransBlock=False):
         Print("Publish contract")
@@ -125,6 +108,8 @@ def publishContract(account, file, waitForTransBlock=False):
     participants = [x for x in producers]
     nonParticipants = [x for x in apiNodes]
 
+    # this is passed to limit the number of add/remove table entries that are processed, but it is used here to keep from getting duplicate transactions
+    publishProcessNum = 20
     def security_group(addNodeNums=[], removeNodeNums=[]):
         def createAction(nodeNums):
             action = None
@@ -151,7 +136,9 @@ def createAction(nodeNums):
             trans = producers[0].pushMessage(cluster.eosioAccount.name, "remove", removeAction, "--permission eosio@active")
             Utils.Print("remove trans: {}".format(json.dumps(trans, indent=4, sort_keys=True)))
 
-        trans = producers[0].pushMessage(cluster.eosioAccount.name, "publish", "[0]", "--permission eosio@active")
+        global publishProcessNum
+        publishProcessNum += 1
+        trans = producers[0].pushMessage(cluster.eosioAccount.name, "publish", "[{}]".format(publishProcessNum), "--permission eosio@active")
         Utils.Print("publish action trans: {}".format(json.dumps(trans, indent=4, sort_keys=True)))
         return trans
 
@@ -181,6 +168,31 @@ def verifySecurityGroup(publishTransPair):
         verifyParticipantsTransactionFinalized(publishTransId)
         verifyNonParticipants(publishTransId)
 
+    def moveToParticipants():
+        movedNode = nonParticipants[0]
+        participants.append(movedNode)
+        del nonParticipants[0]
+        return movedNode
+
+    def moveToNonParticipants():
+        movedNode = participants[-1]
+        # popping off back of participants and need to push on the front of nonParticipants
+        nonParticipants.insert(0, movedNode)
+        del participants[-1]
+        return movedNode
+
+    def addToSg():
+        node = moveToParticipants()
+        Utils.Print("Take a non-participant and make a participant. Now there are {} participants and {} non-participants".format(len(participants), len(nonParticipants)))
+        toAddNum = cluster.getParticipantNum(node)
+        return security_group([toAddNum])
+
+    def remFromSg():
+        node = moveToNonParticipants()
+        Utils.Print("Take a participant and make a non-participant. Now there are {} participants and {} non-participants".format(len(participants), len(nonParticipants)))
+        toRemoveNum = cluster.getParticipantNum(node)
+        return security_group(removeNodeNums=[toRemoveNum])
+
     Utils.Print("Add all producers to security group")
     publishTrans = security_group([x for x in range(pnodes)])
     verifySecurityGroup(publishTrans)
@@ -190,27 +202,14 @@ def verifySecurityGroup(publishTransPair):
 
     # one by one add each nonParticipant to the security group
     while len(nonParticipants) > 0:
-        toAdd = nonParticipants[0]
-        participants.append(toAdd)
-        del nonParticipants[0]
-        Utils.Print("Take a non-participant and make a participant. Now there are {} participants and {} non-participants".format(len(participants), len(nonParticipants)))
-
-        toAddNum = cluster.getParticipantNum(toAdd)
-        publishTrans = security_group([toAddNum])
+        publishTrans = addToSg()
         verifySecurityGroup(publishTrans)
         cluster.reportInfo()
 
 
     # one by one remove each (original) nonParticipant from the security group
     while len(participants) > pnodes:
-        toRemove = participants[-1]
-        # popping off back of participants and need to push on the front of nonParticipants
-        nonParticipants.insert(0, toRemove)
-        del participants[-1]
-        Utils.Print("Take a participant and make a non-participant. Now there are {} participants and {} non-participants".format(len(participants), len(nonParticipants)))
-
-        toRemoveNum = cluster.getParticipantNum(toRemove)
-        publishTrans = security_group(removeNodeNums=[toRemoveNum])
+        publishTrans = remFromSg()
         verifySecurityGroup(publishTrans)
         cluster.reportInfo()
 
@@ -232,6 +231,38 @@ def verifySecurityGroup(publishTransPair):
 
         cluster.reportInfo()
 
+        # alternate adding/removing participants to ensure the security group doesn't change
+        initialBlockNum = None
+        blockNum = None
+        def is_done():
+            # want to ensure that we can identify the range of libs the security group was changed in
+            return blockNum - initialBlockNum > 12
+
+        done = False
+        # keep adding and removing nodes till we are done
+        while not done:
+            if blockNum:
+                participants[0].waitForNextBlock()
+
+            while not done and len(participants) > pnodes:
+                publishTrans = remFromSg()
+                Utils.Print("publishTrans: {}".format(json.dumps(publishTrans, indent=2)))
+                blockNum = Node.getTransBlockNum(publishTrans[1])
+                if initialBlockNum is None:
+                    initialBlockNum = blockNum
+                    lastBlockNum = blockNum
+                done = is_done()
+
+            while not done and len(nonParticipants) > 0:
+                publishTrans = addToSg()
+                blockNum = Node.getTransBlockNum(publishTrans[1])
+                done = is_done()
+
+        Utils.Print("First adjustment to security group was in block num: {}, verifying no changes till block num: {} is finalized".format(initialBlockNum, blockNum))
+        verifySecurityGroup(publishTrans)
+
+        cluster.reportInfo()
+
         # remove all the api nodes from the security group at once
         toRemove = []
         # index pnodes and following are moving to nonParticipants, so participants has everything before that

From e2182ce595bdd1970f8a70e55f26da9bc40796e9 Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Fri, 16 Apr 2021 11:45:25 -0500
Subject: [PATCH 7/8] Adding more non-producing nodes for the test.

---
 tests/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index e7fc99911c4..7e673e289f6 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -119,7 +119,7 @@ add_test(NAME eosio_blocklog_prune_test COMMAND tests/eosio_blocklog_prune_test.
 set_property(TEST eosio_blocklog_prune_test PROPERTY LABELS nonparallelizable_tests)
 add_test(NAME privacy_startup_network COMMAND tests/privacy_startup_network.py -p 1 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST privacy_startup_network PROPERTY LABELS nonparallelizable_tests)
-add_test(NAME privacy_simple_network COMMAND tests/privacy_simple_network.py -p 2 -n 3 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME privacy_simple_network COMMAND tests/privacy_simple_network.py -p 2 -n 4 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST privacy_simple_network PROPERTY LABELS nonparallelizable_tests)
 
 # Long running tests

From fafcfaa644460b6d705018940e59a6fca28b30a4 Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Fri, 16 Apr 2021 13:15:35 -0500
Subject: [PATCH 8/8] Making logic more straightforward.

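The condition change below is easier to follow with the counts written out. The first two lines are how apiNodes is built earlier in this test; the rest is just the arithmetic (a sketch, not part of the patch):

    apiNodes = [cluster.getNode(x) for x in range(pnodes, totalNodes)]   # the totalNodes - pnodes non-producing nodes
    apiNodes.append(cluster.biosNode)                                    # plus the bios node
    # len(apiNodes) == totalNodes - pnodes + 1, so the bulk add/remove scenario now runs whenever
    # len(apiNodes) > 1, i.e. totalNodes > pnodes; the old check required totalNodes > pnodes + 1
    assert (len(apiNodes) > 1) == (totalNodes > pnodes)
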
---
 tests/privacy_simple_network.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/privacy_simple_network.py b/tests/privacy_simple_network.py
index 72c0c0b4fab..9a41d9d8178 100755
--- a/tests/privacy_simple_network.py
+++ b/tests/privacy_simple_network.py
@@ -215,7 +215,7 @@ def remFromSg():
 
     # if we have more than 1 api node, we will add and remove all those nodes in bulk, if not it is just a repeat of the above test
-    if totalNodes > pnodes + 1:
+    if len(apiNodes) > 1:
         # add all the api nodes to security group at once
         toAdd = []
         for apiNode in nonParticipants: