
Commit

Merge pull request #952 from AntelopeIO/gh_48
TestHarness: Remove hardcoded version checks that supported Leap v3.1 and below.
greg7mdp authored Oct 21, 2024
2 parents 85c39bc + 943a29f commit 35ee346
Showing 5 changed files with 25 additions and 69 deletions.
33 changes: 17 additions & 16 deletions tests/PerformanceHarness/performance_test_basic.py
@@ -147,9 +147,6 @@ def configureApiNodes():
if self.apiNodeCount > 0:
configureApiNodes()

- self.writeTrx = lambda trxDataFile, blockNum, trx:[ trxDataFile.write(f"{trx['id']},{trx['block_num']},{trx['block_time']},{trx['cpu_usage_us']},{trx['net_usage_words']},{trx['actions']}\n") ]
- self.createBlockData = lambda block, blockTransactionTotal, blockNetTotal, blockCpuTotal: blockData(blockId=block["payload"]["id"], blockNum=block['payload']['number'], transactions=blockTransactionTotal, net=blockNetTotal, cpu=blockCpuTotal, producer=block["payload"]["producer"], status=block["payload"]["status"], _timestamp=block["payload"]["timestamp"])
- self.updateTrxDict = lambda blockNum, transaction, trxDict: trxDict.update(dict([(transaction["id"], trxData(blockNum=transaction["block_num"], cpuUsageUs=transaction["cpu_usage_us"], netUsageUs=transaction["net_usage_words"], blockTime=transaction["block_time"]))]))
@dataclass
class PtbConfig:
targetTps: int=8000
@@ -304,29 +301,33 @@ def isOnBlockTransaction(self, transaction):
def queryBlockTrxData(self, node, blockDataPath, blockTrxDataPath, startBlockNum, endBlockNum):
for blockNum in range(startBlockNum, endBlockNum + 1):
blockCpuTotal, blockNetTotal, blockTransactionTotal = 0, 0, 0
- block = node.fetchBlock(blockNum)
+ block = node.processUrllibRequest("trace_api", "get_block", {"block_num":blockNum}, silentErrors=False, exitOnError=True)
btdf_append_write = self.fileOpenMode(blockTrxDataPath)
with open(blockTrxDataPath, btdf_append_write) as trxDataFile:
- for transaction in block['payload']['transactions']:
- if not self.isOnBlockTransaction(transaction):
- self.clusterConfig.updateTrxDict(blockNum, transaction, self.data.trxDict)
- self.clusterConfig.writeTrx(trxDataFile, blockNum, transaction)
- blockCpuTotal += transaction["cpu_usage_us"]
- blockNetTotal += transaction["net_usage_words"]
+ for trx in block['payload']['transactions']:
+ if not self.isOnBlockTransaction(trx):
+ trx_data = trxData(blockNum=trx["block_num"], cpuUsageUs=trx["cpu_usage_us"],
+ netUsageUs=trx["net_usage_words"], blockTime=trx["block_time"])
+ self.data.trxDict.update(dict([(trx["id"], trx_data)]))
+ [ trxDataFile.write(f"{trx['id']},{trx['block_num']},{trx['block_time']},{trx['cpu_usage_us']},{trx['net_usage_words']},{trx['actions']}\n") ]
+ blockCpuTotal += trx["cpu_usage_us"]
+ blockNetTotal += trx["net_usage_words"]
blockTransactionTotal += 1
- blockData = self.clusterConfig.createBlockData(block=block, blockTransactionTotal=blockTransactionTotal,
- blockNetTotal=blockNetTotal, blockCpuTotal=blockCpuTotal)
- self.data.blockList.append(blockData)
- self.data.blockDict[str(blockNum)] = blockData
+ block_data = blockData(blockId=block["payload"]["id"], blockNum=block['payload']['number'],
+ transactions=blockTransactionTotal, net=blockNetTotal, cpu=blockCpuTotal,
+ producer=block["payload"]["producer"], status=block["payload"]["status"],
+ _timestamp=block["payload"]["timestamp"])
+ self.data.blockList.append(block_data)
+ self.data.blockDict[str(blockNum)] = block_data
bdf_append_write = self.fileOpenMode(blockDataPath)
with open(blockDataPath, bdf_append_write) as blockDataFile:
blockDataFile.write(f"{blockData.blockNum},{blockData.blockId},{blockData.producer},{blockData.status},{blockData._timestamp}\n")
blockDataFile.write(f"{block_data.blockNum},{block_data.blockId},{block_data.producer},{block_data.status},{block_data._timestamp}\n")

def waitForEmptyBlocks(self, node, numEmptyToWaitOn):
emptyBlocks = 0
while emptyBlocks < numEmptyToWaitOn:
headBlock = node.getHeadBlockNum()
- block = node.fetchHeadBlock(node, headBlock)
+ block = node.processUrllibRequest("chain", "get_block_info", {"block_num":headBlock}, silentErrors=False, exitOnError=True)
node.waitForHeadToAdvance()
if block['payload']['transaction_mroot'] == "0000000000000000000000000000000000000000000000000000000000000000":
emptyBlocks += 1
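With the per-version lambdas removed, queryBlockTrxData has a single code path: one trace_api get_block query, then per-transaction CPU/NET totals. A condensed sketch of that path, minus the file writes (the helper name and arguments are hypothetical; node is a TestHarness node and is_onblock stands in for isOnBlockTransaction):

def summarize_block(node, block_num, is_onblock):
    # One trace_api query per block; payload fields as in the diff above.
    block = node.processUrllibRequest("trace_api", "get_block", {"block_num": block_num},
                                      silentErrors=False, exitOnError=True)
    cpu = net = count = 0
    for trx in block['payload']['transactions']:
        if is_onblock(trx):
            continue  # skip the implicit onblock transaction
        cpu += trx["cpu_usage_us"]
        net += trx["net_usage_words"]
        count += 1
    return count, net, cpu, block["payload"]["producer"], block["payload"]["timestamp"]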
9 changes: 0 additions & 9 deletions tests/TestHarness/Cluster.py
@@ -465,18 +465,9 @@ def connectGroup(group, producerNodes, bridgeNodes) :
argsArr.append("--shape")
argsArr.append(topo)

- if type(specificExtraNodeosArgs) is dict:
- for args in specificExtraNodeosArgs.values():
- if "--plugin eosio::history_api_plugin" in args:
- argsArr.append("--is-nodeos-v2")
- break
if signatureProviderForNonProducer:
argsArr.append("--signature-provider")

- # Handle common case of specifying no block offset for older versions
- if "v2" in self.nodeosVers or "v3" in self.nodeosVers or "v4" in self.nodeosVers:
- argsArr = list(map(lambda st: str.replace(st, "--produce-block-offset-ms 0", "--last-block-time-offset-us 0 --last-block-cpu-effort-percent 100"), argsArr))

Cluster.__LauncherCmdArr = argsArr.copy()

launcher = cluster_generator(argsArr)
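The only version-dependent logic removed here is the flag translation for pre-5.0 nodeos. For reference, the dropped mapping as a standalone sketch (hypothetical helper name, not part of the harness):

def translate_block_offset_args(argsArr, nodeosVers):
    # Leap v2/v3/v4 did not understand --produce-block-offset-ms, so the launcher
    # used to rewrite it into the older flag pair; v5 and later take it as-is.
    if any(v in nodeosVers for v in ("v2", "v3", "v4")):
        return [arg.replace("--produce-block-offset-ms 0",
                            "--last-block-time-offset-us 0 --last-block-cpu-effort-percent 100")
                for arg in argsArr]
    return argsArr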
29 changes: 3 additions & 26 deletions tests/TestHarness/Node.py
@@ -79,29 +79,6 @@ def __init__(self, host, port, nodeId: int, data_dir: Path, config_dir: Path, cm
# if multiple producers configured for a Node, this is the first one
self.producerName=None
self.keys: List[KeyStrings] = field(default_factory=list)
- self.configureVersion()

- def configureVersion(self):
- if 'v2' in self.nodeosVers:
- self.fetchTransactionCommand = lambda: "get transaction"
- self.fetchTransactionFromTrace = lambda trx: trx['trx']['id']
- self.fetchBlock = lambda blockNum: self.processUrllibRequest("chain", "get_block", {"block_num_or_id":blockNum}, silentErrors=False, exitOnError=True)
- self.fetchKeyCommand = lambda: "[trx][trx][ref_block_num]"
- self.fetchRefBlock = lambda trans: trans["trx"]["trx"]["ref_block_num"]
- self.fetchHeadBlock = lambda node, headBlock: node.processUrllibRequest("chain", "get_block", {"block_num_or_id":headBlock}, silentErrors=False, exitOnError=True)
- self.cleosLimit = ""

- else:
- self.fetchTransactionCommand = lambda: "get transaction_trace"
- self.fetchTransactionFromTrace = lambda trx: trx['id']
- self.fetchBlock = lambda blockNum: self.processUrllibRequest("trace_api", "get_block", {"block_num":blockNum}, silentErrors=False, exitOnError=True)
- self.fetchKeyCommand = lambda: "[transaction][transaction_header][ref_block_num]"
- self.fetchRefBlock = lambda trans: trans["block_num"]
- self.fetchHeadBlock = lambda node, headBlock: node.processUrllibRequest("chain", "get_block_info", {"block_num":headBlock}, silentErrors=False, exitOnError=True)
- if 'v3.1' in self.nodeosVers:
- self.cleosLimit = ""
- else:
- self.cleosLimit = "--time-limit 999"

def __str__(self):
return "Host: %s, Port:%d, NodeNum:%s, Pid:%s" % (self.host, self.port, self.nodeId, self.pid)
@@ -167,11 +144,11 @@ def waitForTransactionInBlock(self, transId, timeout=None, exitOnError=True):
return ret

def checkBlockForTransactions(self, transIds, blockNum):
- block = self.fetchBlock(blockNum)
+ block = self.processUrllibRequest("trace_api", "get_block", {"block_num":blockNum}, silentErrors=False, exitOnError=True)
if block['payload']['transactions']:
for trx in block['payload']['transactions']:
- if self.fetchTransactionFromTrace(trx) in transIds:
- transIds.pop(self.fetchTransactionFromTrace(trx))
+ if trx['id'] in transIds:
+ transIds.pop(trx['id'])
return transIds

def waitForTransactionsInBlockRange(self, transIds, startBlock, endBlock):
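The removed configureVersion() existed mainly to bridge the different transaction shapes returned by the v2-era chain get_block and the current trace_api get_block. A minimal illustration of that difference, with field paths taken from the removed lambdas (the helper name is hypothetical):

def transaction_id(trx, nodeos_is_v2):
    # v2 get_block nested the id under trx['trx']; the trace_api shape exposes it
    # at the top level, which is all checkBlockForTransactions needs to handle now.
    return trx['trx']['id'] if nodeos_is_v2 else trx['id']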
15 changes: 1 addition & 14 deletions tests/TestHarness/launcher.py
@@ -207,7 +207,6 @@ def comma_separated(string):
cfg.add_argument('--max-transaction-cpu-usage', type=int, help='the "max-transaction-cpu-usage" value to use in the genesis.json file', default=None)
cfg.add_argument('--logging-level', type=fc_log_level, help='Provide the "level" value to use in the logging.json file')
cfg.add_argument('--logging-level-map', type=json.loads, help='JSON string of a logging level dictionary to use in the logging.json file for specific nodes, matching based on node number. Ex: {"bios":"off","00":"info"}')
- cfg.add_argument('--is-nodeos-v2', action='store_true', help='Toggles old nodeos compatibility', default=False)
cfg.add_argument('--signature-provider', action='store_true', help='add signature provider (BLS key pair) for non-producers', default=False)
r = parser.parse_args(args)
if r.launch != 'none' and r.topology_filename:
@@ -581,19 +580,7 @@ def construct_command_line(self, instance: nodeDefinition):

# Always enable a history query plugin on the bios node
if is_bios:
- if self.args.is_nodeos_v2:
- a(a(eosdcmd, '--plugin'), 'eosio::history_api_plugin')
- a(a(eosdcmd, '--filter-on'), '"*"')
- else:
- a(a(eosdcmd, '--plugin'), 'eosio::trace_api_plugin')

- if 'eosio::history_api_plugin' in eosdcmd and 'eosio::trace_api_plugin' in eosdcmd:
- eosdcmd.remove('--trace-no-abis')
- eosdcmd.remove('--trace-rpc-abi')
- i = eosdcmd.index('eosio::trace_api_plugin')
- eosdcmd.pop(i)
- i -= 1
- eosdcmd.pop(i)
+ a(a(eosdcmd, '--plugin'), 'eosio::trace_api_plugin')

return eosdcmd

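With the history-API branch gone, the bios node always gets the trace API plugin. Assuming a is the launcher's usual append-and-return helper (a minimal stand-in is shown below, not the actual implementation), the surviving line builds the following arguments:

def a(lst, item):          # stand-in for the launcher's append helper
    lst.append(item)
    return lst

eosdcmd = ['nodeos']
a(a(eosdcmd, '--plugin'), 'eosio::trace_api_plugin')
print(eosdcmd)             # ['nodeos', '--plugin', 'eosio::trace_api_plugin']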
8 changes: 4 additions & 4 deletions tests/TestHarness/queries.py
@@ -242,7 +242,7 @@ def getTransaction(self, transId, silentErrors=False, exitOnError=False, delayed
assert(isinstance(transId, str))
exitOnErrorForDelayed=not delayedRetry and exitOnError
timeout=3
- cmdDesc=self.fetchTransactionCommand()
+ cmdDesc="get transaction_trace"
cmd="%s %s" % (cmdDesc, transId)
msg="(transaction id=%s)" % (transId);
for i in range(0,(int(60/timeout) - 1)):
@@ -295,8 +295,8 @@ def getBlockNumByTransId(self, transId, exitOnError=True, delayedRetry=True, blo
refBlockNum=None
key=""
try:
- key = self.fetchKeyCommand()
- refBlockNum = self.fetchRefBlock(trans)
+ key = "[transaction][transaction_header][ref_block_num]"
+ refBlockNum = trans["block_num"]
refBlockNum=int(refBlockNum)
except (TypeError, ValueError, KeyError) as _:
Utils.Print("transaction%s not found. Transaction: %s" % (key, trans))
@@ -347,7 +347,7 @@ def getEosAccount(self, name, exitOnError=False, returnType=ReturnType.json):

def getTable(self, contract, scope, table, exitOnError=False):
cmdDesc = "get table"
cmd=f"{cmdDesc} {self.cleosLimit} {contract} {scope} {table}"
cmd=f"{cmdDesc} --time-limit 999 {contract} {scope} {table}"
msg=f"contract={contract}, scope={scope}, table={table}"
return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)

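These queries now build their cleos commands and read trace fields the same way for every supported nodeos. A small runnable sketch of the field access and command strings involved (the trace payload and table arguments below are made-up placeholders):

trans = {"id": "0123abcd", "block_num": 4242}      # hypothetical trace_api-shaped trace
refBlockNum = int(trans["block_num"])              # was trans["trx"]["trx"]["ref_block_num"] on v2
cmd_trace = f"get transaction_trace {trans['id']}"
cmd_table = "get table --time-limit 999 eosio.token testscope accounts"
print(refBlockNum, cmd_trace, cmd_table, sep="\n")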
