From 4e6b9d4be60313beff9a5b7590002e14bff4af0a Mon Sep 17 00:00:00 2001
From: Peter Portante
Date: Mon, 6 Jun 2022 21:03:09 -0400
Subject: [PATCH] Remove contributed `pbench-bzt` and `pbench-mpt`

The first script, `pbench-bzt`, is not really a Pbench Agent benchmark
workload convenience script; it is a wrapper around the "bzt" tool, and
so it does not belong here.

The second script, `pbench-mpt`, is woefully out of date and
unmaintained.
---
 contrib/agent/bench-scripts/pbench-bzt | 229 -------------------------
 contrib/agent/bench-scripts/pbench-mpt | 224 ------------------------
 2 files changed, 453 deletions(-)
 delete mode 100755 contrib/agent/bench-scripts/pbench-bzt
 delete mode 100755 contrib/agent/bench-scripts/pbench-mpt

diff --git a/contrib/agent/bench-scripts/pbench-bzt b/contrib/agent/bench-scripts/pbench-bzt
deleted file mode 100755
index 8626e87664..0000000000
--- a/contrib/agent/bench-scripts/pbench-bzt
+++ /dev/null
@@ -1,229 +0,0 @@
-#!/usr/bin/env python2
-
-"""
-This script generates a bzt taurus json file and executes a performance test based on it.
-It is a wrapper for the bzt tool.
-
-By default, it uses the jmeter executor with the simple scenario set up [1].
-It is required to have a file containing the URL's of the websites to test.
-
-cat urls_to_test.lst
-pod1.cloudapps.example.com
-pod2.cloudapps.example.com
-pod3.cloudapps.example.comz
-
-The simplest way of running it would then be be:
-./pbench_bzt -u urls_to_test.lst
-
-For HTTP/TLS testing, use:
-./pbench_bzt -u urls_to_test.lst -p https://
-
-It outputs a post-test summary using the final stats module [2]
-This is the simplest reporter that prints out basic KPIs in the console log after test execution.
-
-pbench_bzt uses the following load profiles for the jmeter executor:
-    concurrency - number of target concurrent virtual users
-    ramp-up - ramp-up time to reach target concurrency
-    hold-for - time to hold target concurrency
-
-These are customizable using the -c, -r and -d command line arguments.
-
-By default, pbench_bzt outputs a taurus.csv and a bzt.json file under the /tmp directory with the test results
-and scenario data, respectively. This can be changed using the -f and -o arguments.
- - -[1] "http://gettaurus.org/docs/ExecutionSettings/" -[2] "http://gettaurus.org/docs/Reporting/" - -""" - -import argparse -import subprocess -import os -import sys -import platform -import json - - -class OSType: - """ - Detects Red Hat / CentOS / Fedora distributions - """ - - def __getattr__(self, attr): - if attr == "fedora": - return "fedora" - elif attr == "rhel": - return "redhat" - elif attr == "unknown": - return "unknown" - else: - raise AttributeError(attr) - - def platform_type(self): - if platform.dist()[0] == self.rhel: - return self.rhel - elif platform.dist()[0] == self.fedora: - return self.fedora - else: - return self.unknown - - def query_os(self): - if platform.system() == "Linux": - return self.platform_type() - - -class BztConfigWriter: - """ - Writes a template json config file for testing endpoints with taurus bzt - """ - - def __init__(self, args): - self.args = args - self.bzt_conf = dict() - self.bzt_conf["execution"] = [] - self.bzt_conf["reporting"] = [] - self.bzt_conf["scenarios"] = {} - - def create_scenario_file(self): - if self.args.scenario == "simple": - count = 1 - with open(self.args.url_file) as f: - for line in f: - scenario_nbr = self.args.scenario + str(count) - self.bzt_conf["execution"].append( - { - "concurrency": self.args.concurrency, - "hold-for": self.args.hold_for, - "ramp-up": self.args.ramp_up, - "scenario": scenario_nbr, - } - ) - self.bzt_conf["scenarios"][scenario_nbr] = { - "requests": [self.args.prefix + line.strip()] - } - count += 1 - - self.bzt_conf["modules"] = { - "blazemeter": {"browser-open": False, "test": self.args.test_name}, - "console": {"disable": True}, - } - self.bzt_conf["reporting"].append( - {"module": "final_stats", "dump-csv": self.args.stats_file} - ) - self.write_json_file(self.args.out_json_file) - - def write_json_file(self, f): - with open(f, mode="w") as fd: - json.dump(self.bzt_conf, fd, sort_keys=True, indent=2) - - -def which(cmd): - """ - Basic function that mimics the linux `which` cmd - :param cmd: executable to look up for - :return: - """ - - def is_executable(path): - return os.path.isfile(path) and os.access(path, os.X_OK) - - path, name = os.path.split(cmd) - if path: - if is_executable(cmd): - return cmd - else: - for path in os.environ["PATH"].split(os.pathsep): - path = path.strip('"') - cmdpath = os.path.join(path, cmd) - if is_executable(cmdpath): - return cmdpath - return None - - -def fingerprint(): - """ - Detects OS type/distro - TODO: install dependencies based on the result - :return: - """ - ost = OSType() - if "fedora" == ost.query_os(): - print(ost.query_os()) - if "redhat" == ost.query_os(): - print(ost.query_os()) - - -def parser(): - parser_obj = argparse.ArgumentParser( - description="This script generates a taurus scenario file and, based on \ - that file, executes a bzt jmeter performance test.\ - E.g.: pbench_bzt.py -u jmeter_urls/4_http_urls" - ) - - parser_obj.add_argument( - "-u", "--url_file", action="store", dest="url_file", type=str, required=True - ) - parser_obj.add_argument( - "-c", "--concurrency", action="store", dest="concurrency", type=str, default=10 - ) - parser_obj.add_argument( - "-r", "--ramp-up", action="store", dest="ramp_up", type=str, default="10s" - ) - parser_obj.add_argument( - "-d", "--hold_for", action="store", dest="hold_for", type=str, default="1m" - ) - parser_obj.add_argument( - "-s", "--scenario", action="store", dest="scenario", type=str, default="simple" - ) - parser_obj.add_argument( - "-o", - "--out_json_file", - action="store", - 
dest="out_json_file", - type=str, - default="/tmp/bzt.json", - ) - parser_obj.add_argument( - "-n", "--test_name", action="store", dest="test_name", type=str, default="OSE" - ) - parser_obj.add_argument( - "-p", "--prefix", action="store", dest="prefix", type=str, default="http://" - ) - parser_obj.add_argument( - "-f", - "--stats_file", - action="store", - dest="stats_file", - type=str, - default="/tmp/taurus.csv", - ) - parser_obj.add_argument( - "-P", "--program", action="store", dest="program", default="bzt" - ) - - return parser_obj.parse_args() - - -if __name__ == "__main__": - fingerprint() - opt_args = parser() - - bzt = BztConfigWriter(opt_args) - bzt.create_scenario_file() - - executable = which(opt_args.program) - if executable is not None: - try: - # check_call: blocking code - subprocess.check_call([executable, opt_args.out_json_file]) - except subprocess.CalledProcessError: - print( - "%s has returned in error: %s" - % (opt_args.program, subprocess.CalledProcessError.message) - ) - else: - sys.exit( - "Either %s is not installed, or the current user does not have permission to execute it.\n" - "http://gettaurus.org/docs/Installation/" % opt_args.program - ) diff --git a/contrib/agent/bench-scripts/pbench-mpt b/contrib/agent/bench-scripts/pbench-mpt deleted file mode 100755 index 4d017dfb41..0000000000 --- a/contrib/agent/bench-scripts/pbench-mpt +++ /dev/null @@ -1,224 +0,0 @@ -#!/bin/bash - -app_path=`dirname $0` - -script_path=`dirname $0` -script_name=`basename $0` -pbench_bin="`cd ${script_path}/..; /bin/pwd`" - -# source the base script -. "$pbench_bin"/base - -benchmark_rpm="msg-perf-tool" -benchmark="mpt" -ver=0.1.1 - -#Defaults -export benchmark_run_dir="" -broker_url=amqp://localhost:5672/test.performance.queue - -# TODO: find the appropriate balance here. Ideally, messaging-performance -# tests should be able to determine the sustainable performance. Too short -# runs may not be able to hit performance bottlenecks on the broker. Too long -# runs may exhaust resources, not be feasible, etc. -duration=10 -message_sizes=32,256 - -# TODO: possibly a bit too conservative ... 
-parallel_instances=1,5 - -# TODO: this should be determined automatically -throttle=0 - -install_only=n - -function mpt_usage() { - printf "The following options are available:\n" - printf "\n" - printf -- "\t-h --help\n" - printf "\t\tdisplay this help message\n" - printf -- "\t--install\n" - printf "\t\tinstall only\n" - printf -- "\t-b str --broker-url=str (default is $broker_url)\n" - printf "\t\tURL to access the broker (ie: amqp://hostname:5672/queue.name or stomp://hostname:5672/queue.name)\n" - printf -- "\t-d int --duration=int (default is $duration)\n" - printf "\t\tduration of the performance test in minutes\n" - printf -- "\t-s int[,int] --message-sizes=int[,int] (default is $message_sizes)\n" - printf "\t\tmessage sizes in bytes (default is $message_sizes)\n" - printf -- "\t-p int[,int] --parallel-instances=int[,int] (default is $parallel_instances)\n" - printf "\t\tnumber of concurrent instances of the sender and receiver (default is $parallel_instances)\n" - printf -- "\t-T int --throttle=int (default is $throttle)\n" - printf "\t\tmaximum number of mesages (per second) allowed per sender connection (default is $throttle, 0 means unlimited)\n" -} - -function mpt_process_options() { - ARGS=$(getopt -o hib:d:s:p:T: --longoptions "help,install,broker:,duration:,message-sizes:,parallel-instances:,throttle:" -n "$0" -- "$@"); - eval set -- "$ARGS"; - while true; do - case "$1" in - -i|--install) - shift - export install_only="y" - ;; - -b|--broker) - shift - export broker_url="$1" - shift - ;; - -d|--duration) - shift - export duration="$1" - shift - ;; - -s|--message-sizes) - shift - export message_sizes="$1" - shift - ;; - -p|--parallel-instances) - shift - export parallel_instances="$1" - shift - ;; - -t|--throttle) - shift - export throttle="$1" - shift - ;; - -h|--help) - shift - mpt_usage - exit 0 - ;; - --) - shift - break - ;; - esac - done - - benchmark_run_dir="${pbench_run}/${benchmark}_${config}_${date_suffix}" -} - -function mpt_check_options() { - if [ ${install_only} == "y" ] ; then - return - fi - - if [[ -z "${broker_url}" ]] ; then - echo -e "Broker is a required option (-b)\n" - - mpt_usage - exit 1 - fi - - if [[ -z "${duration}" ]] ; then - echo -e "Either the test duration or the message count should be informed (-d or -c)\n" - mpt_usage - exit 1 - fi - - if [[ -z "${parallel_instances}" ]] ; then - echo -e "Parallel instances is a required option (-p)\n" - mpt_usage - exit 1 - fi - - echo "Broker URL: ${broker_url}" -} - -function mpt_stop_test() { - echo "Stopping the test" - - killall -INT mpt-receiver - killall -INT mpt-sender - - killall -TERM mpt-receiver || echo "" - killall -TERM mpt-sender || echo "" - - exit -} - -function mpt_run_by_duration() { - local message_size=$1 - local instance_count=$2 - local log_dir=$3 - - - echo "Lauching the receiver" - export pid_receiver=`mpt-receiver -b ${broker_url} --log-level=STAT --duration=${duration} -p ${instance_count} --logdir=${log_dir} -s ${message_size} --daemon` - - echo "Lauching the sender" - export pid_sender=`mpt-sender -b ${broker_url} -t ${throttle} --log-level=STAT --duration ${duration} -p ${instance_count} --logdir=${log_dir} -s ${message_size} --daemon` - - # Sleeps for a little longer than the test duration so that it gives some time - # for the program to finish and flush data - - echo "Sleeping for ${duration}m15s" - sleep 15s "${duration}m" -} - -function mpt_run_test() { - # Explanation: we trap the exit/kill/termination of the script, so that if the - # test is aborted, we terminate 
the daeamons - trap mpt_stop_test SIGINT SIGTERM SIGQUIT - - echo "Saving mpt test results to ${benchmark_run_dir}" - - test_start_time=$(date '+%Y-%m-%d %H:%M:%S') - echo "Test start time: ${test_start_time}" - - # TODO: multiple test iterations may require coordination with the broker - # (ie: do we want to ensure the broker is cold by stopping it after every - # test iteration? ) - local count=1 - for parallel_instance in `echo $parallel_instances | sed -e s/,/" "/g`; do - for message_size in `echo $message_sizes | sed -e s/,/" "/g`; do - it_start_time=$(date '+%Y-%m-%d %H:%M:%S') - echo "Iteration (p${parallel_instance} ${message_size}KiB) start time: ${it_start_time}" - - iteration="${count}-p${parallel_instance}-${message_size}KiB" - iteration_dir=$benchmark_run_dir/$iteration - mkdir -p ${iteration_dir} - mpt_run_by_duration ${message_size} ${parallel_instance} ${iteration_dir} - - it_end_time=$(date '+%Y-%m-%d %H:%M:%S') - echo "Iteration (p${parallel_instance} ${message_size}KiB) end time: ${it_end_time}" - done - done - - test_end_time=$(date '+%Y-%m-%d %H:%M:%S') - echo "Test end time: ${test_end_time}" -} - - -# Ensure the right version of the benchmark is installed -function mpt_install() { - # TODO: since msg-perf-tool is installed via COPR, check what is the - # recommended approach - - #check_enable_copr orpiske litestomp - #check_enable_copr orpiske msg-perf-tool - - if check_install_rpm "${benchmark_rpm}" "${ver}"; then - debug_log "[${script_name}]${benchmark_rpm}-${ver} is installed" - else - error_log "[${script_name}]${benchmark_rpm}-${ver} is not installed, exiting" - exit 1 - fi - - # TODO:support clients? remote testing? -} - -mpt_process_options "$@" -mpt_check_options - -echo "Installing the packages" -mpt_install - -if [ ${install_only} == "y" ] ; then - exit 0 -fi - -echo "Running the test" -mpt_run_test
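
Note for users of the removed wrapper: `pbench-bzt` only generated a Taurus JSON
config and then invoked `bzt <config>` on it, so the same test can be driven by
calling `bzt` directly. A minimal sketch of an equivalent hand-written config
follows; the file name `bzt.json`, the single example URL, and the concrete
concurrency/ramp-up/hold-for values are illustrative only, mirroring the
defaults the removed script used:

    {
      "execution": [
        {"concurrency": 10, "ramp-up": "10s", "hold-for": "1m", "scenario": "simple1"}
      ],
      "scenarios": {
        "simple1": {"requests": ["http://pod1.cloudapps.example.com"]}
      },
      "reporting": [
        {"module": "final_stats", "dump-csv": "/tmp/taurus.csv"}
      ]
    }

Running `bzt bzt.json` should then reproduce roughly what the wrapper did for a
one-URL input file, minus the BlazeMeter module settings the script also emitted.
The removed `pbench-mpt` script similarly just drove the `mpt-sender` and
`mpt-receiver` binaries from the msg-perf-tool package; its deleted body above
records the options it passed for anyone who still needs that workflow.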