From eb15f2bf384e85be0c605af9ab9eac9ae96efd55 Mon Sep 17 00:00:00 2001
From: Lars Holmberg
Date: Tue, 18 Aug 2020 09:18:53 +0200
Subject: [PATCH] Renamed hatch rate to spawn rate. Fixes #1405

---
 docs/changelog.rst | 9 ++
 docs/configuration.rst | 2 +-
 docs/generating-custom-load-shape.rst | 8 +-
 docs/running-locust-distributed.rst | 6 +-
 docs/running-locust-without-web-ui.rst | 4 +-
 docs/use-as-lib.rst | 2 +-
 examples/custom_shape/stages.py | 18 +--
 examples/custom_shape/step_load.py | 6 +-
 examples/semaphore_wait.py | 4 +-
 examples/use_as_lib.py | 2 +-
 locust/argument_parser.py | 14 +-
 locust/event.py | 4 +-
 locust/main.py | 21 +--
 locust/runners.py | 170 ++++++++++++-------------
 locust/shape.py | 2 +-
 locust/static/locust.js | 4 +-
 locust/static/style.css | 16 +--
 locust/templates/index.html | 8 +-
 locust/test/test_main.py | 12 +-
 locust/test/test_parser.py | 2 +-
 locust/test/test_runners.py | 94 +++++++-------
 locust/test/test_web.py | 12 +-
 locust/user/users.py | 4 +-
 locust/web.py | 8 +-
 24 files changed, 226 insertions(+), 206 deletions(-)

diff --git a/docs/changelog.rst b/docs/changelog.rst
index 10f9e841f8..7815d978d8 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -4,6 +4,15 @@ Changelog Highlights
 
 For full details of the Locust changelog, please see https://github.com/locustio/locust/blob/master/CHANGELOG.md
 
+1.2
+===
+
+* Rename hatch rate to spawn rate (this may be a breaking change in some cases!)
+* Ability to generate any custom load shape with LoadTestShape class
+* Allow ramping down of users
+* Ability to use/save custom percentiles
+* Bug fixes (excessive precision of metrics in locust csv stats, negative response time when system clock has changed, issue with non-string failure messages, some typos etc.)
+
 1.1.1
 =====
diff --git a/docs/configuration.rst b/docs/configuration.rst
index b1c909714f..cf657a3298 100644
--- a/docs/configuration.rst
+++ b/docs/configuration.rst
@@ -54,7 +54,7 @@ Example:
     expect-workers = 5
     host = http://target-system
     users = 100
-    hatch-rate = 10
+    spawn-rate = 10
     run-time = 10m
diff --git a/docs/generating-custom-load-shape.rst b/docs/generating-custom-load-shape.rst
index 8bfdbd2f86..b53134e567 100644
--- a/docs/generating-custom-load-shape.rst
+++ b/docs/generating-custom-load-shape.rst
@@ -4,12 +4,12 @@
 Generating a custom load shape using a `LoadTestShape` class
 =================================
 
-Sometimes a completely custom shaped load test is required that cannot be achieved by simply setting or changing the user count and hatch rate. For example, generating a spike during a test or ramping up and down at custom times. In these cases using the `LoadTestShape` class can give complete control over the user count and hatch rate at all times.
+Sometimes a completely custom shaped load test is required that cannot be achieved by simply setting or changing the user count and spawn rate. For example, generating a spike during a test or ramping up and down at custom times. In these cases using the `LoadTestShape` class can give complete control over the user count and spawn rate at all times.
 
 How does a `LoadTestShape` class work?
 ---------------------------------------------
 
-Define your class inheriting the `LoadTestShape` class in your locust file. If this type of class is found then it will be automatically used by Locust. Add a `tick()` method to return either a tuple containing the user count and hatch rate or `None` to stop the load test. Locust will call the `tick()` method every second and update the load test with new settings or stop the test.
+Define your class inheriting the `LoadTestShape` class in your locust file. If this type of class is found then it will be automatically used by Locust. Add a `tick()` method to return either a tuple containing the user count and spawn rate or `None` to stop the load test. Locust will call the `tick()` method every second and update the load test with new settings or stop the test.
 
 Examples
 ---------------------------------------------
@@ -25,14 +25,14 @@ Here is a simple example that will increase user count in blocks of 100 then stop
 ```python
 class MyCustomShape(LoadTestShape):
     time_limit = 600
-    hatch_rate = 20
+    spawn_rate = 20
 
     def tick(self):
         run_time = self.get_run_time()
 
         if run_time < self.time_limit:
             user_count = round(run_time, -2)
-            return (user_count, hatch_rate)
+            return (user_count, self.spawn_rate)
 
         return None
 ```
diff --git a/docs/running-locust-distributed.rst b/docs/running-locust-distributed.rst
index 3d2c7691a9..6a3514a3eb 100644
--- a/docs/running-locust-distributed.rst
+++ b/docs/running-locust-distributed.rst
@@ -26,9 +26,9 @@ processor core** on the worker machines.
 
     Otherwise - due to the current implementation -
     you might end up with a distribution of the User classes that doesn't correspond to the
-    User classes' ``weight`` attribute. And if the hatch rate is lower than the number of worker
-    nodes, the hatching would occur in "bursts" where all worker node would hatch a single user and
-    then sleep for multiple seconds, hatch another user, sleep and repeat.
+    User classes' ``weight`` attribute. And if the spawn rate is lower than the number of worker
+    nodes, the spawning would occur in "bursts" where all worker nodes would spawn a single user and
+    then sleep for multiple seconds, spawn another user, sleep and repeat.
 
 
 Example
diff --git a/docs/running-locust-without-web-ui.rst b/docs/running-locust-without-web-ui.rst
index 8d94648550..d621af386a 100644
--- a/docs/running-locust-without-web-ui.rst
+++ b/docs/running-locust-without-web-ui.rst
@@ -11,8 +11,8 @@ like a CI server - by using the ``--headless`` flag together with ``-u`` and ``-r``:
 
     $ locust -f locust_files/my_locust_file.py --headless -u 1000 -r 100
 
-``-u`` specifies the number of Users to spawn, and ``-r`` specifies the hatch rate
-(number of users to spawn per second).
+``-u`` specifies the number of Users to spawn, and ``-r`` specifies the spawn rate
+(number of users to start per second).
 
 
 Setting a time limit for the test
diff --git a/docs/use-as-lib.rst b/docs/use-as-lib.rst
index 6a644960f7..5feb574ed7 100644
--- a/docs/use-as-lib.rst
+++ b/docs/use-as-lib.rst
@@ -23,7 +23,7 @@ The :py:class:`Environment <locust.env.Environment>` instance's
 .. code-block:: python
 
     env.create_local_runner()
-    env.runner.start(5000, hatch_rate=20)
+    env.runner.start(5000, spawn_rate=20)
     env.runner.greenlet.join()
 
 We could also use the :py:class:`Environment <locust.env.Environment>` instance's
diff --git a/examples/custom_shape/stages.py b/examples/custom_shape/stages.py
index 83346cfe10..24b84e106f 100644
--- a/examples/custom_shape/stages.py
+++ b/examples/custom_shape/stages.py
@@ -15,7 +15,7 @@ class WebsiteUser(HttpUser):
 
 class StagesShape(LoadTestShape):
     """
-    A simply load test shape class that has different user and hatch_rate at
+    A simple load test shape class that has different user counts and spawn_rate at
     different stages.
Keyword arguments: @@ -23,19 +23,19 @@ class StagesShape(LoadTestShape): stages -- A list of dicts, each representing a stage with the following keys: duration -- When this many seconds pass the test is advanced to the next stage users -- Total user count - hatch_rate -- Hatch rate + spawn_rate -- Number of users to start/stop per second stop -- A boolean that can stop that test at a specific stage stop_at_end -- Can be set to stop once all stages have run. """ stages = [ - {'duration': 60, 'users': 10, 'hatch_rate': 10}, - {'duration': 100, 'users': 50, 'hatch_rate': 10}, - {'duration': 180, 'users': 100, 'hatch_rate': 10}, - {'duration': 220, 'users': 30, 'hatch_rate': 10}, - {'duration': 230, 'users': 10, 'hatch_rate': 10}, - {'duration': 240, 'users': 1, 'hatch_rate': 1}, + {'duration': 60, 'users': 10, 'spawn_rate': 10}, + {'duration': 100, 'users': 50, 'spawn_rate': 10}, + {'duration': 180, 'users': 100, 'spawn_rate': 10}, + {'duration': 220, 'users': 30, 'spawn_rate': 10}, + {'duration': 230, 'users': 10, 'spawn_rate': 10}, + {'duration': 240, 'users': 1, 'spawn_rate': 1}, ] def tick(self): @@ -43,7 +43,7 @@ def tick(self): for stage in self.stages: if run_time < stage["duration"]: - tick_data = (stage["users"], stage["hatch_rate"]) + tick_data = (stage["users"], stage["spawn_rate"]) return tick_data return None diff --git a/examples/custom_shape/step_load.py b/examples/custom_shape/step_load.py index a8658586f1..b8e76b7c46 100644 --- a/examples/custom_shape/step_load.py +++ b/examples/custom_shape/step_load.py @@ -23,14 +23,14 @@ class StepLoadShape(LoadTestShape): step_time -- Time between steps step_load -- User increase amount at each step - hatch_rate -- Hatch rate to use at every step + spawn_rate -- Users to stop/start per second at every step time_limit -- Time limit in seconds """ step_time = 30 step_load = 10 - hatch_rate = 10 + spawn_rate = 10 time_limit = 600 def tick(self): @@ -40,4 +40,4 @@ def tick(self): return None current_step = math.floor(run_time / self.step_time) + 1 - return (current_step * self.step_load, self.hatch_rate) + return (current_step * self.step_load, self.spawn_rate) diff --git a/examples/semaphore_wait.py b/examples/semaphore_wait.py index 5b061be132..bce3ffe1c8 100644 --- a/examples/semaphore_wait.py +++ b/examples/semaphore_wait.py @@ -7,8 +7,8 @@ @events.init.add_listener def _(environment, **kw): - @environment.events.hatch_complete.add_listener - def on_hatch_complete(**kw): + @environment.events.spawning_complete.add_listener + def on_spawning_complete(**kw): all_users_spawned.release() class UserTasks(TaskSet): diff --git a/examples/use_as_lib.py b/examples/use_as_lib.py index fc1e34101d..687b88303e 100644 --- a/examples/use_as_lib.py +++ b/examples/use_as_lib.py @@ -30,7 +30,7 @@ def task_404(self): gevent.spawn(stats_printer(env.stats)) # start the test -env.runner.start(1, hatch_rate=10) +env.runner.start(1, spawn_rate=10) # in 60 seconds stop the runner gevent.spawn_later(60, lambda: env.runner.quit()) diff --git a/locust/argument_parser.py b/locust/argument_parser.py index ffeda8a290..d6b0546456 100644 --- a/locust/argument_parser.py +++ b/locust/argument_parser.py @@ -134,14 +134,20 @@ def setup_parser_arguments(parser): '-u', '--users', type=int, dest='num_users', - help="Number of concurrent Locust users. Only used together with --headless", + help="Number of concurrent Locust users. 
Primarily used together with --headless", env_var="LOCUST_USERS", ) parser.add_argument( - '-r', '--hatch-rate', + '-r', '--spawn-rate', type=float, - help="The rate per second in which users are spawned. Only used together with --headless", + help="The rate per second in which users are spawned. Primarily used together with --headless", + env_var="LOCUST_SPAWN_RATE", + ) + parser.add_argument( + '--hatch-rate', env_var="LOCUST_HATCH_RATE", + action='store_true', + help=configargparse.SUPPRESS, ) parser.add_argument( '-t', '--run-time', @@ -315,7 +321,7 @@ def setup_parser_arguments(parser): stats_group.add_argument( '--reset-stats', action='store_true', - help="Reset statistics once hatching has been completed. Should be set on both master and workers when running in distributed mode", + help="Reset statistics once spawning has been completed. Should be set on both master and workers when running in distributed mode", env_var="LOCUST_RESET_STATS", ) diff --git a/locust/event.py b/locust/event.py index 25d6e96631..ac21eeaa03 100644 --- a/locust/event.py +++ b/locust/event.py @@ -112,13 +112,13 @@ class Events: :param data: Data dict with the data from the worker node """ - hatch_complete = EventHook + spawning_complete = EventHook """ Fired when all simulated users has been spawned. Event arguments: - :param user_count: Number of users that was hatched + :param user_count: Number of users that were spawned """ quitting = EventHook diff --git a/locust/main.py b/locust/main.py index 047887e530..ada67c3e42 100644 --- a/locust/main.py +++ b/locust/main.py @@ -137,9 +137,14 @@ def main(): options = parse_options() if options.slave or options.expect_slaves: - sys.stderr.write("[DEPRECATED] Usage of slave has been deprecated, use --worker or --expect-workers\n") + sys.stderr.write("The --slave/--expect-slaves parameters have been renamed --worker/--expect-workers\n") sys.exit(1) - + + if options.hatch_rate: + sys.stderr.write("The --hatch-rate parameter has been renamed --spawn-rate\n") + sys.exit(1) + + # setup logging if not options.skip_log_setup: if options.loglevel.upper() in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]: @@ -186,8 +191,8 @@ def main(): # create locust Environment environment = create_environment(user_classes, options, events=locust.events, shape_class=shape_class) - if shape_class and (options.num_users or options.hatch_rate or options.step_load): - logger.error("The specified locustfile contains a shape class but a conflicting argument was specified: users, hatch-rate or step-load") + if shape_class and (options.num_users or options.spawn_rate or options.step_load): + logger.error("The specified locustfile contains a shape class but a conflicting argument was specified: users, spawn-rate or step-load") sys.exit(1) if options.show_task_ratio: @@ -301,16 +306,16 @@ def timelimit_stop(): # apply headless mode defaults if options.num_users is None: options.num_users = 1 - if options.hatch_rate is None: - options.hatch_rate = 1 + if options.spawn_rate is None: + options.spawn_rate = 1 if options.step_users is None: options.step_users = 1 # start the test if options.step_time: - runner.start_stepload(options.num_users, options.hatch_rate, options.step_users, options.step_time) + runner.start_stepload(options.num_users, options.spawn_rate, options.step_users, options.step_time) else: - runner.start(options.num_users, options.hatch_rate) + runner.start(options.num_users, options.spawn_rate) if options.run_time: spawn_run_time_limit_greenlet() diff --git a/locust/runners.py 
b/locust/runners.py index fa9067029d..5a4ccdff70 100644 --- a/locust/runners.py +++ b/locust/runners.py @@ -21,7 +21,7 @@ logger = logging.getLogger(__name__) -STATE_INIT, STATE_HATCHING, STATE_RUNNING, STATE_CLEANUP, STATE_STOPPING, STATE_STOPPED, STATE_MISSING = ["ready", "hatching", "running", "cleanup", "stopping", "stopped", "missing"] +STATE_INIT, STATE_SPAWNING, STATE_RUNNING, STATE_CLEANUP, STATE_STOPPING, STATE_STOPPED, STATE_MISSING = ["ready", "spawning", "running", "cleanup", "stopping", "stopped", "missing"] WORKER_REPORT_INTERVAL = 3.0 CPU_MONITOR_INTERVAL = 5.0 HEARTBEAT_INTERVAL = 1 @@ -47,7 +47,7 @@ def __init__(self, environment): self.user_greenlets = Group() self.greenlet = Group() self.state = STATE_INIT - self.hatching_greenlet = None + self.spawning_greenlet = None self.stepload_greenlet = None self.shape_greenlet = None self.shape_last_state = None @@ -69,13 +69,13 @@ def on_request_failure(request_type, name, response_time, response_length, excep self.environment.events.request_failure.add_listener(on_request_failure) self.connection_broken = False - # register listener that resets stats when hatching is complete - def on_hatch_complete(user_count): + # register listener that resets stats when spawning is complete + def on_spawning_complete(user_count): self.state = STATE_RUNNING if environment.reset_stats: logger.info("Resetting stats\n") self.stats.reset_all() - self.environment.events.hatch_complete.add_listener(on_hatch_complete) + self.environment.events.spawning_complete.add_listener(on_spawning_complete) def __del__(self): # don't leave any stray greenlets if runner is removed @@ -141,25 +141,25 @@ def weight_users(self, amount): return bucket - def spawn_users(self, spawn_count, hatch_rate, wait=False): + def spawn_users(self, spawn_count, spawn_rate, wait=False): bucket = self.weight_users(spawn_count) spawn_count = len(bucket) if self.state == STATE_INIT or self.state == STATE_STOPPED: - self.state = STATE_HATCHING + self.state = STATE_SPAWNING existing_count = len(self.user_greenlets) - logger.info("Hatching and swarming %i users at the rate %g users/s (%i users already running)..." % (spawn_count, hatch_rate, existing_count)) + logger.info("Spawning %i users at the rate %g users/s (%i users already running)..." 
% (spawn_count, spawn_rate, existing_count)) occurrence_count = dict([(l.__name__, 0) for l in self.user_classes]) - def hatch(): - sleep_time = 1.0 / hatch_rate + def spawn(): + sleep_time = 1.0 / spawn_rate while True: if not bucket: - logger.info("All users hatched: %s (%i already running)" % ( + logger.info("All users spawned: %s (%i already running)" % ( ", ".join(["%s: %d" % (name, count) for name, count in occurrence_count.items()]), existing_count, )) - self.environment.events.hatch_complete.fire(user_count=len(self.user_greenlets)) + self.environment.events.spawning_complete.fire(user_count=len(self.user_greenlets)) return user_class = bucket.pop(random.randint(0, len(bucket)-1)) @@ -167,11 +167,11 @@ def hatch(): new_user = user_class(self.environment) new_user.start(self.user_greenlets) if len(self.user_greenlets) % 10 == 0: - logger.debug("%i users hatched" % len(self.user_greenlets)) + logger.debug("%i users spawned" % len(self.user_greenlets)) if bucket: gevent.sleep(sleep_time) - hatch() + spawn() if wait: self.user_greenlets.join() logger.info("All users stopped\n") @@ -237,17 +237,17 @@ def monitor_cpu(self): self.cpu_warning_emitted = True gevent.sleep(CPU_MONITOR_INTERVAL) - def start(self, user_count, hatch_rate, wait=False): + def start(self, user_count, spawn_rate, wait=False): """ Start running a load test :param user_count: Number of users to start - :param hatch_rate: Number of users to spawn per second + :param spawn_rate: Number of users to spawn per second :param wait: If True calls to this method will block until all users are spawned. If False (the default), a greenlet that spawns the users will be started and the call to this method will return immediately. """ - if self.state != STATE_RUNNING and self.state != STATE_HATCHING: + if self.state != STATE_RUNNING and self.state != STATE_SPAWNING: self.stats.clear_all() self.exceptions = {} self.cpu_warning_emitted = False @@ -255,23 +255,23 @@ def start(self, user_count, hatch_rate, wait=False): self.target_user_count = user_count if self.state != STATE_INIT and self.state != STATE_STOPPED: - logger.debug("Updating running test with %d users, %.2f hatch rate and wait=%r" % (user_count, hatch_rate, wait)) - self.state = STATE_HATCHING + logger.debug("Updating running test with %d users, %.2f spawn rate and wait=%r" % (user_count, spawn_rate, wait)) + self.state = STATE_SPAWNING if self.user_count > user_count: # Stop some users stop_count = self.user_count - user_count - self.stop_users(stop_count, hatch_rate) + self.stop_users(stop_count, spawn_rate) elif self.user_count < user_count: # Spawn some users spawn_count = user_count - self.user_count - self.spawn_users(spawn_count=spawn_count, hatch_rate=hatch_rate) + self.spawn_users(spawn_count=spawn_count, spawn_rate=spawn_rate) else: - self.environment.events.hatch_complete.fire(user_count=self.user_count) + self.environment.events.spawning_complete.fire(user_count=self.user_count) else: - self.hatch_rate = hatch_rate - self.spawn_users(user_count, hatch_rate=hatch_rate, wait=wait) + self.spawn_rate = spawn_rate + self.spawn_users(user_count, spawn_rate=spawn_rate, wait=wait) - def start_stepload(self, user_count, hatch_rate, step_user_count, step_duration): + def start_stepload(self, user_count, spawn_rate, step_user_count, step_duration): if user_count < step_user_count: logger.error("Invalid parameters: total user count of %d is smaller than step user count of %d" % (user_count, step_user_count)) return @@ -280,20 +280,20 @@ def start_stepload(self, user_count, 
hatch_rate, step_user_count, step_duration) if self.stepload_greenlet: logger.info("There is an ongoing swarming in Step Load mode, will stop it now.") self.stepload_greenlet.kill() - logger.info("Start a new swarming in Step Load mode: total user count of %d, hatch rate of %d, step user count of %d, step duration of %d " % (user_count, hatch_rate, step_user_count, step_duration)) + logger.info("Start a new swarming in Step Load mode: total user count of %d, spawn rate of %d, step user count of %d, step duration of %d " % (user_count, spawn_rate, step_user_count, step_duration)) self.state = STATE_INIT - self.stepload_greenlet = self.greenlet.spawn(self.stepload_worker, hatch_rate, step_user_count, step_duration) + self.stepload_greenlet = self.greenlet.spawn(self.stepload_worker, spawn_rate, step_user_count, step_duration) self.stepload_greenlet.link_exception(greenlet_exception_handler) - def stepload_worker(self, hatch_rate, step_users_growth, step_duration): + def stepload_worker(self, spawn_rate, step_users_growth, step_duration): current_num_users = 0 - while self.state == STATE_INIT or self.state == STATE_HATCHING or self.state == STATE_RUNNING: + while self.state == STATE_INIT or self.state == STATE_SPAWNING or self.state == STATE_RUNNING: current_num_users += step_users_growth if current_num_users > int(self.total_users): logger.info("Step Load is finished") break - self.start(current_num_users, hatch_rate) - logger.info("Step loading: start hatch job of %d user" % (current_num_users)) + self.start(current_num_users, spawn_rate) + logger.info("Step loading: start spawn job of %d user" % (current_num_users)) gevent.sleep(step_duration) def start_shape(self): @@ -301,14 +301,14 @@ def start_shape(self): logger.info("There is an ongoing shape test running. Editing is disabled") return - logger.info("Shape test starting. User count and hatch rate are ignored for this type of load test") + logger.info("Shape test starting. 
User count and spawn rate are ignored for this type of load test") self.state = STATE_INIT self.shape_greenlet = self.greenlet.spawn(self.shape_worker) self.shape_greenlet.link_exception(greenlet_exception_handler) def shape_worker(self): logger.info("Shape worker starting") - while self.state == STATE_INIT or self.state == STATE_HATCHING or self.state == STATE_RUNNING: + while self.state == STATE_INIT or self.state == STATE_SPAWNING or self.state == STATE_RUNNING: new_state = self.environment.shape_class.tick() if new_state is None: logger.info("Shape test stopping") @@ -316,9 +316,9 @@ def shape_worker(self): elif self.shape_last_state == new_state: gevent.sleep(1) else: - user_count, hatch_rate = new_state - logger.info("Shape test updating to %d users at %.2f hatch rate" % (user_count, hatch_rate)) - self.start(user_count=user_count, hatch_rate=hatch_rate) + user_count, spawn_rate = new_state + logger.info("Shape test updating to %d users at %.2f spawn rate" % (user_count, spawn_rate)) + self.start(user_count=user_count, spawn_rate=spawn_rate) self.shape_last_state = new_state def stop(self): @@ -326,9 +326,9 @@ def stop(self): Stop a running load test by stopping all running users """ self.state = STATE_CLEANUP - # if we are currently hatching users we need to kill the hatching greenlet first - if self.hatching_greenlet and not self.hatching_greenlet.ready(): - self.hatching_greenlet.kill(block=True) + # if we are currently spawning users we need to kill the spawning greenlet first + if self.spawning_greenlet and not self.spawning_greenlet.ready(): + self.spawning_greenlet.kill(block=True) self.stop_users(self.user_count) self.state = STATE_STOPPED self.cpu_log_warning() @@ -364,20 +364,20 @@ def on_user_error(user_instance, exception, tb): self.log_exception("local", str(exception), formatted_tb) self.environment.events.user_error.add_listener(on_user_error) - def start(self, user_count, hatch_rate, wait=False): + def start(self, user_count, spawn_rate, wait=False): self.target_user_count = user_count - if hatch_rate > 100: - logger.warning("Your selected hatch rate is very high (>100), and this is known to sometimes cause issues. Do you really need to ramp up that fast?") + if spawn_rate > 100: + logger.warning("Your selected spawn rate is very high (>100), and this is known to sometimes cause issues. 
Do you really need to ramp up that fast?") - if self.state != STATE_RUNNING and self.state != STATE_HATCHING: + if self.state != STATE_RUNNING and self.state != STATE_SPAWNING: # if we're not already running we'll fire the test_start event self.environment.events.test_start.fire(environment=self.environment) - if self.hatching_greenlet: - # kill existing hatching_greenlet before we start a new one - self.hatching_greenlet.kill(block=True) - self.hatching_greenlet = self.greenlet.spawn(lambda: super(LocalRunner, self).start(user_count, hatch_rate, wait=wait)) - self.hatching_greenlet.link_exception(greenlet_exception_handler) + if self.spawning_greenlet: + # kill existing spawning_greenlet before we start a new one + self.spawning_greenlet.kill(block=True) + self.spawning_greenlet = self.greenlet.spawn(lambda: super(LocalRunner, self).start(user_count, spawn_rate, wait=wait)) + self.spawning_greenlet.link_exception(greenlet_exception_handler) def stop(self): if self.state == STATE_STOPPED: @@ -433,8 +433,8 @@ def ready(self): return self.get_by_state(STATE_INIT) @property - def hatching(self): - return self.get_by_state(STATE_HATCHING) + def spawning(self): + return self.get_by_state(STATE_SPAWNING) @property def running(self): @@ -483,32 +483,32 @@ def cpu_log_warning(self): warning_emitted = True return warning_emitted - def start(self, user_count, hatch_rate): + def start(self, user_count, spawn_rate): self.target_user_count = user_count - num_workers = len(self.clients.ready) + len(self.clients.running) + len(self.clients.hatching) + num_workers = len(self.clients.ready) + len(self.clients.running) + len(self.clients.spawning) if not num_workers: logger.warning("You are running in distributed mode but have no worker servers connected. " "Please connect workers prior to swarming.") return - self.hatch_rate = hatch_rate + self.spawn_rate = spawn_rate worker_num_users = user_count // (num_workers or 1) - worker_hatch_rate = float(hatch_rate) / (num_workers or 1) + worker_spawn_rate = float(spawn_rate) / (num_workers or 1) remaining = user_count % num_workers - logger.info("Sending hatch jobs of %d users and %.2f hatch rate to %d ready clients" % (worker_num_users, worker_hatch_rate, num_workers)) + logger.info("Sending spawn jobs of %d users and %.2f spawn rate to %d ready clients" % (worker_num_users, worker_spawn_rate, num_workers)) - if worker_hatch_rate > 100: - logger.warning("Your selected hatch rate is very high (>100/worker), and this is known to sometimes cause issues. Do you really need to ramp up that fast?") + if worker_spawn_rate > 100: + logger.warning("Your selected spawn rate is very high (>100/worker), and this is known to sometimes cause issues. 
Do you really need to ramp up that fast?") - if self.state != STATE_RUNNING and self.state != STATE_HATCHING: + if self.state != STATE_RUNNING and self.state != STATE_SPAWNING: self.stats.clear_all() self.exceptions = {} self.environment.events.test_start.fire(environment=self.environment) - for client in (self.clients.ready + self.clients.running + self.clients.hatching): + for client in (self.clients.ready + self.clients.running + self.clients.spawning): data = { - "hatch_rate": worker_hatch_rate, + "spawn_rate": worker_spawn_rate, "num_users": worker_num_users, "host": self.environment.host, "stop_timeout": self.environment.stop_timeout, @@ -518,9 +518,9 @@ def start(self, user_count, hatch_rate): data["num_users"] += 1 remaining -= 1 - self.server.send_to_client(Message("hatch", data, client.id)) + self.server.send_to_client(Message("spawn", data, client.id)) - self.state = STATE_HATCHING + self.state = STATE_SPAWNING def stop(self): if self.state not in [STATE_INIT, STATE_STOPPED, STATE_STOPPING]: @@ -540,7 +540,7 @@ def quit(self): self.greenlet.kill(block=True) def check_stopped(self): - if not self.state == STATE_INIT and all(map(lambda x: x.state != STATE_RUNNING and x.state != STATE_HATCHING, self.clients.all)): + if not self.state == STATE_INIT and all(map(lambda x: x.state != STATE_RUNNING and x.state != STATE_SPAWNING, self.clients.all)): self.state = STATE_STOPPED @@ -585,10 +585,10 @@ def client_listener(self): if msg.type == "client_ready": id = msg.node_id self.clients[id] = WorkerNode(id, heartbeat_liveness=HEARTBEAT_LIVENESS) - logger.info("Client %r reported as ready. Currently %i clients ready to swarm." % (id, len(self.clients.ready + self.clients.running + self.clients.hatching))) - if self.state == STATE_RUNNING or self.state == STATE_HATCHING: + logger.info("Client %r reported as ready. Currently %i clients ready to swarm." % (id, len(self.clients.ready + self.clients.running + self.clients.spawning))) + if self.state == STATE_RUNNING or self.state == STATE_SPAWNING: # balance the load distribution when new client joins - self.start(self.target_user_count, self.hatch_rate) + self.start(self.target_user_count, self.spawn_rate) ## emit a warning if the worker's clock seem to be out of sync with our clock #if abs(time() - msg.data["time"]) > 5.0: # warnings.warn("The worker node's clock seem to be out of sync. 
For the statistics to be correct the different locust servers need to have synchronized clocks.") @@ -607,14 +607,14 @@ def client_listener(self): logger.warning("Worker %s exceeded cpu threshold (will only log this once per worker)" % (msg.node_id)) elif msg.type == "stats": self.environment.events.worker_report.fire(client_id=msg.node_id, data=msg.data) - elif msg.type == "hatching": - self.clients[msg.node_id].state = STATE_HATCHING - elif msg.type == "hatch_complete": + elif msg.type == "spawning": + self.clients[msg.node_id].state = STATE_SPAWNING + elif msg.type == "spawning_complete": self.clients[msg.node_id].state = STATE_RUNNING self.clients[msg.node_id].user_count = msg.data["count"] - if len(self.clients.hatching) == 0: + if len(self.clients.spawning) == 0: count = sum(c.user_count for c in self.clients.values()) - self.environment.events.hatch_complete.fire(user_count=count) + self.environment.events.spawning_complete.fire(user_count=count) elif msg.type == "quit": if msg.node_id in self.clients: del self.clients[msg.node_id] @@ -632,7 +632,7 @@ def client_listener(self): @property def worker_count(self): - return len(self.clients.ready) + len(self.clients.hatching) + len(self.clients.running) + return len(self.clients.ready) + len(self.clients.spawning) + len(self.clients.running) class WorkerRunner(DistributedRunner): """ @@ -660,11 +660,11 @@ def __init__(self, environment, master_host, master_port): self.client.send(Message("client_ready", None, self.client_id)) self.greenlet.spawn(self.stats_reporter).link_exception(greenlet_exception_handler) - # register listener for when all users have hatched, and report it to the master node - def on_hatch_complete(user_count): - self.client.send(Message("hatch_complete", {"count":user_count}, self.client_id)) + # register listener for when all users have spawned, and report it to the master node + def on_spawning_complete(user_count): + self.client.send(Message("spawning_complete", {"count":user_count}, self.client_id)) self.worker_state = STATE_RUNNING - self.environment.events.hatch_complete.add_listener(on_hatch_complete) + self.environment.events.spawning_complete.add_listener(on_spawning_complete) # register listener that adds the current number of spawned users to the report that is sent to the master node def on_report_to_master(client_id, data): @@ -706,19 +706,19 @@ def worker(self): except RPCError as e: logger.error("RPCError found when receiving from master: %s" % ( e ) ) continue - if msg.type == "hatch": - self.worker_state = STATE_HATCHING - self.client.send(Message("hatching", None, self.client_id)) + if msg.type == "spawn": + self.worker_state = STATE_SPAWNING + self.client.send(Message("spawning", None, self.client_id)) job = msg.data - self.hatch_rate = job["hatch_rate"] + self.spawn_rate = job["spawn_rate"] self.target_user_count = job["num_users"] self.environment.host = job["host"] self.environment.stop_timeout = job["stop_timeout"] - if self.hatching_greenlet: - # kill existing hatching greenlet before we launch new one - self.hatching_greenlet.kill(block=True) - self.hatching_greenlet = self.greenlet.spawn(lambda: self.start(user_count=job["num_users"], hatch_rate=job["hatch_rate"])) - self.hatching_greenlet.link_exception(greenlet_exception_handler) + if self.spawning_greenlet: + # kill existing spawning greenlet before we launch new one + self.spawning_greenlet.kill(block=True) + self.spawning_greenlet = self.greenlet.spawn(lambda: self.start(user_count=job["num_users"], spawn_rate=job["spawn_rate"])) + 
self.spawning_greenlet.link_exception(greenlet_exception_handler) elif msg.type == "stop": self.stop() self.client.send(Message("client_stopped", None, self.client_id)) diff --git a/locust/shape.py b/locust/shape.py index 3ef392781b..cb3c3e3433 100644 --- a/locust/shape.py +++ b/locust/shape.py @@ -26,7 +26,7 @@ def tick(self): Returns a tuple with 2 elements to control the running load test: user_count -- Total user count - hatch_rate -- Hatch rate to use when changing total user count + spawn_rate -- Number of users to start/stop per second when changing number of users If `None` is returned then the running load test will be stopped. diff --git a/locust/static/locust.js b/locust/static/locust.js index 714bab652f..db2b76b0a8 100644 --- a/locust/static/locust.js +++ b/locust/static/locust.js @@ -61,7 +61,7 @@ function setHostName(hostname) { $('#swarm_form').submit(function(event) { event.preventDefault(); - $("body").attr("class", "hatching"); + $("body").attr("class", "spawning"); $("#start").fadeOut(); $("#status").fadeIn(); $(".box_running").fadeIn(); @@ -82,7 +82,7 @@ $('#edit_form').submit(function(event) { $.post($(this).attr("action"), $(this).serialize(), function(response) { if (response.success) { - $("body").attr("class", "hatching"); + $("body").attr("class", "spawning"); $("#edit").fadeOut(); setHostName(response.host); } diff --git a/locust/static/style.css b/locust/static/style.css index 7edf64521e..ae8d75516f 100644 --- a/locust/static/style.css +++ b/locust/static/style.css @@ -137,7 +137,7 @@ a:hover { .boxes .box_running { display: none; } -.hatching .boxes .box_running {display: block;} +.spawning .boxes .box_running {display: block;} .running .boxes .box_running {display: block;} .stopped .boxes .box_running, .stopping .boxes .box_running {display: block;} .stopped .boxes .box_stop, .stopping .boxes .box_stop {display: none;} @@ -217,7 +217,7 @@ a:hover { } .stopped .edit, .stopping .edit {display: none;} -.running .edit, .hatching .edit { +.running .edit, .spawning .edit { display: none; border-radius: 5px; -moz-border-radius: 5px; @@ -226,19 +226,19 @@ a:hover { background: #132b21; box-shadow: 0 0 60px rgba(0,0,0,0.3); } -.running .start, .hatching .start {display: none;} +.running .start, .spawning .start {display: none;} .ready .edit {display: none;} .ready .start {display: block;} -.running .status, .hatching .status {display: block;} +.running .status, .spawning .status {display: block;} .stopped .status, .stopping .status {display: block;} .ready .status {display: none;} .stopped .boxes .edit_test, .stopping .boxes .edit_test, .ready .boxes .edit_test {display: none;} .stopped .boxes .user_count, .stopping .boxes .user_count, .ready .boxes .user_count {display: none;} -.running a.new_test, .ready a.new_test, .hatching a.new_test {display: none;} +.running a.new_test, .ready a.new_test, .spawning a.new_test {display: none;} .running a.new_test {display: none;} .stopped a.new_test, .stopping a.new_test {display: block;} @@ -248,10 +248,10 @@ a:hover { top: 10px; } .stopped .start a.close_link, .stopping .start a.close_link {display: inline;} -.running .start a.close_link, .ready .start a.close_link, .hatching .start a.close_link {display: none;} +.running .start a.close_link, .ready .start a.close_link, .spawning .start a.close_link {display: none;} .stopped .edit a.close_link, .stopping .edit a.close_link, .ready .edit a.close_link {display: none;} -.running .edit a.close_link, .hatching .edit a.close_link {display: inline;} +.running .edit a.close_link, 
.spawning .edit a.close_link {display: inline;} .stats_label { cursor: pointer; @@ -460,7 +460,7 @@ ul.tabs li a.current:after { font-weight: bold; } -.running .hostname, .hatching .hostname {display: block;} +.running .hostname, .spawning .hostname {display: block;} .stopped .hostname, .stopping .hostname {display: block;} .ready .hostname {display: none;} diff --git a/locust/templates/index.html b/locust/templates/index.html index d3d1eaa439..409da34472 100644 --- a/locust/templates/index.html +++ b/locust/templates/index.html @@ -60,8 +60,8 @@

Start new load test


-                [hatch_rate label and input field; markup not preserved in this excerpt]
+                [spawn_rate label and input field; markup not preserved in this excerpt]
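
Since the changelog entry above flags the rename as a possible breaking change, here is a minimal sketch (not part of the patch) of what library-mode code looks like after upgrading. It uses only the API surface visible in the hunks above: the `spawn_rate` keyword accepted by `runner.start()` and the `spawning_complete` event that replaces `hatch_complete`. The user class and target host are illustrative assumptions, not taken from the patch.

```python
import gevent
from locust import HttpUser, task, between
from locust.env import Environment

class QuickstartUser(HttpUser):
    # illustrative target host; any reachable host would do
    host = "http://localhost:8089"
    wait_time = between(1, 2)

    @task
    def index(self):
        self.client.get("/")

env = Environment(user_classes=[QuickstartUser])
env.create_local_runner()

# renamed event: hatch_complete -> spawning_complete
@env.events.spawning_complete.add_listener
def on_spawning_complete(user_count, **kwargs):
    print("all %d users spawned" % user_count)

# renamed keyword argument: hatch_rate -> spawn_rate
env.runner.start(10, spawn_rate=5)

# stop the runner after 30 seconds and wait for it to finish
gevent.spawn_later(30, lambda: env.runner.quit())
env.runner.greenlet.join()
```

The same rename applies on the command line per the argument_parser hunk: `-r`/`--spawn-rate` replaces `--hatch-rate`, and the corresponding environment variable becomes `LOCUST_SPAWN_RATE`.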