diff --git a/locust/core.py b/locust/core.py index 1377357a0c..3c6c24600f 100644 --- a/locust/core.py +++ b/locust/core.py @@ -303,9 +303,12 @@ def interrupt(self, reschedule=True): locust_runner = None + + STATE_INIT, STATE_HATCHING, STATE_RUNNING, STATE_STOPPED = ["ready", "hatching", "running", "stopped"] SLAVE_REPORT_INTERVAL = 3.0 + class LocustRunner(object): def __init__(self, locust_classes, hatch_rate, num_clients, num_requests=None, host=None): self.locust_classes = locust_classes @@ -335,18 +338,19 @@ def errors(self): @property def user_count(self): return len(self.locusts) - - def hatch(self, stop_timeout=None): - if self.num_requests is not None: - RequestStats.global_max_requests = self.num_requests - + + def weight_locusts(self, amount, stop_timeout = None): + """ + Distributes the amount of locusts for each WebLocust-class according to its weight + and a list: bucket with the weighted locusts is returned + """ bucket = [] weight_sum = sum((locust.weight for locust in self.locust_classes)) for locust in self.locust_classes: if not locust.tasks: warnings.warn("Notice: Found locust (%s) got no tasks. Skipping..." 
% locust.__name__) continue - + if self.host is not None: locust.host = self.host if stop_timeout is not None: @@ -354,54 +358,100 @@ def hatch(self, stop_timeout=None): # create locusts depending on weight percent = locust.weight / float(weight_sum) - num_locusts = int(round(self.num_clients * percent)) + num_locusts = int(round(amount * percent)) bucket.extend([locust for x in xrange(0, num_locusts)]) + return bucket + + def hatch(self, spawn_count=None, stop_timeout=None, wait=False): + if spawn_count is None: + spawn_count = self.num_clients + + if self.num_requests is not None: + RequestStats.global_max_requests = self.num_requests + + bucket = self.weight_locusts(spawn_count, stop_timeout) + spawn_count = len(bucket) + if self.state == STATE_INIT or self.state == STATE_STOPPED: + self.state = STATE_HATCHING + self.num_clients = spawn_count + else: + self.num_clients += spawn_count - print "\nHatching and swarming %i clients at the rate %g clients/s...\n" % (self.num_clients, self.hatch_rate) + print "\nHatching and swarming %i clients at the rate %g clients/s...\n" % (spawn_count, self.hatch_rate) occurence_count = dict([(l.__name__, 0) for l in self.locust_classes]) - total_locust_count = len(bucket) def spawn_locusts(): sleep_time = 1.0 / self.hatch_rate while True: if not bucket: print "All locusts hatched: %s" % ", ".join(["%s: %d" % (name, count) for name, count in occurence_count.iteritems()]) - events.hatch_complete.fire(total_locust_count) + events.hatch_complete.fire(self.num_clients) return locust = bucket.pop(random.randint(0, len(bucket)-1)) occurence_count[locust.__name__] += 1 - def start_locust(): + def start_locust(_): try: locust()() except RescheduleTaskImmediately: pass except GreenletExit: pass - new_locust = self.locusts.spawn(start_locust) + new_locust = self.locusts.spawn(start_locust, locust) if len(self.locusts) % 10 == 0: print "%i locusts hatched" % len(self.locusts) gevent.sleep(sleep_time) spawn_locusts() - self.locusts.join() - 
print "All locusts dead\n" - print_stats(self.request_stats) - print_percentile_stats(self.request_stats) #TODO use an event listener, or such, for this? - - def start_hatching(self, locust_count=None, hatch_rate=None): - if locust_count: - self.num_clients = locust_count - if hatch_rate: - self.hatch_rate = hatch_rate - + if wait: + self.locusts.join() + print "All locusts dead\n" + print_stats(self.request_stats) + print_percentile_stats(self.request_stats) #TODO use an event listener, or such, for this? + + def kill_locusts(self, kill_count): + """ + Kill a kill_count of weighted locusts from the Group() object in self.locusts + """ + bucket = self.weight_locusts(kill_count) + kill_count = len(bucket) + self.num_clients -= kill_count + print "killing locusts:", kill_count + dying = [] + for g in self.locusts: + for l in bucket: + if l == g.args[0]: + dying.append(g) + bucket.remove(l) + break + for g in dying: + self.locusts.killone(g) + events.hatch_complete.fire(self.num_clients) + + def start_hatching(self, locust_count=None, hatch_rate=None, wait=False): + print "start hatching", locust_count, hatch_rate, self.state if self.state != STATE_RUNNING and self.state != STATE_HATCHING: - RequestStats.clear_all() - - RequestStats.global_start_time = time() - self.state = STATE_HATCHING - self.hatch() - + RequestStats.clear_all() + RequestStats.global_start_time = time() + # Dynamically changing the locust count + if self.state != STATE_INIT and self.state != STATE_STOPPED: + self.state = STATE_HATCHING + if self.num_clients > locust_count: + # Kill some locusts + kill_count = self.num_clients - locust_count + self.kill_locusts(kill_count) + elif self.num_clients < locust_count: + # Spawn some locusts + if hatch_rate: + self.hatch_rate = hatch_rate + spawn_count = locust_count - self.num_clients + self.hatch(spawn_count=spawn_count) + else: + if locust_count: + self.hatch(locust_count, wait=wait) + else: + self.hatch(wait=wait) + def stop(self): # if we are 
currently hatching locusts we need to kill the hatching greenlet first if self.hatching_greenlet and not self.hatching_greenlet.ready(): @@ -410,8 +460,8 @@ def stop(self): self.state = STATE_STOPPED class LocalLocustRunner(LocustRunner): - def start_hatching(self, locust_count=None, hatch_rate=None): - self.hatching_greenlet = gevent.spawn(lambda: super(LocalLocustRunner, self).start_hatching(locust_count, hatch_rate)) + def start_hatching(self, locust_count=None, hatch_rate=None, wait=False): + self.hatching_greenlet = gevent.spawn(lambda: super(LocalLocustRunner, self).start_hatching(locust_count, hatch_rate, wait=wait)) self.greenlet = self.hatching_greenlet class DistributedLocustRunner(LocustRunner): @@ -464,14 +514,13 @@ def on_slave_report(client_id, data): def user_count(self): return sum([c.user_count for c in self.clients.itervalues()]) - def start_hatching(self, locust_count=None, hatch_rate=None): - if locust_count: - self.num_clients = locust_count / (len(self.clients.ready) or 1) - if hatch_rate: - self.hatch_rate = float(hatch_rate) / (len(self.clients.ready) or 1) - - print "Sending hatch jobs to %i ready clients" % len(self.clients.ready) - if not len(self.clients.ready): + def start_hatching(self, locust_count, hatch_rate): + self.num_clients = locust_count + slave_num_clients = locust_count / ((len(self.clients.ready) + len(self.clients.running)) or 1) + slave_hatch_rate = float(hatch_rate) / ((len(self.clients.ready) + len(self.clients.running)) or 1) + + print "Sending hatch jobs to %i ready clients" % (len(self.clients.ready) + len(self.clients.running)) + if not (len(self.clients.ready)+len(self.clients.running)): print "WARNING: You are running in distributed mode but have no slave servers connected." print "Please connect slaves prior to swarming." 
@@ -479,12 +528,74 @@ def start_hatching(self, locust_count=None, hatch_rate=None): RequestStats.clear_all() for client in self.clients.itervalues(): - msg = {"hatch_rate":self.hatch_rate, "num_clients":self.num_clients, "num_requests": self.num_requests, "host":self.host, "stop_timeout":None} - self.server.send({"type":"start", "data":msg}) + msg = {"hatch_rate":slave_hatch_rate, "num_clients":slave_num_clients, "num_requests": self.num_requests, "host":self.host, "stop_timeout":None} + self.server.send({"type":"hatch", "data":msg}) RequestStats.global_start_time = time() self.state = STATE_HATCHING - + + def start_ramping(self, hatch_rate=None, max_locusts=1000, hatch_stride=None, percent=0.95, response_time=2000, acceptable_fail=0.05): + if hatch_rate: + self.hatch_rate = hatch_rate + + if not hatch_stride: + hatch_stride = 100 + + clients = hatch_stride + + # Record low load percentile + def calibrate(): + self.start_hatching(clients, self.hatch_rate) + while True: + if self.state != STATE_HATCHING: + print "recording low_percentile..." 
+ gevent.sleep(30) + percentile = RequestStats.sum_stats().one_percentile(percent) + print "low_percentile:", percentile + self.start_hatching(1, self.hatch_rate) + return percentile + gevent.sleep(1) + + low_percentile = calibrate() + + while True: + if self.state != STATE_HATCHING: + if self.num_clients >= max_locusts: + print "ramping stopped due to max_locusts limit reached:", max_locusts + return + gevent.sleep(10) + if RequestStats.sum_stats().fail_ratio >= acceptable_fail: + print "ramping stopped due to acceptable_fail ratio (%d1.2%%) exceeded with fail ratio %1.2d%%", (acceptable_fail*100, RequestStats.sum_stats().fail_ratio*100) + return + p = RequestStats.sum_stats().one_percentile(percent) + if p >= low_percentile * 2.0: + print "ramping stopped due to response times getting high:", p + return + self.start_hatching(clients, self.hatch_rate) + clients += hatch_stride + gevent.sleep(1) + +# while True: +# if self.state != STATE_HATCHING: +# print "self.num_clients: %i max_locusts: %i" % (self.num_clients, max_locusts) +# if self.num_clients >= max_locusts: +# print "ramping stopped due to max_locusts limit reached:", max_locusts +# return +# gevent.sleep(5) +# if self.state != STATE_INIT: +# print "num_reqs: %i fail_ratio: %1.2d" % (RequestStats.sum_stats().num_reqs, RequestStats.sum_stats().fail_ratio) +# while RequestStats.sum_stats().num_reqs < 100: +# if RequestStats.sum_stats().fail_ratio >= acceptable_fail: +# print "ramping stopped due to acceptable_fail ratio (%d1.2%%) exceeded with fail ratio %1.2d%%", (acceptable_fail*100, RequestStats.sum_stats().fail_ratio*100) +# return +# gevent.sleep(1) +# if RequestStats.sum_stats().one_percentile(percent) >= response_time: +# print "ramping stopped due to response times over %ims for %1.2f%%" % (response_time, percent*100) +# return +# self.start_hatching(clients, self.hatch_rate) +# clients += 10 * hatchrate +# gevent.sleep(1) + def stop(self): for client in self.clients.hatching + self.clients.running: 
self.server.send({"type":"stop", "data":{}}) @@ -504,7 +615,7 @@ def client_listener(self): elif msg["type"] == "stats": report = msg["data"] events.slave_report.fire(report["client_id"], report["data"]) - elif msg["type"] == "running": + elif msg["type"] == "hatching": id = msg["data"] self.clients[id].state = STATE_HATCHING elif msg["type"] == "hatch_complete": @@ -514,7 +625,7 @@ def client_listener(self): if len(self.clients.hatching) == 0: count = sum(c.user_count for c in self.clients.itervalues()) events.hatch_complete.fire(count) - + @property def slave_count(self): return len(self.clients.ready) + len(self.clients.hatching) + len(self.clients.running) @@ -543,19 +654,20 @@ def on_report_to_master(client_id, data): def worker(self): while True: msg = self.client.recv() - if msg["type"] == "start": - self.client.send({"type":"running", "data":self.client_id}) + if msg["type"] == "hatch": + self.client.send({"type":"hatching", "data":self.client_id}) job = msg["data"] self.hatch_rate = job["hatch_rate"] - self.num_clients = job["num_clients"] + #self.num_clients = job["num_clients"] self.num_requests = job["num_requests"] self.host = job["host"] - self.hatching_greenlet = gevent.spawn(lambda: self.start_hatching()) + self.hatching_greenlet = gevent.spawn(lambda: self.start_hatching(locust_count=job["num_clients"], hatch_rate=job["hatch_rate"])) elif msg["type"] == "stop": self.stop() self.client.send({"type":"client_stopped", "data":self.client_id}) self.client.send({"type":"client_ready", "data":self.client_id}) - + + def stats_reporter(self): while True: data = {} diff --git a/locust/main.py b/locust/main.py index d57fa2a7a5..2ac9814a7b 100644 --- a/locust/main.py +++ b/locust/main.py @@ -335,7 +335,7 @@ def main(): core.locust_runner = LocalLocustRunner(locust_classes, options.hatch_rate, options.num_clients, options.num_requests, options.host) # spawn client spawning/hatching greenlet if not options.web: - core.locust_runner.start_hatching() + 
core.locust_runner.start_hatching(wait=True) main_greenlet = core.locust_runner.greenlet elif options.master: core.locust_runner = MasterLocustRunner(locust_classes, options.hatch_rate, options.num_clients, num_requests=options.num_requests, host=options.host, master_host=options.master_host) diff --git a/locust/static/style.css b/locust/static/style.css index 7063275b33..2df06cbb17 100644 --- a/locust/static/style.css +++ b/locust/static/style.css @@ -86,29 +86,29 @@ a { padding-top: 20px; } -.start { +.start, .edit { width: 398px; position: absolute; left: 50%; top: 100px; margin-left: -169px; } -.start .padder { +.start .padder, .edit .padder { padding: 30px; padding-top: 0px; } -.start h2 { +.start h2, .edit h2 { color: #addf82; font-size: 26px; font-weight: bold; } -.start label { +.start label, .edit label { display: block; margin-bottom: 10px; margin-top: 20px; font-size: 16px; } -.start input.val { +.start input.val, .edit input.val { border: none; background: #fff; height: 52px; @@ -116,7 +116,7 @@ a { font-size: 24px; padding-left: 10px; } -.start input.start_button { +.start input.start_button, .edit input.start_button { margin-top: 20px; float: right; } @@ -130,20 +130,33 @@ a { border: 3px solid #eee; background: #111717; } + +.stopped .edit {display: none;} +.running .edit, .hatching .edit { + display: none; + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + border: 3px solid #eee; + background: #111717; +} .running .start, .hatching .start {display: none;} + +.ready .edit {display: none;} .ready .start {display: block;} .running .status, .hatching .status {display: block;} -.stopped .status {display: blocked;} +.stopped .status {display: block;} .ready .status {display: none;} +.stopped .boxes .edit_test, .ready .boxes .edit_test {display: none;} .stopped .boxes .user_count, .ready .boxes .user_count {display: none;} .running a.new_test, .ready a.new_test, .hatching a.new_test {display: none;} .running a.new_test 
{display: none;} .stopped a.new_test {display: block;} -.start a.close_link { +.start a.close_link, .edit a.close_link{ position: absolute; right: 10px; top: 10px; @@ -151,6 +164,9 @@ a { .stopped .start a.close_link {display: inline;} .running .start a.close_link, .ready .start a.close_link, .hatching .start a.close_link {display: none;} +.stopped .edit a.close_link, .ready .edit a.close_link {display: none;} +.running .edit a.close_link, .hatching .edit a.close_link {display: inline;} + .status table { border-collapse: collapse; width: 100%; diff --git a/locust/stats.py b/locust/stats.py index 8ec7fc35ac..7dc3b32003 100644 --- a/locust/stats.py +++ b/locust/stats.py @@ -11,7 +11,6 @@ class RequestStatsAdditionError(Exception): pass - class RequestStats(object): requests = {} @@ -35,7 +34,7 @@ def clear_all(cls): cls.global_max_requests = None cls.global_last_request_timestamp = None cls.global_start_time = None - + @classmethod def reset_all(cls): cls.global_start_time = time.time() @@ -43,7 +42,7 @@ def reset_all(cls): for name, stats in cls.requests.iteritems(): stats.reset() cls.errors = {} - + def reset(self): self.start_time = time.time() self.num_reqs = 0 @@ -61,7 +60,7 @@ def log(self, response_time, content_length): self.num_reqs += 1 self.total_response_time += response_time - + t = int(time.time()) self.num_reqs_per_sec[t] = self.num_reqs_per_sec.setdefault(t, 0) + 1 self.last_request_timestamp = t @@ -69,10 +68,10 @@ def log(self, response_time, content_length): if self._min_response_time is None: self._min_response_time = response_time - + self._min_response_time = min(self._min_response_time, response_time) self.max_response_time = max(self.max_response_time, response_time) - + # to avoid to much data that has to be transfered to the master node when # running in distributed mode, we save the response time rounded in a dict # so that 147 becomes 150, 3432 becomes 3400 and 58760 becomes 59000 @@ -87,29 +86,39 @@ def log(self, response_time, 
content_length): # increase request count for the rounded key in response time dict self.response_times.setdefault(rounded_response_time, 0) self.response_times[rounded_response_time] += 1 - + # increase total content-length self.total_content_length += content_length - + def log_error(self, error): self.num_failures += 1 key = "%r: %s" % (error, repr(str(error))) RequestStats.errors.setdefault(key, 0) RequestStats.errors[key] += 1 - + + @property + def fail_ratio(self): + try: + return float(self.num_failures) / self.num_reqs + except ZeroDivisionError: + if self.num_failures > 0: + return 1.0 + else: + return 0.0 + @property def min_response_time(self): if self._min_response_time is None: return 0 return self._min_response_time - + @property def avg_response_time(self): try: return float(self.total_response_time) / self.num_reqs except ZeroDivisionError: return 0 - + @property def median_response_time(self): if not self.response_times: @@ -122,7 +131,7 @@ def current_rps(self): if self.global_last_request_timestamp is None: return 0 slice_start_time = max(self.global_last_request_timestamp - 12, int(self.global_start_time or 0)) - + reqs = [self.num_reqs_per_sec.get(t, 0) for t in range(slice_start_time, self.global_last_request_timestamp-2)] return avg(reqs) @@ -130,9 +139,9 @@ def current_rps(self): def total_rps(self): if not RequestStats.global_last_request_timestamp: return 0.0 - + return self.num_reqs / max(RequestStats.global_last_request_timestamp - RequestStats.global_start_time, 1) - + @property def avg_content_length(self): try: @@ -164,12 +173,12 @@ def iadd_stats(self, other, full_request_history=False): for i in xrange(other.last_request_timestamp-20, other.last_request_timestamp+1): if i in other.num_reqs_per_sec: self.num_reqs_per_sec[i] = self.num_reqs_per_sec.get(i, 0) + other.num_reqs_per_sec[i] - + def get_stripped_report(self): report = copy(self) self.reset() return report - + def to_dict(self): return { 'num_reqs': self.num_reqs, @@ -185,7 
+194,7 @@ def __str__(self): fail_percent = (self.num_failures/float(self.num_reqs))*100 except ZeroDivisionError: fail_percent = 0 - + return (" %-" + str(STATS_NAME_WIDTH) + "s %7d %12s %7d %7d %7d | %7d %7.2f") % ( self.name, self.num_reqs, @@ -196,7 +205,7 @@ def __str__(self): self.median_response_time or 0, self.current_rps or 0 ) - + def create_response_times_list(self): inflated_list = [] for response_time, count in self.response_times.iteritems(): @@ -228,7 +237,7 @@ def get(cls, name): request = RequestStats(name) cls.requests[name] = request return request - + @classmethod def sum_stats(cls, name="Total", full_request_history=False): stats = RequestStats(name) @@ -275,7 +284,7 @@ def percentile(N, percent, key=lambda x:x): def on_request_success(name, response_time, response): if RequestStats.global_max_requests is not None and RequestStats.total_num_requests >= RequestStats.global_max_requests: raise InterruptLocust("Maximum number of requests reached") - + content_length = int(response.info.getheader("Content-Length") or 0) RequestStats.get(name).log(response_time, content_length) @@ -293,7 +302,7 @@ def on_slave_report(client_id, data): RequestStats.requests[stats.name] = RequestStats(stats.name) RequestStats.requests[stats.name].iadd_stats(stats, full_request_history=True) RequestStats.global_last_request_timestamp = max(RequestStats.global_last_request_timestamp, stats.last_request_timestamp) - + for err_message, err_count in data["errors"].iteritems(): RequestStats.errors[err_message] = RequestStats.errors.setdefault(err_message, 0) + err_count @@ -326,7 +335,7 @@ def print_stats(stats): print "" def print_percentile_stats(stats): - print "Percentage of the requests completed within given times" + print "Percentage of the requests completed within given times" print (" %-" + str(STATS_NAME_WIDTH) + "s %8s %6s %6s %6s %6s %6s %6s %6s %6s %6s") % ('Name', '# reqs', '50%', '66%', '75%', '80%', '90%', '95%', '98%', '99%', '100%') print "-" * (80 + 
STATS_NAME_WIDTH) complete_list = [] diff --git a/locust/templates/index.html b/locust/templates/index.html index f9621abbec..c02a94fc4d 100644 --- a/locust/templates/index.html +++ b/locust/templates/index.html @@ -18,6 +18,7 @@ {{user_count}} users New test + Edit {% if is_distributed %}
@@ -36,6 +37,9 @@
+
+ Reset Stats +
@@ -58,6 +62,23 @@

Start new Locust swarm

+ +
+
+ Close +
+
+

Change the locust count

+
+ +
+ +
+ +
+
+
+