diff --git a/README.md b/README.md index d2bcde28..cdedf5f8 100644 --- a/README.md +++ b/README.md @@ -406,7 +406,7 @@ Examples: ```console Requests [total, rate, throughput] 1200, 120.00, 65.87 Duration [total, attack, wait] 10.094965987s, 9.949883921s, 145.082066ms -Latencies [mean, 50, 95, 99, max] 113.172398ms, 108.272568ms, 140.18235ms, 247.771566ms, 264.815246ms +Latencies [min, mean, 50, 90, 95, 99, max] 90.438129ms, 113.172398ms, 108.272568ms, 132.530996ms, 140.18235ms, 247.771566ms, 264.815246ms Bytes In [total, mean] 3714690, 3095.57 Bytes Out [total, mean] 0, 0.00 Success [ratio] 55.42% @@ -434,6 +434,7 @@ The `Duration` row shows: Latency is the amount of time taken for a response to a request to be read (including the `-max-body` bytes from the response body). +- `min` is the minimum latency of all requests in an attack. - `mean` is the [arithmetic mean / average](https://en.wikipedia.org/wiki/Arithmetic_mean) of the latencies of all requests in an attack. - `50`, `90`, `95`, `99` are the 50th, 90th, 95th and 99th [percentiles](https://en.wikipedia.org/wiki/Percentile), respectively, of the latencies of all requests in an attack. To understand more about why these are useful, I recommend [this article](https://bravenewgeek.com/everything-you-know-about-latency-is-wrong/) from @tylertreat. - `max` is the maximum latency of all requests in an attack. @@ -462,7 +463,8 @@ All duration like fields are in nanoseconds. "90th": 3228223, "95th": 3478629, "99th": 3530000, - "max": 3660505 + "max": 3660505, + "min": 1949582 }, "buckets": {"0":9952,"1000000":40,"2000000":6,"3000000":0,"4000000":0,"5000000":2}, "bytes_in": { diff --git a/lib/metrics.go b/lib/metrics.go index d5842cbc..5d7dbf9b 100644 --- a/lib/metrics.go +++ b/lib/metrics.go @@ -144,6 +144,8 @@ type LatencyMetrics struct { P99 time.Duration `json:"99th"` // Max is the maximum observed request latency. Max time.Duration `json:"max"` + // Min is the minimum observed request latency. 
+ Min time.Duration `json:"min"` estimator estimator } @@ -154,6 +156,9 @@ func (l *LatencyMetrics) Add(latency time.Duration) { if l.Total += latency; latency > l.Max { l.Max = latency } + if latency < l.Min || l.Min == 0 { + l.Min = latency + } l.estimator.Add(float64(latency)) } diff --git a/lib/metrics_test.go b/lib/metrics_test.go index e5ad0ce2..c31732bd 100644 --- a/lib/metrics_test.go +++ b/lib/metrics_test.go @@ -48,6 +48,7 @@ func TestMetrics_Add(t *testing.T) { P95: duration("9.5005ms"), P99: duration("9.9005ms"), Max: duration("10ms"), + Min: duration("1us"), estimator: got.Latencies.estimator, }, BytesIn: ByteMetrics{Total: 10240000, Mean: 1024}, diff --git a/lib/reporters.go b/lib/reporters.go index a2c82361..97342359 100644 --- a/lib/reporters.go +++ b/lib/reporters.go @@ -57,7 +57,7 @@ func NewHistogramReporter(h *Histogram) Reporter { func NewTextReporter(m *Metrics) Reporter { const fmtstr = "Requests\t[total, rate, throughput]\t%d, %.2f, %.2f\n" + "Duration\t[total, attack, wait]\t%s, %s, %s\n" + - "Latencies\t[mean, 50, 90, 95, 99, max]\t%s, %s, %s, %s, %s, %s\n" + + "Latencies\t[min, mean, 50, 90, 95, 99, max]\t%s, %s, %s, %s, %s, %s, %s\n" + "Bytes In\t[total, mean]\t%d, %.2f\n" + "Bytes Out\t[total, mean]\t%d, %.2f\n" + "Success\t[ratio]\t%.2f%%\n" + @@ -68,8 +68,7 @@ func NewTextReporter(m *Metrics) Reporter { if _, err = fmt.Fprintf(tw, fmtstr, m.Requests, m.Rate, m.Throughput, m.Duration+m.Wait, m.Duration, m.Wait, - m.Latencies.Mean, m.Latencies.P50, m.Latencies.P90, - m.Latencies.P95, m.Latencies.P99, m.Latencies.Max, + m.Latencies.Min, m.Latencies.Mean, m.Latencies.P50, m.Latencies.P90, m.Latencies.P95, m.Latencies.P99, m.Latencies.Max, m.BytesIn.Total, m.BytesIn.Mean, m.BytesOut.Total, m.BytesOut.Mean, m.Success*100,