Improve log performance (#359)
Average-case performance appears to be about 7% slower than v0.0.73, but the best case is about 45% faster.
```
go test -timeout 7m -count=10 -run=XXXX -bench=BenchmarkAverageCase/ong github.com/komuw/ong/log > old.txt # run against v0.0.73
go test -timeout 7m -count=10 -run=XXXX -bench=BenchmarkAverageCase/ong github.com/komuw/ong/log > new.txt # run against this change
benchstat old.txt new.txt

pkg: github.com/komuw/ong/log
cpu: Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz
                  │   old.txt   │              new.txt               │
                  │   sec/op    │   sec/op     vs base               │
AverageCase/ong-8   5.287µ ± 4%   5.678µ ± 6%  +7.41% (p=0.000 n=10)

                  │   old.txt    │               new.txt               │
                  │     B/op     │     B/op      vs base               │
AverageCase/ong-8   1.475Ki ± 0%   1.567Ki ± 0%  +6.29% (p=0.000 n=10)

                  │  old.txt   │              new.txt              │
                  │ allocs/op  │ allocs/op   vs base               │
AverageCase/ong-8   27.00 ± 0%   26.00 ± 0%  -3.70% (p=0.000 n=10)


go test -timeout 7m -count=10 -run=XXXX -bench=BenchmarkBestCase/ong github.com/komuw/ong/log > old.txt # run against v0.0.73
go test -timeout 7m -count=10 -run=XXXX -bench=BenchmarkBestCase/ong github.com/komuw/ong/log > new.txt # run against this change
benchstat old.txt new.txt

pkg: github.com/komuw/ong/log
cpu: Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz
               │   old.txt   │               new.txt               │
               │   sec/op    │   sec/op     vs base                │
BestCase/ong-8   2.315µ ± 2%   1.253µ ± 7%  -45.90% (p=0.000 n=10)

               │   old.txt   │              new.txt               │
               │    B/op     │    B/op     vs base                │
BestCase/ong-8   1024.0 ± 0%   864.0 ± 0%  -15.62% (p=0.000 n=10)

               │   old.txt   │              new.txt               │
               │  allocs/op  │ allocs/op   vs base                │
BestCase/ong-8   19.000 ± 0%   2.000 ± 0%  -89.47% (p=0.000 n=10)
```
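For context on what "best case" and "average case" mean here: in the benchmarks further down this diff (`log/log_benchmarks_test.go`), the best case only ever logs at Info level, while the average case also emits an Error record (which makes the ong handler flush its buffer) roughly 1% of the time. Below is a trimmed sketch of those two loops, using a plain stdlib `slog` logger rather than the ong one, purely for illustration:

```go
package main

import (
	"errors"
	"io"
	"log/slog"
	"math/rand"
)

func main() {
	// Stand-in logger; the real benchmarks construct an ong/log logger
	// (and, in this PR, also a stdlib slog JSON logger for comparison).
	l := slog.New(slog.NewJSONHandler(io.Discard, nil))
	logErr := errors.New("some wrapped error")

	// Best case: Info-only, so a flush-on-error handler never pays the
	// formatting/enrichment cost.
	for n := 0; n < 1000; n++ {
		l.Info("hello world", "key", "value")
	}

	// Average case: an Error record (which forces a flush) roughly 1% of the time.
	for n := 0; n < 1000; n++ {
		l.Info("hello world", "key", "value")
		if rand.Intn(100) >= 99 {
			l.Error("some-error", "err", logErr)
		}
	}
}
```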
komuw authored Aug 15, 2023
1 parent 0185295 commit fa95a8f
Showing 5 changed files with 121 additions and 71 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -7,6 +7,7 @@ Most recent version is listed first.
- ong/log: Conform to log/slog: https://github.com/komuw/ong/pull/357
Eliminate duplicate logIDs
- ong/log: Integrate ong packages with new logger: https://github.com/komuw/ong/pull/358
- ong/log: Improve log performance: https://github.com/komuw/ong/pull/359

# v0.0.73
- Remove /ex/exp: https://github.com/komuw/ong/pull/352
118 changes: 65 additions & 53 deletions log/log.go
@@ -8,7 +8,6 @@ import (
"io"
"log/slog"
"sync"
"time"

ongErrors "github.com/komuw/ong/errors"
"github.com/komuw/ong/id"
@@ -25,18 +24,19 @@ const (

// GetId gets a logId either from the provided context or auto-generated.
func GetId(ctx context.Context) string {
if ctx == nil {
ctx = context.Background()
}
id, _ := getId(ctx)
return id
}

// getId gets a logId either from the provided context or auto-generated.
// It returns the logID and true if the id came from ctx else false
func getId(ctx context.Context) (string, bool) {
if ctx != nil {
if vCtx := ctx.Value(octx.LogCtxKey); vCtx != nil {
if s, ok := vCtx.(string); ok {
return s, true
}
if vCtx := ctx.Value(octx.LogCtxKey); vCtx != nil {
if s, ok := vCtx.(string); ok {
return s, true
}
}
return id.New(), false
@@ -160,55 +160,59 @@ func (h *handler) Handle(ctx context.Context, r slog.Record) error {
// https://github.com/golang/go/blob/5c154986094bcc2fb28909cc5f01c9ba1dd9ddd4/src/log/slog/handler.go#L50-L59
// Note that this handler does not produce output and hence the above rules do not apply.

h.mu.Lock()
defer h.mu.Unlock()

{ // 1. Add some required fields.

// Convert time to UTC.
// Note that we do not convert any other fields(that may be of type time.Time) into UTC.
// If we ever need that functionality, we would do that in `r.Attrs()`
if !r.Time.IsZero() {
// According to the docs, If r.Time is the zero time, ignore the time.
r.Time = time.Now().UTC()
}

newAttrs := []slog.Attr{}

// Add logID
theID := h.logID
id2, fromCtx := getId(ctx)
if fromCtx || (theID == "") {
theID = id2
}
newAttrs = []slog.Attr{
{Key: logIDFieldName, Value: slog.StringValue(theID)},
}
h.logID = theID

// Add stackTraces
r.Attrs(func(a slog.Attr) bool {
if e, ok := a.Value.Any().(error); ok {
if stack := ongErrors.StackTrace(e); stack != "" {
newAttrs = append(newAttrs, slog.Attr{Key: "stack", Value: slog.StringValue(stack)})
}
}
return true
})

r.AddAttrs(newAttrs...)
{ // 1. save record.
h.mu.Lock()
h.cBuf.store(extendedLogRecord{r: r, logID: h.logID, ctx: ctx})
h.mu.Unlock()
}

{ // 2. save record.
h.cBuf.store(r)
}

{ // 3. flush on error.
{ // 2. flush on error.
if r.Level >= slog.LevelError {
h.mu.Lock()
defer h.mu.Unlock()

var err error
for _, v := range h.cBuf.buf {
if e := h.wrappedHandler.Handle(ctx, v); e != nil {
err = errors.Join([]error{err, e}...)
{ // 3. Add some required fields.

// Convert time to UTC.
// Note that we do not convert any other fields(that may be of type time.Time) into UTC.
// If we ever need that functionality, we would do that in `r.Attrs()`
if !v.r.Time.IsZero() {
// According to the docs, If r.Time is the zero time, ignore the time.
v.r.Time = v.r.Time.UTC()
}

newAttrs := []slog.Attr{}

// Add logID
theID := v.logID
id2, fromCtx := getId(v.ctx)
if fromCtx || (theID == "") {
theID = id2
}
newAttrs = []slog.Attr{
{Key: logIDFieldName, Value: slog.StringValue(theID)},
}

// Add stackTraces
v.r.Attrs(func(a slog.Attr) bool {
if e, ok := a.Value.Any().(error); ok {
if stack := ongErrors.StackTrace(e); stack != "" {
newAttrs = append(newAttrs, slog.Attr{Key: "stack", Value: slog.StringValue(stack)})
return false // Stop iteration. This assumes that the log fields had only one error.
}
}
return true
})

v.r.AddAttrs(newAttrs...)
}

{ // 4. flush to underlying handler.
if e := h.wrappedHandler.Handle(v.ctx, v.r); e != nil {
err = errors.Join([]error{err, e}...)
}
}
}

@@ -226,10 +230,18 @@ func (h *handler) Handle(ctx context.Context, r slog.Record) error {
return nil
}

// extendedLogRecord is similar to [slog.Record] except that
// it has been expanded to also include items that are specific to ong/log.
type extendedLogRecord struct {
r slog.Record
logID string
ctx context.Context
}

// circleBuf implements a very simple & naive circular buffer.
// users of circleBuf are responsible for concurrency safety.
type circleBuf struct {
buf []slog.Record
buf []extendedLogRecord
maxSize int
}

@@ -238,7 +250,7 @@ func newCirleBuf(maxSize int) *circleBuf {
maxSize = 10
}
c := &circleBuf{
buf: make([]slog.Record, maxSize),
buf: make([]extendedLogRecord, maxSize),
maxSize: maxSize,
}
c.reset() // remove the nils from `make()`
@@ -247,7 +259,7 @@ func newCirleBuf(maxSize int) *circleBuf {

// store is a private api(thus needs no locking).
// It should only ever be called by `handler.Handle` which already takes a lock.
func (c *circleBuf) store(r slog.Record) {
func (c *circleBuf) store(r extendedLogRecord) {
availableSpace := c.maxSize - len(c.buf)
if availableSpace <= 0 {
// clear space.
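The core of the change to `log/log.go` above is moving the per-record work (UTC conversion, logID resolution, stack-trace extraction) out of the hot `Handle` path: records are now stored in the circular buffer as raw `extendedLogRecord`s and only enriched when an error-level record forces a flush. The following is a minimal, self-contained sketch of that deferred-enrichment pattern; the type and attribute names (`extendedRecord`, `bufferedHandler`, the `"logID"` value) are hypothetical, not the actual ong/log code:

```go
package main

import (
	"context"
	"log/slog"
	"os"
	"sync"
)

// extendedRecord pairs a raw slog.Record with the context it arrived with,
// so the expensive enrichment can happen later (mirrors the extendedLogRecord
// idea from the diff above; name is hypothetical).
type extendedRecord struct {
	r   slog.Record
	ctx context.Context
}

// bufferedHandler stores records cheaply on every call and only does the
// expensive work when an error-level record forces a flush.
type bufferedHandler struct {
	mu      sync.Mutex
	buf     []extendedRecord
	maxSize int
	wrapped slog.Handler
}

func (h *bufferedHandler) Enabled(context.Context, slog.Level) bool { return true }
func (h *bufferedHandler) WithAttrs([]slog.Attr) slog.Handler       { return h } // simplified
func (h *bufferedHandler) WithGroup(string) slog.Handler            { return h } // simplified

func (h *bufferedHandler) Handle(ctx context.Context, r slog.Record) error {
	h.mu.Lock()
	defer h.mu.Unlock()

	// 1. Cheap path on every log call: just remember the record.
	if len(h.buf) >= h.maxSize {
		h.buf = h.buf[:0] // naive "circular" behaviour: drop the oldest records
	}
	h.buf = append(h.buf, extendedRecord{r: r, ctx: ctx})

	// 2. Expensive path, only on error: enrich and flush everything buffered.
	if r.Level >= slog.LevelError {
		for _, v := range h.buf {
			if !v.r.Time.IsZero() {
				v.r.Time = v.r.Time.UTC() // deferred UTC conversion
			}
			v.r.AddAttrs(slog.String("logID", "example-id")) // deferred enrichment
			if err := h.wrapped.Handle(v.ctx, v.r); err != nil {
				return err
			}
		}
		h.buf = h.buf[:0]
	}
	return nil
}

func main() {
	h := &bufferedHandler{maxSize: 8, wrapped: slog.NewTextHandler(os.Stdout, nil)}
	l := slog.New(h)
	l.Info("cheap: only buffered, nothing is written yet")
	l.Error("expensive: flushes the whole buffer to the wrapped handler")
}
```

This is why the best case (no error-level records) improves the most: the per-record enrichment is simply never performed.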
47 changes: 43 additions & 4 deletions log/log_benchmarks_test.go
@@ -10,6 +10,7 @@ import (
"testing"
"time"

ongErrors "github.com/komuw/ong/errors"
"github.com/komuw/ong/log"

"github.com/rs/zerolog"
@@ -50,7 +51,7 @@ func newZapLogger(lvl zapcore.Level) *zap.Logger {
))
}

func newNewOngLogger() *slog.Logger {
func newOngLogger() *slog.Logger {
maxMsgs := 50_000
return log.New(
context.Background(),
@@ -59,6 +60,12 @@ func newNewOngLogger() *slog.Logger {
)
}

func newSlogLogger() *slog.Logger {
return slog.New(
slog.NewJSONHandler(io.Discard, nil),
)
}

func getMessage() ([]string, []any) {
type car struct {
mft string
@@ -78,6 +85,7 @@ func getMessage() ([]string, []any) {
"car_length": float32(123.8999),
"carVal": c,
"carPtr": &c,
"ongError": ongErrors.New("This is an ong/errors error"),
}

sl := make([]string, 0, len(f))
@@ -132,7 +140,16 @@ func BenchmarkBestCase(b *testing.B) {
})

b.Run("ong", func(b *testing.B) {
l := newNewOngLogger()
l := newOngLogger()
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
l.Info(sl[0], slAny...)
}
})

b.Run("slog/json", func(b *testing.B) {
l := newSlogLogger()
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
@@ -193,7 +210,19 @@ func BenchmarkAverageCase(b *testing.B) {
})

b.Run("ong", func(b *testing.B) {
l := newNewOngLogger()
l := newOngLogger()
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
l.Info(sl[0], slAny...)
if rand.Intn(100) >= 99 {
l.Error("some-error", logErr)
}
}
})

b.Run("slog/json", func(b *testing.B) {
l := newSlogLogger()
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
@@ -251,7 +280,17 @@ func BenchmarkWorstCase(b *testing.B) {
})

b.Run("ong", func(b *testing.B) {
l := newNewOngLogger()
l := newOngLogger()
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
l.Info(sl[0], slAny...)
l.Error("some-error", logErr)
}
})

b.Run("slog/json", func(b *testing.B) {
l := newSlogLogger()
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
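As a quick, informal way to sanity-check allocs/op figures like the ones in the commit message without a full benchstat run, the standard library's `testing.AllocsPerRun` can be used outside the benchmark harness. The snippet below measures the stdlib JSON handler only; it is illustrative and does not construct the ong logger:

```go
package main

import (
	"fmt"
	"io"
	"log/slog"
	"testing"
)

func main() {
	l := slog.New(slog.NewJSONHandler(io.Discard, nil))

	// Average number of heap allocations per Info call, over 10_000 runs.
	allocs := testing.AllocsPerRun(10_000, func() {
		l.Info("hello world", "key", "value", "n", 42)
	})
	fmt.Printf("allocs/op: %.1f\n", allocs)
}
```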
22 changes: 10 additions & 12 deletions log/log_test.go
@@ -33,12 +33,12 @@ func TestCircleBuf(t *testing.T) {
maxSize := 4
c := newCirleBuf(maxSize)

c.store(slog.Record{Message: "one"})
c.store(slog.Record{Message: "two"})
c.store(extendedLogRecord{r: slog.Record{Message: "one"}})
c.store(extendedLogRecord{r: slog.Record{Message: "two"}})

attest.Equal(t, c.buf[0].Message, "one")
attest.Equal(t, c.buf[0].r.Message, "one")

attest.Equal(t, c.buf[1].Message, "two")
attest.Equal(t, c.buf[1].r.Message, "two")

attest.Equal(t, len(c.buf), 2)
attest.Equal(t, cap(c.buf), 4)
@@ -51,7 +51,7 @@ c := newCirleBuf(maxSize)
c := newCirleBuf(maxSize)
for i := 0; i <= (13 * maxSize); i++ {
x := fmt.Sprint(i)
c.store(slog.Record{Message: x})
c.store(extendedLogRecord{r: slog.Record{Message: x}})

attest.True(t, len(c.buf) <= maxSize)
attest.True(t, cap(c.buf) <= maxSize)
@@ -67,15 +67,15 @@ func TestCircleBuf(t *testing.T) {
c := newCirleBuf(maxSize)
for i := 0; i <= (6 * maxSize); i++ {
x := fmt.Sprint(i)
c.store(slog.Record{Message: x})
c.store(extendedLogRecord{r: slog.Record{Message: x}})
attest.True(t, len(c.buf) <= maxSize)
attest.True(t, cap(c.buf) <= maxSize)
}
attest.True(t, len(c.buf) <= maxSize)
attest.True(t, cap(c.buf) <= maxSize)

attest.Equal(t, c.buf[1].Message, "29")
attest.Equal(t, c.buf[2].Message, "30")
attest.Equal(t, c.buf[1].r.Message, "29")
attest.Equal(t, c.buf[2].r.Message, "30")
})

t.Run("reset", func(t *testing.T) {
@@ -85,7 +85,7 @@ func TestCircleBuf(t *testing.T) {
c := newCirleBuf(maxSize)
for i := 0; i <= (13 * maxSize); i++ {
x := fmt.Sprint(i)
c.store(slog.Record{Message: x})
c.store(extendedLogRecord{r: slog.Record{Message: x}})
attest.True(t, len(c.buf) <= maxSize)
attest.True(t, cap(c.buf) <= maxSize)
}
@@ -338,8 +338,6 @@ func TestLogger(t *testing.T) {
l.Info(msg2)
l.Error("badTingOne", "err1", errors.New("badTingOne"))

fmt.Println("\t cool: ")
fmt.Println(w.String())
attest.Subsequence(t, w.String(), msg1)
attest.Subsequence(t, w.String(), msg2)
attest.Subsequence(t, w.String(), "badTingOne")
@@ -390,7 +388,7 @@ func TestLogger(t *testing.T) {
stdLogger.Println(msg)
attest.Subsequence(t, w.String(), msg)
attest.Subsequence(t, w.String(), `log_test.go`)
attest.Subsequence(t, w.String(), `log_test.go:390`)
attest.Subsequence(t, w.String(), `log_test.go:388`)
attest.True(t, LevelImmediate < 0) // otherwise it will trigger `log.handler` to flush all logs, which we dont want.
}
})
4 changes: 2 additions & 2 deletions log/slogtest.go
@@ -29,8 +29,8 @@ type testCase struct {
checks []check
}

// TODO: make this test Parallel?
// the one in stdlib probably isn't, we should preserve that.
// The `TestHandler` in the stdlib does not use t.Parallel,
// hence this one doesn't also.

// TestHandler tests a [slog.Handler].
// If TestHandler finds any misbehaviors, it returns an error for each,
