From 1964c0c27490ad9e65997efcc4c4f8c7411b70df Mon Sep 17 00:00:00 2001
From: rhassanein <64641920+rhassanein@users.noreply.github.com>
Date: Fri, 6 Aug 2021 13:30:35 +0300
Subject: [PATCH] adds duration_ms in int64 to the logs (#4509)

* Adds duration_ms in int64 to the logs

Signed-off-by: rhassanein

* Add CHANGELOG entry for PR#4509

Signed-off-by: rhassanein
---
 CHANGELOG.md             | 1 +
 cmd/thanos/downsample.go | 6 +++---
 pkg/block/fetcher.go     | 2 +-
 pkg/compact/compact.go   | 6 +++---
 4 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index bcc7dd36c4..24c754e964 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
 - [#4482](https://github.com/thanos-io/thanos/pull/4482) COS: Add http_config for cos object store client.
 - [#4487](https://github.com/thanos-io/thanos/pull/4487) Query: Add memcached auto discovery support.
 - [#4444](https://github.com/thanos-io/thanos/pull/4444) UI: Add search block UI.
+- [#4509](https://github.com/thanos-io/thanos/pull/4509) Logging: Adds duration_ms in int64 to the logs.
 
 ### Fixed
 
diff --git a/cmd/thanos/downsample.go b/cmd/thanos/downsample.go
index dd68826192..849c72db36 100644
--- a/cmd/thanos/downsample.go
+++ b/cmd/thanos/downsample.go
@@ -317,7 +317,7 @@ func processDownsampling(ctx context.Context, logger log.Logger, bkt objstore.Bu
 	if err != nil {
 		return errors.Wrapf(err, "download block %s", m.ULID)
 	}
-	level.Info(logger).Log("msg", "downloaded block", "id", m.ULID, "duration", time.Since(begin))
+	level.Info(logger).Log("msg", "downloaded block", "id", m.ULID, "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds())
 
 	if err := block.VerifyIndex(logger, filepath.Join(bdir, block.IndexFilename), m.MinTime, m.MaxTime); err != nil {
 		return errors.Wrap(err, "input block index not valid")
@@ -345,7 +345,7 @@ func processDownsampling(ctx context.Context, logger log.Logger, bkt objstore.Bu
 	resdir := filepath.Join(dir, id.String())
 
 	level.Info(logger).Log("msg", "downsampled block",
-		"from", m.ULID, "to", id, "duration", time.Since(begin))
+		"from", m.ULID, "to", id, "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds())
 
 	if err := block.VerifyIndex(logger, filepath.Join(resdir, block.IndexFilename), m.MinTime, m.MaxTime); err != nil {
 		return errors.Wrap(err, "output block index not valid")
@@ -358,7 +358,7 @@ func processDownsampling(ctx context.Context, logger log.Logger, bkt objstore.Bu
 		return errors.Wrapf(err, "upload downsampled block %s", id)
 	}
 
-	level.Info(logger).Log("msg", "uploaded block", "id", id, "duration", time.Since(begin))
+	level.Info(logger).Log("msg", "uploaded block", "id", id, "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds())
 
 	// It is not harmful if these fails.
 	if err := os.RemoveAll(bdir); err != nil {
diff --git a/pkg/block/fetcher.go b/pkg/block/fetcher.go
index 5b8f4db84a..d4cbfc0bd2 100644
--- a/pkg/block/fetcher.go
+++ b/pkg/block/fetcher.go
@@ -473,7 +473,7 @@ func (f *BaseFetcher) fetch(ctx context.Context, metrics *FetcherMetrics, filter
 		return metas, resp.partial, errors.Wrap(resp.metaErrs.Err(), "incomplete view")
 	}
 
-	level.Info(f.logger).Log("msg", "successfully synchronized block metadata", "duration", time.Since(start).String(), "cached", len(f.cached), "returned", len(metas), "partial", len(resp.partial))
+	level.Info(f.logger).Log("msg", "successfully synchronized block metadata", "duration", time.Since(start).String(), "duration_ms", time.Since(start).Milliseconds(), "cached", len(f.cached), "returned", len(metas), "partial", len(resp.partial))
 	return metas, resp.partial, nil
 }
 
diff --git a/pkg/compact/compact.go b/pkg/compact/compact.go
index a0f62fbc61..946c554621 100644
--- a/pkg/compact/compact.go
+++ b/pkg/compact/compact.go
@@ -759,7 +759,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp
 		}
 		toCompactDirs = append(toCompactDirs, bdir)
 	}
-	level.Info(cg.logger).Log("msg", "downloaded and verified blocks; compacting blocks", "plan", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin))
+	level.Info(cg.logger).Log("msg", "downloaded and verified blocks; compacting blocks", "plan", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds())
 
 	begin = time.Now()
 	compID, err = comp.Compact(dir, toCompactDirs, nil)
@@ -784,7 +784,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp
 		cg.verticalCompactions.Inc()
 	}
 	level.Info(cg.logger).Log("msg", "compacted blocks", "new", compID,
-		"blocks", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin), "overlapping_blocks", overlappingBlocks)
+		"blocks", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds(), "overlapping_blocks", overlappingBlocks)
 
 	bdir := filepath.Join(dir, compID.String())
 	index := filepath.Join(bdir, block.IndexFilename)
@@ -821,7 +821,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp
 	if err := block.Upload(ctx, cg.logger, cg.bkt, bdir, cg.hashFunc); err != nil {
 		return false, ulid.ULID{}, retry(errors.Wrapf(err, "upload of %s failed", compID))
 	}
-	level.Info(cg.logger).Log("msg", "uploaded block", "result_block", compID, "duration", time.Since(begin))
+	level.Info(cg.logger).Log("msg", "uploaded block", "result_block", compID, "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds())
 
 	// Mark for deletion the blocks we just compacted from the group and bucket so they do not get included
 	// into the next planning cycle.
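
Below is a minimal, self-contained sketch of the logging pattern this patch applies: emitting both the human-readable "duration" value and a machine-parsable int64 "duration_ms" for the same measured span. It assumes the standalone github.com/go-kit/log module (Thanos wraps a go-kit logger); the doWork helper and the log message are hypothetical, not part of the patch.

package main

import (
	"os"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

// doWork is a hypothetical stand-in for a download/compact/upload step.
func doWork() { time.Sleep(150 * time.Millisecond) }

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)

	begin := time.Now()
	doWork()

	// "duration" renders through the value's String method (e.g. "150.8ms"),
	// which is easy to read but awkward to filter on; "duration_ms" is a
	// plain int64 that log pipelines can parse, compare, and graph.
	level.Info(logger).Log(
		"msg", "did work",
		"duration", time.Since(begin),
		"duration_ms", time.Since(begin).Milliseconds(),
	)
}

Keeping both keys, as the patch does, preserves the existing human-readable output while adding a numeric field for tooling, at the cost of calling time.Since twice per log line.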