Skip to content

Commit

Permalink
adds duration_ms in int64 to the logs (#4509)
Browse files Browse the repository at this point in the history
* Adds duration_ms in int64 to the logs

Signed-off-by: rhassanein <[email protected]>

* Add CHANGELOG entry for PR#4509

Signed-off-by: rhassanein <[email protected]>
  • Loading branch information
rhassanein authored Aug 6, 2021
1 parent 133a71c commit 1964c0c
Show file tree
Hide file tree
Showing 4 changed files with 8 additions and 7 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
- [#4482](https://github.com/thanos-io/thanos/pull/4482) COS: Add http_config for cos object store client.
- [#4487](https://github.com/thanos-io/thanos/pull/4487) Query: Add memcached auto discovery support.
- [#4444](https://github.com/thanos-io/thanos/pull/4444) UI: Add search block UI.
- [#4509](https://github.com/thanos-io/thanos/pull/4509) Logging: Add duration_ms in int64 to the logs.

### Fixed

Expand Down
6 changes: 3 additions & 3 deletions cmd/thanos/downsample.go
Original file line number Diff line number Diff line change
Expand Up @@ -317,7 +317,7 @@ func processDownsampling(ctx context.Context, logger log.Logger, bkt objstore.Bu
if err != nil {
return errors.Wrapf(err, "download block %s", m.ULID)
}
level.Info(logger).Log("msg", "downloaded block", "id", m.ULID, "duration", time.Since(begin))
level.Info(logger).Log("msg", "downloaded block", "id", m.ULID, "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds())

if err := block.VerifyIndex(logger, filepath.Join(bdir, block.IndexFilename), m.MinTime, m.MaxTime); err != nil {
return errors.Wrap(err, "input block index not valid")
Expand Down Expand Up @@ -345,7 +345,7 @@ func processDownsampling(ctx context.Context, logger log.Logger, bkt objstore.Bu
resdir := filepath.Join(dir, id.String())

level.Info(logger).Log("msg", "downsampled block",
"from", m.ULID, "to", id, "duration", time.Since(begin))
"from", m.ULID, "to", id, "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds())

if err := block.VerifyIndex(logger, filepath.Join(resdir, block.IndexFilename), m.MinTime, m.MaxTime); err != nil {
return errors.Wrap(err, "output block index not valid")
Expand All @@ -358,7 +358,7 @@ func processDownsampling(ctx context.Context, logger log.Logger, bkt objstore.Bu
return errors.Wrapf(err, "upload downsampled block %s", id)
}

level.Info(logger).Log("msg", "uploaded block", "id", id, "duration", time.Since(begin))
level.Info(logger).Log("msg", "uploaded block", "id", id, "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds())

// It is not harmful if these fail.
if err := os.RemoveAll(bdir); err != nil {
Expand Down
2 changes: 1 addition & 1 deletion pkg/block/fetcher.go
Original file line number Diff line number Diff line change
Expand Up @@ -473,7 +473,7 @@ func (f *BaseFetcher) fetch(ctx context.Context, metrics *FetcherMetrics, filter
return metas, resp.partial, errors.Wrap(resp.metaErrs.Err(), "incomplete view")
}

level.Info(f.logger).Log("msg", "successfully synchronized block metadata", "duration", time.Since(start).String(), "cached", len(f.cached), "returned", len(metas), "partial", len(resp.partial))
level.Info(f.logger).Log("msg", "successfully synchronized block metadata", "duration", time.Since(start).String(), "duration_ms", time.Since(start).Milliseconds(), "cached", len(f.cached), "returned", len(metas), "partial", len(resp.partial))
return metas, resp.partial, nil
}

Expand Down
6 changes: 3 additions & 3 deletions pkg/compact/compact.go
Original file line number Diff line number Diff line change
Expand Up @@ -759,7 +759,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp
}
toCompactDirs = append(toCompactDirs, bdir)
}
level.Info(cg.logger).Log("msg", "downloaded and verified blocks; compacting blocks", "plan", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin))
level.Info(cg.logger).Log("msg", "downloaded and verified blocks; compacting blocks", "plan", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds())

begin = time.Now()
compID, err = comp.Compact(dir, toCompactDirs, nil)
Expand All @@ -784,7 +784,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp
cg.verticalCompactions.Inc()
}
level.Info(cg.logger).Log("msg", "compacted blocks", "new", compID,
"blocks", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin), "overlapping_blocks", overlappingBlocks)
"blocks", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds(), "overlapping_blocks", overlappingBlocks)

bdir := filepath.Join(dir, compID.String())
index := filepath.Join(bdir, block.IndexFilename)
Expand Down Expand Up @@ -821,7 +821,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp
if err := block.Upload(ctx, cg.logger, cg.bkt, bdir, cg.hashFunc); err != nil {
return false, ulid.ULID{}, retry(errors.Wrapf(err, "upload of %s failed", compID))
}
level.Info(cg.logger).Log("msg", "uploaded block", "result_block", compID, "duration", time.Since(begin))
level.Info(cg.logger).Log("msg", "uploaded block", "result_block", compID, "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds())

// Mark for deletion the blocks we just compacted from the group and bucket so they do not get included
// into the next planning cycle.
Expand Down

0 comments on commit 1964c0c

Please sign in to comment.