// Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.
// Package block contains common functionality for interacting with TSDB blocks
// in the context of Thanos.
package block
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strings"
"time"

"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/oklog/ulid"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"

"github.com/thanos-io/thanos/pkg/block/metadata"
"github.com/thanos-io/thanos/pkg/objstore"
"github.com/thanos-io/thanos/pkg/runutil"
)
const (
// MetaFilename is the known JSON filename for meta information.
MetaFilename = "meta.json"
// IndexFilename is the known index file for block index.
IndexFilename = "index"
// IndexHeaderFilename is the canonical name of the binary index-header file that stores essential index information.
IndexHeaderFilename = "index-header"
// ChunksDirname is the known dir name for chunks with compressed samples.
ChunksDirname = "chunks"
// DebugMetas is a directory for meta files of past blocks, kept for debugging.
DebugMetas = "debug/metas"
)
// Download downloads a directory that is meant to be a block directory. If any of the files
// have a hash recorded in the meta file and it matches the file already present at the
// destination path, that file is not downloaded again. The meta file itself is always re-downloaded.
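//
// A minimal usage sketch (not from the original source; ctx, logger, bkt and
// id are assumed to be set up by the caller, and the data path is hypothetical):
//
//	dst := filepath.Join("/var/thanos/data", id.String())
//	if err := Download(ctx, logger, bkt, id, dst); err != nil {
//		level.Error(logger).Log("msg", "download block", "block", id, "err", err)
//	}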
func Download(ctx context.Context, logger log.Logger, bucket objstore.Bucket, id ulid.ULID, dst string) error {
if err := os.MkdirAll(dst, 0777); err != nil {
return errors.Wrap(err, "create dir")
}
if err := objstore.DownloadFile(ctx, logger, bucket, path.Join(id.String(), MetaFilename), path.Join(dst, MetaFilename)); err != nil {
return err
}
m, err := metadata.ReadFromDir(dst)
if err != nil {
return errors.Wrapf(err, "reading meta from %s", dst)
}
ignoredPaths := []string{MetaFilename}
for _, fl := range m.Thanos.Files {
if fl.Hash == nil || fl.Hash.Func == metadata.NoneFunc || fl.RelPath == "" {
continue
}
actualHash, err := metadata.CalculateHash(filepath.Join(dst, fl.RelPath), fl.Hash.Func, logger)
if err != nil {
level.Info(logger).Log("msg", "failed to calculate hash when downloading; re-downloading", "relPath", fl.RelPath, "err", err)
continue
}
if fl.Hash.Equal(&actualHash) {
ignoredPaths = append(ignoredPaths, fl.RelPath)
}
}
if err := objstore.DownloadDir(ctx, logger, bucket, id.String(), id.String(), dst, ignoredPaths...); err != nil {
return err
}
chunksDir := filepath.Join(dst, ChunksDirname)
_, err = os.Stat(chunksDir)
if os.IsNotExist(err) {
// This can happen if the block is empty. We cannot easily upload an empty directory, so create one here.
return os.Mkdir(chunksDir, os.ModePerm)
}
if err != nil {
return errors.Wrapf(err, "stat %s", chunksDir)
}
return nil
}
// Upload uploads the block from the given block dir, whose name must be the block's ULID.
// It makes sure cleanup is done on error to avoid partial block uploads.
// It also verifies basic properties of a Thanos block.
// TODO(bplotka): Ensure bucket operations have reasonable backoff retries.
// NOTE: Upload updates `meta.Thanos.File` section.
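//
// A minimal usage sketch (hypothetical caller; dataDir is assumed, and
// metadata.SHA256Func is assumed to be the desired hash function):
//
//	bdir := filepath.Join(dataDir, id.String())
//	if err := Upload(ctx, logger, bkt, bdir, metadata.SHA256Func); err != nil {
//		return errors.Wrap(err, "upload block")
//	}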
func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir string, hf metadata.HashFunc) error {
return UploadWithRetry(ctx, logger, bkt, bdir, hf, 0)
}
// UploadWithRetry uploads the block and retries failed partial uploads up to five times.
// It exists as a workaround for Go's lack of default parameters; callers should pass retryCounter = 0, as Upload does.
func UploadWithRetry(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir string, hf metadata.HashFunc, retryCounter int) error {
retry := false
df, err := os.Stat(bdir)
if err != nil {
return err
}
if !df.IsDir() {
return errors.Errorf("%s is not a directory", bdir)
}
// Verify dir.
id, err := ulid.Parse(df.Name())
if err != nil {
return errors.Wrap(err, "not a block dir")
}
meta, err := metadata.ReadFromDir(bdir)
if err != nil {
// No meta or broken meta file.
return errors.Wrap(err, "read meta")
}
if len(meta.Thanos.Labels) == 0 {
return errors.New("empty external labels are not allowed for Thanos block")
}
meta.Thanos.Files, err = gatherFileStats(bdir, hf, logger)
if err != nil {
return errors.Wrap(err, "gather meta file stats")
}
metaEncoded := strings.Builder{}
if err := meta.Write(&metaEncoded); err != nil {
return errors.Wrap(err, "encode meta file")
}
if err := bkt.Upload(ctx, path.Join(DebugMetas, fmt.Sprintf("%s.json", id)), strings.NewReader(metaEncoded.String())); err != nil {
if retryCounter == 5 {
return cleanUp(logger, bkt, id, errors.Wrap(err, "upload debug meta file"))
}
retry = true
}
if err := objstore.UploadDir(ctx, logger, bkt, path.Join(bdir, ChunksDirname), path.Join(id.String(), ChunksDirname)); err != nil {
if retryCounter == 5 {
return cleanUp(logger, bkt, id, errors.Wrap(err, "upload chunks"))
}
retry = true
}
if err := objstore.UploadFile(ctx, logger, bkt, path.Join(bdir, IndexFilename), path.Join(id.String(), IndexFilename)); err != nil {
if retryCounter == 5 {
return cleanUp(logger, bkt, id, errors.Wrap(err, "upload index"))
}
retry = true
}
if retry && retryCounter < 5 {
return UploadWithRetry(ctx, logger, bkt, bdir, hf, retryCounter+1)
}
// meta.json always needs to be uploaded last. This allows us to assume that block directories without a meta file are pending uploads.
if err := bkt.Upload(ctx, path.Join(id.String(), MetaFilename), strings.NewReader(metaEncoded.String())); err != nil {
// Don't call cleanUp here. Despite getting error, meta.json may have been uploaded in certain cases,
// and even though cleanUp will not see it yet, meta.json may appear in the bucket later.
// (Eg. S3 is known to behave this way when it returns 503 "SlowDown" error).
// If meta.json is not uploaded, this will produce partial blocks, but such blocks will be cleaned later.
return errors.Wrap(err, "upload meta file")
}
return nil
}
func cleanUp(logger log.Logger, bkt objstore.Bucket, id ulid.ULID, err error) error {
// Cleanup the dir with an uncancelable context.
cleanErr := Delete(context.Background(), logger, bkt, id)
if cleanErr != nil {
return errors.Wrapf(err, "failed to clean block after upload issue. Partial block in system. Err: %s", err.Error())
}
return err
}
// MarkForDeletion creates a file which stores information about when the block was marked for deletion.
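//
// A minimal usage sketch (the counter and its metric name are hypothetical;
// real callers register their own):
//
//	marked := prometheus.NewCounter(prometheus.CounterOpts{
//		Name: "blocks_marked_for_deletion_total", // hypothetical name
//	})
//	if err := MarkForDeletion(ctx, logger, bkt, id, "requested by operator", marked); err != nil {
//		return err
//	}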
func MarkForDeletion(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, details string, markedForDeletion prometheus.Counter) error {
deletionMarkFile := path.Join(id.String(), metadata.DeletionMarkFilename)
deletionMarkExists, err := bkt.Exists(ctx, deletionMarkFile)
if err != nil {
return errors.Wrapf(err, "check exists %s in bucket", deletionMarkFile)
}
if deletionMarkExists {
level.Warn(logger).Log("msg", "requested to mark for deletion, but file already exists; this should not happen; investigate", "err", errors.Errorf("file %s already exists in bucket", deletionMarkFile))
return nil
}
deletionMark, err := json.Marshal(metadata.DeletionMark{
ID: id,
DeletionTime: time.Now().Unix(),
Version: metadata.DeletionMarkVersion1,
Details: details,
})
if err != nil {
return errors.Wrap(err, "json encode deletion mark")
}
if err := bkt.Upload(ctx, deletionMarkFile, bytes.NewBuffer(deletionMark)); err != nil {
return errors.Wrapf(err, "upload file %s to bucket", deletionMarkFile)
}
markedForDeletion.Inc()
level.Info(logger).Log("msg", "block has been marked for deletion", "block", id)
return nil
}
// Delete removes the directory that is meant to be a block directory.
// NOTE: Always prefer this method for deleting blocks.
// * We have to delete the block's files in a certain order (meta.json first, deletion-mark.json last)
// to ensure we don't end up with malformed partial blocks. Thanos handles partial blocks well
// only if they don't have a meta.json; if a meta.json is present, Thanos assumes the block is valid.
// * This avoids deleting an empty dir (i.e. the whole bucket) by mistake.
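//
// A minimal usage sketch (assumes the block was already marked for deletion,
// e.g. via MarkForDeletion above):
//
//	if err := Delete(ctx, logger, bkt, id); err != nil {
//		return errors.Wrapf(err, "delete block %s", id)
//	}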
func Delete(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID) error {
metaFile := path.Join(id.String(), MetaFilename)
deletionMarkFile := path.Join(id.String(), metadata.DeletionMarkFilename)
// Delete block meta file.
ok, err := bkt.Exists(ctx, metaFile)
if err != nil {
return errors.Wrapf(err, "stat %s", metaFile)
}
if ok {
if err := bkt.Delete(ctx, metaFile); err != nil {
return errors.Wrapf(err, "delete %s", metaFile)
}
level.Debug(logger).Log("msg", "deleted file", "file", metaFile, "bucket", bkt.Name())
}
// Delete the block objects, but skip:
// - The metaFile, as we just deleted it. This is required for eventually consistent object storages (list-after-write may still return it).
// - The deletionMarkFile, as we delete it last.
err = deleteDirRec(ctx, logger, bkt, id.String(), func(name string) bool {
return name == metaFile || name == deletionMarkFile
})
if err != nil {
return err
}
// Delete block deletion mark.
ok, err = bkt.Exists(ctx, deletionMarkFile)
if err != nil {
return errors.Wrapf(err, "stat %s", deletionMarkFile)
}
if ok {
if err := bkt.Delete(ctx, deletionMarkFile); err != nil {
return errors.Wrapf(err, "delete %s", deletionMarkFile)
}
level.Debug(logger).Log("msg", "deleted file", "file", deletionMarkFile, "bucket", bkt.Name())
}
return nil
}
// deleteDirRec removes all objects prefixed with dir from the bucket. It skips objects for which the passed keep function returns true.
// NOTE: For removing whole blocks, strictly use `block.Delete`.
func deleteDirRec(ctx context.Context, logger log.Logger, bkt objstore.Bucket, dir string, keep func(name string) bool) error {
return bkt.Iter(ctx, dir, func(name string) error {
// If we hit a directory, recurse into it.
if strings.HasSuffix(name, objstore.DirDelim) {
return deleteDirRec(ctx, logger, bkt, name, keep)
}
if keep(name) {
return nil
}
if err := bkt.Delete(ctx, name); err != nil {
return err
}
level.Debug(logger).Log("msg", "deleted file", "file", name, "bucket", bkt.Name())
return nil
})
}
// DownloadMeta downloads only the meta file from the bucket by block ID.
// TODO(bwplotka): Differentiate between network error & partial upload.
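//
// A minimal usage sketch (metadata.Meta embeds the TSDB block meta, so fields
// such as MinTime/MaxTime are assumed available):
//
//	m, err := DownloadMeta(ctx, logger, bkt, id)
//	if err != nil {
//		return errors.Wrap(err, "download meta")
//	}
//	level.Info(logger).Log("msg", "got meta", "block", id, "minTime", m.MinTime, "maxTime", m.MaxTime)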
func DownloadMeta(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID) (metadata.Meta, error) {
rc, err := bkt.Get(ctx, path.Join(id.String(), MetaFilename))
if err != nil {
return metadata.Meta{}, errors.Wrapf(err, "meta.json bkt get for %s", id.String())
}
defer runutil.CloseWithLogOnErr(logger, rc, "download meta bucket client")
var m metadata.Meta
obj, err := ioutil.ReadAll(rc)
if err != nil {
return metadata.Meta{}, errors.Wrapf(err, "read meta.json for block %s", id.String())
}
if err = json.Unmarshal(obj, &m); err != nil {
return metadata.Meta{}, errors.Wrapf(err, "unmarshal meta.json for block %s", id.String())
}
return m, nil
}
// IsBlockDir returns the parsed ULID and true if the base name of the given path is a valid block ULID.
func IsBlockDir(path string) (id ulid.ULID, ok bool) {
id, err := ulid.Parse(filepath.Base(path))
return id, err == nil
}
// GetSegmentFiles returns the list of segment files for the given block. Paths are relative to the chunks directory.
// In case of errors, nil is returned.
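//
// A minimal usage sketch (bdir is a hypothetical local block directory):
//
//	for _, f := range GetSegmentFiles(bdir) {
//		fmt.Println(filepath.Join(ChunksDirname, f))
//	}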
func GetSegmentFiles(blockDir string) []string {
files, err := ioutil.ReadDir(filepath.Join(blockDir, ChunksDirname))
if err != nil {
return nil
}
// ReadDir returns files in sorted order already.
var result []string
for _, f := range files {
result = append(result, f.Name())
}
return result
}
// TODO(bwplotka): Gather stats when directly uploading files.
func gatherFileStats(blockDir string, hf metadata.HashFunc, logger log.Logger) (res []metadata.File, _ error) {
files, err := ioutil.ReadDir(filepath.Join(blockDir, ChunksDirname))
if err != nil {
return nil, errors.Wrapf(err, "read dir %v", filepath.Join(blockDir, ChunksDirname))
}
for _, f := range files {
mf := metadata.File{
RelPath: filepath.Join(ChunksDirname, f.Name()),
SizeBytes: f.Size(),
}
if hf != metadata.NoneFunc && !f.IsDir() {
h, err := metadata.CalculateHash(filepath.Join(blockDir, ChunksDirname, f.Name()), hf, logger)
if err != nil {
return nil, errors.Wrapf(err, "calculate hash %v", filepath.Join(ChunksDirname, f.Name()))
}
mf.Hash = &h
}
res = append(res, mf)
}
indexFile, err := os.Stat(filepath.Join(blockDir, IndexFilename))
if err != nil {
return nil, errors.Wrapf(err, "stat %v", filepath.Join(blockDir, IndexFilename))
}
mf := metadata.File{
RelPath: indexFile.Name(),
SizeBytes: indexFile.Size(),
}
if hf != metadata.NoneFunc {
h, err := metadata.CalculateHash(filepath.Join(blockDir, IndexFilename), hf, logger)
if err != nil {
return nil, errors.Wrapf(err, "calculate hash %v", indexFile.Name())
}
mf.Hash = &h
}
res = append(res, mf)
metaFile, err := os.Stat(filepath.Join(blockDir, MetaFilename))
if err != nil {
return nil, errors.Wrapf(err, "stat %v", filepath.Join(blockDir, MetaFilename))
}
res = append(res, metadata.File{RelPath: metaFile.Name()})
sort.Slice(res, func(i, j int) bool {
return strings.Compare(res[i].RelPath, res[j].RelPath) < 0
})
// TODO(bwplotka): Add optional files like tombstones?
return res, nil
}
// MarkForNoCompact creates a file which marks the block as not to be compacted.
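//
// A minimal usage sketch (the reason constant is assumed to be
// metadata.ManualNoCompactReason; the counter and metric name are hypothetical):
//
//	marked := prometheus.NewCounter(prometheus.CounterOpts{
//		Name: "blocks_marked_for_no_compact_total", // hypothetical name
//	})
//	if err := MarkForNoCompact(ctx, logger, bkt, id, metadata.ManualNoCompactReason,
//		"under investigation", marked); err != nil {
//		return err
//	}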
func MarkForNoCompact(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, reason metadata.NoCompactReason, details string, markedForNoCompact prometheus.Counter) error {
m := path.Join(id.String(), metadata.NoCompactMarkFilename)
noCompactMarkExists, err := bkt.Exists(ctx, m)
if err != nil {
return errors.Wrapf(err, "check exists %s in bucket", m)
}
if noCompactMarkExists {
level.Warn(logger).Log("msg", "requested to mark for no compaction, but file already exists; this should not happen; investigate", "err", errors.Errorf("file %s already exists in bucket", m))
return nil
}
noCompactMark, err := json.Marshal(metadata.NoCompactMark{
ID: id,
Version: metadata.NoCompactMarkVersion1,
NoCompactTime: time.Now().Unix(),
Reason: reason,
Details: details,
})
if err != nil {
return errors.Wrap(err, "json encode no compact mark")
}
if err := bkt.Upload(ctx, m, bytes.NewBuffer(noCompactMark)); err != nil {
return errors.Wrapf(err, "upload file %s to bucket", m)
}
markedForNoCompact.Inc()
level.Info(logger).Log("msg", "block has been marked for no compaction", "block", id)
return nil
}