Update shard naming to allow for 1M+ shards (#1170)
Co-authored-by: Azeem Shaikh <[email protected]>
azeemshaikh38 and azeemsgoogle authored Oct 28, 2021
1 parent c73c562 commit 8735961
Showing 2 changed files with 1 addition and 12 deletions.
11 changes: 0 additions & 11 deletions cron/bq/main.go
@@ -22,7 +22,6 @@ import (
 	"io/ioutil"
 	"log"
 	"net/http"
-	"strconv"
 	"strings"
 	"time"
 
@@ -67,16 +66,6 @@ func getBucketSummary(ctx context.Context, bucketURL string) (*bucketSummary, er
 			return nil, fmt.Errorf("error parsing Blob key: %w", err)
 		}
 		switch {
-		// TODO(azeems): Remove this case once all instances stop producing .shard_num file.
-		case filename == config.ShardNumFilename:
-			keyData, err := data.GetBlobContent(ctx, bucketURL, key)
-			if err != nil {
-				return nil, fmt.Errorf("error during GetBlobContent: %w", err)
-			}
-			summary.getOrCreate(creationTime).shardsExpected, err = strconv.Atoi(string(keyData))
-			if err != nil {
-				return nil, fmt.Errorf("error during strconv.Atoi: %w", err)
-			}
 		case strings.HasPrefix(filename, "shard-"):
 			summary.getOrCreate(creationTime).shardsCreated++
 		case filename == config.TransferStatusFilename:
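
With the deleted case gone, the bucket summary no longer reads a ".shard_num" blob to learn the expected shard count; it simply tallies blobs whose names start with "shard-". A minimal sketch of that classification, not part of the diff: the classify helper and the ".transfer_complete" placeholder for config.TransferStatusFilename are assumptions for illustration only.

// Minimal sketch, not part of the diff: how blob filenames are
// classified after this change. The classify helper and the
// ".transfer_complete" placeholder are assumptions for illustration.
package main

import (
	"fmt"
	"strings"
)

func classify(filename string) string {
	switch {
	case strings.HasPrefix(filename, "shard-"):
		return "shard blob: counted towards shardsCreated"
	case filename == ".transfer_complete": // placeholder for config.TransferStatusFilename
		return "transfer status blob"
	default:
		return "ignored (.shard_num is no longer handled specially)"
	}
}

func main() {
	fmt.Println(classify("shard-0001234"))
	fmt.Println(classify(".shard_num"))
}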
2 changes: 1 addition & 1 deletion cron/worker/main.go
@@ -54,7 +54,7 @@ func processRequest(ctx context.Context,
 	bucketURL, bucketURL2 string, checkDocs docs.Doc,
 	repoClient clients.RepoClient, logger *zap.Logger) error {
 	filename := data.GetBlobFilename(
-		fmt.Sprintf("shard-%05d", batchRequest.GetShardNum()),
+		fmt.Sprintf("shard-%07d", batchRequest.GetShardNum()),
 		batchRequest.GetJobTime().AsTime())
 	// Sanity check - make sure we are not re-processing an already processed request.
 	exists1, err := data.BlobExists(ctx, bucketURL, filename)
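
The only change on the worker side widens the zero-padded shard index from five to seven digits. A minimal sketch, not from the diff, of what that buys, presumably keeping shard blob names fixed-width and lexicographically sortable for jobs with more than 100,000 shards:

// Minimal sketch, not part of the diff: the old %05d pad overflows its
// width at 100,000 shards, while %07d keeps names fixed-width (and thus
// sortable as plain strings) up to 10,000,000 shards.
package main

import "fmt"

func main() {
	for _, n := range []int{42, 99999, 1234567} {
		fmt.Printf("old: shard-%05d  new: shard-%07d\n", n, n)
	}
	// Output:
	// old: shard-00042  new: shard-0000042
	// old: shard-99999  new: shard-0099999
	// old: shard-1234567  new: shard-1234567
}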
