Adding compactor.blocks-fetch-concurrency
Signed-off-by: alanprot <[email protected]>
alanprot committed Jul 14, 2022
1 parent 2a37238 commit b73a2bb
Showing 2 changed files with 16 additions and 8 deletions.
8 changes: 6 additions & 2 deletions pkg/compactor/compactor.go
@@ -64,7 +64,8 @@ var (
 garbageCollectedBlocks,
 blocksMarkedForNoCompaction,
 metadata.NoneFunc,
-cfg.BlockFilesConcurrency)
+cfg.BlockFilesConcurrency,
+cfg.BlocksFetchConcurrency)
 }

 ShuffleShardingGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.Bucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion, blocksMarkedForNoCompaction, garbageCollectedBlocks prometheus.Counter, remainingPlannedCompactions prometheus.Gauge, ring *ring.Ring, ringLifecycle *ring.Lifecycler, limits Limits, userID string) compact.Grouper {
@@ -84,7 +85,8 @@ var (
 ringLifecycle.Addr,
 limits,
 userID,
-cfg.BlockFilesConcurrency)
+cfg.BlockFilesConcurrency,
+cfg.BlocksFetchConcurrency)
 }

 DefaultBlocksCompactorFactory = func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (compact.Compactor, PlannerFactory, error) {
@@ -166,6 +168,7 @@ type Config struct {
 TenantCleanupDelay time.Duration `yaml:"tenant_cleanup_delay"`
 SkipBlocksWithOutOfOrderChunksEnabled bool `yaml:"skip_blocks_with_out_of_order_chunks_enabled"`
 BlockFilesConcurrency int `yaml:"block_files_concurrency"`
+BlocksFetchConcurrency int `yaml:"blocks_fetch_concurrency"`

 // Whether the migration of block deletion marks to the global markers location is enabled.
 BlockDeletionMarksMigrationEnabled bool `yaml:"block_deletion_marks_migration_enabled"`
@@ -216,6 +219,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
 f.BoolVar(&cfg.BlockDeletionMarksMigrationEnabled, "compactor.block-deletion-marks-migration-enabled", false, "When enabled, at compactor startup the bucket will be scanned and all found deletion marks inside the block location will be copied to the markers global location too. This option can (and should) be safely disabled as soon as the compactor has successfully run at least once.")
 f.BoolVar(&cfg.SkipBlocksWithOutOfOrderChunksEnabled, "compactor.skip-blocks-with-out-of-order-chunks-enabled", false, "When enabled, mark blocks containing index with out-of-order chunks for no compact instead of halting the compaction.")
 f.IntVar(&cfg.BlockFilesConcurrency, "compactor.block-files-concurrency", 10, "Number of goroutines to use when fetching/uploading block files from object storage.")
+f.IntVar(&cfg.BlocksFetchConcurrency, "compactor.blocks-fetch-concurrency", 10, "Number of goroutines to use when fetching blocks from object storage when compacting.")

 f.Var(&cfg.EnabledTenants, "compactor.enabled-tenants", "Comma separated list of tenants that can be compacted. If specified, only these tenants will be compacted by compactor, otherwise all tenants can be compacted. Subject to sharding.")
 f.Var(&cfg.DisabledTenants, "compactor.disabled-tenants", "Comma separated list of tenants that cannot be compacted by this compactor. If specified, and compactor would normally pick given tenant for compaction (via -compactor.enabled-tenants or sharding), it will be ignored instead.")
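Editor's note: the two flags registered above are independent knobs. compactor.block-files-concurrency bounds the goroutines used for the files within a block, while the new compactor.blocks-fetch-concurrency bounds how many blocks are fetched in parallel during compaction. The standalone sketch below only illustrates how such flags land in the Config fields; it is not code from this commit, and the trimmed-down config struct is hypothetical.

package main

import (
	"flag"
	"fmt"
)

// Trimmed-down stand-in for the compactor Config shown in the diff above;
// only the two concurrency fields are reproduced here.
type config struct {
	BlockFilesConcurrency  int
	BlocksFetchConcurrency int
}

func main() {
	var cfg config
	fs := flag.NewFlagSet("compactor", flag.ExitOnError)
	// Flag names, defaults, and help text mirror the RegisterFlags hunk above.
	fs.IntVar(&cfg.BlockFilesConcurrency, "compactor.block-files-concurrency", 10,
		"Number of goroutines to use when fetching/uploading block files from object storage.")
	fs.IntVar(&cfg.BlocksFetchConcurrency, "compactor.blocks-fetch-concurrency", 10,
		"Number of goroutines to use when fetching blocks from object storage when compacting.")

	// Overriding the new option on the command line leaves the other at its default.
	_ = fs.Parse([]string{"-compactor.blocks-fetch-concurrency=20"})
	fmt.Println(cfg.BlockFilesConcurrency, cfg.BlocksFetchConcurrency) // 10 20
}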
16 changes: 10 additions & 6 deletions pkg/compactor/shuffle_sharding_grouper.go
@@ -41,6 +41,7 @@ type ShuffleShardingGrouper struct {
 limits Limits
 userID string
 blockFilesConcurrency int
+blocksFetchConcurrency int

 ring ring.ReadRing
 ringLifecyclerAddr string
@@ -63,6 +64,7 @@ func NewShuffleShardingGrouper(
 limits Limits,
 userID string,
 blockFilesConcurrency int,
+blocksFetchConcurrency int,
 ) *ShuffleShardingGrouper {
 if logger == nil {
 logger = log.NewNopLogger()
@@ -100,12 +102,13 @@
 Name: "thanos_compact_group_vertical_compactions_total",
 Help: "Total number of group compaction attempts that resulted in a new block based on overlapping blocks.",
 }, []string{"group"}),
-compactorCfg:          compactorCfg,
-ring:                  ring,
-ringLifecyclerAddr:    ringLifecyclerAddr,
-limits:                limits,
-userID:                userID,
-blockFilesConcurrency: blockFilesConcurrency,
+compactorCfg:           compactorCfg,
+ring:                   ring,
+ringLifecyclerAddr:     ringLifecyclerAddr,
+limits:                 limits,
+userID:                 userID,
+blockFilesConcurrency:  blockFilesConcurrency,
+blocksFetchConcurrency: blocksFetchConcurrency,
 }
 }

@@ -184,6 +187,7 @@ func (g *ShuffleShardingGrouper) Groups(blocks map[ulid.ULID]*metadata.Meta) (re
 g.blocksMarkedForNoCompact,
 g.hashFunc,
 g.blockFilesConcurrency,
+g.blocksFetchConcurrency,
 )
 if err != nil {
 return nil, errors.Wrap(err, "create compaction group")
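Editor's note: the grouper only threads blocksFetchConcurrency through to each compaction group; the download loop that the value actually limits lives in the underlying compact package and is not part of this diff. As a rough, hypothetical sketch of the pattern such a setting typically caps (all names below are invented for illustration), a counting semaphore bounds how many blocks are fetched at once:

package main

import (
	"fmt"
	"sync"
)

// fetchBlocks is a hypothetical illustration, not code from this commit:
// it processes each block with at most blocksFetchConcurrency goroutines in flight.
func fetchBlocks(blockIDs []string, blocksFetchConcurrency int) {
	sem := make(chan struct{}, blocksFetchConcurrency) // counting semaphore
	var wg sync.WaitGroup
	for _, id := range blockIDs {
		wg.Add(1)
		sem <- struct{}{} // blocks when the limit is reached
		go func(id string) {
			defer wg.Done()
			defer func() { <-sem }()
			// Placeholder for downloading one block; within a real block download,
			// block-files-concurrency would separately bound the per-file goroutines.
			fmt.Println("fetching block", id)
		}(id)
	}
	wg.Wait()
}

func main() {
	fetchBlocks([]string{"blockA", "blockB", "blockC"}, 2)
}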
