diff --git a/cloud/kubernetes/bring-your-own-certs/client.yaml b/cloud/kubernetes/bring-your-own-certs/client.yaml index b74b03972f73..928d9a17e92c 100644 --- a/cloud/kubernetes/bring-your-own-certs/client.yaml +++ b/cloud/kubernetes/bring-your-own-certs/client.yaml @@ -20,7 +20,7 @@ spec: serviceAccountName: cockroachdb containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 # Keep a pod open indefinitely so kubectl exec can be used to get a shell to it # and run cockroach client commands, such as cockroach sql, cockroach node status, etc. command: diff --git a/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml b/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml index d563836772d3..bb7361943198 100644 --- a/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml @@ -153,7 +153,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/client-secure.yaml b/cloud/kubernetes/client-secure.yaml index 865482746577..0825ba7c0618 100644 --- a/cloud/kubernetes/client-secure.yaml +++ b/cloud/kubernetes/client-secure.yaml @@ -32,7 +32,7 @@ spec: mountPath: /cockroach-certs containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/cluster-init-secure.yaml b/cloud/kubernetes/cluster-init-secure.yaml index 37740a41f07c..15d82040f4a8 100644 --- a/cloud/kubernetes/cluster-init-secure.yaml +++ b/cloud/kubernetes/cluster-init-secure.yaml @@ -34,7 +34,7 @@ spec: mountPath: /cockroach-certs containers: - name: cluster-init - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/cluster-init.yaml b/cloud/kubernetes/cluster-init.yaml index 7965eb73dd9d..6590ba127540 100644 --- a/cloud/kubernetes/cluster-init.yaml +++ b/cloud/kubernetes/cluster-init.yaml @@ -10,7 +10,7 @@ spec: spec: containers: - name: cluster-init - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent command: - "/cockroach/cockroach" diff --git a/cloud/kubernetes/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/cockroachdb-statefulset-secure.yaml index 404412c2d645..9f7e13eb1b3c 100644 --- a/cloud/kubernetes/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/cockroachdb-statefulset-secure.yaml @@ -195,7 +195,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. 
You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/cockroachdb-statefulset.yaml b/cloud/kubernetes/cockroachdb-statefulset.yaml index 92ef0325bdb1..e3e8a7cc8537 100644 --- a/cloud/kubernetes/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/cockroachdb-statefulset.yaml @@ -98,7 +98,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/multiregion/client-secure.yaml b/cloud/kubernetes/multiregion/client-secure.yaml index 647cf92e2c9a..5d4f02244068 100644 --- a/cloud/kubernetes/multiregion/client-secure.yaml +++ b/cloud/kubernetes/multiregion/client-secure.yaml @@ -9,7 +9,7 @@ spec: serviceAccountName: cockroachdb containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/multiregion/cluster-init-secure.yaml b/cloud/kubernetes/multiregion/cluster-init-secure.yaml index 8644f1186a6f..3edebae25d15 100644 --- a/cloud/kubernetes/multiregion/cluster-init-secure.yaml +++ b/cloud/kubernetes/multiregion/cluster-init-secure.yaml @@ -11,7 +11,7 @@ spec: serviceAccountName: cockroachdb containers: - name: cluster-init - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml index 99ef2e6c6c0f..d7878ce7d61c 100644 --- a/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml @@ -167,7 +167,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml b/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml index f550e760d211..dea7298988ff 100644 --- a/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml +++ b/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml @@ -185,7 +185,7 @@ spec: name: cockroach-env containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. 
You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml b/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml index 70da01e9d0fe..19c058da5a13 100644 --- a/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml @@ -82,7 +82,7 @@ spec: hostNetwork: true containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent # TODO: If you configured taints to give CockroachDB exclusive access to nodes, feel free # to remove the requests and limits sections. If you didn't, you'll need to change these to diff --git a/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml b/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml index 698c300d47dc..eaff5065f2a1 100644 --- a/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml @@ -198,7 +198,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent # TODO: If you configured taints to give CockroachDB exclusive access to nodes, feel free # to remove the requests and limits sections. If you didn't, you'll need to change these to diff --git a/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml b/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml index 6eb9b03ce254..16bfcda66164 100644 --- a/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml @@ -141,7 +141,7 @@ spec: - name: cockroachdb # NOTE: Always use the most recent version of CockroachDB for the best # performance and reliability. - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml index cb66b616eb8e..ca8054e7b618 100644 --- a/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml @@ -232,7 +232,7 @@ spec: - name: cockroachdb # NOTE: Always use the most recent version of CockroachDB for the best # performance and reliability. - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. 
You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/v1.6/client-secure.yaml b/cloud/kubernetes/v1.6/client-secure.yaml index bd0ef6392626..8b6b20ded119 100644 --- a/cloud/kubernetes/v1.6/client-secure.yaml +++ b/cloud/kubernetes/v1.6/client-secure.yaml @@ -32,7 +32,7 @@ spec: mountPath: /cockroach-certs containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.6/cluster-init-secure.yaml b/cloud/kubernetes/v1.6/cluster-init-secure.yaml index 2e4f29e212a4..d3af9aa321aa 100644 --- a/cloud/kubernetes/v1.6/cluster-init-secure.yaml +++ b/cloud/kubernetes/v1.6/cluster-init-secure.yaml @@ -34,7 +34,7 @@ spec: mountPath: /cockroach-certs containers: - name: cluster-init - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.6/cluster-init.yaml b/cloud/kubernetes/v1.6/cluster-init.yaml index 19d8acc3e1ec..03e5c1ecb1ad 100644 --- a/cloud/kubernetes/v1.6/cluster-init.yaml +++ b/cloud/kubernetes/v1.6/cluster-init.yaml @@ -10,7 +10,7 @@ spec: spec: containers: - name: cluster-init - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent command: - "/cockroach/cockroach" diff --git a/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml index 11b7df75999b..23539ef8a9f0 100644 --- a/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml @@ -178,7 +178,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml b/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml index 20b6b62590ec..c7c565dc8fb3 100644 --- a/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml @@ -81,7 +81,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/v1.7/client-secure.yaml b/cloud/kubernetes/v1.7/client-secure.yaml index abc5e54c82fb..683488a3e673 100644 --- a/cloud/kubernetes/v1.7/client-secure.yaml +++ b/cloud/kubernetes/v1.7/client-secure.yaml @@ -32,7 +32,7 @@ spec: mountPath: /cockroach-certs containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.7/cluster-init-secure.yaml b/cloud/kubernetes/v1.7/cluster-init-secure.yaml index 19c7586122d3..26b4be34f16f 100644 --- a/cloud/kubernetes/v1.7/cluster-init-secure.yaml +++ b/cloud/kubernetes/v1.7/cluster-init-secure.yaml @@ -34,7 +34,7 @@ spec: mountPath: /cockroach-certs containers: - name: cluster-init - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.7/cluster-init.yaml b/cloud/kubernetes/v1.7/cluster-init.yaml 
index 9701bc3fc6d2..f3256cd2df5e 100644 --- a/cloud/kubernetes/v1.7/cluster-init.yaml +++ b/cloud/kubernetes/v1.7/cluster-init.yaml @@ -10,7 +10,7 @@ spec: spec: containers: - name: cluster-init - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent command: - "/cockroach/cockroach" diff --git a/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml index 0ca41639cf73..9dc1039845ee 100644 --- a/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml @@ -190,7 +190,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml b/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml index 501b6e6d815c..b4be69a79aa6 100644 --- a/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml @@ -93,7 +93,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v22.1.5 + image: cockroachdb/cockroach:v22.1.6 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/docs/generated/settings/settings.html b/docs/generated/settings/settings.html index 38826353bf76..199ca927ddce 100644 --- a/docs/generated/settings/settings.html +++ b/docs/generated/settings/settings.html @@ -56,6 +56,7 @@ kv.snapshot_delegation.enabledbooleanfalseset to true to allow snapshots from follower replicas kv.snapshot_rebalance.max_ratebyte size32 MiBthe rate limit (bytes/sec) to use for rebalance and upreplication snapshots kv.snapshot_recovery.max_ratebyte size32 MiBthe rate limit (bytes/sec) to use for recovery snapshots +kv.store.admission.provisioned_bandwidthbyte size0 Bif set to a non-zero value, this is used as the provisioned bandwidth (in bytes/s), for each store. It can be over-ridden on a per-store basis using the --store flag kv.transaction.max_intents_bytesinteger4194304maximum number of bytes used to track locks in transactions kv.transaction.max_refresh_spans_bytesinteger4194304maximum number of bytes used to track refresh spans in serializable transactions kv.transaction.reject_over_max_intents_budget.enabledbooleanfalseif set, transactions that exceed their lock tracking budget (kv.transaction.max_intents_bytes) are rejected instead of having their lock spans imprecisely compressed diff --git a/pkg/base/store_spec.go b/pkg/base/store_spec.go index a8d6c77a9a2e..3b1de84d2a7e 100644 --- a/pkg/base/store_spec.go +++ b/pkg/base/store_spec.go @@ -161,6 +161,86 @@ func (ss *SizeSpec) Set(value string) error { return nil } +// ProvisionedRateSpec is an optional part of the StoreSpec. +// +// TODO(sumeer): We should map the file path specified in the store spec to +// the disk name. df can be used to map paths to names like /dev/nvme1n1 and +// /dev/sdb (these examples are from AWS EBS and GCP PD respectively) and the +// corresponding names produced by disk_counters.go are nvme1n1 and sdb +// respectively. We need to find or write a platform independent library -- +// see the discussion on +// https://github.com/cockroachdb/cockroach/pull/86063#pullrequestreview-1074487018. 
+// With that change, the ProvisionedRateSpec would only be needed to override +// the cluster setting when there are heterogenous bandwidth limits in a +// cluster (there would be no more DiskName field). +type ProvisionedRateSpec struct { + // DiskName is the name of the disk observed by the code in disk_counters.go + // when retrieving stats for this store. + DiskName string + // ProvisionedBandwidth is the bandwidth provisioned for this store in + // bytes/s. + ProvisionedBandwidth int64 +} + +func newStoreProvisionedRateSpec( + field redact.SafeString, value string, +) (ProvisionedRateSpec, error) { + var spec ProvisionedRateSpec + used := make(map[string]struct{}) + for _, split := range strings.Split(value, ":") { + if len(split) == 0 { + continue + } + subSplits := strings.Split(split, "=") + if len(subSplits) != 2 { + return ProvisionedRateSpec{}, errors.Errorf("%s field has invalid value %s", field, value) + } + subField := subSplits[0] + subValue := subSplits[1] + if _, ok := used[subField]; ok { + return ProvisionedRateSpec{}, errors.Errorf("%s field has duplicate sub-field %s", + field, subField) + } + used[subField] = struct{}{} + if len(subField) == 0 { + continue + } + if len(subValue) == 0 { + return ProvisionedRateSpec{}, + errors.Errorf("%s field has no value specified for sub-field %s", field, subField) + } + switch subField { + case "disk-name": + spec.DiskName = subValue + case "bandwidth": + if len(subValue) <= 2 || subValue[len(subValue)-2:] != "/s" { + return ProvisionedRateSpec{}, + errors.Errorf("%s field does not have bandwidth sub-field %s ending in /s", + field, subValue) + } + subValue = subValue[:len(subValue)-2] + var err error + spec.ProvisionedBandwidth, err = humanizeutil.ParseBytes(subValue) + if err != nil { + return ProvisionedRateSpec{}, + errors.Wrapf(err, "could not parse bandwidth in field %s", field) + } + if spec.ProvisionedBandwidth == 0 { + return ProvisionedRateSpec{}, + errors.Errorf("%s field is trying to set bandwidth to 0", field) + } + default: + return ProvisionedRateSpec{}, errors.Errorf("%s field has unknown sub-field %s", + field, subField) + } + } + if len(spec.DiskName) == 0 { + return ProvisionedRateSpec{}, + errors.Errorf("%s field did not specify disk-name", field) + } + return spec, nil +} + // StoreSpec contains the details that can be specified in the cli pertaining // to the --store flag. type StoreSpec struct { @@ -189,6 +269,8 @@ type StoreSpec struct { // through to C CCL code to set up encryption-at-rest. Must be set if and // only if encryption is enabled, otherwise left empty. EncryptionOptions []byte + // ProvisionedRateSpec is optional. + ProvisionedRateSpec ProvisionedRateSpec } // String returns a fully parsable version of the store spec. @@ -231,6 +313,16 @@ func (ss StoreSpec) String() string { fmt.Fprint(&buffer, optsStr) fmt.Fprint(&buffer, ",") } + if len(ss.ProvisionedRateSpec.DiskName) > 0 { + fmt.Fprintf(&buffer, "provisioned-rate=disk-name=%s", + ss.ProvisionedRateSpec.DiskName) + if ss.ProvisionedRateSpec.ProvisionedBandwidth > 0 { + fmt.Fprintf(&buffer, ":bandwidth=%s/s,", + humanizeutil.IBytes(ss.ProvisionedRateSpec.ProvisionedBandwidth)) + } else { + fmt.Fprintf(&buffer, ",") + } + } // Trim the extra comma from the end if it exists. if l := buffer.Len(); l > 0 { buffer.Truncate(l - 1) @@ -259,7 +351,7 @@ var fractionRegex = regexp.MustCompile(`^([-]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+|[0- // NewStoreSpec parses the string passed into a --store flag and returns a // StoreSpec if it is correctly parsed. 
-// There are four possible fields that can be passed in, comma separated: +// There are five possible fields that can be passed in, comma separated: // - path=xxx The directory in which to the rocks db instance should be // located, required unless using a in memory storage. // - type=mem This specifies that the store is an in memory storage instead of @@ -273,6 +365,10 @@ var fractionRegex = regexp.MustCompile(`^([-]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+|[0- // - 20% -> 20% of the available space // - 0.2 -> 20% of the available space // - attrs=xxx:yyy:zzz A colon separated list of optional attributes. +// - provisioned-rate=disk-name=[:bandwidth=] The +// provisioned-rate can be used for admission control for operations on the +// store. The bandwidth is optional, and if unspecified, a cluster setting +// (kv.store.admission.provisioned_bandwidth) will be used. // Note that commas are forbidden within any field name or value. func NewStoreSpec(value string) (StoreSpec, error) { const pathField = "path" @@ -399,6 +495,13 @@ func NewStoreSpec(value string) (StoreSpec, error) { return StoreSpec{}, err } ss.PebbleOptions = buf.String() + case "provisioned-rate": + rateSpec, err := newStoreProvisionedRateSpec("provisioned-rate", value) + if err != nil { + return StoreSpec{}, err + } + ss.ProvisionedRateSpec = rateSpec + default: return StoreSpec{}, fmt.Errorf("%s is not a valid store field", field) } diff --git a/pkg/base/store_spec_test.go b/pkg/base/store_spec_test.go index f7fd65984222..28e460568d2b 100644 --- a/pkg/base/store_spec_test.go +++ b/pkg/base/store_spec_test.go @@ -151,6 +151,14 @@ target_file_size=2097152` {"path=/mnt/hda1,type=other", "other is not a valid store type", StoreSpec{}}, {"path=/mnt/hda1,type=mem,size=20GiB", "path specified for in memory store", StoreSpec{}}, + // provisioned rate + {"path=/mnt/hda1,provisioned-rate=disk-name=nvme1n1:bandwidth=200MiB/s", "", + StoreSpec{Path: "/mnt/hda1", ProvisionedRateSpec: base.ProvisionedRateSpec{ + DiskName: "nvme1n1", ProvisionedBandwidth: 200 << 20}}}, + {"path=/mnt/hda1,provisioned-rate=disk-name=sdb", "", StoreSpec{ + Path: "/mnt/hda1", ProvisionedRateSpec: base.ProvisionedRateSpec{ + DiskName: "sdb", ProvisionedBandwidth: 0}}}, + // RocksDB {"path=/,rocksdb=key1=val1;key2=val2", "", StoreSpec{Path: "/", RocksDBOptions: "key1=val1;key2=val2"}}, diff --git a/pkg/ccl/streamingccl/event.go b/pkg/ccl/streamingccl/event.go index d04af2a9b1c6..4491b7cad25f 100644 --- a/pkg/ccl/streamingccl/event.go +++ b/pkg/ccl/streamingccl/event.go @@ -50,7 +50,7 @@ type Event interface { // GetResolvedSpans returns a list of span-time pairs indicating the time for // which all KV events within that span has been emitted. - GetResolvedSpans() *[]jobspb.ResolvedSpan + GetResolvedSpans() []jobspb.ResolvedSpan } // kvEvent is a key value pair that needs to be ingested. @@ -81,7 +81,7 @@ func (kve kvEvent) GetDeleteRange() *roachpb.RangeFeedDeleteRange { } // GetResolvedSpans implements the Event interface. -func (kve kvEvent) GetResolvedSpans() *[]jobspb.ResolvedSpan { +func (kve kvEvent) GetResolvedSpans() []jobspb.ResolvedSpan { return nil } @@ -111,7 +111,7 @@ func (sste sstableEvent) GetDeleteRange() *roachpb.RangeFeedDeleteRange { } // GetResolvedSpans implements the Event interface. 
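A minimal sketch of a consumer under the new signature: GetResolvedSpans now returns a plain []jobspb.ResolvedSpan (nil for non-checkpoint events), so callers range over it directly instead of dereferencing a pointer, mirroring the ExampleClient change above. The helper name and the assumed imports (streamingccl, util/hlc) are illustrative and not part of this change.

    // minResolvedTS returns the smallest resolved timestamp carried by a
    // checkpoint event; for KV, SSTable, and DelRange events the returned
    // slice is nil, the loop is a no-op, and hlc.MaxTimestamp is returned.
    func minResolvedTS(event streamingccl.Event) hlc.Timestamp {
        minTS := hlc.MaxTimestamp
        for _, rs := range event.GetResolvedSpans() {
            if rs.Timestamp.Less(minTS) {
                minTS = rs.Timestamp
            }
        }
        return minTS
    }
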
-func (sste sstableEvent) GetResolvedSpans() *[]jobspb.ResolvedSpan { +func (sste sstableEvent) GetResolvedSpans() []jobspb.ResolvedSpan { return nil } @@ -143,7 +143,7 @@ func (dre delRangeEvent) GetDeleteRange() *roachpb.RangeFeedDeleteRange { } // GetResolvedSpans implements the Event interface. -func (dre delRangeEvent) GetResolvedSpans() *[]jobspb.ResolvedSpan { +func (dre delRangeEvent) GetResolvedSpans() []jobspb.ResolvedSpan { return nil } @@ -178,8 +178,8 @@ func (ce checkpointEvent) GetDeleteRange() *roachpb.RangeFeedDeleteRange { } // GetResolvedSpans implements the Event interface. -func (ce checkpointEvent) GetResolvedSpans() *[]jobspb.ResolvedSpan { - return &ce.resolvedSpans +func (ce checkpointEvent) GetResolvedSpans() []jobspb.ResolvedSpan { + return ce.resolvedSpans } // MakeKVEvent creates an Event from a KV. diff --git a/pkg/ccl/streamingccl/streamclient/client_test.go b/pkg/ccl/streamingccl/streamclient/client_test.go index 1a4351e1c55d..c2c0e98517b1 100644 --- a/pkg/ccl/streamingccl/streamclient/client_test.go +++ b/pkg/ccl/streamingccl/streamclient/client_test.go @@ -124,8 +124,6 @@ func TestGetFirstActiveClient(t *testing.T) { defer func() { require.NoError(t, client.Close(context.Background())) }() - interceptable, ok := client.(InterceptableStreamClient) - require.True(t, ok) streamAddresses := []string{ "randomgen://test0/", @@ -142,7 +140,7 @@ func TestGetFirstActiveClient(t *testing.T) { } // Track dials and error for all but test3 and test4 - interceptable.RegisterDialInterception(func(streamURL *url.URL) error { + client.RegisterDialInterception(func(streamURL *url.URL) error { addr := streamURL.String() addressDialCount[addr]++ if addr != streamAddresses[3] && addr != streamAddresses[4] { @@ -151,7 +149,7 @@ func TestGetFirstActiveClient(t *testing.T) { return nil }) - client, err := GetFirstActiveClient(context.Background(), streamAddresses) + activeClient, err := GetFirstActiveClient(context.Background(), streamAddresses) require.NoError(t, err) // Should've dialed the valid schemes up to the 5th one where it should've @@ -165,7 +163,7 @@ func TestGetFirstActiveClient(t *testing.T) { require.Equal(t, 0, addressDialCount[streamAddresses[6]]) // The 5th should've succeded as it was a valid scheme and succeeded Dial - require.Equal(t, client.(*randomStreamClient).streamURL.String(), streamAddresses[4]) + require.Equal(t, activeClient.(*RandomStreamClient).streamURL.String(), streamAddresses[4]) } // ExampleClientUsage serves as documentation to indicate how a stream @@ -243,7 +241,7 @@ func ExampleClient() { case streamingccl.CheckpointEvent: ingested.Lock() minTS := hlc.MaxTimestamp - for _, rs := range *event.GetResolvedSpans() { + for _, rs := range event.GetResolvedSpans() { if rs.Timestamp.Less(minTS) { minTS = rs.Timestamp } diff --git a/pkg/ccl/streamingccl/streamclient/random_stream_client.go b/pkg/ccl/streamingccl/streamclient/random_stream_client.go index 397f75941252..a16b9daf32f3 100644 --- a/pkg/ccl/streamingccl/streamclient/random_stream_client.go +++ b/pkg/ccl/streamingccl/streamclient/random_stream_client.go @@ -52,15 +52,17 @@ const ( // EventFrequency is the frequency in nanoseconds that the stream will emit // randomly generated KV events. EventFrequency = "EVENT_FREQUENCY" - // KVsPerCheckpoint controls approximately how many KV events should be emitted - // between checkpoint events. 
- KVsPerCheckpoint = "KVS_PER_CHECKPOINT" + // EventsPerCheckpoint controls approximately how many data events (KV/SST/DelRange) + // should be emitted between checkpoint events. + EventsPerCheckpoint = "EVENTS_PER_CHECKPOINT" // NumPartitions controls the number of partitions the client will stream data // back on. Each partition will encompass a single table span. NumPartitions = "NUM_PARTITIONS" - // DupProbability controls the probability with which we emit duplicate KV + // DupProbability controls the probability with which we emit duplicate data // events. DupProbability = "DUP_PROBABILITY" + // SSTProbability controls the probability with which we emit SST event. + SSTProbability = "SST_PROBABILITY" // TenantID specifies the ID of the tenant we are ingesting data into. This // allows the client to prefix the generated KVs with the appropriate tenant // prefix. @@ -74,8 +76,8 @@ const ( ) // TODO(dt): just make interceptors a singleton, not the whole client. -var randomStreamClientSingleton = func() *randomStreamClient { - c := randomStreamClient{} +var randomStreamClientSingleton = func() *RandomStreamClient { + c := RandomStreamClient{} c.mu.tableID = 52 return &c }() @@ -83,7 +85,7 @@ var randomStreamClientSingleton = func() *randomStreamClient { // GetRandomStreamClientSingletonForTesting returns the singleton instance of // the client. This is to be used in testing, when interceptors can be // registered on the client to observe events. -func GetRandomStreamClientSingletonForTesting() Client { +func GetRandomStreamClientSingletonForTesting() *RandomStreamClient { return randomStreamClientSingleton } @@ -99,45 +101,32 @@ type DialInterceptFn func(streamURL *url.URL) error // Heartbeat. type HeartbeatInterceptFn func(timestamp hlc.Timestamp) -// InterceptableStreamClient wraps a Client, and provides a method to register -// interceptor methods that are run on every streamed Event. -type InterceptableStreamClient interface { - Client - - // RegisterInterception is how you can register your interceptor to be called - // from an InterceptableStreamClient. - RegisterInterception(fn InterceptFn) - - // RegisterDialInterception registers an interceptor to be called - // whenever Dial is called on the client. - RegisterDialInterception(fn DialInterceptFn) - // RegisterHeartbeatInterception registers an interceptor to be called - // whenever Heartbeat is called on the client. - RegisterHeartbeatInterception(fn HeartbeatInterceptFn) - - // ClearInterceptors clears all registered interceptors on the client. - ClearInterceptors() -} +// SSTableMakerFn is a function that generates RangeFeedSSTable event +// with a given list of roachpb.KeyValue. +type SSTableMakerFn func(keyValues []roachpb.KeyValue) roachpb.RangeFeedSSTable // randomStreamConfig specifies the variables that controls the rate and type of // events that the generated stream emits. 
type randomStreamConfig struct { - valueRange int - eventFrequency time.Duration - kvsPerCheckpoint int - numPartitions int - dupProbability float64 - tenantID roachpb.TenantID + valueRange int + eventFrequency time.Duration + eventsPerCheckpoint int + numPartitions int + dupProbability float64 + sstProbability float64 + + tenantID roachpb.TenantID } func parseRandomStreamConfig(streamURL *url.URL) (randomStreamConfig, error) { c := randomStreamConfig{ - valueRange: 100, - eventFrequency: 10 * time.Microsecond, - kvsPerCheckpoint: 100, - numPartitions: 1, - dupProbability: 0.5, - tenantID: roachpb.SystemTenantID, + valueRange: 100, + eventFrequency: 10 * time.Microsecond, + eventsPerCheckpoint: 30, + numPartitions: 1, // TODO(casper): increases this + dupProbability: 0.3, + sstProbability: 0.2, + tenantID: roachpb.SystemTenantID, } var err error @@ -148,16 +137,23 @@ func parseRandomStreamConfig(streamURL *url.URL) (randomStreamConfig, error) { } } - if kvFreqStr := streamURL.Query().Get(EventFrequency); kvFreqStr != "" { - kvFreq, err := strconv.Atoi(kvFreqStr) - c.eventFrequency = time.Duration(kvFreq) + if eventFreqStr := streamURL.Query().Get(EventFrequency); eventFreqStr != "" { + eventFreq, err := strconv.Atoi(eventFreqStr) + c.eventFrequency = time.Duration(eventFreq) + if err != nil { + return c, err + } + } + + if eventsPerCheckpointStr := streamURL.Query().Get(EventsPerCheckpoint); eventsPerCheckpointStr != "" { + c.eventsPerCheckpoint, err = strconv.Atoi(eventsPerCheckpointStr) if err != nil { return c, err } } - if kvsPerCheckpointStr := streamURL.Query().Get(KVsPerCheckpoint); kvsPerCheckpointStr != "" { - c.kvsPerCheckpoint, err = strconv.Atoi(kvsPerCheckpointStr) + if sstProbabilityStr := streamURL.Query().Get(SSTProbability); sstProbabilityStr != "" { + c.sstProbability, err = strconv.ParseFloat(sstProbabilityStr, 32) if err != nil { return c, err } @@ -195,20 +191,88 @@ func (c randomStreamConfig) URL(table int) string { q := u.Query() q.Add(ValueRangeKey, strconv.Itoa(c.valueRange)) q.Add(EventFrequency, strconv.Itoa(int(c.eventFrequency))) - q.Add(KVsPerCheckpoint, strconv.Itoa(c.kvsPerCheckpoint)) + q.Add(EventsPerCheckpoint, strconv.Itoa(c.eventsPerCheckpoint)) q.Add(NumPartitions, strconv.Itoa(c.numPartitions)) q.Add(DupProbability, fmt.Sprintf("%f", c.dupProbability)) + q.Add(SSTProbability, fmt.Sprintf("%f", c.sstProbability)) q.Add(TenantID, strconv.Itoa(int(c.tenantID.ToUint64()))) u.RawQuery = q.Encode() return u.String() } -// randomStreamClient is a temporary stream client implementation that generates +type randomEventGenerator struct { + rng *rand.Rand + config randomStreamConfig + numEventsSinceLastResolved int + sstMaker SSTableMakerFn + tableDesc *tabledesc.Mutable + systemKVs []roachpb.KeyValue +} + +func newRandomEventGenerator( + rng *rand.Rand, partitionURL *url.URL, config randomStreamConfig, fn SSTableMakerFn, +) (*randomEventGenerator, error) { + var partitionTableID int + partitionTableID, err := strconv.Atoi(partitionURL.Host) + if err != nil { + return nil, err + } + tableDesc, systemKVs, err := getDescriptorAndNamespaceKVForTableID(config, descpb.ID(partitionTableID)) + if err != nil { + return nil, err + } + return &randomEventGenerator{ + rng: rng, + config: config, + numEventsSinceLastResolved: 0, + sstMaker: fn, + tableDesc: tableDesc, + systemKVs: systemKVs, + }, nil +} + +func (r *randomEventGenerator) generateNewEvent() streamingccl.Event { + var event streamingccl.Event + if r.numEventsSinceLastResolved == r.config.eventsPerCheckpoint { 
+ // Emit a CheckpointEvent. + resolvedTime := timeutil.Now() + hlcResolvedTime := hlc.Timestamp{WallTime: resolvedTime.UnixNano()} + resolvedSpan := jobspb.ResolvedSpan{Span: r.tableDesc.TableSpan(keys.SystemSQLCodec), Timestamp: hlcResolvedTime} + event = streamingccl.MakeCheckpointEvent([]jobspb.ResolvedSpan{resolvedSpan}) + r.numEventsSinceLastResolved = 0 + } else { + // If there are system KVs to emit, prioritize those. + if len(r.systemKVs) > 0 { + systemKV := r.systemKVs[0] + systemKV.Value.Timestamp = hlc.Timestamp{WallTime: timeutil.Now().UnixNano()} + event = streamingccl.MakeKVEvent(systemKV) + r.systemKVs = r.systemKVs[1:] + return event + } + + // Emit SST with given probability. + // TODO(casper): add support for DelRange. + if prob := r.rng.Float64(); prob < r.config.sstProbability { + size := 10 + r.rng.Intn(30) + keyVals := make([]roachpb.KeyValue, 0, size) + for i := 0; i < size; i++ { + keyVals = append(keyVals, makeRandomKey(r.rng, r.config, r.tableDesc)) + } + event = streamingccl.MakeSSTableEvent(r.sstMaker(keyVals)) + } else { + event = streamingccl.MakeKVEvent(makeRandomKey(r.rng, r.config, r.tableDesc)) + } + r.numEventsSinceLastResolved++ + } + return event +} + +// RandomStreamClient is a temporary stream client implementation that generates // random events. // // The client can be configured to return more than one partition via the stream // URL. Each partition covers a single table span. -type randomStreamClient struct { +type RandomStreamClient struct { config randomStreamConfig streamURL *url.URL @@ -221,12 +285,12 @@ type randomStreamClient struct { interceptors []InterceptFn dialInterceptors []DialInterceptFn heartbeatInterceptors []HeartbeatInterceptFn + sstMaker SSTableMakerFn tableID int } } -var _ Client = &randomStreamClient{} -var _ InterceptableStreamClient = &randomStreamClient{} +var _ Client = &RandomStreamClient{} // newRandomStreamClient returns a stream client that generates a random set of // events on a table with an integer key and integer value for the table with @@ -243,7 +307,7 @@ func newRandomStreamClient(streamURL *url.URL) (Client, error) { return c, nil } -func (m *randomStreamClient) getNextTableID() int { +func (m *RandomStreamClient) getNextTableID() int { m.mu.Lock() defer m.mu.Unlock() ret := m.mu.tableID @@ -251,7 +315,7 @@ func (m *randomStreamClient) getNextTableID() int { return ret } -func (m *randomStreamClient) tableDescForID(tableID int) (*tabledesc.Mutable, error) { +func (m *RandomStreamClient) tableDescForID(tableID int) (*tabledesc.Mutable, error) { partitionURI := m.config.URL(tableID) partitionURL, err := url.Parse(partitionURI) if err != nil { @@ -266,12 +330,12 @@ func (m *randomStreamClient) tableDescForID(tableID int) (*tabledesc.Mutable, er if err != nil { return nil, err } - tableDesc, _, err := m.getDescriptorAndNamespaceKVForTableID(config, descpb.ID(partitionTableID)) + tableDesc, _, err := getDescriptorAndNamespaceKVForTableID(config, descpb.ID(partitionTableID)) return tableDesc, err } // Dial implements Client interface. -func (m *randomStreamClient) Dial(ctx context.Context) error { +func (m *RandomStreamClient) Dial(ctx context.Context) error { m.mu.Lock() defer m.mu.Unlock() for _, interceptor := range m.mu.dialInterceptors { @@ -287,7 +351,7 @@ func (m *randomStreamClient) Dial(ctx context.Context) error { } // Plan implements the Client interface. 
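For orientation on the renamed knobs, here is a rough sketch (not taken from this change) of building a random stream URI by hand using the exported query-parameter constants. The randomgen scheme and the starting table ID 52 come from this file, the values echo the new defaults in parseRandomStreamConfig, and the assumed imports are net/url and the streamclient package.

    u, err := url.Parse("randomgen://52/")
    if err != nil {
        panic(err)
    }
    q := u.Query()
    q.Add(streamclient.EventsPerCheckpoint, "30") // checkpoint roughly every 30 data events
    q.Add(streamclient.SSTProbability, "0.2")     // ~20% of data events are SSTables
    q.Add(streamclient.DupProbability, "0.3")     // ~30% chance of re-emitting the previous event
    u.RawQuery = q.Encode()
    // u.String() can then be passed to streamclient.NewStreamClient, as in client_test.go.
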
-func (m *randomStreamClient) Plan(ctx context.Context, id streaming.StreamID) (Topology, error) { +func (m *RandomStreamClient) Plan(ctx context.Context, id streaming.StreamID) (Topology, error) { topology := make(Topology, 0, m.config.numPartitions) log.Infof(ctx, "planning random stream for tenant %d", m.config.tenantID) @@ -315,7 +379,7 @@ func (m *randomStreamClient) Plan(ctx context.Context, id streaming.StreamID) (T } // Create implements the Client interface. -func (m *randomStreamClient) Create( +func (m *RandomStreamClient) Create( ctx context.Context, target roachpb.TenantID, ) (streaming.StreamID, error) { log.Infof(ctx, "creating random stream for tenant %d", target.ToUint64()) @@ -324,7 +388,7 @@ func (m *randomStreamClient) Create( } // Heartbeat implements the Client interface. -func (m *randomStreamClient) Heartbeat( +func (m *RandomStreamClient) Heartbeat( ctx context.Context, _ streaming.StreamID, ts hlc.Timestamp, ) (streampb.StreamReplicationStatus, error) { m.mu.Lock() @@ -340,7 +404,7 @@ func (m *randomStreamClient) Heartbeat( // getDescriptorAndNamespaceKVForTableID returns the namespace and descriptor // KVs for the table with tableID. -func (m *randomStreamClient) getDescriptorAndNamespaceKVForTableID( +func getDescriptorAndNamespaceKVForTableID( config randomStreamConfig, tableID descpb.ID, ) (*tabledesc.Mutable, []roachpb.KeyValue, error) { tableName := fmt.Sprintf("%s%d", IngestionTablePrefix, tableID) @@ -387,18 +451,19 @@ func (m *randomStreamClient) getDescriptorAndNamespaceKVForTableID( } // Close implements the Client interface. -func (m *randomStreamClient) Close(ctx context.Context) error { +func (m *RandomStreamClient) Close(ctx context.Context) error { return nil } // Subscribe implements the Client interface. -func (m *randomStreamClient) Subscribe( +func (m *RandomStreamClient) Subscribe( ctx context.Context, stream streaming.StreamID, spec SubscriptionToken, checkpoint hlc.Timestamp, ) (Subscription, error) { partitionURL, err := url.Parse(string(spec)) if err != nil { return nil, err } + // add option for sst probability config, err := parseRandomStreamConfig(partitionURL) if err != nil { return nil, err @@ -411,85 +476,36 @@ func (m *randomStreamClient) Subscribe( panic("cannot start random stream client event stream in the future") } - var partitionTableID int - partitionTableID, err = strconv.Atoi(partitionURL.Host) - if err != nil { - return nil, err - } - log.Infof(ctx, "producing kvs for metadata for table %d for tenant %d based on %q", partitionTableID, config.tenantID, spec) - - tableDesc, systemKVs, err := m.getDescriptorAndNamespaceKVForTableID(config, descpb.ID(partitionTableID)) + // rand is not thread safe, so create a random source for each partition. + rng, _ := randutil.NewPseudoRand() + m.mu.Lock() + reg, err := newRandomEventGenerator(rng, partitionURL, config, m.mu.sstMaker) + m.mu.Unlock() if err != nil { return nil, err } - copyKeyVal := func(keyVal *roachpb.KeyValue) *roachpb.KeyValue { - res := roachpb.KeyValue{ - Key: make([]byte, len(keyVal.Key)), - Value: roachpb.Value{ - RawBytes: make([]byte, len(keyVal.Value.RawBytes)), - Timestamp: keyVal.Value.Timestamp, - }, - } - copy(res.Key, keyVal.Key) - copy(res.Value.RawBytes, keyVal.Value.RawBytes) - return &res - } - receiveFn := func(ctx context.Context) error { defer close(eventCh) - // rand is not thread safe, so create a random source for each partition. 
- r := rand.New(rand.NewSource(timeutil.Now().UnixNano())) - kvInterval := config.eventFrequency - - numKVEventsSinceLastResolved := 0 - - rng, _ := randutil.NewPseudoRand() - - var keyValCopy *roachpb.KeyValue + dataEventInterval := config.eventFrequency + var lastEventCopy streamingccl.Event for { var event streamingccl.Event - if numKVEventsSinceLastResolved == config.kvsPerCheckpoint { - // Emit a CheckpointEvent. - resolvedTime := timeutil.Now() - hlcResolvedTime := hlc.Timestamp{WallTime: resolvedTime.UnixNano()} - resolvedSpan := jobspb.ResolvedSpan{Span: tableDesc.TableSpan(keys.SystemSQLCodec), Timestamp: hlcResolvedTime} - event = streamingccl.MakeCheckpointEvent([]jobspb.ResolvedSpan{resolvedSpan}) - numKVEventsSinceLastResolved = 0 + if lastEventCopy != nil && rng.Float64() < config.dupProbability { + event = duplicateEvent(lastEventCopy) } else { - // If there are system KVs to emit, prioritize those. - if len(systemKVs) > 0 { - systemKV := systemKVs[0] - systemKV.Value.Timestamp = hlc.Timestamp{WallTime: timeutil.Now().UnixNano()} - event = streamingccl.MakeKVEvent(systemKV) - systemKVs = systemKVs[1:] - } else { - numKVEventsSinceLastResolved++ - // Generate a duplicate KVEvent. - if rng.Float64() < config.dupProbability && keyValCopy != nil { - event = streamingccl.MakeKVEvent(*keyValCopy) - } else { - event = streamingccl.MakeKVEvent(makeRandomKey(r, config, tableDesc)) - } - } - // Create a copy of KeyValue generated as the KeyValue in the event might get modified later. - keyValCopy = copyKeyVal(event.GetKV()) + event = reg.generateNewEvent() } + lastEventCopy = duplicateEvent(event) select { + // The event may get modified after sent to the channel. case eventCh <- event: case <-ctx.Done(): return ctx.Err() } - if event.Type() == streamingccl.KVEvent { - // Use the originally generated KeyValue copy as the KeyValue inside the event might - // get modified by ingestion processor's tenant rekeyer. - // 'keyValCopy' will only be set when it is a KV event. Copying the 'keyValCopy' again - // to prevent the event being modified by interceptor again. - event = streamingccl.MakeKVEvent(*copyKeyVal(keyValCopy)) - } func() { m.mu.Lock() defer m.mu.Unlock() @@ -497,13 +513,13 @@ func (m *randomStreamClient) Subscribe( if len(m.mu.interceptors) > 0 { for _, interceptor := range m.mu.interceptors { if interceptor != nil { - interceptor(event, spec) + interceptor(duplicateEvent(lastEventCopy), spec) } } } }() - time.Sleep(kvInterval) + time.Sleep(dataEventInterval) } } @@ -514,7 +530,7 @@ func (m *randomStreamClient) Subscribe( } // Complete implements the streamclient.Client interface. -func (m *randomStreamClient) Complete( +func (m *RandomStreamClient) Complete( ctx context.Context, streamID streaming.StreamID, successfulIngestion bool, ) error { return nil @@ -594,31 +610,78 @@ func makeRandomKey( } } -// RegisterInterception implements the InterceptableStreamClient interface. 
-func (m *randomStreamClient) RegisterInterception(fn InterceptFn) { +func duplicateEvent(event streamingccl.Event) streamingccl.Event { + var dup streamingccl.Event + switch event.Type() { + case streamingccl.CheckpointEvent: + resolvedSpans := make([]jobspb.ResolvedSpan, len(event.GetResolvedSpans())) + copy(resolvedSpans, event.GetResolvedSpans()) + dup = streamingccl.MakeCheckpointEvent(resolvedSpans) + case streamingccl.KVEvent: + eventKV := event.GetKV() + rawBytes := make([]byte, len(eventKV.Value.RawBytes)) + copy(rawBytes, eventKV.Value.RawBytes) + keyVal := roachpb.KeyValue{ + Key: event.GetKV().Key.Clone(), + Value: roachpb.Value{ + RawBytes: rawBytes, + Timestamp: eventKV.Value.Timestamp, + }, + } + dup = streamingccl.MakeKVEvent(keyVal) + case streamingccl.SSTableEvent: + sst := event.GetSSTable() + dataCopy := make([]byte, len(sst.Data)) + copy(dataCopy, sst.Data) + dup = streamingccl.MakeSSTableEvent(roachpb.RangeFeedSSTable{ + Data: dataCopy, + Span: sst.Span.Clone(), + WriteTS: sst.WriteTS, + }) + default: + panic("unsupported event type") + } + return dup +} + +// RegisterInterception registers an interceptor to be called after +// an event is emitted from the client. +func (m *RandomStreamClient) RegisterInterception(fn InterceptFn) { m.mu.Lock() defer m.mu.Unlock() m.mu.interceptors = append(m.mu.interceptors, fn) } -// RegisterDialInterception implements the InterceptableStreamClient interface. -func (m *randomStreamClient) RegisterDialInterception(fn DialInterceptFn) { +// RegisterDialInterception registers an interceptor to be called +// whenever Dial is called on the client. +func (m *RandomStreamClient) RegisterDialInterception(fn DialInterceptFn) { m.mu.Lock() defer m.mu.Unlock() m.mu.dialInterceptors = append(m.mu.dialInterceptors, fn) } -// RegisterHeartbeatInterception implements the InterceptableStreamClient interface. -func (m *randomStreamClient) RegisterHeartbeatInterception(fn HeartbeatInterceptFn) { +// RegisterHeartbeatInterception registers an interceptor to be called +// whenever Heartbeat is called on the client. +func (m *RandomStreamClient) RegisterHeartbeatInterception(fn HeartbeatInterceptFn) { m.mu.Lock() defer m.mu.Unlock() m.mu.heartbeatInterceptors = append(m.mu.heartbeatInterceptors, fn) } -// ClearInterceptors implements the InterceptableStreamClient interface. -func (m *randomStreamClient) ClearInterceptors() { +// RegisterSSTableGenerator registers a functor to be called +// whenever an SSTable event is to be generated. +func (m *RandomStreamClient) RegisterSSTableGenerator(fn SSTableMakerFn) { + m.mu.Lock() + defer m.mu.Unlock() + m.mu.sstMaker = fn +} + +// ClearInterceptors clears all registered interceptors on the client.
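To summarize how the methods above are meant to be used now that interceptors hang directly off *RandomStreamClient, here is a condensed sketch of the wiring that the updated tests later in this diff adopt; t is a *testing.T in scope, and sstMaker is the test helper added further down in this diff.

    client := streamclient.GetRandomStreamClientSingletonForTesting()
    client.ClearInterceptors()
    client.RegisterSSTableGenerator(func(keyValues []roachpb.KeyValue) roachpb.RangeFeedSSTable {
        return sstMaker(t, keyValues)
    })
    client.RegisterInterception(func(event streamingccl.Event, spec streamclient.SubscriptionToken) {
        // Observe each emitted event (KV, SSTable, or checkpoint) here.
    })
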
+func (m *RandomStreamClient) ClearInterceptors() { m.mu.Lock() defer m.mu.Unlock() - m.mu.interceptors = make([]InterceptFn, 0) - m.mu.heartbeatInterceptors = make([]HeartbeatInterceptFn, 0) + m.mu.interceptors = m.mu.interceptors[:0] + m.mu.heartbeatInterceptors = m.mu.heartbeatInterceptors[:0] + m.mu.dialInterceptors = m.mu.dialInterceptors[:0] + m.mu.sstMaker = nil } diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go index 0c3c8483b57d..9ba472fe4e5e 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go @@ -419,7 +419,6 @@ func (sf *streamIngestionFrontier) maybeUpdatePartitionProgress() error { } progress := md.Progress - // Keep the recorded highwater empty until some advancement has been made if sf.highWaterAtStart.Less(highWatermark) { progress.Progress = &jobspb.Progress_HighWater{ diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor_test.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor_test.go index c33f7f08192b..abb7cd46427e 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor_test.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor_test.go @@ -302,14 +302,13 @@ func TestStreamIngestionFrontierProcessor(t *testing.T) { defer func() { require.NoError(t, client.Close(context.Background())) }() - interceptable, ok := client.(streamclient.InterceptableStreamClient) - require.True(t, ok) - defer interceptable.ClearInterceptors() + + client.ClearInterceptors() // Record heartbeats in a list and terminate the client once the expected // frontier timestamp has been reached heartbeats := make([]hlc.Timestamp, 0) - interceptable.RegisterHeartbeatInterception(func(heartbeatTs hlc.Timestamp) { + client.RegisterHeartbeatInterception(func(heartbeatTs hlc.Timestamp) { heartbeats = append(heartbeats, heartbeatTs) if tc.expectedFrontierTimestamp.LessEq(heartbeatTs) { doneCh <- struct{}{} diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_job.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_job.go index b0b24ae27746..b0e9dd0f18f4 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_job.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_job.go @@ -428,9 +428,7 @@ func (s *streamIngestionResumer) cancelProducerJob( streamID, s.job.ID()) if err = client.Complete(ctx, streamID, false /* successfulIngestion */); err != nil { log.Warningf(ctx, "encountered error when canceling the producer job: %v", err) - fmt.Println("canceled failure", err) } - fmt.Println("cancel sent") if err = client.Close(ctx); err != nil { log.Warningf(ctx, "encountered error when closing the stream client: %v", err) } diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go index 10f3b676bc2c..5b0d0040f35e 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go @@ -697,7 +697,7 @@ func (sip *streamIngestionProcessor) bufferKV(kv *roachpb.KeyValue) error { } func (sip *streamIngestionProcessor) bufferCheckpoint(event partitionEvent) error { - resolvedSpans := *event.GetResolvedSpans() + resolvedSpans := event.GetResolvedSpans() if resolvedSpans == nil { return errors.New("checkpoint event 
expected to have resolved spans") } diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor_test.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor_test.go index 9137af0fac83..3b62a63314cf 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor_test.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor_test.go @@ -37,6 +37,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/streaming" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/distsqlutils" + "github.com/cockroachdb/cockroach/pkg/testutils/storageutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -247,7 +248,8 @@ func TestStreamIngestionProcessor(t *testing.T) { {ID: "2", SubscriptionToken: p2, Spans: []roachpb.Span{p2Span}}, } out, err := runStreamIngestionProcessor(ctx, t, registry, kvDB, - partitions, startTime, []jobspb.ResolvedSpan{}, nil /* interceptEvents */, tenantRekey, mockClient, nil /* cutoverProvider */, nil /* streamingTestingKnobs */) + partitions, startTime, []jobspb.ResolvedSpan{}, tenantRekey, + mockClient, nil /* cutoverProvider */, nil /* streamingTestingKnobs */) require.NoError(t, err) emittedRows := readRows(out) @@ -290,7 +292,8 @@ func TestStreamIngestionProcessor(t *testing.T) { lastClientStart[token] = clientStartTime }} out, err := runStreamIngestionProcessor(ctx, t, registry, kvDB, - partitions, startTime, checkpoint, nil /* interceptEvents */, tenantRekey, mockClient, nil /* cutoverProvider */, streamingTestingKnobs) + partitions, startTime, checkpoint, tenantRekey, mockClient, + nil /* cutoverProvider */, streamingTestingKnobs) require.NoError(t, err) emittedRows := readRows(out) @@ -313,7 +316,8 @@ func TestStreamIngestionProcessor(t *testing.T) { {SubscriptionToken: streamclient.SubscriptionToken("2")}, } out, err := runStreamIngestionProcessor(ctx, t, registry, kvDB, - partitions, startTime, []jobspb.ResolvedSpan{}, nil /* interceptEvents */, tenantRekey, &errorStreamClient{}, nil /* cutoverProvider */, nil /* streamingTestingKnobs */) + partitions, startTime, []jobspb.ResolvedSpan{}, tenantRekey, &errorStreamClient{}, + nil /* cutoverProvider */, nil /* streamingTestingKnobs */) require.NoError(t, err) // Expect no rows, and just the error. @@ -445,10 +449,13 @@ func TestRandomClientGeneration(t *testing.T) { // The random client returns system and table data partitions. streamClient, err := streamclient.NewStreamClient(ctx, streamingccl.StreamAddress(streamAddr)) require.NoError(t, err) - id, err := streamClient.Create(ctx, roachpb.MakeTenantID(tenantID)) + + randomStreamClient, ok := streamClient.(*streamclient.RandomStreamClient) + require.True(t, ok) + id, err := randomStreamClient.Create(ctx, roachpb.MakeTenantID(tenantID)) require.NoError(t, err) - topo, err := streamClient.Plan(ctx, id) + topo, err := randomStreamClient.Plan(ctx, id) require.NoError(t, err) // One system and two table data partitions. 
require.Equal(t, 2 /* numPartitions */, len(topo)) @@ -467,11 +474,17 @@ func TestRandomClientGeneration(t *testing.T) { nil /* tableRekeys */, []execinfrapb.TenantRekey{tenantRekey}, true /* restoreTenantFromStream */) require.NoError(t, err) streamValidator := newStreamClientValidator(rekeyer) - validator := registerValidatorWithClient(streamValidator) + + randomStreamClient.ClearInterceptors() + randomStreamClient.RegisterSSTableGenerator(func(keyValues []roachpb.KeyValue) roachpb.RangeFeedSSTable { + return sstMaker(t, keyValues) + }) + randomStreamClient.RegisterInterception(cancelAfterCheckpoints) + randomStreamClient.RegisterInterception(validateFnWithValidator(t, streamValidator)) out, err := runStreamIngestionProcessor(ctx, t, registry, kvDB, - topo, startTime, []jobspb.ResolvedSpan{}, []streamclient.InterceptFn{cancelAfterCheckpoints, validator}, tenantRekey, - streamClient, noCutover{}, nil /* streamingTestingKnobs*/) + topo, startTime, []jobspb.ResolvedSpan{}, tenantRekey, + randomStreamClient, noCutover{}, nil /* streamingTestingKnobs*/) require.NoError(t, err) partitionSpanToTableID := getPartitionSpanToTableID(t, topo) @@ -537,14 +550,13 @@ func runStreamIngestionProcessor( partitions streamclient.Topology, startTime hlc.Timestamp, checkpoint []jobspb.ResolvedSpan, - interceptEvents []streamclient.InterceptFn, tenantRekey execinfrapb.TenantRekey, mockClient streamclient.Client, cutoverProvider cutoverProvider, streamingTestingKnobs *sql.StreamingTestingKnobs, ) (*distsqlutils.RowBuffer, error) { sip, out, err := getStreamIngestionProcessor(ctx, t, registry, kvDB, - partitions, startTime, checkpoint, interceptEvents, tenantRekey, mockClient, cutoverProvider, streamingTestingKnobs) + partitions, startTime, checkpoint, tenantRekey, mockClient, cutoverProvider, streamingTestingKnobs) require.NoError(t, err) sip.Run(ctx) @@ -567,7 +579,6 @@ func getStreamIngestionProcessor( partitions streamclient.Topology, startTime hlc.Timestamp, checkpoint []jobspb.ResolvedSpan, - interceptEvents []streamclient.InterceptFn, tenantRekey execinfrapb.TenantRekey, mockClient streamclient.Client, cutoverProvider cutoverProvider, @@ -624,11 +635,6 @@ func getStreamIngestionProcessor( sip.cutoverProvider = cutoverProvider } - if interceptable, ok := sip.forceClientForTests.(streamclient.InterceptableStreamClient); ok { - for _, interceptor := range interceptEvents { - interceptable.RegisterInterception(interceptor) - } - } return sip, out, err } @@ -642,33 +648,51 @@ func resolvedSpansMinTS(resolvedSpans []jobspb.ResolvedSpan) hlc.Timestamp { return minTS } -func registerValidatorWithClient( - validator *streamClientValidator, +func noteKeyVal( + validator *streamClientValidator, keyVal roachpb.KeyValue, spec streamclient.SubscriptionToken, +) { + if validator.rekeyer != nil { + rekey, _, err := validator.rekeyer.RewriteKey(keyVal.Key) + if err != nil { + panic(err.Error()) + } + keyVal.Key = rekey + keyVal.Value.ClearChecksum() + keyVal.Value.InitChecksum(keyVal.Key) + } + err := validator.noteRow(string(spec), string(keyVal.Key), string(keyVal.Value.RawBytes), + keyVal.Value.Timestamp) + if err != nil { + panic(err.Error()) + } +} + +func validateFnWithValidator( + t *testing.T, validator *streamClientValidator, ) func(event streamingccl.Event, spec streamclient.SubscriptionToken) { return func(event streamingccl.Event, spec streamclient.SubscriptionToken) { switch event.Type() { case streamingccl.CheckpointEvent: - resolvedTS := resolvedSpansMinTS(*event.GetResolvedSpans()) + resolvedTS := 
resolvedSpansMinTS(event.GetResolvedSpans()) err := validator.noteResolved(string(spec), resolvedTS) if err != nil { panic(err.Error()) } - case streamingccl.KVEvent: - keyVal := *event.GetKV() - if validator.rekeyer != nil { - rekey, _, err := validator.rekeyer.RewriteKey(keyVal.Key) - if err != nil { - panic(err.Error()) - } - keyVal.Key = rekey - keyVal.Value.ClearChecksum() - keyVal.Value.InitChecksum(keyVal.Key) - } - err := validator.noteRow(string(spec), string(keyVal.Key), string(keyVal.Value.RawBytes), - keyVal.Value.Timestamp) - if err != nil { - panic(err.Error()) + case streamingccl.SSTableEvent: + kvs := storageutils.ScanSST(t, event.GetSSTable().Data) + for _, keyVal := range kvs.MVCCKeyValues() { + noteKeyVal(validator, roachpb.KeyValue{ + Key: keyVal.Key.Key, + Value: roachpb.Value{ + RawBytes: keyVal.Value, + Timestamp: keyVal.Key.Timestamp, + }, + }, spec) } + case streamingccl.KVEvent: + noteKeyVal(validator, *event.GetKV(), spec) + case streamingccl.DeleteRangeEvent: + panic(errors.New("unsupported event type")) } } } diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_test.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_test.go index ca6d32f799e0..b6a730d0fe4b 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_test.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_test.go @@ -12,6 +12,7 @@ import ( "context" gosql "database/sql" "fmt" + "sort" "testing" "time" @@ -24,12 +25,14 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" + clustersettings "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/jobutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" + "github.com/cockroachdb/cockroach/pkg/testutils/storageutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -64,6 +67,35 @@ func getTestRandomClientURI(tenantID int) string { return makeTestStreamURI(valueRange, kvsPerResolved, numPartitions, kvFrequency, dupProbability, tenantID) } +func sstMaker(t *testing.T, keyValues []roachpb.KeyValue) roachpb.RangeFeedSSTable { + sort.Slice(keyValues, func(i, j int) bool { + return keyValues[i].Key.Compare(keyValues[j].Key) < 0 + }) + batchTS := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()} + kvs := make(storageutils.KVs, 0, len(keyValues)) + for i, keyVal := range keyValues { + if i > 0 && keyVal.Key.Equal(keyValues[i-1].Key) { + continue + } + kvs = append(kvs, storage.MVCCKeyValue{ + Key: storage.MVCCKey{ + Key: keyVal.Key, + Timestamp: batchTS, + }, + Value: keyVal.Value.RawBytes, + }) + } + data, start, end := storageutils.MakeSST(t, clustersettings.MakeTestingClusterSettings(), kvs) + return roachpb.RangeFeedSSTable{ + Data: data, + Span: roachpb.Span{ + Key: start, + EndKey: end, + }, + WriteTS: batchTS, + } +} + // TestStreamIngestionJobWithRandomClient creates a stream ingestion job that is // fed KVs from the random stream client. 
After receiving a certain number of // resolved timestamp events the test completes the job to tear down the flow, @@ -96,22 +128,17 @@ func TestStreamIngestionJobWithRandomClient(t *testing.T) { }}, true /* restoreTenantFromStream */) require.NoError(t, err) streamValidator := newStreamClientValidator(rekeyer) - registerValidator := registerValidatorWithClient(streamValidator) client := streamclient.GetRandomStreamClientSingletonForTesting() defer func() { require.NoError(t, client.Close(ctx)) }() - interceptEvents := []streamclient.InterceptFn{ - completeJobAfterCheckpoints, - registerValidator, - } - if interceptable, ok := client.(streamclient.InterceptableStreamClient); ok { - for _, interceptor := range interceptEvents { - interceptable.RegisterInterception(interceptor) - } - } else { - t.Fatal("expected the random stream client to be interceptable") - } + + client.ClearInterceptors() + client.RegisterInterception(completeJobAfterCheckpoints) + client.RegisterInterception(validateFnWithValidator(t, streamValidator)) + client.RegisterSSTableGenerator(func(keyValues []roachpb.KeyValue) roachpb.RangeFeedSSTable { + return sstMaker(t, keyValues) + }) var receivedRevertRequest chan struct{} var allowResponse chan struct{} diff --git a/pkg/ccl/streamingccl/streamingtest/replication_helpers.go b/pkg/ccl/streamingccl/streamingtest/replication_helpers.go index 46bb3cb6212f..9c1d17da6808 100644 --- a/pkg/ccl/streamingccl/streamingtest/replication_helpers.go +++ b/pkg/ccl/streamingccl/streamingtest/replication_helpers.go @@ -65,7 +65,7 @@ func ResolvedAtLeast(lo hlc.Timestamp) FeedEventPredicate { if msg.Type() != streamingccl.CheckpointEvent { return false } - return lo.LessEq(minResolvedTimestamp(*msg.GetResolvedSpans())) + return lo.LessEq(minResolvedTimestamp(msg.GetResolvedSpans())) } } @@ -114,7 +114,7 @@ func (rf *ReplicationFeed) ObserveResolved(ctx context.Context, lo hlc.Timestamp require.NoError(rf.t, rf.consumeUntil(ctx, ResolvedAtLeast(lo), func(err error) bool { return true })) - return minResolvedTimestamp(*rf.msg.GetResolvedSpans()) + return minResolvedTimestamp(rf.msg.GetResolvedSpans()) } // ObserveError consumes the feed until the feed is exhausted, and the final error should diff --git a/pkg/ccl/streamingccl/streamproducer/producer_job.go b/pkg/ccl/streamingccl/streamproducer/producer_job.go index 7970f3fb6b1c..04091c7ba735 100644 --- a/pkg/ccl/streamingccl/streamproducer/producer_job.go +++ b/pkg/ccl/streamingccl/streamproducer/producer_job.go @@ -101,7 +101,6 @@ func (p *producerJobResumer) Resume(ctx context.Context, execCtx interface{}) er case jobspb.StreamReplicationProgress_FINISHED_SUCCESSFULLY: return p.releaseProtectedTimestamp(ctx, execCfg) case jobspb.StreamReplicationProgress_FINISHED_UNSUCCESSFULLY: - fmt.Println("producer try update cancel requested") return j.Update(ctx, nil, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { ju.UpdateStatus(jobs.StatusCancelRequested) return nil diff --git a/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go b/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go index 2d064a854e16..8ce708f0f742 100644 --- a/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go +++ b/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go @@ -10,7 +10,6 @@ package streamproducer import ( "context" - "fmt" "time" "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl" @@ -268,7 +267,6 @@ func completeReplicationStream( md.Progress.RunningStatus = "succeeding this producer job as the corresponding " + "stream ingestion 
finished successfully" } else { - fmt.Println("producer update stream ingestion status") md.Progress.GetStreamReplication().StreamIngestionStatus = jobspb.StreamReplicationProgress_FINISHED_UNSUCCESSFULLY md.Progress.RunningStatus = "canceling this producer job as the corresponding " + diff --git a/pkg/cli/cliflags/flags.go b/pkg/cli/cliflags/flags.go index d4283a200011..afdded387a64 100644 --- a/pkg/cli/cliflags/flags.go +++ b/pkg/cli/cliflags/flags.go @@ -938,6 +938,17 @@ memory that the store may consume, for example: --store=type=mem,size=20GiB --store=type=mem,size=90% + +Optionally, to configure admission control enforcement to prevent disk +bandwidth saturation, the "provisioned-rate" field can be specified with +the "disk-name" and an optional "bandwidth". The bandwidth is used to override +the value of the cluster setting, kv.store.admission.provisioned_bandwidth. +For example: +
+
+  --store=provisioned-rate=disk-name=nvme1n1
+  --store=provisioned-rate=disk-name=sdb:bandwidth=250MiB/s
+
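For reference, a minimal Go sketch (not part of this patch) of the store specs that the two example flags above describe; the ProvisionedRateSpec field names are the ones exercised in the node_test.go changes later in this diff, and the 250 MiB/s figure is simply the value from the help text above.

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/base"
)

func main() {
	// disk-name only: the store falls back to the cluster setting
	// kv.store.admission.provisioned_bandwidth for its provisioned bandwidth.
	nvme := base.StoreSpec{
		ProvisionedRateSpec: base.ProvisionedRateSpec{DiskName: "nvme1n1"},
	}
	// disk-name plus bandwidth: the explicit value overrides the cluster
	// setting for this store only.
	sdb := base.StoreSpec{
		ProvisionedRateSpec: base.ProvisionedRateSpec{
			DiskName:             "sdb",
			ProvisionedBandwidth: 250 << 20, // 250 MiB/s, in bytes/s
		},
	}
	fmt.Println(nvme.ProvisionedRateSpec, sdb.ProvisionedRateSpec)
}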
 
Commas are forbidden in all values, since they are used to separate fields. Also, if you use equal signs in the file path to a store, you must use the diff --git a/pkg/cmd/roachtest/tests/predecessor_version.json b/pkg/cmd/roachtest/tests/predecessor_version.json index 5ddc19ffd15d..deb0929bcade 100644 --- a/pkg/cmd/roachtest/tests/predecessor_version.json +++ b/pkg/cmd/roachtest/tests/predecessor_version.json @@ -8,5 +8,5 @@ "21.1": "20.2.12", "21.2": "21.1.12", "22.1": "21.2.7", - "22.2": "22.1.5" + "22.2": "22.1.6" } diff --git a/pkg/kv/kvserver/store.go b/pkg/kv/kvserver/store.go index b11490877a89..6b678c6835c4 100644 --- a/pkg/kv/kvserver/store.go +++ b/pkg/kv/kvserver/store.go @@ -3961,3 +3961,11 @@ func (n *KVAdmissionControllerImpl) FollowerStoreWriteBytes( storeAdmissionQ.BypassedWorkDone( followerWriteBytes.numEntries, followerWriteBytes.StoreWorkDoneInfo) } + +// ProvisionedBandwidthForAdmissionControl set a value of the provisioned +// bandwidth for each store in the cluster. +var ProvisionedBandwidthForAdmissionControl = settings.RegisterByteSizeSetting( + settings.SystemOnly, "kv.store.admission.provisioned_bandwidth", + "if set to a non-zero value, this is used as the provisioned bandwidth (in bytes/s), "+ + "for each store. It can be over-ridden on a per-store basis using the --store flag", + 0).WithPublic() diff --git a/pkg/server/BUILD.bazel b/pkg/server/BUILD.bazel index 7c0c12517e8a..e25f7d0dcc90 100644 --- a/pkg/server/BUILD.bazel +++ b/pkg/server/BUILD.bazel @@ -414,6 +414,7 @@ go_test( "//pkg/server/diagnostics", "//pkg/server/diagnostics/diagnosticspb", "//pkg/server/serverpb", + "//pkg/server/status", "//pkg/server/status/statuspb", "//pkg/server/telemetry", "//pkg/settings", @@ -446,6 +447,7 @@ go_test( "//pkg/upgrade", "//pkg/upgrade/upgrades", "//pkg/util", + "//pkg/util/admission", "//pkg/util/envutil", "//pkg/util/grpcutil", "//pkg/util/hlc", diff --git a/pkg/server/node.go b/pkg/server/node.go index dd47521fb69c..f904411ef648 100644 --- a/pkg/server/node.go +++ b/pkg/server/node.go @@ -244,6 +244,8 @@ type Node struct { // COCKROACH_DEBUG_TS_IMPORT_FILE env var. suppressNodeStatus syncutil.AtomicBool + diskStatsMap diskStatsMap + testingErrorEvent func(context.Context, *roachpb.BatchRequest, error) } @@ -772,17 +774,96 @@ func (n *Node) UpdateIOThreshold(id roachpb.StoreID, threshold *admissionpb.IOTh s.UpdateIOThreshold(threshold) } +// diskStatsMap encapsulates all the logic for populating DiskStats for +// admission.StoreMetrics. 
+type diskStatsMap struct { + provisionedRate map[roachpb.StoreID]base.ProvisionedRateSpec + diskNameToStoreID map[string]roachpb.StoreID +} + +func (dsm *diskStatsMap) tryPopulateAdmissionDiskStats( + ctx context.Context, + clusterProvisionedBandwidth int64, + diskStatsFunc func(context.Context) ([]status.DiskStats, error), +) (stats map[roachpb.StoreID]admission.DiskStats, err error) { + if dsm.empty() { + return stats, nil + } + diskStats, err := diskStatsFunc(ctx) + if err != nil { + return stats, err + } + stats = make(map[roachpb.StoreID]admission.DiskStats) + for id, spec := range dsm.provisionedRate { + s := admission.DiskStats{ProvisionedBandwidth: clusterProvisionedBandwidth} + if spec.ProvisionedBandwidth > 0 { + s.ProvisionedBandwidth = spec.ProvisionedBandwidth + } + stats[id] = s + } + for i := range diskStats { + if id, ok := dsm.diskNameToStoreID[diskStats[i].Name]; ok { + s := stats[id] + s.BytesRead = uint64(diskStats[i].ReadBytes) + s.BytesWritten = uint64(diskStats[i].WriteBytes) + stats[id] = s + } + } + return stats, nil +} + +func (dsm *diskStatsMap) empty() bool { + return len(dsm.provisionedRate) == 0 +} + +func (dsm *diskStatsMap) initDiskStatsMap(specs []base.StoreSpec, engines []storage.Engine) error { + *dsm = diskStatsMap{ + provisionedRate: make(map[roachpb.StoreID]base.ProvisionedRateSpec), + diskNameToStoreID: make(map[string]roachpb.StoreID), + } + for i := range engines { + id, err := kvserver.ReadStoreIdent(context.Background(), engines[i]) + if err != nil { + return err + } + if len(specs[i].ProvisionedRateSpec.DiskName) > 0 { + dsm.provisionedRate[id.StoreID] = specs[i].ProvisionedRateSpec + dsm.diskNameToStoreID[specs[i].ProvisionedRateSpec.DiskName] = id.StoreID + } + } + return nil +} + +func (n *Node) registerEnginesForDiskStatsMap( + specs []base.StoreSpec, engines []storage.Engine, +) error { + return n.diskStatsMap.initDiskStatsMap(specs, engines) +} + // GetPebbleMetrics implements admission.PebbleMetricsProvider. 
func (n *Node) GetPebbleMetrics() []admission.StoreMetrics { + clusterProvisionedBandwidth := kvserver.ProvisionedBandwidthForAdmissionControl.Get( + &n.storeCfg.Settings.SV) + storeIDToDiskStats, err := n.diskStatsMap.tryPopulateAdmissionDiskStats( + context.Background(), clusterProvisionedBandwidth, status.GetDiskCounters) + if err != nil { + log.Warningf(context.Background(), "%v", + errors.Wrapf(err, "unable to populate disk stats")) + } var metrics []admission.StoreMetrics _ = n.stores.VisitStores(func(store *kvserver.Store) error { m := store.Engine().GetMetrics() im := store.Engine().GetInternalIntervalMetrics() + diskStats := admission.DiskStats{ProvisionedBandwidth: clusterProvisionedBandwidth} + if s, ok := storeIDToDiskStats[store.StoreID()]; ok { + diskStats = s + } metrics = append(metrics, admission.StoreMetrics{ StoreID: int32(store.StoreID()), Metrics: m.Metrics, WriteStallCount: m.WriteStallCount, - InternalIntervalMetrics: im}) + InternalIntervalMetrics: im, + DiskStats: diskStats}) return nil }) return metrics diff --git a/pkg/server/node_test.go b/pkg/server/node_test.go index 85d7c19b14b4..a0ab4126395b 100644 --- a/pkg/server/node_test.go +++ b/pkg/server/node_test.go @@ -26,10 +26,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/server/status" "github.com/cockroachdb/cockroach/pkg/server/status/statuspb" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/util/admission" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -747,3 +749,125 @@ func TestGetTenantWeights(t *testing.T) { checkSum(roachpb.SystemTenantID.ToUint64()) checkSum(otherTenantID) } + +func TestDiskStatsMap(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + ctx := context.Background() + + // Specs for two stores, one of which overrides the cluster-level + // provisioned bandwidth. + specs := []base.StoreSpec{ + { + ProvisionedRateSpec: base.ProvisionedRateSpec{ + DiskName: "foo", + // ProvisionedBandwidth is 0 so the cluster setting will be used. + ProvisionedBandwidth: 0, + }, + }, + { + ProvisionedRateSpec: base.ProvisionedRateSpec{ + DiskName: "bar", + ProvisionedBandwidth: 200, + }, + }, + } + // Engines. + engines := []storage.Engine{ + storage.NewDefaultInMemForTesting(), + storage.NewDefaultInMemForTesting(), + } + defer func() { + for i := range engines { + engines[i].Close() + } + }() + // "foo" has store-id 10, "bar" has store-id 5. + engineIDs := []roachpb.StoreID{10, 5} + for i := range engines { + ident := roachpb.StoreIdent{StoreID: engineIDs[i]} + require.NoError(t, storage.MVCCBlindPutProto(ctx, engines[i], nil, keys.StoreIdentKey(), + hlc.Timestamp{}, hlc.ClockTimestamp{}, &ident, nil)) + } + var dsm diskStatsMap + clusterProvisionedBW := int64(150) + + // diskStatsMap contains nothing, so does not populate anything. + stats, err := dsm.tryPopulateAdmissionDiskStats(ctx, clusterProvisionedBW, nil) + require.NoError(t, err) + require.Equal(t, 0, len(stats)) + + // diskStatsMap initialized with these two stores. + require.NoError(t, dsm.initDiskStatsMap(specs, engines)) + + // diskStatsFunc returns stats for these two stores, and an unknown store. 
+ diskStatsFunc := func(context.Context) ([]status.DiskStats, error) { + return []status.DiskStats{ + { + Name: "baz", + ReadBytes: 100, + WriteBytes: 200, + }, + { + Name: "foo", + ReadBytes: 500, + WriteBytes: 1000, + }, + { + Name: "bar", + ReadBytes: 2000, + WriteBytes: 2500, + }, + }, nil + } + stats, err = dsm.tryPopulateAdmissionDiskStats(ctx, clusterProvisionedBW, diskStatsFunc) + require.NoError(t, err) + // The stats for the two stores are as expected. + require.Equal(t, 2, len(stats)) + for i := range engineIDs { + ds, ok := stats[engineIDs[i]] + require.True(t, ok) + var expectedDS admission.DiskStats + switch engineIDs[i] { + // "foo" + case 10: + expectedDS = admission.DiskStats{ + BytesRead: 500, BytesWritten: 1000, ProvisionedBandwidth: clusterProvisionedBW} + // "bar" + case 5: + expectedDS = admission.DiskStats{ + BytesRead: 2000, BytesWritten: 2500, ProvisionedBandwidth: 200} + } + require.Equal(t, expectedDS, ds) + } + + // disk stats are only retrieved for "foo". + diskStatsFunc = func(context.Context) ([]status.DiskStats, error) { + return []status.DiskStats{ + { + Name: "foo", + ReadBytes: 3500, + WriteBytes: 4500, + }, + }, nil + } + stats, err = dsm.tryPopulateAdmissionDiskStats(ctx, clusterProvisionedBW, diskStatsFunc) + require.NoError(t, err) + require.Equal(t, 2, len(stats)) + for i := range engineIDs { + ds, ok := stats[engineIDs[i]] + require.True(t, ok) + var expectedDS admission.DiskStats + switch engineIDs[i] { + // "foo" + case 10: + expectedDS = admission.DiskStats{ + BytesRead: 3500, BytesWritten: 4500, ProvisionedBandwidth: clusterProvisionedBW} + // "bar". The read and write bytes are 0. + case 5: + expectedDS = admission.DiskStats{ + BytesRead: 0, BytesWritten: 0, ProvisionedBandwidth: 200} + } + require.Equal(t, expectedDS, ds) + } +} diff --git a/pkg/server/server.go b/pkg/server/server.go index f76232648385..13240d285b71 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -1610,6 +1610,10 @@ func (s *Server) PreStart(ctx context.Context) error { return err } + if err := s.node.registerEnginesForDiskStatsMap(s.cfg.Stores.Specs, s.engines); err != nil { + return errors.Wrapf(err, "failed to register engines for the disk stats map") + } + if err := s.debug.RegisterEngines(s.cfg.Stores.Specs, s.engines); err != nil { return errors.Wrapf(err, "failed to register engines with debug server") } diff --git a/pkg/server/status/disk_counters.go b/pkg/server/status/disk_counters.go index 1cb803b389d2..5c79fb262441 100644 --- a/pkg/server/status/disk_counters.go +++ b/pkg/server/status/disk_counters.go @@ -20,20 +20,22 @@ import ( "github.com/shirou/gopsutil/v3/disk" ) -func getDiskCounters(ctx context.Context) ([]diskStats, error) { +// GetDiskCounters returns DiskStats for all disks. 
+func GetDiskCounters(ctx context.Context) ([]DiskStats, error) { driveStats, err := disk.IOCountersWithContext(ctx) if err != nil { return nil, err } - output := make([]diskStats, len(driveStats)) + output := make([]DiskStats, len(driveStats)) i := 0 for _, counters := range driveStats { - output[i] = diskStats{ - readBytes: int64(counters.ReadBytes), + output[i] = DiskStats{ + Name: counters.Name, + ReadBytes: int64(counters.ReadBytes), readCount: int64(counters.ReadCount), readTime: time.Duration(counters.ReadTime) * time.Millisecond, - writeBytes: int64(counters.WriteBytes), + WriteBytes: int64(counters.WriteBytes), writeCount: int64(counters.WriteCount), writeTime: time.Duration(counters.WriteTime) * time.Millisecond, ioTime: time.Duration(counters.IoTime) * time.Millisecond, diff --git a/pkg/server/status/disk_counters_darwin.go b/pkg/server/status/disk_counters_darwin.go index 84599ce9b4bb..97c13b08a1e7 100644 --- a/pkg/server/status/disk_counters_darwin.go +++ b/pkg/server/status/disk_counters_darwin.go @@ -19,19 +19,21 @@ import ( "github.com/lufia/iostat" ) -func getDiskCounters(context.Context) ([]diskStats, error) { +// GetDiskCounters returns DiskStats for all disks. +func GetDiskCounters(context.Context) ([]DiskStats, error) { driveStats, err := iostat.ReadDriveStats() if err != nil { return nil, err } - output := make([]diskStats, len(driveStats)) + output := make([]DiskStats, len(driveStats)) for i, counters := range driveStats { - output[i] = diskStats{ - readBytes: counters.BytesRead, + output[i] = DiskStats{ + Name: counters.Name, + ReadBytes: counters.BytesRead, readCount: counters.NumRead, readTime: counters.TotalReadTime, - writeBytes: counters.BytesWritten, + WriteBytes: counters.BytesWritten, writeCount: counters.NumWrite, writeTime: counters.TotalWriteTime, ioTime: 0, // Not reported by this library. diff --git a/pkg/server/status/runtime.go b/pkg/server/status/runtime.go index 70eb0b5254b3..872885280264 100644 --- a/pkg/server/status/runtime.go +++ b/pkg/server/status/runtime.go @@ -260,12 +260,12 @@ type RuntimeStatSampler struct { cgoCall int64 gcCount int64 gcPauseTime uint64 - disk diskStats + disk DiskStats net net.IOCountersStat runnableSum float64 } - initialDiskCounters diskStats + initialDiskCounters DiskStats initialNetCounters net.IOCountersStat // Only show "not implemented" errors once, we don't need the log spam. 
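Since getDiskCounters is being exported here so that node.go (above) can feed per-disk stats into admission control, a minimal usage sketch may help; it is not part of the patch and relies only on the Name, ReadBytes, and WriteBytes fields that this change exposes.

package main

import (
	"context"
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/server/status"
)

func main() {
	// GetDiskCounters now returns one DiskStats entry per disk, keyed by Name,
	// with exported cumulative ReadBytes/WriteBytes counters.
	stats, err := status.GetDiskCounters(context.Background())
	if err != nil {
		panic(err)
	}
	for _, ds := range stats {
		fmt.Printf("%s: read=%d B written=%d B\n", ds.Name, ds.ReadBytes, ds.WriteBytes)
	}
}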
@@ -472,7 +472,7 @@ func (rsr *RuntimeStatSampler) SampleEnvironment( } } - var deltaDisk diskStats + var deltaDisk DiskStats diskCounters, err := getSummedDiskCounters(ctx) if err != nil { log.Ops.Warningf(ctx, "problem fetching disk stats: %s; disk stats will be empty.", err) @@ -482,10 +482,10 @@ func (rsr *RuntimeStatSampler) SampleEnvironment( rsr.last.disk = diskCounters subtractDiskCounters(&diskCounters, rsr.initialDiskCounters) - rsr.HostDiskReadBytes.Update(diskCounters.readBytes) + rsr.HostDiskReadBytes.Update(diskCounters.ReadBytes) rsr.HostDiskReadCount.Update(diskCounters.readCount) rsr.HostDiskReadTime.Update(int64(diskCounters.readTime)) - rsr.HostDiskWriteBytes.Update(diskCounters.writeBytes) + rsr.HostDiskWriteBytes.Update(diskCounters.WriteBytes) rsr.HostDiskWriteCount.Update(diskCounters.writeCount) rsr.HostDiskWriteTime.Update(int64(diskCounters.writeTime)) rsr.HostDiskIOTime.Update(int64(diskCounters.ioTime)) @@ -590,22 +590,26 @@ func (rsr *RuntimeStatSampler) GetCPUCombinedPercentNorm() float64 { return rsr.CPUCombinedPercentNorm.Value() } -// diskStats contains the disk statistics returned by the operating +// DiskStats contains the disk statistics returned by the operating // system. Interpretation of some of these stats varies by platform, // although as much as possible they are normalized to the semantics // used by linux's diskstats interface. // // Except for iopsInProgress, these metrics act like counters (always // increasing, and best interpreted as a rate). -type diskStats struct { - readBytes int64 +type DiskStats struct { + // Name is the disk name. + Name string + // ReadBytes is the cumulative bytes read. + ReadBytes int64 readCount int64 // readTime (and writeTime) may increase more than 1s per second if // access to storage is parallelized. readTime time.Duration - writeBytes int64 + // WriteBytes is the cumulative bytes written. + WriteBytes int64 writeCount int64 writeTime time.Duration @@ -623,10 +627,10 @@ type diskStats struct { iopsInProgress int64 } -func getSummedDiskCounters(ctx context.Context) (diskStats, error) { - diskCounters, err := getDiskCounters(ctx) +func getSummedDiskCounters(ctx context.Context) (DiskStats, error) { + diskCounters, err := GetDiskCounters(ctx) if err != nil { - return diskStats{}, err + return DiskStats{}, err } return sumDiskCounters(diskCounters), nil @@ -643,14 +647,14 @@ func getSummedNetStats(ctx context.Context) (net.IOCountersStat, error) { // sumDiskCounters returns a new disk.IOCountersStat whose values are the sum of the // values in the slice of disk.IOCountersStats passed in. -func sumDiskCounters(disksStats []diskStats) diskStats { - output := diskStats{} +func sumDiskCounters(disksStats []DiskStats) DiskStats { + output := DiskStats{} for _, stats := range disksStats { - output.readBytes += stats.readBytes + output.ReadBytes += stats.ReadBytes output.readCount += stats.readCount output.readTime += stats.readTime - output.writeBytes += stats.writeBytes + output.WriteBytes += stats.WriteBytes output.writeCount += stats.writeCount output.writeTime += stats.writeTime @@ -664,13 +668,13 @@ func sumDiskCounters(disksStats []diskStats) diskStats { // subtractDiskCounters subtracts the counters in `sub` from the counters in `from`, // saving the results in `from`. 
-func subtractDiskCounters(from *diskStats, sub diskStats) { +func subtractDiskCounters(from *DiskStats, sub DiskStats) { from.writeCount -= sub.writeCount - from.writeBytes -= sub.writeBytes + from.WriteBytes -= sub.WriteBytes from.writeTime -= sub.writeTime from.readCount -= sub.readCount - from.readBytes -= sub.readBytes + from.ReadBytes -= sub.ReadBytes from.readTime -= sub.readTime from.ioTime -= sub.ioTime diff --git a/pkg/server/status/runtime_test.go b/pkg/server/status/runtime_test.go index 42daeec22200..0b6866ba8ed6 100644 --- a/pkg/server/status/runtime_test.go +++ b/pkg/server/status/runtime_test.go @@ -21,27 +21,27 @@ import ( func TestSumDiskCounters(t *testing.T) { defer leaktest.AfterTest(t)() - counters := []diskStats{ + counters := []DiskStats{ { - readBytes: 1, + ReadBytes: 1, readCount: 1, iopsInProgress: 1, - writeBytes: 1, + WriteBytes: 1, writeCount: 1, }, { - readBytes: 1, + ReadBytes: 1, readCount: 1, iopsInProgress: 1, - writeBytes: 1, + WriteBytes: 1, writeCount: 1, }, } summed := sumDiskCounters(counters) - expected := diskStats{ - readBytes: 2, + expected := DiskStats{ + ReadBytes: 2, readCount: 2, - writeBytes: 2, + WriteBytes: 2, writeCount: 2, iopsInProgress: 2, } @@ -82,24 +82,24 @@ func TestSumNetCounters(t *testing.T) { func TestSubtractDiskCounters(t *testing.T) { defer leaktest.AfterTest(t)() - from := diskStats{ - readBytes: 3, + from := DiskStats{ + ReadBytes: 3, readCount: 3, - writeBytes: 3, + WriteBytes: 3, writeCount: 3, iopsInProgress: 3, } - sub := diskStats{ - readBytes: 1, + sub := DiskStats{ + ReadBytes: 1, readCount: 1, iopsInProgress: 1, - writeBytes: 1, + WriteBytes: 1, writeCount: 1, } - expected := diskStats{ - readBytes: 2, + expected := DiskStats{ + ReadBytes: 2, readCount: 2, - writeBytes: 2, + WriteBytes: 2, writeCount: 2, // Don't touch iops in progress; it is a gauge, not a counter. 
iopsInProgress: 3, diff --git a/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/cluster.go b/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/cluster.go index 368b82120f0e..6ac75721d6ac 100644 --- a/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/cluster.go +++ b/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/cluster.go @@ -63,13 +63,17 @@ func (h *Handle) InitializeTenant(ctx context.Context, tenID roachpb.TenantID) * tenantState.db = sqlutils.MakeSQLRunner(h.tc.ServerConn(0)) tenantState.cleanup = func() {} // noop } else { + serverGCJobKnobs := testServer.TestingKnobs().GCJob + tenantGCJobKnobs := sql.GCJobTestingKnobs{SkipWaitingForMVCCGC: true} + if serverGCJobKnobs != nil { + tenantGCJobKnobs = *serverGCJobKnobs.(*sql.GCJobTestingKnobs) + tenantGCJobKnobs.SkipWaitingForMVCCGC = true + } tenantArgs := base.TestTenantArgs{ TenantID: tenID, TestingKnobs: base.TestingKnobs{ SpanConfig: h.scKnobs, - GCJob: &sql.GCJobTestingKnobs{ - SkipWaitingForMVCCGC: true, - }, + GCJob: &tenantGCJobKnobs, }, } var err error diff --git a/pkg/ui/workspaces/cluster-ui/src/api/insightsApi.ts b/pkg/ui/workspaces/cluster-ui/src/api/insightsApi.ts index b07e7049cdcf..786d9ce76e60 100644 --- a/pkg/ui/workspaces/cluster-ui/src/api/insightsApi.ts +++ b/pkg/ui/workspaces/cluster-ui/src/api/insightsApi.ts @@ -18,6 +18,7 @@ import { InsightEventDetails, InsightExecEnum, InsightNameEnum, + StatementInsightEvent, } from "src/insights"; import moment from "moment"; @@ -30,10 +31,7 @@ export type InsightEventsResponse = InsightEventState[]; type InsightQuery = { name: InsightNameEnum; query: string; - toState: ( - response: SqlExecutionResponse, - results: Record, - ) => State[]; + toState: (response: SqlExecutionResponse) => State; }; type TransactionContentionResponseColumns = { @@ -48,31 +46,30 @@ type TransactionContentionResponseColumns = { function transactionContentionResultsToEventState( response: SqlExecutionResponse, - results: Record, ): InsightEventState[] { - response.execution.txn_results[0].rows.forEach(row => { - const key = row.blocking_txn_id; - if (!results[key]) { - results[key] = { - executionID: row.blocking_txn_id, - fingerprintID: row.blocking_txn_fingerprint_id, - queries: row.blocking_queries, - startTime: moment(row.collection_ts), - elapsedTime: moment.duration(row.contention_duration).asMilliseconds(), - contentionThreshold: moment.duration(row.threshold).asMilliseconds(), - application: row.app_name, - insightName: highWaitTimeQuery.name, - execType: InsightExecEnum.TRANSACTION, - }; - } - }); + if (!response.execution.txn_results[0].rows) { + // No data. 
+ return []; + } - return Object.values(results); + return response.execution.txn_results[0].rows.map(row => ({ + transactionID: row.blocking_txn_id, + fingerprintID: row.blocking_txn_fingerprint_id, + queries: row.blocking_queries, + startTime: moment(row.collection_ts), + elapsedTimeMillis: moment + .duration(row.contention_duration) + .asMilliseconds(), + contentionThreshold: moment.duration(row.threshold).asMilliseconds(), + application: row.app_name, + insightName: highWaitTimeQuery.name, + execType: InsightExecEnum.TRANSACTION, + })); } const highWaitTimeQuery: InsightQuery< TransactionContentionResponseColumns, - InsightEventState + InsightEventsResponse > = { name: InsightNameEnum.highWaitTime, query: `SELECT * FROM (SELECT @@ -118,13 +115,7 @@ export function getInsightEventState(): Promise { }; return executeSql(request).then( result => { - if (!result.execution.txn_results[0].rows) { - // No data. - return []; - } - - const results: Record = {}; - return highWaitTimeQuery.toState(result, results); + return highWaitTimeQuery.toState(result); }, ); } @@ -157,41 +148,37 @@ type TransactionContentionDetailsResponseColumns = { function transactionContentionDetailsResultsToEventState( response: SqlExecutionResponse, - results: Record, ): InsightEventDetailsState[] { - response.execution.txn_results[0].rows.forEach(row => { - const key = row.blocking_txn_id; - if (!results[key]) { - results[key] = { - executionID: row.blocking_txn_id, - queries: row.blocking_queries, - startTime: moment(row.collection_ts), - elapsedTime: moment.duration(row.contention_duration).asMilliseconds(), - contentionThreshold: moment.duration(row.threshold).asMilliseconds(), - application: row.app_name, - fingerprintID: row.blocking_txn_fingerprint_id, - waitingExecutionID: row.waiting_txn_id, - waitingFingerprintID: row.waiting_txn_fingerprint_id, - waitingQueries: row.waiting_queries, - schemaName: row.schema_name, - databaseName: row.database_name, - tableName: row.table_name, - indexName: row.index_name, - contendedKey: row.key, - insightName: highWaitTimeQuery.name, - execType: InsightExecEnum.TRANSACTION, - }; - } - }); - - return Object.values(results); + if (!response.execution.txn_results[0].rows) { + // No data. + return []; + } + return response.execution.txn_results[0].rows.map(row => ({ + executionID: row.blocking_txn_id, + queries: row.blocking_queries, + startTime: moment(row.collection_ts), + elapsedTime: moment.duration(row.contention_duration).asMilliseconds(), + contentionThreshold: moment.duration(row.threshold).asMilliseconds(), + application: row.app_name, + fingerprintID: row.blocking_txn_fingerprint_id, + waitingExecutionID: row.waiting_txn_id, + waitingFingerprintID: row.waiting_txn_fingerprint_id, + waitingQueries: row.waiting_queries, + schemaName: row.schema_name, + databaseName: row.database_name, + tableName: row.table_name, + indexName: row.index_name, + contendedKey: row.key, + insightName: highWaitTimeQuery.name, + execType: InsightExecEnum.TRANSACTION, + })); } const highWaitTimeDetailsQuery = ( id: string, ): InsightQuery< TransactionContentionDetailsResponseColumns, - InsightEventDetailsState + InsightEventDetailsResponse > => { return { name: InsightNameEnum.highWaitTime, @@ -257,12 +244,119 @@ export function getInsightEventDetailsState( }; return executeSql(request).then( result => { - if (!result.execution.txn_results[0].rows) { - // No data. 
- return []; - } - const results: Record = {}; - return detailsQuery.toState(result, results); + return detailsQuery.toState(result); }, ); } + +type ExecutionInsightsResponseRow = { + session_id: string; + txn_id: string; + txn_fingerprint_id: string; // hex string + stmt_id: string; + stmt_fingerprint_id: string; // hex string + query: string; + start_time: string; // Timestamp + end_time: string; // Timestamp + full_scan: boolean; + user_name: string; + app_name: string; + database_name: string; + rows_read: number; + rows_written: number; + priority: string; + retries: number; + exec_node_ids: number[]; + contention: string; // interval + last_retry_reason?: string; + problems: string[]; +}; + +export type StatementInsights = StatementInsightEvent[]; + +function getStatementInsightsFromClusterExecutionInsightsResponse( + response: SqlExecutionResponse, +): StatementInsights { + if (!response.execution.txn_results[0].rows) { + // No data. + return []; + } + + return response.execution.txn_results[0].rows.map(row => { + const start = moment.utc(row.start_time); + const end = moment.utc(row.end_time); + return { + transactionID: row.txn_id, + transactionFingerprintID: row.txn_fingerprint_id, + query: row.query, + startTime: start, + endTime: end, + databaseName: row.database_name, + elapsedTimeMillis: end.diff(start, "milliseconds"), + application: row.app_name, + statementID: row.stmt_id, + statementFingerprintID: row.stmt_fingerprint_id, + sessionID: row.session_id, + isFullScan: row.full_scan, + rowsRead: row.rows_read, + rowsWritten: row.rows_written, + priority: row.priority, + retries: row.retries, + lastRetryReason: row.last_retry_reason, + timeSpentWaiting: row.contention ? moment.duration(row.contention) : null, + problems: row.problems, + }; + }); +} + +const statementInsightsQuery: InsightQuery< + ExecutionInsightsResponseRow, + StatementInsights +> = { + name: InsightNameEnum.highWaitTime, + // We only surface the most recently observed problem for a given statement. 
+ query: `SELECT * from ( + SELECT + session_id, + txn_id, + encode(txn_fingerprint_id, 'hex') AS txn_fingerprint_id, + stmt_id, + encode(stmt_fingerprint_id, 'hex') AS stmt_fingerprint_id, + query, + start_time, + end_time, + full_scan, + app_name, + database_name, + rows_read, + rows_written, + priority, + retries, + contention, + last_retry_reason, + problems, + row_number() OVER ( + PARTITION BY txn_fingerprint_id + ORDER BY end_time DESC + ) AS rank + FROM crdb_internal.cluster_execution_insights + WHERE array_length(problems, 1) > 0 + ) WHERE rank = 1 + `, + toState: getStatementInsightsFromClusterExecutionInsightsResponse, +}; + +export function getStatementInsightsApi(): Promise { + const request: SqlExecutionRequest = { + statements: [ + { + sql: `${statementInsightsQuery.query}`, + }, + ], + execute: true, + max_result_size: 50000, // 50 kib + }; + return executeSql(request).then(result => { + return statementInsightsQuery.toState(result); + }); +} diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/types.ts b/pkg/ui/workspaces/cluster-ui/src/insights/types.ts index 1f0cbd258784..7cc5f031b40e 100644 --- a/pkg/ui/workspaces/cluster-ui/src/insights/types.ts +++ b/pkg/ui/workspaces/cluster-ui/src/insights/types.ts @@ -21,12 +21,12 @@ export enum InsightExecEnum { } export type InsightEvent = { - executionID: string; + transactionID: string; fingerprintID: string; queries: string[]; insights: Insight[]; startTime: Moment; - elapsedTime: number; + elapsedTimeMillis: number; contentionThreshold: number; application: string; execType: InsightExecEnum; @@ -52,6 +52,29 @@ export type InsightEventDetails = { execType: InsightExecEnum; }; +export type StatementInsightEvent = { + // Some of these can be moved to a common InsightEvent type if txn query is updated. 
+ statementID: string; + transactionID: string; + statementFingerprintID: string; + transactionFingerprintID: string; + startTime: Moment; + elapsedTimeMillis: number; + sessionID: string; + timeSpentWaiting?: moment.Duration; + isFullScan: boolean; + endTime: Moment; + databaseName: string; + rowsRead: number; + rowsWritten: number; + lastRetryReason?: string; + priority: string; + retries: number; + problems: string[]; + query: string; + application: string; +}; + export type Insight = { name: InsightNameEnum; label: string; diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/utils.ts b/pkg/ui/workspaces/cluster-ui/src/insights/utils.ts index 7dc57919f93a..2eb2d6f8e92a 100644 --- a/pkg/ui/workspaces/cluster-ui/src/insights/utils.ts +++ b/pkg/ui/workspaces/cluster-ui/src/insights/utils.ts @@ -55,12 +55,12 @@ export function getInsightsFromState( return; } else { insightEvents.push({ - executionID: e.executionID, + transactionID: e.transactionID, fingerprintID: e.fingerprintID, queries: e.queries, insights: insightsForEvent, startTime: e.startTime, - elapsedTime: e.elapsedTime, + elapsedTimeMillis: e.elapsedTimeMillis, application: e.application, execType: InsightExecEnum.TRANSACTION, contentionThreshold: e.contentionThreshold, @@ -121,7 +121,7 @@ export const filterTransactionInsights = ( filteredTransactions = filteredTransactions.filter( txn => !search || - txn.executionID.toLowerCase()?.includes(search) || + txn.transactionID.toLowerCase()?.includes(search) || txn.queries?.find(query => query.toLowerCase().includes(search)), ); } diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsights.fixture.ts b/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsights.fixture.ts index 97daba799413..b4bb01c6da48 100644 --- a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsights.fixture.ts +++ b/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsights.fixture.ts @@ -15,20 +15,20 @@ import { InsightExecEnum } from "../../types"; export const transactionInsightsPropsFixture: TransactionInsightsViewProps = { transactions: [ { - executionID: "f72f37ea-b3a0-451f-80b8-dfb27d0bc2a5", + transactionID: "f72f37ea-b3a0-451f-80b8-dfb27d0bc2a5", fingerprintID: "\\x76245b7acd82d39d", queries: [ "SELECT IFNULL(a, b) FROM (SELECT (SELECT code FROM promo_codes WHERE code > $1 ORDER BY code LIMIT _) AS a, (SELECT code FROM promo_codes ORDER BY code LIMIT _) AS b)", ], insightName: "HIGH_WAIT_TIME", startTime: moment.utc("2022.08.10"), - elapsedTime: moment.duration("00:00:00.25").asMilliseconds(), + elapsedTimeMillis: moment.duration("00:00:00.25").asMilliseconds(), application: "demo", execType: InsightExecEnum.TRANSACTION, contentionThreshold: moment.duration("00:00:00.1").asMilliseconds(), }, { - executionID: "e72f37ea-b3a0-451f-80b8-dfb27d0bc2a5", + transactionID: "e72f37ea-b3a0-451f-80b8-dfb27d0bc2a5", fingerprintID: "\\x76245b7acd82d39e", queries: [ "INSERT INTO vehicles VALUES ($1, $2, __more6__)", @@ -36,20 +36,20 @@ export const transactionInsightsPropsFixture: TransactionInsightsViewProps = { ], insightName: "HIGH_WAIT_TIME", startTime: moment.utc("2022.08.10"), - elapsedTime: moment.duration("00:00:00.25").asMilliseconds(), + elapsedTimeMillis: moment.duration("00:00:00.25").asMilliseconds(), application: "demo", execType: InsightExecEnum.TRANSACTION, contentionThreshold: moment.duration("00:00:00.1").asMilliseconds(), }, { 
- executionID: "f72f37ea-b3a0-451f-80b8-dfb27d0bc2a0", + transactionID: "f72f37ea-b3a0-451f-80b8-dfb27d0bc2a0", fingerprintID: "\\x76245b7acd82d39f", queries: [ "UPSERT INTO vehicle_location_histories VALUES ($1, $2, now(), $3, $4)", ], insightName: "HIGH_WAIT_TIME", startTime: moment.utc("2022.08.10"), - elapsedTime: moment.duration("00:00:00.25").asMilliseconds(), + elapsedTimeMillis: moment.duration("00:00:00.25").asMilliseconds(), application: "demo", execType: InsightExecEnum.TRANSACTION, contentionThreshold: moment.duration("00:00:00.1").asMilliseconds(), diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsightsTable.tsx b/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsightsTable.tsx index ca0d97ec7a0b..497ddbed847f 100644 --- a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsightsTable.tsx +++ b/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsightsTable.tsx @@ -35,11 +35,11 @@ export function makeTransactionInsightsColumns(): ColumnDescriptor name: "executionID", title: insightsTableTitles.executionID(execType), cell: (item: InsightEvent) => ( - - {String(item.executionID)} + + {String(item.transactionID)} ), - sort: (item: InsightEvent) => item.executionID, + sort: (item: InsightEvent) => item.transactionID, }, { name: "fingerprintID", @@ -73,8 +73,8 @@ export function makeTransactionInsightsColumns(): ColumnDescriptor { name: "elapsedTime", title: insightsTableTitles.elapsedTime(execType), - cell: (item: InsightEvent) => Duration(item.elapsedTime * 1e6), - sort: (item: InsightEvent) => item.elapsedTime, + cell: (item: InsightEvent) => Duration(item.elapsedTimeMillis * 1e6), + sort: (item: InsightEvent) => item.elapsedTimeMillis, }, { name: "applicationName", diff --git a/pkg/ui/workspaces/db-console/src/redux/apiReducers.ts b/pkg/ui/workspaces/db-console/src/redux/apiReducers.ts index 05df89112e1a..3b6771978376 100644 --- a/pkg/ui/workspaces/db-console/src/redux/apiReducers.ts +++ b/pkg/ui/workspaces/db-console/src/redux/apiReducers.ts @@ -403,6 +403,14 @@ const insightsReducerObj = new CachedDataReducer( ); export const refreshInsights = insightsReducerObj.refresh; +const statementInsightsReducerObj = new CachedDataReducer( + clusterUiApi.getStatementInsightsApi, + "statementInsights", + null, + moment.duration(30, "s"), // Timeout +); +export const refreshStatementInsights = statementInsightsReducerObj.refresh; + export const insightRequestKey = ( req: clusterUiApi.InsightEventDetailsRequest, ): string => `${req.id}`; @@ -451,6 +459,7 @@ export interface APIReducersState { clusterLocks: CachedDataReducerState; insights: CachedDataReducerState; insightDetails: KeyedCachedDataReducerState; + statementInsights: CachedDataReducerState; } export const apiReducersReducer = combineReducers({ @@ -494,6 +503,8 @@ export const apiReducersReducer = combineReducers({ [clusterLocksReducerObj.actionNamespace]: clusterLocksReducerObj.reducer, [insightsReducerObj.actionNamespace]: insightsReducerObj.reducer, [insightDetailsReducerObj.actionNamespace]: insightDetailsReducerObj.reducer, + [statementInsightsReducerObj.actionNamespace]: + statementInsightsReducerObj.reducer, }); export { CachedDataReducerState, KeyedCachedDataReducerState }; diff --git a/pkg/util/admission/io_load_listener.go b/pkg/util/admission/io_load_listener.go index 8a328b5b7a1e..6d4539447dfc 100644 --- 
a/pkg/util/admission/io_load_listener.go +++ b/pkg/util/admission/io_load_listener.go @@ -381,6 +381,8 @@ func computeIntervalDiskLoadInfo( // 100+ms for all write traffic. func (io *ioLoadListener) adjustTokens(ctx context.Context, metrics StoreMetrics) { sas := io.kvRequester.getStoreAdmissionStats() + // Copy the cumulative disk bandwidth values for later use. + cumDiskBW := io.ioLoadListenerState.diskBW res := io.adjustTokensInner(ctx, io.ioLoadListenerState, metrics.Levels[0], metrics.WriteStallCount, metrics.InternalIntervalMetrics, L0FileCountOverloadThreshold.Get(&io.settings.SV), @@ -392,12 +394,12 @@ { // Disk Bandwidth tokens. io.aux.diskBW.intervalDiskLoadInfo = computeIntervalDiskLoadInfo( - io.diskBW.bytesRead, io.diskBW.bytesWritten, metrics.DiskStats) + cumDiskBW.bytesRead, cumDiskBW.bytesWritten, metrics.DiskStats) io.mu.Lock() diskTokensUsed := io.mu.kvGranter.getDiskTokensUsedAndResetLocked() io.mu.Unlock() io.aux.diskBW.intervalLSMInfo = intervalLSMInfo{ - incomingBytes: int64(cumLSMIncomingBytes) - int64(io.diskBW.incomingLSMBytes), + incomingBytes: int64(cumLSMIncomingBytes) - int64(cumDiskBW.incomingLSMBytes), regularTokensUsed: diskTokensUsed[regularWorkClass], elasticTokensUsed: diskTokensUsed[elasticWorkClass], } diff --git a/pkg/util/admission/testdata/io_load_listener b/pkg/util/admission/testdata/io_load_listener index 85a0d1c44cd3..5635abe9af8b 100644 --- a/pkg/util/admission/testdata/io_load_listener +++ b/pkg/util/admission/testdata/io_load_listener @@ -85,7 +85,7 @@ prep-admission-stats admitted=10000 write-bytes=40000 set-state l0-bytes=10000 l0-added-write=101000 l0-files=21 l0-sublevels=21 ---- compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 10000 (0 bypassed) with 39 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 2.25x+1 B (smoothed 2.00x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 5 B, compacted 98 KiB [≈49 KiB], flushed 0 B [≈0 B]; admitting 12 KiB (rate 833 B/s) due to L0 growth (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:101000 curL0Bytes:10000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:101000} smoothedIntL0CompactedBytes:50000 smoothedCompactionByteTokens:12500 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:12500 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:5} l0WriteLM:{multiplier:2 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:100000 intL0CompactedBytes:100000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10000 intL0WriteBytes:100000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:40000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:2.25 constant:1} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:101000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:101000 
curL0Bytes:10000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:101000} smoothedIntL0CompactedBytes:50000 smoothedCompactionByteTokens:12500 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:12500 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:5} l0WriteLM:{multiplier:2 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:100000 intL0CompactedBytes:100000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10000 intL0WriteBytes:100000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:40000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:2.25 constant:1} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:100000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 5 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 2.00x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: 209 setAvailableElasticDiskTokens: unlimited @@ -157,7 +157,7 @@ prep-admission-stats admitted=20000 write-bytes=80000 set-state l0-bytes=10000 l0-added-write=201000 l0-files=21 l0-sublevels=21 ---- compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 10000 (0 bypassed) with 39 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 2.25x+1 B (smoothed 2.12x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 7 B, compacted 98 KiB [≈73 KiB], flushed 0 B [≈0 B]; admitting 24 KiB (rate 1.6 KiB/s) due to L0 growth (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:201000 curL0Bytes:10000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:201000} smoothedIntL0CompactedBytes:75000 smoothedCompactionByteTokens:25000 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:25000 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:7} l0WriteLM:{multiplier:2.125 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:100000 intL0CompactedBytes:100000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10000 intL0WriteBytes:100000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:40000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:2.25 constant:1} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:201000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:201000 curL0Bytes:10000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:201000} 
smoothedIntL0CompactedBytes:75000 smoothedCompactionByteTokens:25000 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:25000 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:7} l0WriteLM:{multiplier:2.125 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:100000 intL0CompactedBytes:100000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10000 intL0WriteBytes:100000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:40000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:2.25 constant:1} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:100000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 7 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 2.12x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: 417 setAvailableElasticDiskTokens: unlimited @@ -225,7 +225,7 @@ tick: 59, setAvailableIOTokens: 397 setAvailableElasticDiskTokens: unlimited set-state l0-bytes=10000 l0-added-write=201000 l0-files=21 l0-sublevels=21 print-only-first-tick=true ---- compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 2.12x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 7 B, compacted 0 B [≈37 KiB], flushed 0 B [≈0 B]; admitting 21 KiB (rate 1.4 KiB/s) due to L0 growth (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:201000 curL0Bytes:10000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:201000} smoothedIntL0CompactedBytes:37500 smoothedCompactionByteTokens:21875 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:21875 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:7} l0WriteLM:{multiplier:2.125 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:201000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:201000 curL0Bytes:10000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:201000} smoothedIntL0CompactedBytes:37500 smoothedCompactionByteTokens:21875 
smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:21875 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:7} l0WriteLM:{multiplier:2.125 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 7 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 2.12x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: 365 setAvailableElasticDiskTokens: unlimited @@ -239,7 +239,7 @@ prep-admission-stats admitted=30000 write-bytes=120000 set-state l0-bytes=10000 l0-added-write=501000 l0-files=21 l0-sublevels=20 print-only-first-tick=true ---- compaction score 1.000 (21 ssts, 20 sub-levels), L0 growth 293 KiB (write 293 KiB ingest 0 B ignored 0 B): requests 10000 (0 bypassed) with 39 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 3.00x+18 B (smoothed 2.56x+9 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 18 B, compacted 293 KiB [≈165 KiB], flushed 0 B [≈0 B]; admitting all -{ioLoadListenerState:{cumL0AddedBytes:501000 curL0Bytes:10000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:501000} smoothedIntL0CompactedBytes:168750 smoothedCompactionByteTokens:160937.5 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:9223372036854775807 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:18} l0WriteLM:{multiplier:2.5625 constant:9} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:300000 intL0CompactedBytes:300000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10000 intL0WriteBytes:300000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:40000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:3 constant:18} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:501000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:501000 curL0Bytes:10000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:501000} smoothedIntL0CompactedBytes:168750 smoothedCompactionByteTokens:160937.5 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:9223372036854775807 byteTokensAllocated:0 byteTokensUsed:0 
elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:18} l0WriteLM:{multiplier:2.5625 constant:9} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:300000 intL0CompactedBytes:300000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10000 intL0WriteBytes:300000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:40000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:3 constant:18} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:300000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 18 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 2.56x+9 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: unlimited setAvailableElasticDiskTokens: unlimited @@ -272,7 +272,7 @@ prep-admission-stats admitted=10 write-bytes=130000 ingested-bytes=20000 set-state l0-bytes=1000 l0-added-write=171000 l0-added-ingested=30000 l0-files=21 l0-sublevels=21 print-only-first-tick=true ---- compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 195 KiB (write 166 KiB ingest 29 KiB ignored 0 B): requests 10 (0 bypassed) with 127 KiB acc-write (0 B bypassed) + 20 KiB acc-ingest (0 B bypassed) + write-model 1.31x+1 B (smoothed 1.53x+1 B) + ingested-model 1.50x+1 B (smoothed 1.12x+1 B) + at-admission-tokens 9.8 KiB, compacted 195 KiB [≈98 KiB], flushed 0 B [≈0 B]; admitting 24 KiB (rate 1.6 KiB/s) due to L0 growth (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:201000 curL0Bytes:1000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:201000} smoothedIntL0CompactedBytes:100000 smoothedCompactionByteTokens:25000 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:25000 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:10000} l0WriteLM:{multiplier:1.5288076923076923 constant:1} l0IngestLM:{multiplier:1.125 constant:1} ingestLM:{multiplier:1.2497500000000001 constant:1} aux:{intL0AddedBytes:200000 intL0CompactedBytes:200000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10 intL0WriteBytes:170000 intL0IngestedBytes:30000 intLSMIngestedBytes:30000 intL0WriteAccountedBytes:130000 intIngestedAccountedBytes:20000 intL0WriteLinearModel:{multiplier:1.3076153846153846 constant:1} intL0IngestedLinearModel:{multiplier:1.4995 constant:1} intIngestedLinearModel:{multiplier:1.4995 constant:1} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:201000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:201000 curL0Bytes:1000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:201000} smoothedIntL0CompactedBytes:100000 smoothedCompactionByteTokens:25000 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 
totalNumByteTokens:25000 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:10000} l0WriteLM:{multiplier:1.5288076923076923 constant:1} l0IngestLM:{multiplier:1.125 constant:1} ingestLM:{multiplier:1.2497500000000001 constant:1} aux:{intL0AddedBytes:200000 intL0CompactedBytes:200000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10 intL0WriteBytes:170000 intL0IngestedBytes:30000 intLSMIngestedBytes:30000 intL0WriteAccountedBytes:130000 intIngestedAccountedBytes:20000 intL0WriteLinearModel:{multiplier:1.3076153846153846 constant:1} intL0IngestedLinearModel:{multiplier:1.4995 constant:1} intIngestedLinearModel:{multiplier:1.4995 constant:1} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:200000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 10000 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.53x+1 l0-ingest-lm: 1.12x+1 ingest-lm: 1.25x+1 setAvailableIOTokens: 417 setAvailableElasticDiskTokens: unlimited @@ -287,7 +287,7 @@ prep-admission-stats admitted=20 write-bytes=150000 ingested-bytes=20000 set-state l0-bytes=1000 l0-added-write=191000 l0-added-ingested=30000 l0-files=21 l0-sublevels=21 print-only-first-tick=true ---- compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 20 KiB (write 20 KiB ingest 0 B ignored 0 B): requests 10 (0 bypassed) with 20 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 1.00x+1 B (smoothed 1.26x+1 B) + ingested-model 0.00x+0 B (smoothed 1.12x+1 B) + at-admission-tokens 5.9 KiB, compacted 20 KiB [≈59 KiB], flushed 0 B [≈0 B]; admitting 27 KiB (rate 1.8 KiB/s) due to L0 growth (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:221000 curL0Bytes:1000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:221000} smoothedIntL0CompactedBytes:60000 smoothedCompactionByteTokens:27500 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:27500 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:6000} l0WriteLM:{multiplier:1.2641538461538462 constant:1} l0IngestLM:{multiplier:1.125 constant:1} ingestLM:{multiplier:1.2497500000000001 constant:1} aux:{intL0AddedBytes:20000 intL0CompactedBytes:20000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10 intL0WriteBytes:20000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:20000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0.9995 constant:1} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:221000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:221000 curL0Bytes:1000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:221000} smoothedIntL0CompactedBytes:60000 
smoothedCompactionByteTokens:27500 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:27500 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:6000} l0WriteLM:{multiplier:1.2641538461538462 constant:1} l0IngestLM:{multiplier:1.125 constant:1} ingestLM:{multiplier:1.2497500000000001 constant:1} aux:{intL0AddedBytes:20000 intL0CompactedBytes:20000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10 intL0WriteBytes:20000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:20000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0.9995 constant:1} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:20000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 6000 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.26x+1 l0-ingest-lm: 1.12x+1 ingest-lm: 1.25x+1 setAvailableIOTokens: 459 setAvailableElasticDiskTokens: unlimited @@ -301,7 +301,7 @@ prep-admission-stats admitted=30 write-bytes=250000 ingested-bytes=20000 ingeste set-state l0-bytes=1000 l0-added-write=211000 l0-added-ingested=30000 l0-files=21 l0-sublevels=21 print-only-first-tick=true ---- compaction score 1.050[L0-overload] (21 ssts, 21 sub-levels), L0 growth 20 KiB (write 20 KiB ingest 0 B ignored 0 B): requests 10 (0 bypassed) with 98 KiB acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.50x+1 B (smoothed 0.88x+1 B) + ingested-model 0.00x+0 B (smoothed 1.12x+1 B) + at-admission-tokens 3.9 KiB, compacted 20 KiB [≈39 KiB], flushed 0 B [≈0 B]; admitting 23 KiB (rate 1.5 KiB/s) due to L0 growth (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:241000 curL0Bytes:1000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:241000} smoothedIntL0CompactedBytes:40000 smoothedCompactionByteTokens:23750 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:23750 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:4000} l0WriteLM:{multiplier:0.8820769230769231 constant:1} l0IngestLM:{multiplier:1.125 constant:1} ingestLM:{multiplier:1.2497500000000001 constant:1} aux:{intL0AddedBytes:20000 intL0CompactedBytes:20000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10 intL0WriteBytes:20000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:100000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0.5 constant:1} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:241000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:241000 curL0Bytes:1000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 
incomingLSMBytes:241000} smoothedIntL0CompactedBytes:40000 smoothedCompactionByteTokens:23750 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:23750 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:4000} l0WriteLM:{multiplier:0.8820769230769231 constant:1} l0IngestLM:{multiplier:1.125 constant:1} ingestLM:{multiplier:1.2497500000000001 constant:1} aux:{intL0AddedBytes:20000 intL0CompactedBytes:20000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:10 intL0WriteBytes:20000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:100000 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0.5 constant:1} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:20000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 4000 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 0.88x+1 l0-ingest-lm: 1.12x+1 ingest-lm: 1.25x+1 setAvailableIOTokens: 396 setAvailableElasticDiskTokens: unlimited @@ -325,7 +325,7 @@ tick: 0, setAvailableIOTokens: unlimited setAvailableElasticDiskTokens: unlimite set-state l0-bytes=10000 l0-added-write=11000 l0-files=1 l0-sublevels=1 flush-bytes=1000 flush-work-sec=2 flush-idle-sec=100 print-only-first-tick=true ---- compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 9.8 KiB (write 9.8 KiB ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 9.8 KiB [≈4.9 KiB], flushed 7.3 KiB [≈0 B]; admitting all -{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:5000 smoothedCompactionByteTokens:5000 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:9223372036854775807 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:10000 intL0CompactedBytes:10000 intFlushTokens:7500 intFlushUtilization:0.0196078431372549 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:10000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:11000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 
incomingLSMBytes:11000} smoothedIntL0CompactedBytes:5000 smoothedCompactionByteTokens:5000 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:9223372036854775807 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:10000 intL0CompactedBytes:10000 intFlushTokens:7500 intFlushUtilization:0.0196078431372549 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:10000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:10000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 1 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: unlimited setAvailableElasticDiskTokens: unlimited @@ -334,7 +334,7 @@ setAvailableIOTokens: unlimited setAvailableElasticDiskTokens: unlimited set-state l0-bytes=10000 l0-added-write=11000 l0-files=1 l0-sublevels=1 flush-bytes=1000 flush-work-sec=2 flush-idle-sec=10 print-only-first-tick=true ---- compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈2.4 KiB], flushed 7.3 KiB [≈7.3 KiB]; admitting 11 KiB (rate 750 B/s) due to memtable flush (multiplier 1.500) (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:0 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:2500 smoothedCompactionByteTokens:2500 smoothedNumFlushTokens:7500 flushUtilTargetFraction:1.5 totalNumByteTokens:11250 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:7500 intFlushUtilization:0.16666666666666666 intWriteStalls:0 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:11000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:0 diskBW:{bytesRead:0 
bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:2500 smoothedCompactionByteTokens:2500 smoothedNumFlushTokens:7500 flushUtilTargetFraction:1.5 totalNumByteTokens:11250 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:7500 intFlushUtilization:0.16666666666666666 intWriteStalls:0 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 1 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: 188 setAvailableElasticDiskTokens: unlimited @@ -345,7 +345,7 @@ setAvailableIOTokens: 188 setAvailableElasticDiskTokens: unlimited set-state l0-bytes=10000 l0-added-write=11000 l0-files=1 l0-sublevels=1 flush-bytes=10000 flush-work-sec=2 flush-idle-sec=10 write-stall-count=1 print-only-first-tick=true ---- compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈1.2 KiB], flushed 73 KiB [≈40 KiB]; admitting 59 KiB (rate 4.0 KiB/s) due to memtable flush (multiplier 1.475) (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:1 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:1250 smoothedCompactionByteTokens:1250 smoothedNumFlushTokens:41250 flushUtilTargetFraction:1.475 totalNumByteTokens:60843 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:1 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:11000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:1 diskBW:{bytesRead:0 
bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:1250 smoothedCompactionByteTokens:1250 smoothedNumFlushTokens:41250 flushUtilTargetFraction:1.475 totalNumByteTokens:60843 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:1 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 1 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: 1015 setAvailableElasticDiskTokens: unlimited @@ -356,7 +356,7 @@ setAvailableIOTokens: 1015 setAvailableElasticDiskTokens: unlimited set-state l0-bytes=10000 l0-added-write=11000 l0-files=1 l0-sublevels=1 flush-bytes=10000 flush-work-sec=2 flush-idle-sec=10 write-stall-count=3 print-only-first-tick=true ---- compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈625 B], flushed 73 KiB [≈57 KiB]; admitting 81 KiB (rate 5.4 KiB/s) due to memtable flush (multiplier 1.425) (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:3 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:625 smoothedCompactionByteTokens:625 smoothedNumFlushTokens:58125 flushUtilTargetFraction:1.4250000000000003 totalNumByteTokens:82828 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:2 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:11000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:3 
diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:625 smoothedCompactionByteTokens:625 smoothedNumFlushTokens:58125 flushUtilTargetFraction:1.4250000000000003 totalNumByteTokens:82828 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:2 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 1 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: 1381 setAvailableElasticDiskTokens: unlimited @@ -366,7 +366,7 @@ setAvailableIOTokens: 1381 setAvailableElasticDiskTokens: unlimited set-state l0-bytes=10000 l0-added-write=11000 l0-files=1 l0-sublevels=1 flush-bytes=10000 flush-work-sec=2 flush-idle-sec=10 write-stall-count=8 print-only-first-tick=true ---- compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈312 B], flushed 73 KiB [≈65 KiB]; admitting 88 KiB (rate 5.8 KiB/s) due to memtable flush (multiplier 1.350) (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:8 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:312 smoothedCompactionByteTokens:312.5 smoothedNumFlushTokens:66562.5 flushUtilTargetFraction:1.3500000000000005 totalNumByteTokens:89859 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:5 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:11000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:11000 
curL0Bytes:10000 cumWriteStallCount:8 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:312 smoothedCompactionByteTokens:312.5 smoothedNumFlushTokens:66562.5 flushUtilTargetFraction:1.3500000000000005 totalNumByteTokens:89859 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:5 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 1 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: 1498 setAvailableElasticDiskTokens: unlimited @@ -375,7 +375,7 @@ setAvailableIOTokens: 1498 setAvailableElasticDiskTokens: unlimited set-state l0-bytes=10000 l0-added-write=11000 l0-files=1 l0-sublevels=1 flush-bytes=10000 flush-work-sec=2 flush-idle-sec=10 write-stall-count=9 print-only-first-tick=true ---- compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈156 B], flushed 73 KiB [≈69 KiB]; admitting 92 KiB (rate 6.1 KiB/s) due to memtable flush (multiplier 1.325) (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:9 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:156 smoothedCompactionByteTokens:156.25 smoothedNumFlushTokens:70781.25 flushUtilTargetFraction:1.3250000000000006 totalNumByteTokens:93785 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:1 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:11000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} 
+{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:9 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:156 smoothedCompactionByteTokens:156.25 smoothedNumFlushTokens:70781.25 flushUtilTargetFraction:1.3250000000000006 totalNumByteTokens:93785 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:1 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 1 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: 1564 setAvailableElasticDiskTokens: unlimited @@ -389,7 +389,7 @@ set-min-flush-util percent=130 set-state l0-bytes=10000 l0-added-write=11000 l0-files=1 l0-sublevels=1 flush-bytes=10000 flush-work-sec=2 flush-idle-sec=10 write-stall-count=10 print-only-first-tick=true ---- compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈78 B], flushed 73 KiB [≈71 KiB]; admitting 92 KiB (rate 6.2 KiB/s) due to memtable flush (multiplier 1.300) (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:10 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:78 smoothedCompactionByteTokens:78.125 smoothedNumFlushTokens:72890.625 flushUtilTargetFraction:1.3000000000000007 totalNumByteTokens:94757 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:1 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:11000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} 
+{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:10 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:78 smoothedCompactionByteTokens:78.125 smoothedNumFlushTokens:72890.625 flushUtilTargetFraction:1.3000000000000007 totalNumByteTokens:94757 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:1 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 1 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: 1580 setAvailableElasticDiskTokens: unlimited @@ -399,7 +399,7 @@ setAvailableIOTokens: 1580 setAvailableElasticDiskTokens: unlimited set-state l0-bytes=10000 l0-added-write=11000 l0-files=1 l0-sublevels=1 flush-bytes=10000 flush-work-sec=2 flush-idle-sec=10 write-stall-count=11 print-only-first-tick=true ---- compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈39 B], flushed 73 KiB [≈72 KiB]; admitting 94 KiB (rate 6.3 KiB/s) due to memtable flush (multiplier 1.300) (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:11 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:39 smoothedCompactionByteTokens:39.0625 smoothedNumFlushTokens:73945.3125 flushUtilTargetFraction:1.3000000000000007 totalNumByteTokens:96128 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:1 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:11000 
regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:11 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:39 smoothedCompactionByteTokens:39.0625 smoothedNumFlushTokens:73945.3125 flushUtilTargetFraction:1.3000000000000007 totalNumByteTokens:96128 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:1 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 1 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: 1603 setAvailableElasticDiskTokens: unlimited @@ -414,7 +414,7 @@ set-min-flush-util percent=135 set-state l0-bytes=10000 l0-added-write=11000 l0-files=1 l0-sublevels=1 flush-bytes=10000 flush-work-sec=2 flush-idle-sec=10 write-stall-count=12 print-only-first-tick=true ---- compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈19 B], flushed 73 KiB [≈73 KiB]; admitting 98 KiB (rate 6.5 KiB/s) due to memtable flush (multiplier 1.350) (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:12 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:19 smoothedCompactionByteTokens:19.53125 smoothedNumFlushTokens:74472.65625 flushUtilTargetFraction:1.35 totalNumByteTokens:100538 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:1 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:11000 
regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:12 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:19 smoothedCompactionByteTokens:19.53125 smoothedNumFlushTokens:74472.65625 flushUtilTargetFraction:1.35 totalNumByteTokens:100538 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:1 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 1 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: 1676 setAvailableElasticDiskTokens: unlimited @@ -423,7 +423,7 @@ setAvailableIOTokens: 1676 setAvailableElasticDiskTokens: unlimited set-state l0-bytes=10000 l0-added-write=11000 l0-files=1 l0-sublevels=1 flush-bytes=10000 flush-work-sec=2 flush-idle-sec=100 write-stall-count=13 print-only-first-tick=true ---- compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈9 B], flushed 73 KiB [≈73 KiB]; admitting all -{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:13 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:9 smoothedCompactionByteTokens:9.765625 smoothedNumFlushTokens:74472.65625 flushUtilTargetFraction:1.35 totalNumByteTokens:9223372036854775807 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.0196078431372549 intWriteStalls:1 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:11000 regularTokensUsed:0 
elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:13 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:9 smoothedCompactionByteTokens:9.765625 smoothedNumFlushTokens:74472.65625 flushUtilTargetFraction:1.35 totalNumByteTokens:9223372036854775807 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.0196078431372549 intWriteStalls:1 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 1 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: unlimited setAvailableElasticDiskTokens: unlimited @@ -432,7 +432,7 @@ setAvailableIOTokens: unlimited setAvailableElasticDiskTokens: unlimited set-state l0-bytes=10000 l0-added-write=11000 l0-files=1 l0-sublevels=1 flush-bytes=10000 flush-work-sec=2 flush-idle-sec=10 write-stall-count=13 print-only-first-tick=true ---- compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈4 B], flushed 73 KiB [≈73 KiB]; admitting 98 KiB (rate 6.6 KiB/s) due to memtable flush (multiplier 1.350) (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:13 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:4 smoothedCompactionByteTokens:4.8828125 smoothedNumFlushTokens:74736.328125 flushUtilTargetFraction:1.35 totalNumByteTokens:100894 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:0 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} 
intervalLSMInfo:{incomingBytes:11000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:13 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:4 smoothedCompactionByteTokens:4.8828125 smoothedNumFlushTokens:74736.328125 flushUtilTargetFraction:1.35 totalNumByteTokens:100894 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:0 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 1 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: 1682 setAvailableElasticDiskTokens: unlimited @@ -442,7 +442,7 @@ setAvailableIOTokens: 1682 setAvailableElasticDiskTokens: unlimited set-state l0-bytes=10000 l0-added-write=11000 l0-files=1 l0-sublevels=1 flush-bytes=10000 flush-work-sec=2 flush-idle-sec=10 write-stall-count=13 all-tokens-used=true print-only-first-tick=true ---- compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈2 B], flushed 73 KiB [≈73 KiB]; admitting 99 KiB (rate 6.6 KiB/s) due to memtable flush (multiplier 1.350) (used 0 B) -{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:13 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:2 smoothedCompactionByteTokens:2.44140625 smoothedNumFlushTokens:74868.1640625 flushUtilTargetFraction:1.35 totalNumByteTokens:101072 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:0 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false 
diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:11000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} +{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:13 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:2 smoothedCompactionByteTokens:2.44140625 smoothedNumFlushTokens:74868.1640625 flushUtilTargetFraction:1.35 totalNumByteTokens:101072 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:0 prevTokensUsed:0 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:} store-request-estimates: writeTokens: 1 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1 setAvailableIOTokens: 1685 setAvailableElasticDiskTokens: unlimited @@ -452,7 +452,7 @@ setAvailableIOTokens: 1685 setAvailableElasticDiskTokens: unlimited set-state l0-bytes=10000 l0-added-write=11000 l0-files=1 l0-sublevels=1 flush-bytes=10000 flush-work-sec=2 flush-idle-sec=10 write-stall-count=13 all-tokens-used=true print-only-first-tick=true ---- compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈1 B], flushed 73 KiB [≈73 KiB]; admitting 101 KiB (rate 6.7 KiB/s) due to memtable flush (multiplier 1.375) (used 197 KiB) -{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:13 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:1 smoothedCompactionByteTokens:1.220703125 smoothedNumFlushTokens:74934.08203125 flushUtilTargetFraction:1.375 totalNumByteTokens:103034 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:0 prevTokensUsed:202144 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 
intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:11000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:}
+{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:13 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:1 smoothedCompactionByteTokens:1.220703125 smoothedNumFlushTokens:74934.08203125 flushUtilTargetFraction:1.375 totalNumByteTokens:103034 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:0 prevTokensUsed:202144 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:}
 store-request-estimates: writeTokens: 1
 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1
 setAvailableIOTokens: 1718 setAvailableElasticDiskTokens: unlimited
@@ -462,7 +462,7 @@ setAvailableIOTokens: 1718 setAvailableElasticDiskTokens: unlimited
 set-state l0-bytes=10000 l0-added-write=11000 l0-files=1 l0-sublevels=1 flush-bytes=10000 flush-work-sec=2 flush-idle-sec=10 write-stall-count=13 all-tokens-used=true print-only-first-tick=true
 ----
 compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈0 B], flushed 73 KiB [≈73 KiB]; admitting 102 KiB (rate 6.8 KiB/s) due to memtable flush (multiplier 1.400) (used 201 KiB)
-{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:13 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:0 smoothedCompactionByteTokens:0.6103515625 smoothedNumFlushTokens:74967.041015625 flushUtilTargetFraction:1.4 totalNumByteTokens:104953 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:0 prevTokensUsed:206068 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:11000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:}
+{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:13 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:0 smoothedCompactionByteTokens:0.6103515625 smoothedNumFlushTokens:74967.041015625 flushUtilTargetFraction:1.4 totalNumByteTokens:104953 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:0 prevTokensUsed:206068 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:}
 store-request-estimates: writeTokens: 1
 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1
 setAvailableIOTokens: 1750 setAvailableElasticDiskTokens: unlimited
@@ -472,7 +472,7 @@ setAvailableIOTokens: 1750 setAvailableElasticDiskTokens: unlimited
 set-state l0-bytes=10000 l0-added-write=11000 l0-files=1 l0-sublevels=1 flush-bytes=10000 flush-work-sec=2 flush-idle-sec=10 write-stall-count=14 all-tokens-used=true print-only-first-tick=true
 ----
 compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 0 B (write 0 B ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 0 B [≈0 B], flushed 73 KiB [≈73 KiB]; admitting 101 KiB (rate 6.7 KiB/s) due to memtable flush (multiplier 1.375) (used 205 KiB)
-{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:14 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:0 smoothedCompactionByteTokens:0.30517578125 smoothedNumFlushTokens:74983.5205078125 flushUtilTargetFraction:1.375 totalNumByteTokens:103102 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:1 prevTokensUsed:209906 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:11000 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:}
+{ioLoadListenerState:{cumL0AddedBytes:11000 curL0Bytes:10000 cumWriteStallCount:14 diskBW:{bytesRead:0 bytesWritten:0 incomingLSMBytes:11000} smoothedIntL0CompactedBytes:0 smoothedCompactionByteTokens:0.30517578125 smoothedNumFlushTokens:74983.5205078125 flushUtilTargetFraction:1.375 totalNumByteTokens:103102 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:0 intL0CompactedBytes:0 intFlushTokens:75000 intFlushUtilization:0.16666666666666666 intWriteStalls:1 prevTokensUsed:209906 tokenKind:1 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:0 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:true diskBW:{intervalDiskLoadInfo:{readBandwidth:0 writeBandwidth:0 provisionedBandwidth:0} intervalLSMInfo:{incomingBytes:0 regularTokensUsed:0 elasticTokensUsed:0}}} ioThreshold:}
 store-request-estimates: writeTokens: 1
 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1
 setAvailableIOTokens: 1719 setAvailableElasticDiskTokens: unlimited
@@ -495,26 +495,30 @@ store-request-estimates: writeTokens: 1
 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1
 setAvailableIOTokens: unlimited setAvailableElasticDiskTokens: 105
+# Considered moderate load because not enough history at low load. Elastic
+# tokens don't increase since not fully utilized.
 set-state l0-bytes=100 l0-added-write=200000 bytes-read=2000000 bytes-written=4000000 provisioned-bandwidth=4000000 disk-bw-tokens-used=(100,100) l0-files=1 l0-sublevels=1 print-only-first-tick=true
 ----
-compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 98 KiB [≈73 KiB], flushed 0 B [≈0 B]; admitting all; elastic tokens 6.1 KiB (used 100 B, regular used 100 B): write model 1.75x+1 B ingest model 1.00x+1 B, disk bw read 130 KiB write 260 KiB provisioned 3.8 MiB
-{ioLoadListenerState:{cumL0AddedBytes:200000 curL0Bytes:100 cumWriteStallCount:0 diskBW:{bytesRead:2000000 bytesWritten:4000000 incomingLSMBytes:200000} smoothedIntL0CompactedBytes:75000 smoothedCompactionByteTokens:75000 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:9223372036854775807 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:6250 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:100000 intL0CompactedBytes:100000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:100000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:133333 writeBandwidth:266666 provisionedBandwidth:4000000} intervalLSMInfo:{incomingBytes:200000 regularTokensUsed:100 elasticTokensUsed:100}}} ioThreshold:}
+compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 98 KiB [≈73 KiB], flushed 0 B [≈0 B]; admitting all; elastic tokens 6.1 KiB (used 100 B, regular used 100 B): write model 1.75x+1 B ingest model 1.00x+1 B, disk bw read 65 KiB write 130 KiB provisioned 3.8 MiB
+{ioLoadListenerState:{cumL0AddedBytes:200000 curL0Bytes:100 cumWriteStallCount:0 diskBW:{bytesRead:2000000 bytesWritten:4000000 incomingLSMBytes:200000} smoothedIntL0CompactedBytes:75000 smoothedCompactionByteTokens:75000 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:9223372036854775807 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:6250 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:100000 intL0CompactedBytes:100000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:100000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:66666 writeBandwidth:133333 provisionedBandwidth:4000000} intervalLSMInfo:{incomingBytes:100000 regularTokensUsed:100 elasticTokensUsed:100}}} ioThreshold:}
 store-request-estimates: writeTokens: 1
 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1
 setAvailableIOTokens: unlimited setAvailableElasticDiskTokens: 105
-set-state l0-bytes=100 l0-added-write=300000 bytes-read=3000000 bytes-written=6000000 provisioned-bandwidth=4000000 disk-bw-tokens-used=(100,100) l0-files=1 l0-sublevels=1 print-only-first-tick=true
+# Stay at moderate load since utilization increasing.
+set-state l0-bytes=100 l0-added-write=300000 bytes-read=4000000 bytes-written=8000000 provisioned-bandwidth=4000000 disk-bw-tokens-used=(100,100) l0-files=1 l0-sublevels=1 print-only-first-tick=true
 ----
-compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 98 KiB [≈85 KiB], flushed 0 B [≈0 B]; admitting all; elastic tokens 6.1 KiB (used 100 B, regular used 100 B): write model 1.75x+1 B ingest model 1.00x+1 B, disk bw read 195 KiB write 391 KiB provisioned 3.8 MiB
-{ioLoadListenerState:{cumL0AddedBytes:300000 curL0Bytes:100 cumWriteStallCount:0 diskBW:{bytesRead:3000000 bytesWritten:6000000 incomingLSMBytes:300000} smoothedIntL0CompactedBytes:87500 smoothedCompactionByteTokens:87500 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:9223372036854775807 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:6250 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:100000 intL0CompactedBytes:100000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:100000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:200000 writeBandwidth:400000 provisionedBandwidth:4000000} intervalLSMInfo:{incomingBytes:300000 regularTokensUsed:100 elasticTokensUsed:100}}} ioThreshold:}
+compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 98 KiB [≈85 KiB], flushed 0 B [≈0 B]; admitting all; elastic tokens 6.1 KiB (used 100 B, regular used 100 B): write model 1.75x+1 B ingest model 1.00x+1 B, disk bw read 130 KiB write 260 KiB provisioned 3.8 MiB
+{ioLoadListenerState:{cumL0AddedBytes:300000 curL0Bytes:100 cumWriteStallCount:0 diskBW:{bytesRead:4000000 bytesWritten:8000000 incomingLSMBytes:300000} smoothedIntL0CompactedBytes:87500 smoothedCompactionByteTokens:87500 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:9223372036854775807 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:6250 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:100000 intL0CompactedBytes:100000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:100000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:133333 writeBandwidth:266666 provisionedBandwidth:4000000} intervalLSMInfo:{incomingBytes:100000 regularTokensUsed:100 elasticTokensUsed:100}}} ioThreshold:}
 store-request-estimates: writeTokens: 1
 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1
 setAvailableIOTokens: unlimited setAvailableElasticDiskTokens: 105
-set-state l0-bytes=100 l0-added-write=400000 bytes-read=4000000 bytes-written=8000000 provisioned-bandwidth=5000000 disk-bw-tokens-used=(100,100) l0-files=1 l0-sublevels=1 print-only-first-tick=true
+# Drop to low load.
+set-state l0-bytes=100 l0-added-write=400000 bytes-read=5000000 bytes-written=9000000 provisioned-bandwidth=5000000 disk-bw-tokens-used=(100,100) l0-files=1 l0-sublevels=1 print-only-first-tick=true
 ----
 compaction score 0.050 (1 ssts, 1 sub-levels), L0 growth 98 KiB (write 98 KiB ingest 0 B ignored 0 B): requests 0 (0 bypassed) with 0 B acc-write (0 B bypassed) + 0 B acc-ingest (0 B bypassed) + write-model 0.00x+0 B (smoothed 1.75x+1 B) + ingested-model 0.00x+0 B (smoothed 0.75x+1 B) + at-admission-tokens 1 B, compacted 98 KiB [≈92 KiB], flushed 0 B [≈0 B]; admitting all
-{ioLoadListenerState:{cumL0AddedBytes:400000 curL0Bytes:100 cumWriteStallCount:0 diskBW:{bytesRead:4000000 bytesWritten:8000000 incomingLSMBytes:400000} smoothedIntL0CompactedBytes:93750 smoothedCompactionByteTokens:93750 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:9223372036854775807 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:100000 intL0CompactedBytes:100000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:100000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:266666 writeBandwidth:533333 provisionedBandwidth:5000000} intervalLSMInfo:{incomingBytes:400000 regularTokensUsed:100 elasticTokensUsed:100}}} ioThreshold:}
+{ioLoadListenerState:{cumL0AddedBytes:400000 curL0Bytes:100 cumWriteStallCount:0 diskBW:{bytesRead:5000000 bytesWritten:9000000 incomingLSMBytes:400000} smoothedIntL0CompactedBytes:93750 smoothedCompactionByteTokens:93750 smoothedNumFlushTokens:0 flushUtilTargetFraction:1.5 totalNumByteTokens:9223372036854775807 byteTokensAllocated:0 byteTokensUsed:0 elasticDiskBWTokens:9223372036854775807 elasticDiskBWTokensAllocated:0} requestEstimates:{writeTokens:1} l0WriteLM:{multiplier:1.75 constant:1} l0IngestLM:{multiplier:0.7505 constant:1} ingestLM:{multiplier:1 constant:1} aux:{intL0AddedBytes:100000 intL0CompactedBytes:100000 intFlushTokens:0 intFlushUtilization:0 intWriteStalls:0 prevTokensUsed:0 tokenKind:0 perWorkTokensAux:{intWorkCount:0 intL0WriteBytes:100000 intL0IngestedBytes:0 intLSMIngestedBytes:0 intL0WriteAccountedBytes:0 intIngestedAccountedBytes:0 intL0WriteLinearModel:{multiplier:0 constant:0} intL0IngestedLinearModel:{multiplier:0 constant:0} intIngestedLinearModel:{multiplier:0 constant:0} intBypassedWorkCount:0 intL0WriteBypassedAccountedBytes:0 intIngestedBypassedAccountedBytes:0 intL0IgnoredIngestedBytes:0} doLogFlush:false diskBW:{intervalDiskLoadInfo:{readBandwidth:66666 writeBandwidth:66666 provisionedBandwidth:5000000} intervalLSMInfo:{incomingBytes:100000 regularTokensUsed:100 elasticTokensUsed:100}}} ioThreshold:}
 store-request-estimates: writeTokens: 1
 tick: 0, setAdmittedDoneModelsLocked: l0-write-lm: 1.75x+1 l0-ingest-lm: 0.75x+1 ingest-lm: 1.00x+1
 setAvailableIOTokens: unlimited setAvailableElasticDiskTokens: unlimited