Use failure_rate instead of failure count for circuit breaker
Continuation of bazelbuild#18359
I ran multiple experiments to find the optimal failure threshold and failure window interval with different remote_timeout values, for a healthy remote cache, a semi-healthy (overloaded) remote cache, and an unhealthy remote cache.
As I described [here](bazelbuild#18359 (comment)), even with a healthy remote cache the circuit tripped on 5-10% of builds, so we were not getting the best results.

Issues related to the failure count:
1. When the remote cache is healthy, builds are fast, and Bazel makes a high number of calls to the buildfarm. As a result, even with a moderate failure rate, the failure count may exceed the threshold.
2. Additionally, write calls, which have a higher probability of failure compared to other calls, are batched immediately after the completion of an action's build. This further increases the chances of breaching the failure threshold within the defined window interval.
3. On the other hand, when the remote cache is unhealthy or semi-healthy, builds are significantly slowed down, and Bazel makes fewer calls to the remote cache (see the sketch below).
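
To make points 1 and 3 concrete, here is a back-of-the-envelope sketch. The thresholds match the old default count (100) and the new default rate (10%), but the call and failure counts are made-up illustrative numbers, not measurements from the experiments above:

```java
// Illustrative only: the call/failure counts below are invented, not measured values.
public class CountVsRateSketch {
  public static void main(String[] args) {
    int failureCountThreshold = 100; // old --experimental_remote_failure_threshold default
    int failureRateThreshold = 10;   // new --experimental_remote_failure_rate_threshold default (percent)

    // Healthy cache: many calls per window, small failure fraction.
    report("healthy", 10_000, 200, failureCountThreshold, failureRateThreshold);
    // Unhealthy cache: far fewer calls per window, large failure fraction.
    report("unhealthy", 500, 200, failureCountThreshold, failureRateThreshold);
  }

  private static void report(
      String label, int calls, int failures, int countThreshold, int rateThreshold) {
    double failureRate = failures * 100.0 / calls;
    System.out.printf(
        "%s cache: count threshold trips=%b, rate threshold trips=%b (%.1f%% failures)%n",
        label, failures > countThreshold, failureRate > rateThreshold, failureRate);
  }
}
```

With a count threshold both scenarios trip the breaker, even though the healthy cache is failing only 2% of its calls; with a rate threshold only the unhealthy cache trips.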

Finding a single configuration that worked well for both healthy and unhealthy remote caches was not feasible. Therefore, I changed the approach to use the failure rate instead, and easily found a configuration that works effectively in both scenarios.
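
The heart of the change is the rate check in FailureCircuitBreaker.recordFailure(). Below is a minimal standalone sketch of that logic; the class name and accessor are mine, and the sliding-window eviction that the real class performs with a ScheduledExecutorService is omitted:

```java
import java.util.concurrent.atomic.AtomicInteger;

// Simplified stand-in for FailureCircuitBreaker: only the rate computation and trip decision.
class RateBasedTripSketch {
  private final AtomicInteger successes = new AtomicInteger(0);
  private final AtomicInteger failures = new AtomicInteger(0);
  private final int failureRateThreshold;              // percent, e.g. 10
  private final int minCallCountToComputeFailureRate;  // e.g. 100
  private volatile boolean rejectCalls = false;

  RateBasedTripSketch(int failureRateThreshold, int minCallCountToComputeFailureRate) {
    this.failureRateThreshold = failureRateThreshold;
    this.minCallCountToComputeFailureRate = minCallCountToComputeFailureRate;
  }

  void recordSuccess() {
    successes.incrementAndGet();
  }

  void recordFailure() {
    int failureCount = failures.incrementAndGet();
    int totalCallCount = successes.get() + failureCount;
    if (totalCallCount < minCallCountToComputeFailureRate) {
      // Too few calls so far to compute a meaningful failure rate.
      return;
    }
    double failureRate = failureCount * 100.0 / totalCallCount;
    // The state only ever moves toward rejecting calls, so no synchronization is needed.
    if (failureRate > failureRateThreshold) {
      rejectCalls = true;
    }
  }

  boolean rejectCalls() {
    return rejectCalls;
  }
}
```

Non-retriable gRPC errors (NOT_FOUND, OUT_OF_RANGE, ALREADY_EXISTS, etc.) are now recorded as successes in the Retrier, so they no longer push this rate toward the threshold.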

Closes bazelbuild#18539.

PiperOrigin-RevId: 538588379
Change-Id: I64a49eeeb32846d41d54ca3b637ded3085588528
amishra-u committed Jun 7, 2023
1 parent ca13a8e commit 414bd11
Showing 7 changed files with 109 additions and 96 deletions.
@@ -35,7 +35,7 @@
public class RemoteRetrier extends Retrier {

@Nullable
public static Status fromException(Exception e) {
private static Status fromException(Exception e) {
for (Throwable cause = e; cause != null; cause = cause.getCause()) {
if (cause instanceof StatusRuntimeException) {
return ((StatusRuntimeException) cause).getStatus();
23 changes: 14 additions & 9 deletions src/main/java/com/google/devtools/build/lib/remote/Retrier.java
@@ -100,7 +100,7 @@ enum State {
State state();

/** Called after an execution failed. */
void recordFailure(Exception e);
void recordFailure();

/** Called after an execution succeeded. */
void recordSuccess();
@@ -130,7 +130,7 @@ public State state() {
}

@Override
public void recordFailure(Exception e) {}
public void recordFailure() {}

@Override
public void recordSuccess() {}
@@ -245,12 +245,14 @@ public <T> T execute(Callable<T> call, Backoff backoff) throws Exception {
circuitBreaker.recordSuccess();
return r;
} catch (Exception e) {
circuitBreaker.recordFailure(e);
Throwables.throwIfInstanceOf(e, InterruptedException.class);
if (State.TRIAL_CALL.equals(circuitState)) {
if (!shouldRetry.test(e)) {
// A non-retriable error doesn't represent server failure.
circuitBreaker.recordSuccess();
throw e;
}
if (!shouldRetry.test(e)) {
circuitBreaker.recordFailure();
if (State.TRIAL_CALL.equals(circuitState)) {
throw e;
}
final long delayMillis = backoff.nextDelayMillis(e);
@@ -297,11 +299,11 @@ public <T> ListenableFuture<T> executeAsync(AsyncCallable<T> call, Backoff backo

private <T> ListenableFuture<T> onExecuteAsyncFailure(
Exception t, AsyncCallable<T> call, Backoff backoff, State circuitState) {
circuitBreaker.recordFailure(t);
if (circuitState.equals(State.TRIAL_CALL)) {
return Futures.immediateFailedFuture(t);
}
if (isRetriable(t)) {
circuitBreaker.recordFailure();
if (circuitState.equals(State.TRIAL_CALL)) {
return Futures.immediateFailedFuture(t);
}
long waitMillis = backoff.nextDelayMillis(t);
if (waitMillis >= 0) {
try {
@@ -315,6 +317,9 @@ private <T> ListenableFuture<T> onExecuteAsyncFailure(
return Futures.immediateFailedFuture(t);
}
} else {
// gRPC errors such as NOT_FOUND, OUT_OF_RANGE, and ALREADY_EXISTS are non-retriable and don't represent a
// server issue, so these calls are treated as successful.
circuitBreaker.recordSuccess();
return Futures.immediateFailedFuture(t);
}
}
@@ -14,31 +14,11 @@
package com.google.devtools.build.lib.remote.circuitbreaker;

import com.google.devtools.build.lib.remote.Retrier;
import com.google.devtools.build.lib.remote.common.CacheNotFoundException;
import com.google.devtools.build.lib.remote.options.RemoteOptions;
import io.grpc.Status;
import java.util.function.Predicate;

import static com.google.devtools.build.lib.remote.RemoteRetrier.fromException;


/** Factory for {@link Retrier.CircuitBreaker} */
public class CircuitBreakerFactory {
public static final Predicate<? super Exception> DEFAULT_IGNORED_ERRORS =
e -> {
Status s = fromException(e);
if (s == null) {
return e.getClass() == CacheNotFoundException.class;
}
switch (s.getCode()) {
case NOT_FOUND:
case OUT_OF_RANGE:
return true;
default:
return false;
}
};
public static final int DEFAULT_MIN_CALL_COUNT_TO_COMPUTE_FAILURE_RATE = 100;

private CircuitBreakerFactory() {}

@@ -53,7 +33,7 @@ private CircuitBreakerFactory() {}
public static Retrier.CircuitBreaker createCircuitBreaker(final RemoteOptions remoteOptions) {
if (remoteOptions.circuitBreakerStrategy == RemoteOptions.CircuitBreakerStrategy.FAILURE) {
return new FailureCircuitBreaker(
remoteOptions.remoteFailureThreshold,
remoteOptions.remoteFailureRateThreshold,
(int) remoteOptions.remoteFailureWindowInterval.toMillis());
}
return Retrier.ALLOW_ALL_CALLS;
@@ -18,41 +18,43 @@
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;

/**
* The {@link FailureCircuitBreaker} implementation of the {@link Retrier.CircuitBreaker} prevents
* further calls to a remote cache once the number of failures within a given window exceeds a
* specified threshold for a build. In the context of Bazel, a new instance of {@link
* Retrier.CircuitBreaker} is created for each build. Therefore, if the circuit breaker trips during
* a build, the remote cache will be disabled for that build. However, it will be enabled again for
* the next build as a new instance of {@link Retrier.CircuitBreaker} will be created.
 * further calls to a remote cache once the failure rate within a given window exceeds a specified
* threshold for a build. In the context of Bazel, a new instance of {@link Retrier.CircuitBreaker}
* is created for each build. Therefore, if the circuit breaker trips during a build, the remote
* cache will be disabled for that build. However, it will be enabled again for the next build as a
* new instance of {@link Retrier.CircuitBreaker} will be created.
*/
public class FailureCircuitBreaker implements Retrier.CircuitBreaker {

private State state;
private final AtomicInteger successes;
private final AtomicInteger failures;
private final int failureThreshold;
private final int failureRateThreshold;
private final int slidingWindowSize;
private final int minCallCountToComputeFailureRate;
private final ScheduledExecutorService scheduledExecutor;
private final Predicate<? super Exception> ignoredErrors;

/**
* Creates a {@link FailureCircuitBreaker}.
*
* @param failureThreshold is used to set the number of failures required to trip the circuit
* breaker in given time window.
* @param failureRateThreshold is used to set the min percentage of failure required to trip the
* circuit breaker in given time window.
* @param slidingWindowSize the size of the sliding window in milliseconds to calculate the number
* of failures.
*/
public FailureCircuitBreaker(int failureThreshold, int slidingWindowSize) {
this.failureThreshold = failureThreshold;
public FailureCircuitBreaker(int failureRateThreshold, int slidingWindowSize) {
this.failures = new AtomicInteger(0);
this.successes = new AtomicInteger(0);
this.failureRateThreshold = failureRateThreshold;
this.slidingWindowSize = slidingWindowSize;
this.minCallCountToComputeFailureRate =
CircuitBreakerFactory.DEFAULT_MIN_CALL_COUNT_TO_COMPUTE_FAILURE_RATE;
this.state = State.ACCEPT_CALLS;
this.scheduledExecutor =
slidingWindowSize > 0 ? Executors.newSingleThreadScheduledExecutor() : null;
this.ignoredErrors = CircuitBreakerFactory.DEFAULT_IGNORED_ERRORS;
}

@Override
@@ -61,23 +63,30 @@ public State state() {
}

@Override
public void recordFailure(Exception e) {
if (!ignoredErrors.test(e)) {
int failureCount = failures.incrementAndGet();
if (slidingWindowSize > 0) {
var unused =
scheduledExecutor.schedule(
failures::decrementAndGet, slidingWindowSize, TimeUnit.MILLISECONDS);
}
// Since the state can only be changed to the open state, synchronization is not required.
if (failureCount > this.failureThreshold) {
this.state = State.REJECT_CALLS;
}
public void recordFailure() {
int failureCount = failures.incrementAndGet();
int totalCallCount = successes.get() + failureCount;
if (slidingWindowSize > 0) {
var unused = scheduledExecutor.schedule(failures::decrementAndGet, slidingWindowSize, TimeUnit.MILLISECONDS);
}

if (totalCallCount < minCallCountToComputeFailureRate) {
// The remote call count is below the threshold required to calculate the failure rate.
return;
}
double failureRate = (failureCount * 100.0) / totalCallCount;

// Since the state can only be changed to the open state, synchronization is not required.
if (failureRate > this.failureRateThreshold) {
this.state = State.REJECT_CALLS;
}
}

@Override
public void recordSuccess() {
// do nothing, implement if we need to set threshold on failure rate instead of count.
successes.incrementAndGet();
if (slidingWindowSize > 0) {
var unused = scheduledExecutor.schedule(successes::decrementAndGet, slidingWindowSize, TimeUnit.MILLISECONDS);
}
}
}
@@ -666,15 +666,16 @@ public RemoteOutputsStrategyConverter() {
public CircuitBreakerStrategy circuitBreakerStrategy;

@Option(
name = "experimental_remote_failure_threshold",
defaultValue = "100",
name = "experimental_remote_failure_rate_threshold",
defaultValue = "10",
documentationCategory = OptionDocumentationCategory.REMOTE,
effectTags = {OptionEffectTag.EXECUTION},
converter = Converters.PercentageConverter.class,
help =
"Sets the allowed number of failures in a specific time window after which it stops"
+ " calling to the remote cache/executor. By default the value is 100. Setting this"
+ " to 0 or negative means no limitation.")
public int remoteFailureThreshold;
"Sets the allowed number of failure rate in percentage for a specific time window after"
+ " which it stops calling to the remote cache/executor. By default the value is 10."
+ " Setting this to 0 means no limitation.")
public int remoteFailureRateThreshold;

@Option(
name = "experimental_remote_failure_window_interval",
@@ -683,7 +684,7 @@ public RemoteOutputsStrategyConverter() {
effectTags = {OptionEffectTag.EXECUTION},
converter = RemoteDurationConverter.class,
help =
"The interval in which the failure count of the remote requests are computed. On zero or"
"The interval in which the failure rate of the remote requests are computed. On zero or"
+ " negative value the failure duration is computed the whole duration of the"
+ " execution.Following units can be used: Days (d), hours (h), minutes (m), seconds"
+ " (s), and milliseconds (ms). If the unit is omitted, the value is interpreted as"
@@ -38,6 +38,9 @@
import java.util.function.Predicate;
import java.util.function.Supplier;
import javax.annotation.concurrent.ThreadSafe;

import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -94,7 +97,7 @@ public void retryShouldWork_failure() throws Exception {
assertThat(e).hasMessageThat().isEqualTo("call failed");

assertThat(numCalls.get()).isEqualTo(3);
verify(alwaysOpen, times(3)).recordFailure(any(Exception.class));
verify(alwaysOpen, times(3)).recordFailure();
verify(alwaysOpen, never()).recordSuccess();
}

@@ -118,8 +121,8 @@ public void retryShouldWorkNoRetries_failure() throws Exception {
assertThat(e).hasMessageThat().isEqualTo("call failed");

assertThat(numCalls.get()).isEqualTo(1);
verify(alwaysOpen, times(1)).recordFailure(e);
verify(alwaysOpen, never()).recordSuccess();
verify(alwaysOpen, never()).recordFailure();
verify(alwaysOpen, times(1)).recordSuccess();
}

@Test
Expand All @@ -139,7 +142,7 @@ public void retryShouldWork_success() throws Exception {
});
assertThat(val).isEqualTo(1);

verify(alwaysOpen, times(2)).recordFailure(any(Exception.class));
verify(alwaysOpen, times(2)).recordFailure();
verify(alwaysOpen, times(1)).recordSuccess();
}

@@ -351,7 +354,7 @@ public synchronized State state() {
}

@Override
public synchronized void recordFailure(Exception e) {
public synchronized void recordFailure() {
consecutiveFailures++;
if (consecutiveFailures >= maxConsecutiveFailures) {
state = State.REJECT_CALLS;