GH-3686: Fix observation scope closure in the KafkaMLContainer #3689

Merged: 1 commit, Dec 17, 2024

Changed file 1 of 2: KafkaMessageListenerContainer (the listener consumer)

@@ -2750,7 +2750,6 @@ private void pauseForNackSleep() {
 	 * @throws Error an error.
 	 */
 	@Nullable
-	@SuppressWarnings("try")
 	private RuntimeException doInvokeRecordListener(final ConsumerRecord<K, V> cRecord, // NOSONAR
 			Iterator<ConsumerRecord<K, V>> iterator) {
 
@@ -2763,7 +2762,9 @@ private RuntimeException doInvokeRecordListener(final ConsumerRecord<K, V> cReco
 				this.observationRegistry);
 
 		observation.start();
-		try (Observation.Scope ignored = observation.openScope()) {
+		Observation.Scope observationScope = observation.openScope();
+		// We cannot use 'try-with-resources' because the resource is closed just before the catch block
+		try {
 			invokeOnMessage(cRecord);
 			successTimer(sample, cRecord);
 			recordInterceptAfter(cRecord, null);
@@ -2802,6 +2803,7 @@ private RuntimeException doInvokeRecordListener(final ConsumerRecord<K, V> cReco
 				if (!(this.listener instanceof RecordMessagingMessageListenerAdapter<K, V>)) {
 					observation.stop();
 				}
+				observationScope.close();
 			}
 			return null;
 		}
@@ -4020,6 +4022,6 @@ private static class StopAfterFenceException extends KafkaException {
 
 	}
 
-	private record FailedRecordTuple<K, V>(ConsumerRecord<K, V> record, RuntimeException ex) { };
+	private record FailedRecordTuple<K, V>(ConsumerRecord<K, V> record, RuntimeException ex) { }
 
 }
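
Aside on the change above: in Java, a resource declared in a try-with-resources statement is closed before the catch and finally blocks of that same statement run (this is also why the removed @SuppressWarnings("try"), which silenced the unused-resource warning on the ignored variable, is no longer needed). Closing the scope that early meant the listener observation was no longer current while the error path, including RecordInterceptor.failure, executed. A minimal sketch of that ordering, with illustrative names only (nothing below is taken from the PR):

// Minimal sketch (illustrative class names, not from the PR): a try-with-resources
// resource is closed BEFORE the catch block of the same statement runs, so a scope
// opened that way is already gone when error handling executes.
public class ScopeCloseOrderingDemo {

	static final ThreadLocal<String> CURRENT_SCOPE = new ThreadLocal<>();

	static class Scope implements AutoCloseable {

		Scope() {
			CURRENT_SCOPE.set("open");
		}

		@Override
		public void close() {
			CURRENT_SCOPE.remove();
		}

	}

	public static void main(String[] args) {
		// try-with-resources: the scope is closed before the catch block is entered
		try (Scope ignored = new Scope()) {
			throw new IllegalStateException("listener failed");
		}
		catch (IllegalStateException ex) {
			System.out.println("try-with-resources, scope seen in catch: " + CURRENT_SCOPE.get()); // null
		}

		// manual close in finally (the shape used by this PR): still open inside catch
		Scope scope = new Scope();
		try {
			throw new IllegalStateException("listener failed");
		}
		catch (IllegalStateException ex) {
			System.out.println("manual close, scope seen in catch: " + CURRENT_SCOPE.get()); // "open"
		}
		finally {
			scope.close();
		}
	}

}

With the manual form the scope outlives the catch block and is only closed in finally, after error handling has run, which mirrors what the diff above does with observationScope.
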

Changed file 2 of 2: the Micrometer observation integration tests

@@ -32,6 +32,7 @@
 import io.micrometer.core.instrument.observation.DefaultMeterObservationHandler;
 import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
 import io.micrometer.core.tck.MeterRegistryAssert;
+import io.micrometer.observation.Observation;
 import io.micrometer.observation.ObservationHandler;
 import io.micrometer.observation.ObservationRegistry;
 import io.micrometer.observation.tck.TestObservationRegistry;
@@ -45,6 +46,7 @@
 import io.micrometer.tracing.test.simple.SimpleSpan;
 import io.micrometer.tracing.test.simple.SimpleTracer;
 import org.apache.kafka.clients.admin.AdminClientConfig;
+import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.producer.ProducerConfig;
@@ -74,6 +76,7 @@
 import org.springframework.kafka.core.KafkaTemplate;
 import org.springframework.kafka.core.ProducerFactory;
 import org.springframework.kafka.listener.MessageListenerContainer;
+import org.springframework.kafka.listener.RecordInterceptor;
 import org.springframework.kafka.support.micrometer.KafkaListenerObservation.DefaultKafkaListenerObservationConvention;
 import org.springframework.kafka.support.micrometer.KafkaTemplateObservation.DefaultKafkaTemplateObservationConvention;
 import org.springframework.kafka.test.EmbeddedKafkaBroker;
@@ -356,7 +359,7 @@ private void assertThatAdmin(Object object, KafkaAdmin admin, String brokersStri
 	@Test
 	void observationRuntimeException(@Autowired ExceptionListener listener, @Autowired SimpleTracer tracer,
 			@Autowired @Qualifier("throwableTemplate") KafkaTemplate<Integer, String> runtimeExceptionTemplate,
-			@Autowired KafkaListenerEndpointRegistry endpointRegistry)
+			@Autowired KafkaListenerEndpointRegistry endpointRegistry, @Autowired Config config)
 			throws ExecutionException, InterruptedException, TimeoutException {
 
 		runtimeExceptionTemplate.send(OBSERVATION_RUNTIME_EXCEPTION, "testRuntimeException").get(10, TimeUnit.SECONDS);
@@ -372,6 +375,8 @@ void observationRuntimeException(@Autowired ExceptionListener listener, @Autowir
 		assertThat(span.getError().getCause())
 				.isInstanceOf(IllegalStateException.class)
 				.hasMessage("obs4 run time exception");
+
+		assertThat(config.scopeInFailureReference.get()).isNotNull();
 	}
 
 	@Test
@@ -445,6 +450,8 @@ public static class Config {
 
 		KafkaAdmin mockAdmin = mock(KafkaAdmin.class);
 
+		AtomicReference<Observation.Scope> scopeInFailureReference = new AtomicReference<>();
+
 		@Bean
 		KafkaAdmin admin(EmbeddedKafkaBroker broker) {
 			String[] brokers = StringUtils.commaDelimitedListToStringArray(broker.getBrokersAsString());
@@ -512,7 +519,7 @@ KafkaTemplate<Integer, String> reuseAdminBeanKafkaTemplate(
 
 		@Bean
 		ConcurrentKafkaListenerContainerFactory<Integer, String> kafkaListenerContainerFactory(
-				ConsumerFactory<Integer, String> cf) {
+				ConsumerFactory<Integer, String> cf, ObservationRegistry observationRegistry) {
 
 			ConcurrentKafkaListenerContainerFactory<Integer, String> factory =
 					new ConcurrentKafkaListenerContainerFactory<>();
@@ -522,6 +529,24 @@ ConcurrentKafkaListenerContainerFactory<Integer, String> kafkaListenerContainerF
 				if (container.getListenerId().equals("obs3")) {
 					container.setKafkaAdmin(this.mockAdmin);
 				}
+				if (container.getListenerId().equals("obs4")) {
+					container.setRecordInterceptor(new RecordInterceptor<>() {
+
+						@Override
+						public ConsumerRecord<Integer, String> intercept(ConsumerRecord<Integer, String> record,
+								Consumer<Integer, String> consumer) {
+
+							return record;
+						}
+
+						@Override
+						public void failure(ConsumerRecord<Integer, String> record, Exception exception,
+								Consumer<Integer, String> consumer) {
+
+							Config.this.scopeInFailureReference.set(observationRegistry.getCurrentObservationScope());
+						}
+					});
+				}
 			});
 			return factory;
 		}
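
What the new test wiring verifies: the obs4 listener throws, the RecordInterceptor registered for that container records ObservationRegistry.getCurrentObservationScope() in its failure callback, and observationRuntimeException asserts that the captured scope is non-null, i.e. the listener observation scope is still open during failure handling. A hedged sketch of how application code could lean on the same guarantee; the bean name, generic types, and tag key are illustrative assumptions, while RecordInterceptor, getCurrentObservation(), and highCardinalityKeyValue(...) are existing Spring Kafka / Micrometer APIs:

// Hedged sketch, not part of the PR: an application-level RecordInterceptor that relies on
// the listener observation still being current inside failure(). Bean name and tag key are
// illustrative; the APIs used exist in Spring Kafka and Micrometer Observation.
import io.micrometer.observation.Observation;
import io.micrometer.observation.ObservationRegistry;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.listener.RecordInterceptor;

@Configuration
class FailureTaggingInterceptorConfig {

	@Bean
	RecordInterceptor<Integer, String> failureTaggingInterceptor(ObservationRegistry observationRegistry) {
		return new RecordInterceptor<>() {

			@Override
			public ConsumerRecord<Integer, String> intercept(ConsumerRecord<Integer, String> record,
					Consumer<Integer, String> consumer) {

				return record;
			}

			@Override
			public void failure(ConsumerRecord<Integer, String> record, Exception exception,
					Consumer<Integer, String> consumer) {

				// Before this fix the listener observation was no longer current here;
				// with the scope closed in finally instead, it is still open.
				Observation current = observationRegistry.getCurrentObservation();
				if (current != null) {
					current.highCardinalityKeyValue("kafka.failed.offset", Long.toString(record.offset()));
				}
			}

		};
	}

}

Registering such an interceptor on an observation-enabled container lets failure-path code tag or log against the same observation that traced the record's consumption.
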