Merge pull request #21 from deviceinsight/feature/ISSUE-20
ISSUE-20: Expose cache metrics
commit bb6650e1bc
@@ -7,6 +7,8 @@
   millisecond values (`long`) as well to stay compatible with old configurations.
 * Dependency versions are now managed by `spring-boot-dependencies`.
   (https://github.com/deviceinsight/kafka-health-check/issues/17[ISSUE-17])
+* As of now, cache metrics can be exposed. For this purpose, a corresponding MeterRegistry instance must be passed
+  when instantiating the Kafka Health Check. (https://github.com/deviceinsight/kafka-health-check/issues/20[ISSUE-20])
 
 == Version 1.2.0
 
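The changelog entry above states that a MeterRegistry must be passed when instantiating the health check. A minimal wiring sketch follows, assuming a Spring context that provides a Micrometer MeterRegistry bean; the configuration class name, the KafkaHealthProperties setter and the bootstrap server values are illustrative and not part of this commit.

import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import com.deviceinsight.kafka.health.KafkaConsumingHealthIndicator;
import com.deviceinsight.kafka.health.KafkaHealthProperties;

import io.micrometer.core.instrument.MeterRegistry;

@Configuration
public class KafkaHealthCheckConfiguration {

    @Bean
    public KafkaConsumingHealthIndicator kafkaConsumingHealthIndicator(MeterRegistry meterRegistry) {
        // Assumed setter; only getTopic() is visible in this diff.
        KafkaHealthProperties properties = new KafkaHealthProperties();
        properties.setTopic("health-checks");

        Map<String, Object> consumerProperties =
                Map.of(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        Map<String, Object> producerProperties =
                Map.of(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        // Passing the MeterRegistry enables the Caffeine cache metrics added in this PR;
        // the existing three-argument constructor delegates with a null registry and skips them.
        return new KafkaConsumingHealthIndicator(properties, consumerProperties, producerProperties, meterRegistry);
    }
}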
pom.xml

@@ -16,7 +16,7 @@
 	<parent>
 		<groupId>org.springframework.boot</groupId>
 		<artifactId>spring-boot-dependencies</artifactId>
-		<version>2.3.8.RELEASE</version>
+		<version>2.4.4</version>
 		<relativePath />
 	</parent>
 
@@ -30,7 +30,7 @@
 		<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
 
 		<!-- Versions -->
-		<guava.version>30.1-jre</guava.version>
+		<guava.version>30.1.1-jre</guava.version>
 
 		<nexus-staging-maven-plugin.version>1.6.8</nexus-staging-maven-plugin.version>
 		<maven-gpg-plugin.version>1.6</maven-gpg-plugin.version>
@@ -134,7 +134,7 @@
 
 	<developers>
 		<developer>
-			<id>ManuZiD</id>
+			<id>ezienecker</id>
 			<name>Emanuel Zienecker</name>
 			<email>emanuel.zienecker@device-insight.com</email>
 			<roles>
KafkaConsumingHealthIndicator.java

@@ -4,6 +4,9 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 import com.github.benmanes.caffeine.cache.Cache;
 import com.github.benmanes.caffeine.cache.Caffeine;
+import io.micrometer.core.instrument.MeterRegistry;
+import io.micrometer.core.instrument.Tag;
+import io.micrometer.core.instrument.binder.cache.CaffeineCacheMetrics;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
@@ -44,9 +47,9 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 
 	private static final Logger logger = LoggerFactory.getLogger(KafkaConsumingHealthIndicator.class);
 	private static final String CONSUMER_GROUP_PREFIX = "health-check-";
+	private static final String CACHE_NAME = "kafka-health-check";
 
 	private final Consumer<String, String> consumer;
 
 	private final Producer<String, String> producer;
 
 	private final String topic;
@@ -57,11 +60,18 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 	private final ExecutorService executor;
 	private final AtomicBoolean running;
 	private final Cache<String, String> cache;
+	private final String consumerGroupId;
 
 	private KafkaCommunicationResult kafkaCommunicationResult;
 
 	public KafkaConsumingHealthIndicator(KafkaHealthProperties kafkaHealthProperties,
 			Map<String, Object> kafkaConsumerProperties, Map<String, Object> kafkaProducerProperties) {
+		this(kafkaHealthProperties, kafkaConsumerProperties, kafkaProducerProperties, null);
+	}
+
+	public KafkaConsumingHealthIndicator(KafkaHealthProperties kafkaHealthProperties,
+			Map<String, Object> kafkaConsumerProperties, Map<String, Object> kafkaProducerProperties,
+			MeterRegistry meterRegistry) {
 
 		logger.info("Initializing kafka health check with properties: {}", kafkaHealthProperties);
 		this.topic = kafkaHealthProperties.getTopic();
@@ -71,7 +81,8 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 
 		Map<String, Object> kafkaConsumerPropertiesCopy = new HashMap<>(kafkaConsumerProperties);
 
-		setConsumerGroup(kafkaConsumerPropertiesCopy);
+		this.consumerGroupId = getUniqueConsumerGroupId(kafkaConsumerPropertiesCopy);
+		kafkaConsumerPropertiesCopy.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
 
 		StringDeserializer deserializer = new StringDeserializer();
 		StringSerializer serializer = new StringSerializer();
@@ -81,8 +92,9 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 
 		this.executor = Executors.newSingleThreadExecutor();
 		this.running = new AtomicBoolean(true);
-		this.cache =
-				Caffeine.newBuilder().expireAfterWrite(sendReceiveTimeout).build();
+		this.cache = Caffeine.newBuilder().expireAfterWrite(sendReceiveTimeout).build();
+
+		enableCacheMetrics(cache, meterRegistry);
 
 		this.kafkaCommunicationResult =
 				KafkaCommunicationResult.failure(new RejectedExecutionException("Kafka Health Check is starting."));
@@ -93,8 +105,7 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 		subscribeToTopic();
 
 		if (kafkaCommunicationResult.isFailure()) {
-			throw new BeanInitializationException("Kafka health check failed",
-					kafkaCommunicationResult.getException());
+			throw new BeanInitializationException("Kafka health check failed", kafkaCommunicationResult.getException());
 		}
 
 		executor.submit(() -> {
@@ -113,12 +124,11 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 		consumer.close();
 	}
 
-	private void setConsumerGroup(Map<String, Object> kafkaConsumerProperties) {
+	private String getUniqueConsumerGroupId(Map<String, Object> kafkaConsumerProperties) {
 		try {
 			String groupId = (String) kafkaConsumerProperties.getOrDefault(ConsumerConfig.GROUP_ID_CONFIG,
 					UUID.randomUUID().toString());
-			kafkaConsumerProperties.put(ConsumerConfig.GROUP_ID_CONFIG,
-					CONSUMER_GROUP_PREFIX + groupId + "-" + InetAddress.getLocalHost().getHostAddress());
+			return CONSUMER_GROUP_PREFIX + groupId + "-" + InetAddress.getLocalHost().getHostAddress();
 		} catch (UnknownHostException e) {
 			throw new IllegalStateException(e);
 		}
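The refactoring above keeps the generated consumer group id in a field so it can later tag the cache metrics. The standalone sketch below only illustrates how that id is built, following the hunk above; the class name and main method are illustrative, and the printed address depends on the local host.

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

import org.apache.kafka.clients.consumer.ConsumerConfig;

public class ConsumerGroupIdSketch {

    private static final String CONSUMER_GROUP_PREFIX = "health-check-";

    public static void main(String[] args) throws UnknownHostException {
        Map<String, Object> kafkaConsumerProperties = new HashMap<>();

        // Without a configured group.id a random UUID is used, mirroring getUniqueConsumerGroupId().
        String groupId = (String) kafkaConsumerProperties.getOrDefault(ConsumerConfig.GROUP_ID_CONFIG,
                UUID.randomUUID().toString());

        // e.g. health-check-5f6c...-192.168.1.10; this value also becomes the "instance" tag on the cache metrics.
        String uniqueGroupId = CONSUMER_GROUP_PREFIX + groupId + "-" + InetAddress.getLocalHost().getHostAddress();
        System.out.println(uniqueGroupId);
    }
}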
@@ -203,8 +213,7 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 		if (kafkaCommunicationResult.isFailure()) {
 			goDown(builder);
 		} else {
-			builder.down(new TimeoutException(
-					"Sending and receiving took longer than " + sendReceiveTimeout ))
+			builder.down(new TimeoutException("Sending and receiving took longer than " + sendReceiveTimeout))
 					.withDetail("topic", topic);
 		}
 
@@ -216,4 +225,13 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 	private void goDown(Health.Builder builder) {
 		builder.down(kafkaCommunicationResult.getException()).withDetail("topic", topic);
 	}
+
+	private void enableCacheMetrics(Cache<String, String> cache, MeterRegistry meterRegistry) {
+		if (meterRegistry == null) {
+			return;
+		}
+
+		CaffeineCacheMetrics.monitor(meterRegistry, cache, CACHE_NAME,
+				Collections.singletonList(Tag.of("instance", consumerGroupId)));
+	}
 }
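CaffeineCacheMetrics.monitor registers the standard Caffeine meters (for example cache.size, cache.gets and cache.puts) under the cache name kafka-health-check, tagged with the unique consumer group id. A small, self-contained sketch of that registration follows; it is not taken from this commit, the tag value is made up, and recordStats() is added here because Micrometer's hit/miss counters only report data when Caffeine statistics recording is enabled.

import java.time.Duration;
import java.util.Collections;

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Tag;
import io.micrometer.core.instrument.binder.cache.CaffeineCacheMetrics;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

public class CacheMetricsSketch {

    public static void main(String[] args) {
        MeterRegistry meterRegistry = new SimpleMeterRegistry();

        // Same builder call as the health indicator, plus recordStats() so hit/miss counts are populated.
        Cache<String, String> cache = Caffeine.newBuilder()
                .expireAfterWrite(Duration.ofMillis(200))
                .recordStats()
                .build();

        // Mirrors enableCacheMetrics(): cache name "kafka-health-check" plus an "instance" tag.
        CaffeineCacheMetrics.monitor(meterRegistry, cache, "kafka-health-check",
                Collections.singletonList(Tag.of("instance", "health-check-example-127.0.0.1")));

        cache.put("some-id", "some-id");
        cache.getIfPresent("some-id");

        // Lists the registered meters, e.g. cache.size and cache.gets, all tagged with the cache name.
        meterRegistry.getMeters().forEach(meter -> System.out.println(meter.getId()));
    }
}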
@@ -3,6 +3,7 @@ package com.deviceinsight.kafka.health;
 import static com.deviceinsight.kafka.health.KafkaConsumingHealthIndicatorTest.TOPIC;
 import static org.assertj.core.api.Assertions.assertThat;
 
+import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
 import kafka.server.KafkaServer;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.common.serialization.StringDeserializer;