Merge pull request #21 from deviceinsight/feature/ISSUE-20
ISSUE-20: Expose cache metrics
commit bb6650e1bc
@@ -7,6 +7,8 @@
   millisecond values (`long`) as well to stay compatible with old configurations.
 * Dependency versions are now managed by `spring-boot-dependencies`.
   (https://github.com/deviceinsight/kafka-health-check/issues/17[ISSUE-17])
+* As of now, cache metrics can be exposed. For this purpose, a corresponding MeterRegistry instance must be passed
+  when instantiating the Kafka Health Check. (https://github.com/deviceinsight/kafka-health-check/issues/20[ISSUE-20])
 
 == Version 1.2.0
 
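Note (not part of the commit): the changelog entry above describes the new optional MeterRegistry argument. Below is a minimal, hedged usage sketch in plain Java; the topic name, bootstrap servers, and the SimpleMeterRegistry are illustrative assumptions (in a Spring Boot service the registry would normally come from Micrometer/Actuator auto-configuration), and the setTopic(...) setter on KafkaHealthProperties is assumed rather than shown in this diff.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;

import com.deviceinsight.kafka.health.KafkaConsumingHealthIndicator;
import com.deviceinsight.kafka.health.KafkaHealthProperties;

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

public class KafkaHealthCheckMetricsExample {

	public static void main(String[] args) {
		// Health check configuration; "health-checks" is an illustrative topic name and
		// setTopic(...) is an assumed setter on the properties class.
		KafkaHealthProperties healthProperties = new KafkaHealthProperties();
		healthProperties.setTopic("health-checks");

		// Minimal consumer/producer configuration; a reachable broker is assumed.
		Map<String, Object> consumerProperties = new HashMap<>();
		consumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

		Map<String, Object> producerProperties = new HashMap<>();
		producerProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

		// Passing a MeterRegistry enables the Caffeine cache metrics added by this PR.
		// A SimpleMeterRegistry keeps the sketch self-contained.
		MeterRegistry meterRegistry = new SimpleMeterRegistry();
		KafkaConsumingHealthIndicator healthIndicator = new KafkaConsumingHealthIndicator(
				healthProperties, consumerProperties, producerProperties, meterRegistry);
	}
}

Passing null (or using the existing three-argument constructor, which delegates with null) keeps the previous behaviour and registers no metrics.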
pom.xml (6 changed lines)
@@ -16,7 +16,7 @@
 	<parent>
 		<groupId>org.springframework.boot</groupId>
 		<artifactId>spring-boot-dependencies</artifactId>
-		<version>2.3.8.RELEASE</version>
+		<version>2.4.4</version>
 		<relativePath />
 	</parent>
 
@@ -30,7 +30,7 @@
 		<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
 
 		<!-- Versions -->
-		<guava.version>30.1-jre</guava.version>
+		<guava.version>30.1.1-jre</guava.version>
 
 		<nexus-staging-maven-plugin.version>1.6.8</nexus-staging-maven-plugin.version>
 		<maven-gpg-plugin.version>1.6</maven-gpg-plugin.version>
@@ -134,7 +134,7 @@
 
 	<developers>
 		<developer>
-			<id>ManuZiD</id>
+			<id>ezienecker</id>
 			<name>Emanuel Zienecker</name>
 			<email>emanuel.zienecker@device-insight.com</email>
 			<roles>
KafkaConsumingHealthIndicator.java

@@ -4,6 +4,9 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 import com.github.benmanes.caffeine.cache.Cache;
 import com.github.benmanes.caffeine.cache.Caffeine;
+import io.micrometer.core.instrument.MeterRegistry;
+import io.micrometer.core.instrument.Tag;
+import io.micrometer.core.instrument.binder.cache.CaffeineCacheMetrics;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
@@ -44,9 +47,9 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 
 	private static final Logger logger = LoggerFactory.getLogger(KafkaConsumingHealthIndicator.class);
 	private static final String CONSUMER_GROUP_PREFIX = "health-check-";
+	private static final String CACHE_NAME = "kafka-health-check";
 
 	private final Consumer<String, String> consumer;
 
 	private final Producer<String, String> producer;
 
 	private final String topic;
@@ -57,11 +60,18 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 	private final ExecutorService executor;
 	private final AtomicBoolean running;
 	private final Cache<String, String> cache;
+	private final String consumerGroupId;
 
 	private KafkaCommunicationResult kafkaCommunicationResult;
 
 	public KafkaConsumingHealthIndicator(KafkaHealthProperties kafkaHealthProperties,
 			Map<String, Object> kafkaConsumerProperties, Map<String, Object> kafkaProducerProperties) {
+		this(kafkaHealthProperties, kafkaConsumerProperties, kafkaProducerProperties, null);
+	}
+
+	public KafkaConsumingHealthIndicator(KafkaHealthProperties kafkaHealthProperties,
+			Map<String, Object> kafkaConsumerProperties, Map<String, Object> kafkaProducerProperties,
+			MeterRegistry meterRegistry) {
 
 		logger.info("Initializing kafka health check with properties: {}", kafkaHealthProperties);
 		this.topic = kafkaHealthProperties.getTopic();
@@ -71,7 +81,8 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 
 		Map<String, Object> kafkaConsumerPropertiesCopy = new HashMap<>(kafkaConsumerProperties);
 
-		setConsumerGroup(kafkaConsumerPropertiesCopy);
+		this.consumerGroupId = getUniqueConsumerGroupId(kafkaConsumerPropertiesCopy);
+		kafkaConsumerPropertiesCopy.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
 
 		StringDeserializer deserializer = new StringDeserializer();
 		StringSerializer serializer = new StringSerializer();
@@ -81,8 +92,9 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 
 		this.executor = Executors.newSingleThreadExecutor();
 		this.running = new AtomicBoolean(true);
-		this.cache =
-				Caffeine.newBuilder().expireAfterWrite(sendReceiveTimeout).build();
+		this.cache = Caffeine.newBuilder().expireAfterWrite(sendReceiveTimeout).build();
+
+		enableCacheMetrics(cache, meterRegistry);
 
 		this.kafkaCommunicationResult =
 				KafkaCommunicationResult.failure(new RejectedExecutionException("Kafka Health Check is starting."));
@@ -93,8 +105,7 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 		subscribeToTopic();
 
 		if (kafkaCommunicationResult.isFailure()) {
-			throw new BeanInitializationException("Kafka health check failed",
-					kafkaCommunicationResult.getException());
+			throw new BeanInitializationException("Kafka health check failed", kafkaCommunicationResult.getException());
 		}
 
 		executor.submit(() -> {
@@ -113,12 +124,11 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 		consumer.close();
 	}
 
-	private void setConsumerGroup(Map<String, Object> kafkaConsumerProperties) {
+	private String getUniqueConsumerGroupId(Map<String, Object> kafkaConsumerProperties) {
 		try {
 			String groupId = (String) kafkaConsumerProperties.getOrDefault(ConsumerConfig.GROUP_ID_CONFIG,
 					UUID.randomUUID().toString());
-			kafkaConsumerProperties.put(ConsumerConfig.GROUP_ID_CONFIG,
-					CONSUMER_GROUP_PREFIX + groupId + "-" + InetAddress.getLocalHost().getHostAddress());
+			return CONSUMER_GROUP_PREFIX + groupId + "-" + InetAddress.getLocalHost().getHostAddress();
 		} catch (UnknownHostException e) {
 			throw new IllegalStateException(e);
 		}
@@ -203,8 +213,7 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 		if (kafkaCommunicationResult.isFailure()) {
 			goDown(builder);
 		} else {
-			builder.down(new TimeoutException(
-					"Sending and receiving took longer than " + sendReceiveTimeout ))
+			builder.down(new TimeoutException("Sending and receiving took longer than " + sendReceiveTimeout))
 					.withDetail("topic", topic);
 		}
 
@@ -216,4 +225,13 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 	private void goDown(Health.Builder builder) {
 		builder.down(kafkaCommunicationResult.getException()).withDetail("topic", topic);
 	}
+
+	private void enableCacheMetrics(Cache<String, String> cache, MeterRegistry meterRegistry) {
+		if (meterRegistry == null) {
+			return;
+		}
+
+		CaffeineCacheMetrics.monitor(meterRegistry, cache, CACHE_NAME,
+				Collections.singletonList(Tag.of("instance", consumerGroupId)));
+	}
 }
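Note (not part of the commit): the new enableCacheMetrics method binds the health check's Caffeine cache to the supplied registry via Micrometer's CaffeineCacheMetrics, under the cache name kafka-health-check and an instance tag carrying the unique consumer group id (health-check-<group id>-<host address>). The sketch below shows how the resulting meters could be looked up from a registry; the meter name cache.gets and the cache tag key follow Micrometer's cache binder conventions and are assumptions, not something this diff defines.

import java.util.Collection;

import io.micrometer.core.instrument.Meter;
import io.micrometer.core.instrument.MeterRegistry;

public final class KafkaHealthCheckCacheMeters {

	private KafkaHealthCheckCacheMeters() {
	}

	/**
	 * Returns the meters registered for the health check's cache.
	 * Assumes a KafkaConsumingHealthIndicator was created with this registry.
	 */
	public static Collection<Meter> lookup(MeterRegistry meterRegistry) {
		return meterRegistry.find("cache.gets")       // assumed Micrometer cache meter name
				.tag("cache", "kafka-health-check")   // CACHE_NAME used by the indicator
				.meters();                            // empty if no MeterRegistry was passed
	}
}

Because each instance attaches its unique consumer group id as the instance tag, metrics from multiple application instances remain distinguishable in a shared monitoring backend.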
@@ -3,6 +3,7 @@ package com.deviceinsight.kafka.health;
 import static com.deviceinsight.kafka.health.KafkaConsumingHealthIndicatorTest.TOPIC;
 import static org.assertj.core.api.Assertions.assertThat;
 
+import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
 import kafka.server.KafkaServer;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.common.serialization.StringDeserializer;