ISSUE-24: Filtering messages that do not come from the same instance

This commit is contained in:
Emanuel Zienecker 2021-04-13 11:56:23 +02:00
parent c1b2ea0c35
commit 6749017a48
2 changed files with 9 additions and 3 deletions

View File

@@ -11,6 +11,8 @@
 when instantiating the Kafka Health Check. (https://github.com/deviceinsight/kafka-health-check/issues/20[ISSUE-20])
 * The cache size can now be configured via the property `kafka.health.cache.maximum-size`.
 The default value for the cache size is 200. (https://github.com/deviceinsight/kafka-health-check/issues/22[ISSUE-22])
+* Filtering messages that do not come from the same instance.
+(https://github.com/deviceinsight/kafka-health-check/issues/24[ISSUE-24])
 == Version 1.2.0

View File

@@ -39,6 +39,7 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.StreamSupport;
 import javax.annotation.PostConstruct;
 import javax.annotation.PreDestroy;
@@ -114,7 +115,9 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 		executor.submit(() -> {
 			while (running.get()) {
 				ConsumerRecords<String, String> records = consumer.poll(pollTimeout);
-				records.forEach(record -> cache.put(record.key(), record.value()));
+				StreamSupport.stream(records.spliterator(), false)
+						.filter(record -> record.key() != null && record.key().equals(consumerGroupId))
+						.forEach(record -> cache.put(record.key(), record.value()));
 			}
 		});
 	}
@@ -191,7 +194,8 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 		logger.trace("Send health check message = {}", message);
-		producer.send(new ProducerRecord<>(topic, message, message)).get(sendReceiveTimeout.toMillis(), MILLISECONDS);
+		producer.send(new ProducerRecord<>(topic, consumerGroupId, message))
+				.get(sendReceiveTimeout.toMillis(), MILLISECONDS);
 		return message;
 	}
@@ -206,7 +210,7 @@ public class KafkaConsumingHealthIndicator extends AbstractHealthIndicator {
 		long startTime = System.currentTimeMillis();
 		while (true) {
-			String receivedMessage = cache.getIfPresent(expectedMessage);
+			String receivedMessage = cache.getIfPresent(consumerGroupId);
 			if (expectedMessage.equals(receivedMessage)) {
 				builder.up();