feat: Kafka producer config, ability to set batch.size
feat: handling producer events
brunodomenici committed Dec 12, 2024
1 parent 982925c commit 19418e6
Showing 4 changed files with 40 additions and 13 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -42,6 +42,7 @@ Prometheus-kafka-adapter listens for metrics coming from Prometheus and sends th
 - `KAFKA_TOPIC`: defines the Kafka topic to be used, defaults to `metrics`. A Go template can be used; labels are passed (as a map) to the template, e.g. `metrics.{{ index . "__name__" }}` for a per-metric topic. Two template functions are available: replace (`{{ index . "__name__" | replace "message" "msg" }}`) and substring (`{{ index . "__name__" | substring 0 5 }}`); see the sketch after this list.
 - `KAFKA_COMPRESSION`: defines the compression type to be used, defaults to `none`.
 - `KAFKA_BATCH_NUM_MESSAGES`: defines the number of messages to batch write, defaults to `10000`.
+- `KAFKA_BATCH_SIZE`: defines the maximum size (in bytes) of all messages batched in one MessageSet, including protocol framing overhead, defaults to `1000000`.
 - `KAFKA_LINGER_MS`: defines the delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches, defaults to `5`.
 - `SERIALIZATION_FORMAT`: defines the serialization format, can be `json`, `avro-json`, defaults to `json`.
 - `PORT`: defines the HTTP port to listen on, defaults to `8080`, used directly by [gin](https://github.com/gin-gonic/gin).
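To make the `KAFKA_TOPIC` templating concrete, here is a minimal, self-contained sketch of how such a template could be evaluated against a metric's label map. This is an illustration only, not the adapter's actual code: `renderTopic` and the bodies of `replace` and `substring` are assumptions for demonstration. Note that Go template pipelines pass the piped value as the last argument.

```go
package main

import (
	"bytes"
	"fmt"
	"strings"
	"text/template"
)

// renderTopic evaluates a topic template such as
// `metrics.{{ index . "__name__" }}` against a metric's label map.
// Hypothetical helper for illustration; not the adapter's source.
func renderTopic(tmpl string, labels map[string]string) (string, error) {
	funcs := template.FuncMap{
		// In `{{ index . "__name__" | replace "message" "msg" }}`,
		// the piped value arrives as the final argument.
		"replace": func(old, repl, s string) string {
			return strings.ReplaceAll(s, old, repl)
		},
		"substring": func(start, end int, s string) string {
			if end > len(s) {
				end = len(s)
			}
			return s[start:end]
		},
	}
	t, err := template.New("topic").Funcs(funcs).Parse(tmpl)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if err := t.Execute(&buf, labels); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	topic, _ := renderTopic(`metrics.{{ index . "__name__" }}`, map[string]string{"__name__": "up"})
	fmt.Println(topic) // metrics.up
}
```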
5 changes: 5 additions & 0 deletions config.go
@@ -37,6 +37,7 @@ var (
 	basicauthPassword      = ""
 	kafkaCompression       = "none"
 	kafkaBatchNumMessages  = "10000"
+	kafkaBatchSize         = "1000000"
 	kafkaLingerMs          = "5"
 	kafkaSslClientCertFile = ""
 	kafkaSslClientKeyFile  = ""
@@ -83,6 +84,10 @@ func init() {
 		kafkaBatchNumMessages = value
 	}
 
+	if value := os.Getenv("KAFKA_BATCH_SIZE"); value != "" {
+		kafkaBatchSize = value
+	}
+
 	if value := os.Getenv("KAFKA_LINGER_MS"); value != "" {
 		kafkaLingerMs = value
 	}
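As an aside, the lookup-and-override blocks in `init()` all follow one pattern and could be collapsed into a small helper. A sketch under the assumption that an empty variable should keep the default; `envOr` is a hypothetical name, not part of this commit:

```go
package main

import "os"

// envOr returns the environment variable named by key,
// falling back to def when it is unset or empty.
func envOr(key, def string) string {
	if value := os.Getenv(key); value != "" {
		return value
	}
	return def
}

// Example: kafkaBatchSize = envOr("KAFKA_BATCH_SIZE", "1000000")
```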
29 changes: 25 additions & 4 deletions handlers.go
@@ -76,13 +76,34 @@ func receiveHandler(producer *kafka.Producer, serializer Serializer) func(c *gin
 				Value:          metric,
 			}, nil)
 
+			go func() {
+				for event := range producer.Events() {
+					switch ev := event.(type) {
+					case *kafka.Message:
+						message := ev
+						if message.TopicPartition.Error != nil {
+							logrus.WithError(message.TopicPartition.Error).Errorf("failed to deliver message: %v",
+								message.TopicPartition)
+						} else {
+							logrus.Debugf("delivered to topic %s [%d] at offset %v",
+								*message.TopicPartition.Topic,
+								message.TopicPartition.Partition,
+								message.TopicPartition.Offset)
+						}
+					case kafka.Error:
+						logrus.WithError(ev).Errorf("Error: %v", ev)
+					default:
+						logrus.Infof("Ignored event: %s", ev)
+					}
+				}
+			}()
+
 			if err != nil {
 				if err.(kafka.Error).Code() == kafka.ErrQueueFull {
-					// Producer queue is full, wait 1s for messages
-					// to be delivered then try again.
-					logrus.Info("producer queue is full, waiting 1s")
+					// Producer queue is full; wait 1s for messages to be delivered,
+					// then try again. Maybe we should fail fast, as we are losing data...
+					logrus.Warning("producer queue is full, waiting 1s")
 					time.Sleep(time.Second)
 					continue
 				}
 
 				objectsFailed.Add(float64(1))
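One caveat worth flagging: the added goroutine is started inside the request handler, so every produce loop can add another reader ranging over the same `producer.Events()` channel. A minimal alternative sketch — a single loop started once, right after the producer is created. The function name is hypothetical, and the import path assumes v1 of confluent-kafka-go; adjust for v2 if needed:

```go
package main

import (
	"github.com/confluentinc/confluent-kafka-go/kafka"
	"github.com/sirupsen/logrus"
)

// startDeliveryLoop drains delivery reports and producer errors in a
// single goroutine for the lifetime of the producer. Call it once,
// right after kafka.NewProducer succeeds.
func startDeliveryLoop(producer *kafka.Producer) {
	go func() {
		for event := range producer.Events() {
			switch ev := event.(type) {
			case *kafka.Message:
				if ev.TopicPartition.Error != nil {
					logrus.WithError(ev.TopicPartition.Error).
						Errorf("failed to deliver message: %v", ev.TopicPartition)
				}
			case kafka.Error:
				logrus.WithError(ev).Error("kafka producer error")
			default:
				logrus.Infof("ignored event: %s", ev)
			}
		}
	}()
}
```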
18 changes: 9 additions & 9 deletions main.go
@@ -28,15 +28,15 @@ func main() {
logrus.Info("creating kafka producer")

kafkaConfig := kafka.ConfigMap{
"bootstrap.servers": kafkaBrokerList,
"compression.codec": kafkaCompression,
"batch.num.messages": kafkaBatchNumMessages,
"queue.buffering.max.messages": kafkaBatchNumMessages,
"enable.idempotence": true,
"linger.ms": kafkaLingerMs,
"go.batch.producer": true, // Enable batch producer (for increased performance).
"go.delivery.reports": false, // per-message delivery reports to the Events() channel
"acks": kafkaAcks,
"bootstrap.servers": kafkaBrokerList,
"compression.codec": kafkaCompression,
"batch.num.messages": kafkaBatchNumMessages,
"batch.size": kafkaBatchSize,
"linger.ms": kafkaLingerMs,
"go.batch.producer": true, // Enable batch producer (for increased performance).
"go.delivery.reports": true, // per-message delivery reports to the Events() channel
"go.logs.channel.enable": true,
"acks": kafkaAcks,
}

if kafkaSslClientCertFile != "" && kafkaSslClientKeyFile != "" && kafkaSslCACertFile != "" {
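In librdkafka, a batch closes when the first of its limits is reached: `batch.size` caps it in bytes, `batch.num.messages` caps it in message count, and `linger.ms` bounds how long the producer waits to fill it. And since `go.delivery.reports` is enabled above, outstanding reports are worth draining before exit. A minimal shutdown sketch, assuming the `kafkaConfig` shown here; the 5-second flush timeout is an arbitrary choice, not a value from this commit:

```go
producer, err := kafka.NewProducer(&kafkaConfig)
if err != nil {
	logrus.WithError(err).Fatal("couldn't create kafka producer")
}
defer func() {
	// Flush waits up to the given number of milliseconds for queued
	// messages and outstanding delivery reports before returning.
	if remaining := producer.Flush(5000); remaining > 0 {
		logrus.Warnf("%d message(s) still undelivered after flush", remaining)
	}
	producer.Close()
}()
```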
