Skip to content

Commit

Permalink
Add sending USER_END events to Kafka (#1388)
Browse files Browse the repository at this point in the history
  • Loading branch information
leszko authored Nov 15, 2024
1 parent e60920a commit d2d4f40
Show file tree
Hide file tree
Showing 8 changed files with 243 additions and 112 deletions.
20 changes: 8 additions & 12 deletions api/http.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ import (
"net/http"
"time"

"github.com/golang/glog"
"github.com/julienschmidt/httprouter"
"github.com/livepeer/catalyst-api/balancer"
"github.com/livepeer/catalyst-api/config"
Expand Down Expand Up @@ -64,17 +63,14 @@ func NewCatalystAPIRouter(cli config.Cli, vodEngine *pipeline.Coordinator, bal b
router.GET("/healthcheck", withLogging(catalystApiHandlers.Healthcheck()))

if cli.EnableAnalytics == "true" || cli.EnableAnalytics == "enabled" {
logProcessor, err := analytics.NewLogProcessor(cli.KafkaBootstrapServers, cli.KafkaUser, cli.KafkaPassword, cli.AnalyticsKafkaTopic)
if err != nil {
glog.Fatalf("failed to configure analytics log processor, err=%v", err)
} else {
analyticsApiHandlers := handlers.NewAnalyticsHandlersCollection(mapic, lapi, logProcessor)
router.POST("/analytics/log", withCORS(analyticsApiHandlers.Log()))
// Redirect GET /analytics/log to the specific catalyst node, e.g. "mdw-staging-staging-catalyst-0.livepeer.monster"
// This is useful for the player, because then it can stick to one node while sending analytics logs
router.GET("/analytics/log", withLogging(withCORS(geoHandlers.RedirectConstPathHandler())))
router.HEAD("/analytics/log", withLogging(withCORS(geoHandlers.RedirectConstPathHandler())))
}
logProcessor := analytics.NewLogProcessor(cli.KafkaBootstrapServers, cli.KafkaUser, cli.KafkaPassword, cli.AnalyticsKafkaTopic)

analyticsApiHandlers := handlers.NewAnalyticsHandlersCollection(mapic, lapi, logProcessor)
router.POST("/analytics/log", withCORS(analyticsApiHandlers.Log()))
// Redirect GET /analytics/log to the specific catalyst node, e.g. "mdw-staging-staging-catalyst-0.livepeer.monster"
// This is useful for the player, because then it can stick to one node while sending analytics logs
router.GET("/analytics/log", withLogging(withCORS(geoHandlers.RedirectConstPathHandler())))
router.HEAD("/analytics/log", withLogging(withCORS(geoHandlers.RedirectConstPathHandler())))
}

// Playback endpoint
Expand Down
2 changes: 1 addition & 1 deletion api/http_internal.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ func NewCatalystAPIRouterInternal(cli config.Cli, vodEngine *pipeline.Coordinato
eventsHandler := handlers.NewEventsHandlersCollection(c, mapic, bal, eventsEndpoint)
ffmpegSegmentingHandlers := &ffmpeg.HandlersCollection{VODEngine: vodEngine}
accessControlHandlers := accesscontrol.NewAccessControlHandlersCollection(cli, mapic)
analyticsHandlers := analytics.NewAnalyticsHandler(metricsDB)
analyticsHandlers := analytics.NewAnalyticsHandler(cli, metricsDB)
encryptionHandlers := accesscontrol.NewEncryptionHandlersCollection(cli, spkiPublicKey)
adminHandlers := &admin.AdminHandlersCollection{Cluster: c}
mistCallbackHandlers := misttriggers.NewMistCallbackHandlersCollection(cli, broker)
Expand Down
1 change: 1 addition & 0 deletions config/cli.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@ type Cli struct {
KafkaUser string
KafkaPassword string
AnalyticsKafkaTopic string
UserEndKafkaTopic string
SerfMembersEndpoint string
EventsEndpoint string
CatalystApiURL string
Expand Down
59 changes: 59 additions & 0 deletions handlers/analytics/kafka.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
package analytics

import (
"context"
"crypto/tls"

"github.com/golang/glog"
"github.com/livepeer/catalyst-api/metrics"
"github.com/segmentio/kafka-go"
"github.com/segmentio/kafka-go/sasl/plain"
)

// sendWithRetries writes msgs to Kafka, retrying up to three times on failure.
// We don't use any backoff, because the events fill up very quickly, so in case
// of a persistent failure it's better to lose events than fill up the memory
// and crash the whole catalyst-api. If all retries fail, the events are dropped
// and the LogProcessorWriteErrors metric is incremented.
func sendWithRetries(writer *kafka.Writer, msgs []kafka.Message) {
	const kafkaWriteRetries = 3
	var err error
	for i := 0; i < kafkaWriteRetries; i++ {
		err = writer.WriteMessages(context.Background(), msgs...)
		if err == nil {
			return
		}
		glog.Warningf("error while sending analytics log to Kafka, retrying, try=%d, err=%v", i, err)
	}
	metrics.Metrics.AnalyticsMetrics.LogProcessorWriteErrors.Inc()
	// NOTE: err is an error value, so the correct fmt verb is %v, not %d.
	glog.Errorf("error while sending events to Kafka, the events are lost, err=%v", err)
}

// logWriteMetrics snapshots the writer's cumulative Kafka statistics and
// records them into the analytics Prometheus metrics.
func logWriteMetrics(writer *kafka.Writer) {
	s := writer.Stats()
	am := metrics.Metrics.AnalyticsMetrics
	am.KafkaWriteErrors.Add(float64(s.Errors))
	am.KafkaWriteMessages.Add(float64(s.Messages))
	am.KafkaWriteAvgTime.Observe(s.WriteTime.Avg.Seconds())
	am.KafkaWriteRetries.Add(float64(s.Retries))
}

// newWriter constructs a Kafka writer for the given topic, authenticating
// with SASL/PLAIN over TLS (minimum TLS 1.2) against bootstrapServers.
func newWriter(bootstrapServers, user, password, topic string) *kafka.Writer {
	cfg := kafka.WriterConfig{
		Brokers:  []string{bootstrapServers},
		Topic:    topic,
		Balancer: kafka.CRC32Balancer{},
		Dialer: &kafka.Dialer{
			Timeout: kafkaRequestTimeout,
			SASLMechanism: plain.Mechanism{
				Username: user,
				Password: password,
			},
			DualStack: true,
			TLS: &tls.Config{
				MinVersion: tls.VersionTLS12,
			},
		},
	}
	return kafka.NewWriter(cfg)
}
60 changes: 8 additions & 52 deletions handlers/analytics/log_processor.go
Original file line number Diff line number Diff line change
@@ -1,20 +1,17 @@
package analytics

import (
"context"
"crypto/tls"
"encoding/json"
"time"

"github.com/golang/glog"
"github.com/livepeer/catalyst-api/metrics"
"github.com/segmentio/kafka-go"
"github.com/segmentio/kafka-go/sasl/plain"
)

const (
KafkaBatchInterval = 1 * time.Second
KafkaRequestTimeout = 60 * time.Second
kafkaBatchInterval = 1 * time.Second
kafkaRequestTimeout = 60 * time.Second
)

type ILogProcessor interface {
Expand Down Expand Up @@ -95,37 +92,18 @@ type KafkaKey struct {
EventType string `json:"event_type"`
}

func NewLogProcessor(bootstrapServers, user, password, topic string) (*LogProcessor, error) {
dialer := &kafka.Dialer{
Timeout: KafkaRequestTimeout,
SASLMechanism: plain.Mechanism{
Username: user,
Password: password,
},
DualStack: true,
TLS: &tls.Config{
MinVersion: tls.VersionTLS12,
},
}

// Create a new Kafka writer
writer := kafka.NewWriter(kafka.WriterConfig{
Brokers: []string{bootstrapServers},
Topic: topic,
Balancer: kafka.CRC32Balancer{},
Dialer: dialer,
})

func NewLogProcessor(bootstrapServers, user, password, topic string) *LogProcessor {
writer := newWriter(bootstrapServers, user, password, topic)
return &LogProcessor{
logs: []LogData{},
writer: writer,
topic: topic,
}, nil
}
}

// Start starts LogProcessor which sends events to Kafka in batches.
func (lp *LogProcessor) Start(ch chan LogData) {
t := time.NewTicker(KafkaBatchInterval)
t := time.NewTicker(kafkaBatchInterval)
go func() {
for {
select {
Expand Down Expand Up @@ -156,7 +134,7 @@ func updateMetrics(d LogData) {
}

func (p *LogProcessor) sendEvents() {
defer p.logWriteMetrics()
defer logWriteMetrics(p.writer)

if len(p.logs) > 0 {
glog.Infof("sending analytics logs, count=%d", len(p.logs))
Expand Down Expand Up @@ -184,27 +162,5 @@ func (p *LogProcessor) sendEvents() {
}
p.logs = []LogData{}

// We retry sending messages to Kafka in case of a failure
// We don't use any backoff, because the number of events are filling up very quickly, so in case of a failure
// it's better to lose analytics logs than fill up the memory and crash the whole catalyst-api
kafkaWriteRetries := 3
var err error
for i := 0; i < kafkaWriteRetries; i++ {
err = p.writer.WriteMessages(context.Background(), msgs...)
if err == nil {
return
} else {
glog.Warningf("error while sending analytics log to Kafka, retrying, try=%d, err=%v", i, err)
}
}
metrics.Metrics.AnalyticsMetrics.LogProcessorWriteErrors.Inc()
glog.Errorf("error while sending analytics log to Kafka, the analytics logs are lost, err=%d", err)
}

func (p *LogProcessor) logWriteMetrics() {
stats := p.writer.Stats()
metrics.Metrics.AnalyticsMetrics.KafkaWriteErrors.Add(float64(stats.Errors))
metrics.Metrics.AnalyticsMetrics.KafkaWriteMessages.Add(float64(stats.Messages))
metrics.Metrics.AnalyticsMetrics.KafkaWriteAvgTime.Observe(stats.WriteTime.Avg.Seconds())
metrics.Metrics.AnalyticsMetrics.KafkaWriteRetries.Add(float64(stats.Retries))
sendWithRetries(p.writer, msgs)
}
Loading

0 comments on commit d2d4f40

Please sign in to comment.