diff --git a/flow/activities/flowable.go b/flow/activities/flowable.go index 9fffed4dc..df78cc3dc 100644 --- a/flow/activities/flowable.go +++ b/flow/activities/flowable.go @@ -283,6 +283,7 @@ func (a *FlowableActivity) MaintainPull( config *protos.FlowConnectionConfigs, sessionID string, ) error { + ctx = context.WithValue(ctx, shared.FlowNameKey, config.FlowJobName) srcConn, err := connectors.GetByNameAs[connectors.CDCPullConnector](ctx, config.Env, a.CatalogPool, config.SourceName) if err != nil { return err @@ -312,6 +313,7 @@ func (a *FlowableActivity) MaintainPull( a.CdcCacheRw.Lock() delete(a.CdcCache, sessionID) a.CdcCacheRw.Unlock() + a.Alerter.LogFlowError(ctx, config.FlowJobName, err) return temporal.NewNonRetryableApplicationError("connection to source down", "disconnect", err) } case <-done: @@ -619,22 +621,40 @@ func (a *FlowableActivity) DropFlowSource(ctx context.Context, req *protos.DropF ctx = context.WithValue(ctx, shared.FlowNameKey, req.FlowJobName) srcConn, err := connectors.GetByNameAs[connectors.CDCPullConnector](ctx, nil, a.CatalogPool, req.PeerName) if err != nil { - return fmt.Errorf("failed to get source connector: %w", err) + srcConnErr := fmt.Errorf("[DropFlowSource] failed to get source connector: %w", err) + a.Alerter.LogFlowError(ctx, req.FlowJobName, srcConnErr) + return srcConnErr } defer connectors.CloseConnector(ctx, srcConn) - return srcConn.PullFlowCleanup(ctx, req.FlowJobName) + err = srcConn.PullFlowCleanup(ctx, req.FlowJobName) + if err != nil { + pullCleanupErr := fmt.Errorf("[DropFlowSource] failed to clean up source: %w", err) + a.Alerter.LogFlowError(ctx, req.FlowJobName, pullCleanupErr) + return pullCleanupErr + } + + return nil } func (a *FlowableActivity) DropFlowDestination(ctx context.Context, req *protos.DropFlowActivityInput) error { ctx = context.WithValue(ctx, shared.FlowNameKey, req.FlowJobName) dstConn, err := connectors.GetByNameAs[connectors.CDCSyncConnector](ctx, nil, a.CatalogPool, req.PeerName) if err != nil { - return fmt.Errorf("failed to get destination connector: %w", err) + dstConnErr := fmt.Errorf("[DropFlowDestination] failed to get destination connector: %w", err) + a.Alerter.LogFlowError(ctx, req.FlowJobName, dstConnErr) + return dstConnErr } defer connectors.CloseConnector(ctx, dstConn) - return dstConn.SyncFlowCleanup(ctx, req.FlowJobName) + err = dstConn.SyncFlowCleanup(ctx, req.FlowJobName) + if err != nil { + syncFlowCleanupErr := fmt.Errorf("[DropFlowDestination] failed to clean up destination: %w", err) + a.Alerter.LogFlowError(ctx, req.FlowJobName, syncFlowCleanupErr) + return syncFlowCleanupErr + } + + return nil } func (a *FlowableActivity) SendWALHeartbeat(ctx context.Context) error { @@ -669,7 +689,7 @@ func (a *FlowableActivity) SendWALHeartbeat(ctx context.Context) error { func() { pgConfig := pgPeer.GetPostgresConfig() - pgConn, peerErr := connpostgres.NewPostgresConnector(ctx, pgConfig) + pgConn, peerErr := connpostgres.NewPostgresConnector(ctx, nil, pgConfig) if peerErr != nil { logger.Error(fmt.Sprintf("error creating connector for postgres peer %s with host %s: %v", pgPeer.Name, pgConfig.Host, peerErr)) @@ -767,6 +787,17 @@ func (a *FlowableActivity) RecordSlotSizes(ctx context.Context) error { return } slotMetricGauges.OpenReplicationConnectionsGauge = openReplicationConnectionsGauge + + intervalSinceLastNormalizeGauge, err := otel_metrics.GetOrInitFloat64SyncGauge(a.OtelManager.Meter, + a.OtelManager.Float64GaugesCache, + 
peerdb_gauges.BuildGaugeName(peerdb_gauges.IntervalSinceLastNormalizeGaugeName), + metric.WithUnit("s"), + metric.WithDescription("Interval since last normalize")) + if err != nil { + logger.Error("Failed to get interval since last normalize gauge", slog.Any("error", err)) + return + } + slotMetricGauges.IntervalSinceLastNormalizeGauge = intervalSinceLastNormalizeGauge } if err := srcConn.HandleSlotInfo(ctx, a.Alerter, a.CatalogPool, &alerting.AlertKeys{ @@ -993,7 +1024,7 @@ func (a *FlowableActivity) RemoveTablesFromRawTable( // we can ignore the error return nil } - return fmt.Errorf("[RemoveTablesFromRawTable]:failed to get destination connector: %w", err) + return fmt.Errorf("[RemoveTablesFromRawTable] failed to get destination connector: %w", err) } defer connectors.CloseConnector(ctx, dstConn) diff --git a/flow/activities/flowable_core.go b/flow/activities/flowable_core.go index bb75daa6d..aabf8776c 100644 --- a/flow/activities/flowable_core.go +++ b/flow/activities/flowable_core.go @@ -144,6 +144,7 @@ func syncCore[TPull connectors.CDCPullConnectorCore, TSync connectors.CDCSyncCon return dstConn.GetLastOffset(ctx, config.FlowJobName) }() if err != nil { + a.Alerter.LogFlowError(ctx, flowName, err) return nil, err } @@ -352,7 +353,7 @@ func (a *FlowableActivity) getPostgresPeerConfigs(ctx context.Context) ([]*proto return nil, err } - peerOptions, err := peerdbenv.Decrypt(encKeyID, encPeerOptions) + peerOptions, err := peerdbenv.Decrypt(ctx, encKeyID, encPeerOptions) if err != nil { return nil, err } diff --git a/flow/alerting/alerting.go b/flow/alerting/alerting.go index 464affff0..8d4163b08 100644 --- a/flow/alerting/alerting.go +++ b/flow/alerting/alerting.go @@ -21,8 +21,9 @@ import ( // alerting service, no cool name :( type Alerter struct { - catalogPool *pgxpool.Pool - telemetrySender telemetry.Sender + CatalogPool *pgxpool.Pool + snsTelemetrySender telemetry.Sender + incidentIoTelemetrySender telemetry.Sender } type AlertSenderConfig struct { @@ -38,7 +39,7 @@ type AlertKeys struct { } func (a *Alerter) registerSendersFromPool(ctx context.Context) ([]AlertSenderConfig, error) { - rows, err := a.catalogPool.Query(ctx, + rows, err := a.CatalogPool.Query(ctx, `SELECT id, service_type, @@ -50,7 +51,7 @@ func (a *Alerter) registerSendersFromPool(ctx context.Context) ([]AlertSenderCon return nil, fmt.Errorf("failed to read alerter config from catalog: %w", err) } - keys := peerdbenv.PeerDBEncKeys() + keys := peerdbenv.PeerDBEncKeys(ctx) return pgx.CollectRows(rows, func(row pgx.CollectableRow) (AlertSenderConfig, error) { var alertSenderConfig AlertSenderConfig var serviceType ServiceType @@ -126,14 +127,31 @@ func NewAlerter(ctx context.Context, catalogPool *pgxpool.Pool) *Alerter { snsMessageSender, err = telemetry.NewSNSMessageSenderWithNewClient(ctx, &telemetry.SNSMessageSenderConfig{ Topic: snsTopic, }) - logger.LoggerFromCtx(ctx).Info("Successfully registered telemetry sender") + logger.LoggerFromCtx(ctx).Info("Successfully registered sns telemetry sender") if err != nil { panic(fmt.Sprintf("unable to setup telemetry is nil for Alerter %+v", err)) } } + + incidentIoURL := peerdbenv.PeerDBGetIncidentIoUrl() + incidentIoAuth := peerdbenv.PeerDBGetIncidentIoToken() + var incidentIoTelemetrySender telemetry.Sender + if incidentIoURL != "" && incidentIoAuth != "" { + var err error + incidentIoTelemetrySender, err = telemetry.NewIncidentIoMessageSender(ctx, telemetry.IncidentIoMessageSenderConfig{ + URL: incidentIoURL, + Token: incidentIoAuth, + }) + 
+		if err != nil {
+			panic(fmt.Sprintf("unable to set up incident.io telemetry sender for Alerter: %+v", err))
+		}
+		logger.LoggerFromCtx(ctx).Info("Successfully registered incident.io telemetry sender")
+	}
+
 	return &Alerter{
-		catalogPool:     catalogPool,
-		telemetrySender: snsMessageSender,
+		CatalogPool:               catalogPool,
+		snsTelemetrySender:        snsMessageSender,
+		incidentIoTelemetrySender: incidentIoTelemetrySender,
 	}
 }
@@ -172,17 +190,29 @@ func (a *Alerter) AlertIfSlotLag(ctx context.Context, alertKeys *AlertKeys, slot
 		`currently at %.2fMB!`, deploymentUIDPrefix, slotInfo.SlotName, alertKeys.PeerName, slotInfo.LagInMb)
 
 	badWalStatusAlertKey := fmt.Sprintf("%s Bad WAL Status for Peer %s", deploymentUIDPrefix, alertKeys.PeerName)
-	badWalStatusAlertMessageTemplate := fmt.Sprintf("%sSlot `%s` on peer `%s` has bad WAL status: `%s`",
+	badWalStatusAlertMessage := fmt.Sprintf("%sSlot `%s` on peer `%s` has bad WAL status: `%s`",
 		deploymentUIDPrefix, slotInfo.SlotName, alertKeys.PeerName, slotInfo.WalStatus)
 
 	for _, alertSenderConfig := range alertSendersForMirrors {
-		if slotInfo.LagInMb > float32(lowestSlotLagMBAlertThreshold) {
-			a.alertToProvider(ctx, alertSenderConfig, thresholdAlertKey,
-				fmt.Sprintf(thresholdAlertMessageTemplate, defaultSlotLagMBAlertThreshold))
+		if a.checkAndAddAlertToCatalog(ctx,
+			alertSenderConfig.Id, thresholdAlertKey,
+			fmt.Sprintf(thresholdAlertMessageTemplate, lowestSlotLagMBAlertThreshold)) {
+			if alertSenderConfig.Sender.getSlotLagMBAlertThreshold() > 0 {
+				if slotInfo.LagInMb > float32(alertSenderConfig.Sender.getSlotLagMBAlertThreshold()) {
+					a.alertToProvider(ctx, alertSenderConfig, thresholdAlertKey,
+						fmt.Sprintf(thresholdAlertMessageTemplate, alertSenderConfig.Sender.getSlotLagMBAlertThreshold()))
+				}
+			} else {
+				if slotInfo.LagInMb > float32(defaultSlotLagMBAlertThreshold) {
+					a.alertToProvider(ctx, alertSenderConfig, thresholdAlertKey,
+						fmt.Sprintf(thresholdAlertMessageTemplate, defaultSlotLagMBAlertThreshold))
+				}
+			}
+		}
-		if slotInfo.WalStatus == "lost" || slotInfo.WalStatus == "unreserved" {
-			a.alertToProvider(ctx, alertSenderConfig, badWalStatusAlertKey, badWalStatusAlertMessageTemplate)
+		if (slotInfo.WalStatus == "lost" || slotInfo.WalStatus == "unreserved") &&
+			a.checkAndAddAlertToCatalog(ctx, alertSenderConfig.Id, badWalStatusAlertKey, badWalStatusAlertMessage) {
+			a.alertToProvider(ctx, alertSenderConfig, badWalStatusAlertKey, badWalStatusAlertMessage)
 		}
 	}
 }
@@ -192,7 +222,7 @@ func (a *Alerter) AlertIfOpenConnections(ctx context.Context, alertKeys *AlertKe
 ) {
 	alertSenderConfigs, err := a.registerSendersFromPool(ctx)
 	if err != nil {
-		logger.LoggerFromCtx(ctx).Warn("failed to set Slack senders", slog.Any("error", err))
+		logger.LoggerFromCtx(ctx).Warn("failed to set alert senders", slog.Any("error", err))
 		return
 	}
 
@@ -244,6 +274,49 @@ func (a *Alerter) AlertIfOpenConnections(ctx context.Context, alertKeys *AlertKe
 	}
 }
 
+func (a *Alerter) AlertIfTooLongSinceLastNormalize(ctx context.Context, alertKeys *AlertKeys,
+	intervalSinceLastNormalize time.Duration,
+) {
+	intervalSinceLastNormalizeThreshold, err := peerdbenv.PeerDBIntervalSinceLastNormalizeThresholdMinutes(ctx, nil)
+	if err != nil {
+		logger.LoggerFromCtx(ctx).
+			Warn("failed to get interval since last normalize threshold from catalog", slog.Any("error", err))
+	}
+
+	if intervalSinceLastNormalizeThreshold == 0 {
+		logger.LoggerFromCtx(ctx).Info("Alerting disabled via environment variable, returning")
+		return
+	}
+	alertSenderConfigs, err := a.registerSendersFromPool(ctx)
+	if err != nil {
+		logger.LoggerFromCtx(ctx).Warn("failed to set alert senders", slog.Any("error", err))
+		return
+	}
+
+	deploymentUIDPrefix := ""
+	if peerdbenv.PeerDBDeploymentUID() != "" {
+		deploymentUIDPrefix = fmt.Sprintf("[%s] - ", peerdbenv.PeerDBDeploymentUID())
+	}
+
+	if intervalSinceLastNormalize > time.Duration(intervalSinceLastNormalizeThreshold)*time.Minute {
+		alertKey := fmt.Sprintf("%s Too long since last data normalize for PeerDB mirror %s",
+			deploymentUIDPrefix, alertKeys.FlowName)
+		alertMessage := fmt.Sprintf("%sData hasn't been synced to the target for mirror `%s` in the last `%s`."+
+			` This could indicate an issue with the pipeline — please check the UI and logs to confirm.`+
+			` Alternatively, it might be that the source database is idle and not receiving new updates.`, deploymentUIDPrefix,
+			alertKeys.FlowName, intervalSinceLastNormalize)
+
+		for _, alertSenderConfig := range alertSenderConfigs {
+			if len(alertSenderConfig.AlertForMirrors) == 0 ||
+				slices.Contains(alertSenderConfig.AlertForMirrors, alertKeys.FlowName) {
+				if a.checkAndAddAlertToCatalog(ctx, alertSenderConfig.Id, alertKey, alertMessage) {
+					a.alertToProvider(ctx, alertSenderConfig, alertKey, alertMessage)
+				}
+			}
+		}
+	}
+}
+
 func (a *Alerter) alertToProvider(ctx context.Context, alertSenderConfig AlertSenderConfig, alertKey string, alertMessage string) {
 	err := alertSenderConfig.Sender.sendAlert(ctx, alertKey, alertMessage)
 	if err != nil {
@@ -266,7 +339,7 @@ func (a *Alerter) checkAndAddAlertToCatalog(ctx context.Context, alertConfigId i
 		return false
 	}
 
-	row := a.catalogPool.QueryRow(ctx,
+	row := a.CatalogPool.QueryRow(ctx,
 		`SELECT created_timestamp FROM peerdb_stats.alerts_v1 WHERE alert_key=$1 AND alert_config_id=$2
 		 ORDER BY created_timestamp DESC LIMIT 1`,
 		alertKey, alertConfigId)
@@ -278,7 +351,7 @@ func (a *Alerter) checkAndAddAlertToCatalog(ctx context.Context, alertConfigId i
 	}
 
 	if time.Since(createdTimestamp) >= dur {
-		_, err = a.catalogPool.Exec(ctx,
+		_, err = a.CatalogPool.Exec(ctx,
 			"INSERT INTO peerdb_stats.alerts_v1(alert_key,alert_message,alert_config_id) VALUES($1,$2,$3)",
 			alertKey, alertMessage, alertConfigId)
 		if err != nil {
@@ -295,18 +368,29 @@
 }
 
 func (a *Alerter) sendTelemetryMessage(ctx context.Context, flowName string, more string, level telemetry.Level) {
-	if a.telemetrySender != nil {
-		details := fmt.Sprintf("[%s] %s", flowName, more)
-		_, err := a.telemetrySender.SendMessage(ctx, details, details, telemetry.Attributes{
-			Level:         level,
-			DeploymentUID: peerdbenv.PeerDBDeploymentUID(),
-			Tags:          []string{flowName, peerdbenv.PeerDBDeploymentUID()},
-			Type:          flowName,
-		})
+	details := fmt.Sprintf("[%s] %s", flowName, more)
+	attributes := telemetry.Attributes{
+		Level:         level,
+		DeploymentUID: peerdbenv.PeerDBDeploymentUID(),
+		Tags:          []string{flowName, peerdbenv.PeerDBDeploymentUID()},
+		Type:          flowName,
+	}
+
+	if a.snsTelemetrySender != nil {
+		_, err := a.snsTelemetrySender.SendMessage(ctx, details, details, attributes)
+		if err != nil {
+			logger.LoggerFromCtx(ctx).Warn("failed to send message to snsTelemetrySender", slog.Any("error", err))
+			return
+		}
+	}
+
+	if
a.incidentIoTelemetrySender != nil { + status, err := a.incidentIoTelemetrySender.SendMessage(ctx, details, details, attributes) if err != nil { - logger.LoggerFromCtx(ctx).Warn("failed to send message to telemetrySender", slog.Any("error", err)) + logger.LoggerFromCtx(ctx).Warn("failed to send message to incidentIoTelemetrySender", slog.Any("error", err)) return } + logger.LoggerFromCtx(ctx).Info("received status from incident.io", slog.String("status", *status)) } } @@ -335,7 +419,7 @@ func (a *Alerter) LogFlowError(ctx context.Context, flowName string, err error) logger := logger.LoggerFromCtx(ctx) errorWithStack := fmt.Sprintf("%+v", err) logger.Error(err.Error(), slog.Any("stack", errorWithStack)) - _, err = a.catalogPool.Exec(ctx, + _, err = a.CatalogPool.Exec(ctx, "INSERT INTO peerdb_stats.flow_errors(flow_name,error_message,error_type) VALUES($1,$2,$3)", flowName, errorWithStack, "error") if err != nil { @@ -353,7 +437,7 @@ func (a *Alerter) LogFlowEvent(ctx context.Context, flowName string, info string func (a *Alerter) LogFlowInfo(ctx context.Context, flowName string, info string) { logger := logger.LoggerFromCtx(ctx) logger.Info(info) - _, err := a.catalogPool.Exec(ctx, + _, err := a.CatalogPool.Exec(ctx, "INSERT INTO peerdb_stats.flow_errors(flow_name,error_message,error_type) VALUES($1,$2,$3)", flowName, info, "info") if err != nil { diff --git a/flow/auth/middleware.go b/flow/auth/middleware.go index e3fdfabb7..bb3ee34da 100644 --- a/flow/auth/middleware.go +++ b/flow/auth/middleware.go @@ -68,24 +68,35 @@ func AuthGrpcMiddleware(unauthenticatedMethods []string) ([]grpc.ServerOption, e for _, method := range unauthenticatedMethods { unauthenticatedMethodsMap[method] = struct{}{} } - return []grpc.ServerOption{ grpc.ChainUnaryInterceptor(func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { + slog.Info("Received gRPC request", slog.String("method", info.FullMethod)) + if _, unauthorized := unauthenticatedMethodsMap[info.FullMethod]; !unauthorized { var authHeader string authHeaders := metadata.ValueFromIncomingContext(ctx, "Authorization") if len(authHeaders) == 1 { authHeader = authHeaders[0] } else if len(authHeaders) > 1 { + slog.Warn("Multiple Authorization headers supplied, request rejected", slog.String("method", info.FullMethod)) return nil, status.Errorf(codes.Unauthenticated, "multiple Authorization headers supplied, request rejected") } _, err := validateRequestToken(authHeader, cfg.OauthJwtCustomClaims, ip...) 
if err != nil { - slog.Debug("failed to validate request token", slog.Any("error", err)) + slog.Debug("Failed to validate request token", slog.String("method", info.FullMethod), slog.Any("error", err)) return nil, status.Errorf(codes.Unauthenticated, "%s", err.Error()) } } - return handler(ctx, req) + + resp, err := handler(ctx, req) + + if err != nil { + slog.Error("gRPC request failed", slog.String("method", info.FullMethod), slog.Any("error", err)) + } else { + slog.Info("gRPC request completed successfully", slog.String("method", info.FullMethod)) + } + + return resp, err }), }, nil } diff --git a/flow/cmd/alerts.go b/flow/cmd/alerts.go index 863aeab79..93026652f 100644 --- a/flow/cmd/alerts.go +++ b/flow/cmd/alerts.go @@ -23,7 +23,7 @@ func (h *FlowRequestHandler) GetAlertConfigs(ctx context.Context, req *protos.Ge if err := row.Scan(&config.Id, &config.ServiceType, &serviceConfigPayload, &encKeyID, &config.AlertForMirrors); err != nil { return nil, err } - serviceConfig, err := peerdbenv.Decrypt(encKeyID, serviceConfigPayload) + serviceConfig, err := peerdbenv.Decrypt(ctx, encKeyID, serviceConfigPayload) if err != nil { return nil, err } @@ -38,7 +38,7 @@ func (h *FlowRequestHandler) GetAlertConfigs(ctx context.Context, req *protos.Ge } func (h *FlowRequestHandler) PostAlertConfig(ctx context.Context, req *protos.PostAlertConfigRequest) (*protos.PostAlertConfigResponse, error) { - key, err := peerdbenv.PeerDBCurrentEncKey() + key, err := peerdbenv.PeerDBCurrentEncKey(ctx) if err != nil { return nil, err } diff --git a/flow/cmd/api.go b/flow/cmd/api.go index a0530d9db..04b7eb80d 100644 --- a/flow/cmd/api.go +++ b/flow/cmd/api.go @@ -54,7 +54,7 @@ func recryptDatabase( updateSql string, ) { newKeyID := peerdbenv.PeerDBCurrentEncKeyID() - keys := peerdbenv.PeerDBEncKeys() + keys := peerdbenv.PeerDBEncKeys(ctx) if newKeyID == "" { if len(keys) == 0 { slog.Warn("Encryption disabled. 
This is not recommended.") @@ -195,7 +195,7 @@ func APIMain(ctx context.Context, args *APIServerParams) error { if peerdbenv.PeerDBTemporalEnableCertAuth() { slog.Info("Using temporal certificate/key for authentication") - certs, err := parseTemporalCertAndKey() + certs, err := parseTemporalCertAndKey(ctx) if err != nil { return fmt.Errorf("unable to base64 decode certificate and key: %w", err) } diff --git a/flow/cmd/cert.go b/flow/cmd/cert.go index e1a1d3e7f..60c36c8df 100644 --- a/flow/cmd/cert.go +++ b/flow/cmd/cert.go @@ -1,19 +1,20 @@ package cmd import ( + "context" "crypto/tls" "fmt" "github.com/PeerDB-io/peer-flow/peerdbenv" ) -func parseTemporalCertAndKey() ([]tls.Certificate, error) { - certBytes, err := peerdbenv.PeerDBTemporalClientCert() +func parseTemporalCertAndKey(ctx context.Context) ([]tls.Certificate, error) { + certBytes, err := peerdbenv.PeerDBTemporalClientCert(ctx) if err != nil { return nil, fmt.Errorf("unable to get temporal certificate: %w", err) } - keyBytes, err := peerdbenv.PeerDBTemporalClientKey() + keyBytes, err := peerdbenv.PeerDBTemporalClientKey(ctx) if err != nil { return nil, fmt.Errorf("unable to get temporal key: %w", err) } diff --git a/flow/cmd/handler.go b/flow/cmd/handler.go index 2b48a2926..d9f5c27d9 100644 --- a/flow/cmd/handler.go +++ b/flow/cmd/handler.go @@ -140,11 +140,9 @@ func (h *FlowRequestHandler) CreateCDCFlow( workflowID := fmt.Sprintf("%s-peerflow-%s", cfg.FlowJobName, uuid.New()) workflowOptions := client.StartWorkflowOptions{ - ID: workflowID, - TaskQueue: h.peerflowTaskQueueID, - SearchAttributes: map[string]interface{}{ - shared.MirrorNameSearchAttribute: cfg.FlowJobName, - }, + ID: workflowID, + TaskQueue: h.peerflowTaskQueueID, + TypedSearchAttributes: shared.NewSearchAttributes(cfg.FlowJobName), } err := h.createCdcJobEntry(ctx, req, workflowID) @@ -208,11 +206,9 @@ func (h *FlowRequestHandler) CreateQRepFlow( cfg := req.QrepConfig workflowID := fmt.Sprintf("%s-qrepflow-%s", cfg.FlowJobName, uuid.New()) workflowOptions := client.StartWorkflowOptions{ - ID: workflowID, - TaskQueue: h.peerflowTaskQueueID, - SearchAttributes: map[string]interface{}{ - shared.MirrorNameSearchAttribute: cfg.FlowJobName, - }, + ID: workflowID, + TaskQueue: h.peerflowTaskQueueID, + TypedSearchAttributes: shared.NewSearchAttributes(cfg.FlowJobName), } if req.CreateCatalogEntry { if err := h.createQRepJobEntry(ctx, req, workflowID); err != nil { @@ -290,8 +286,7 @@ func (h *FlowRequestHandler) shutdownFlow( slog.String("workflowId", workflowID), ) - err = h.handleCancelWorkflow(ctx, workflowID, "") - if err != nil { + if err := h.handleCancelWorkflow(ctx, workflowID, ""); err != nil { slog.Error("unable to cancel workflow", logs, slog.Any("error", err)) return fmt.Errorf("unable to wait for PeerFlow workflow to close: %w", err) } @@ -308,11 +303,9 @@ func (h *FlowRequestHandler) shutdownFlow( } workflowID := fmt.Sprintf("%s-dropflow-%s", flowJobName, uuid.New()) workflowOptions := client.StartWorkflowOptions{ - ID: workflowID, - TaskQueue: h.peerflowTaskQueueID, - SearchAttributes: map[string]interface{}{ - shared.MirrorNameSearchAttribute: flowJobName, - }, + ID: workflowID, + TaskQueue: h.peerflowTaskQueueID, + TypedSearchAttributes: shared.NewSearchAttributes(flowJobName), } dropFlowHandle, err := h.temporalClient.ExecuteWorkflow(ctx, workflowOptions, @@ -323,9 +316,7 @@ func (h *FlowRequestHandler) shutdownFlow( DropFlowStats: deleteStats, }) if err != nil { - slog.Error("unable to start DropFlow workflow", - logs, - slog.Any("error", err)) + 
slog.Error("unable to start DropFlow workflow", logs, slog.Any("error", err)) return fmt.Errorf("unable to start DropFlow workflow: %w", err) } @@ -340,10 +331,7 @@ func (h *FlowRequestHandler) shutdownFlow( select { case err := <-errChan: if err != nil { - slog.Error("DropFlow workflow did not execute successfully", - logs, - slog.Any("error", err), - ) + slog.Error("DropFlow workflow did not execute successfully", logs, slog.Any("error", err)) return fmt.Errorf("DropFlow workflow did not execute successfully: %w", err) } case <-time.After(5 * time.Minute): @@ -355,10 +343,7 @@ func (h *FlowRequestHandler) shutdownFlow( } if err := h.removeFlowEntryInCatalog(ctx, flowJobName); err != nil { - slog.Error("unable to remove flow job entry", - slog.String(string(shared.FlowNameKey), flowJobName), - slog.Any("error", err), - slog.String("workflowId", workflowID)) + slog.Error("unable to remove flow job entry", logs, slog.Any("error", err)) return err } @@ -369,20 +354,21 @@ func (h *FlowRequestHandler) FlowStateChange( ctx context.Context, req *protos.FlowStateChangeRequest, ) (*protos.FlowStateChangeResponse, error) { - slog.Info("FlowStateChange called", slog.String("flowJobName", req.FlowJobName), slog.Any("req", req)) + logs := slog.String("flowJobName", req.FlowJobName) + slog.Info("FlowStateChange called", logs, slog.Any("req", req)) workflowID, err := h.getWorkflowID(ctx, req.FlowJobName) if err != nil { - slog.Error("[flow-state-change]unable to get workflowID", slog.Any("error", err)) + slog.Error("[flow-state-change] unable to get workflowID", logs, slog.Any("error", err)) return nil, err } currState, err := h.getWorkflowStatus(ctx, workflowID) if err != nil { - slog.Error("[flow-state-change]unable to get workflow status", slog.Any("error", err)) + slog.Error("[flow-state-change] unable to get workflow status", logs, slog.Any("error", err)) return nil, err } if req.FlowConfigUpdate != nil && req.FlowConfigUpdate.GetCdcFlowConfigUpdate() != nil { - err = model.CDCDynamicPropertiesSignal.SignalClientWorkflow( + err := model.CDCDynamicPropertiesSignal.SignalClientWorkflow( ctx, h.temporalClient, workflowID, @@ -390,7 +376,7 @@ func (h *FlowRequestHandler) FlowStateChange( req.FlowConfigUpdate.GetCdcFlowConfigUpdate(), ) if err != nil { - slog.Error("unable to signal workflow", slog.Any("error", err)) + slog.Error("unable to signal workflow", logs, slog.Any("error", err)) return nil, fmt.Errorf("unable to signal workflow: %w", err) } } @@ -398,7 +384,7 @@ func (h *FlowRequestHandler) FlowStateChange( if req.RequestedFlowState != protos.FlowStatus_STATUS_UNKNOWN { if req.RequestedFlowState == protos.FlowStatus_STATUS_PAUSED && currState == protos.FlowStatus_STATUS_RUNNING { - slog.Info("[flow-state-change]: received pause request") + slog.Info("[flow-state-change] received pause request", logs) err = model.FlowSignal.SignalClientWorkflow( ctx, h.temporalClient, @@ -408,7 +394,7 @@ func (h *FlowRequestHandler) FlowStateChange( ) } else if req.RequestedFlowState == protos.FlowStatus_STATUS_RUNNING && currState == protos.FlowStatus_STATUS_PAUSED { - slog.Info("[flow-state-change]: received resume request") + slog.Info("[flow-state-change] received resume request", logs) err = model.FlowSignal.SignalClientWorkflow( ctx, h.temporalClient, @@ -418,16 +404,16 @@ func (h *FlowRequestHandler) FlowStateChange( ) } else if req.RequestedFlowState == protos.FlowStatus_STATUS_TERMINATED && (currState != protos.FlowStatus_STATUS_TERMINATED) { - slog.Info("[flow-state-change]: received drop mirror 
request") + slog.Info("[flow-state-change] received drop mirror request", logs) err = h.shutdownFlow(ctx, req.FlowJobName, req.DropMirrorStats) } else if req.RequestedFlowState != currState { - slog.Error("illegal state change requested", slog.Any("requestedFlowState", req.RequestedFlowState), + slog.Error("illegal state change requested", logs, slog.Any("requestedFlowState", req.RequestedFlowState), slog.Any("currState", currState)) return nil, fmt.Errorf("illegal state change requested: %v, current state is: %v", req.RequestedFlowState, currState) } if err != nil { - slog.Error("unable to signal workflow", slog.Any("error", err)) + slog.Error("unable to signal workflow", logs, slog.Any("error", err)) return nil, fmt.Errorf("unable to signal workflow: %w", err) } } @@ -438,11 +424,9 @@ func (h *FlowRequestHandler) FlowStateChange( func (h *FlowRequestHandler) handleCancelWorkflow(ctx context.Context, workflowID, runID string) error { errChan := make(chan error, 1) - // Create a new context with timeout for CancelWorkflow ctxWithTimeout, cancel := context.WithTimeout(ctx, 2*time.Minute) defer cancel() - // Call CancelWorkflow in a goroutine go func() { err := h.temporalClient.CancelWorkflow(ctxWithTimeout, workflowID, runID) errChan <- err @@ -458,8 +442,7 @@ func (h *FlowRequestHandler) handleCancelWorkflow(ctx context.Context, workflowI } } case <-time.After(1 * time.Minute): - // If 1 minute has passed and we haven't received an error, terminate the workflow - slog.Error("Timeout reached while trying to cancel PeerFlow workflow. Attempting to terminate.") + slog.Error("Timeout reached while trying to cancel PeerFlow workflow. Attempting to terminate.", slog.String("workflowId", workflowID)) terminationReason := fmt.Sprintf("workflow %s did not cancel in time.", workflowID) if err := h.temporalClient.TerminateWorkflow(ctx, workflowID, runID, terminationReason); err != nil { return fmt.Errorf("unable to terminate PeerFlow workflow: %w", err) diff --git a/flow/cmd/mirror_status.go b/flow/cmd/mirror_status.go index abf6c95f8..70efa7597 100644 --- a/flow/cmd/mirror_status.go +++ b/flow/cmd/mirror_status.go @@ -507,6 +507,10 @@ func (h *FlowRequestHandler) GetCDCBatches(ctx context.Context, req *protos.GetC return nil, err } + if batches == nil { + batches = []*protos.CDCBatch{} + } + return &protos.GetCDCBatchesResponse{ CdcBatches: batches, }, nil @@ -545,6 +549,10 @@ func (h *FlowRequestHandler) CDCTableTotalCounts( if err != nil { return nil, err } + + if tableCounts == nil { + tableCounts = []*protos.CDCTableRowCounts{} + } return &protos.CDCTableTotalCountsResponse{TotalData: &totalCount, TablesData: tableCounts}, nil } diff --git a/flow/cmd/peer_data.go b/flow/cmd/peer_data.go index 556fba6f8..3c23fb5f1 100644 --- a/flow/cmd/peer_data.go +++ b/flow/cmd/peer_data.go @@ -45,7 +45,7 @@ func (h *FlowRequestHandler) getPGPeerConfig(ctx context.Context, peerName strin return nil, err } - peerOptions, err := peerdbenv.Decrypt(encKeyID, encPeerOptions) + peerOptions, err := peerdbenv.Decrypt(ctx, encKeyID, encPeerOptions) if err != nil { return nil, fmt.Errorf("failed to load peer: %w", err) } @@ -91,12 +91,12 @@ func (h *FlowRequestHandler) GetPeerInfo( var version string versionConnector, err := connectors.GetAs[connectors.GetVersionConnector](ctx, nil, peer) if err != nil && !errors.Is(err, errors.ErrUnsupported) { - return nil, errors.New("failed to get version connector") + slog.Error("failed to get version connector", slog.Any("error", err)) } if versionConnector != nil { version, err = 
versionConnector.GetVersion(ctx) if err != nil { - return nil, errors.New("failed to get version") + slog.Error("failed to get version", slog.Any("error", err)) } } @@ -337,7 +337,7 @@ func (h *FlowRequestHandler) GetSlotInfo( return nil, err } - pgConnector, err := connpostgres.NewPostgresConnector(ctx, pgConfig) + pgConnector, err := connpostgres.NewPostgresConnector(ctx, nil, pgConfig) if err != nil { slog.Error("Failed to create postgres connector", slog.Any("error", err)) return nil, err diff --git a/flow/cmd/settings.go b/flow/cmd/settings.go index d351aea4f..12e072859 100644 --- a/flow/cmd/settings.go +++ b/flow/cmd/settings.go @@ -19,7 +19,7 @@ func (h *FlowRequestHandler) GetDynamicSettings( ) (*protos.GetDynamicSettingsResponse, error) { rows, err := h.pool.Query(ctx, "select config_name,config_value from dynamic_settings") if err != nil { - slog.Error("[GetDynamicConfigs]: failed to query settings", slog.Any("error", err)) + slog.Error("[GetDynamicConfigs] failed to query settings", slog.Any("error", err)) return nil, err } settings := slices.Clone(peerdbenv.DynamicSettings[:]) @@ -33,7 +33,7 @@ func (h *FlowRequestHandler) GetDynamicSettings( } return nil }); err != nil { - slog.Error("[GetDynamicConfigs]: failed to collect rows", slog.Any("error", err)) + slog.Error("[GetDynamicConfigs] failed to collect rows", slog.Any("error", err)) return nil, err } @@ -58,7 +58,7 @@ func (h *FlowRequestHandler) PostDynamicSetting( _, err := h.pool.Exec(ctx, `insert into dynamic_settings (config_name, config_value) values ($1, $2) on conflict (config_name) do update set config_value = $2`, req.Name, req.Value) if err != nil { - slog.Error("[PostDynamicConfig]: failed to execute update setting", slog.Any("error", err)) + slog.Error("[PostDynamicConfig] failed to execute update setting", slog.Any("error", err)) return nil, err } return &protos.PostDynamicSettingResponse{}, nil diff --git a/flow/cmd/snapshot_worker.go b/flow/cmd/snapshot_worker.go index d7cd70191..f32a39cda 100644 --- a/flow/cmd/snapshot_worker.go +++ b/flow/cmd/snapshot_worker.go @@ -32,7 +32,7 @@ func SnapshotWorkerMain(opts *SnapshotWorkerOptions) (client.Client, worker.Work if peerdbenv.PeerDBTemporalEnableCertAuth() { slog.Info("Using temporal certificate/key for authentication") - certs, err := parseTemporalCertAndKey() + certs, err := parseTemporalCertAndKey(context.Background()) if err != nil { return nil, nil, fmt.Errorf("unable to process certificate and key: %w", err) } diff --git a/flow/cmd/validate_mirror.go b/flow/cmd/validate_mirror.go index 99c9e9837..3e870aa66 100644 --- a/flow/cmd/validate_mirror.go +++ b/flow/cmd/validate_mirror.go @@ -55,7 +55,7 @@ func (h *FlowRequestHandler) ValidateCDCMirror( return nil, errors.New("source peer config is not postgres") } - pgPeer, err := connpostgres.NewPostgresConnector(ctx, sourcePeerConfig) + pgPeer, err := connpostgres.NewPostgresConnector(ctx, nil, sourcePeerConfig) if err != nil { displayErr := fmt.Errorf("failed to create postgres connector: %v", err) h.alerter.LogNonFlowWarning(ctx, telemetry.CreateMirror, req.ConnectionConfigs.FlowJobName, displayErr.Error()) diff --git a/flow/cmd/worker.go b/flow/cmd/worker.go index 7236c1091..4521ba3ca 100644 --- a/flow/cmd/worker.go +++ b/flow/cmd/worker.go @@ -92,7 +92,7 @@ func WorkerSetup(opts *WorkerSetupOptions) (*workerSetupResponse, error) { if peerdbenv.PeerDBTemporalEnableCertAuth() { slog.Info("Using temporal certificate/key for authentication") - certs, err := parseTemporalCertAndKey() + certs, err := 
parseTemporalCertAndKey(context.Background()) if err != nil { return nil, fmt.Errorf("unable to process certificate and key: %w", err) } diff --git a/flow/connectors/bigquery/bigquery.go b/flow/connectors/bigquery/bigquery.go index 094eaf05a..3b113410c 100644 --- a/flow/connectors/bigquery/bigquery.go +++ b/flow/connectors/bigquery/bigquery.go @@ -232,7 +232,7 @@ func (c *BigQueryConnector) ReplayTableSchemaDeltas( addedColumnBigQueryType := qValueKindToBigQueryTypeString(addedColumn, schemaDelta.NullableEnabled, false) query := c.queryWithLogging(fmt.Sprintf( - "ALTER TABLE %s ADD COLUMN IF NOT EXISTS `%s` %s", + "ALTER TABLE `%s` ADD COLUMN IF NOT EXISTS `%s` %s", dstDatasetTable.table, addedColumn.Name, addedColumnBigQueryType)) query.DefaultProjectID = c.projectID query.DefaultDatasetID = dstDatasetTable.dataset diff --git a/flow/connectors/clickhouse/clickhouse.go b/flow/connectors/clickhouse/clickhouse.go index aa3816a38..6328708b7 100644 --- a/flow/connectors/clickhouse/clickhouse.go +++ b/flow/connectors/clickhouse/clickhouse.go @@ -372,7 +372,7 @@ func (c *ClickHouseConnector) checkTablesEmptyAndEngine(ctx context.Context, tab queryInput = append(queryInput, table) } rows, err := c.query(ctx, - fmt.Sprintf("SELECT name,engine,total_rows FROM system.tables WHERE database=? AND table IN (%s)", + fmt.Sprintf("SELECT name,engine,total_rows FROM system.tables WHERE database=? AND name IN (%s)", strings.Join(slices.Repeat([]string{"?"}, len(tables)), ",")), queryInput...) if err != nil { return fmt.Errorf("failed to get information for destination tables: %w", err) diff --git a/flow/connectors/clickhouse/normalize.go b/flow/connectors/clickhouse/normalize.go index 372a69134..6e0bfb971 100644 --- a/flow/connectors/clickhouse/normalize.go +++ b/flow/connectors/clickhouse/normalize.go @@ -200,12 +200,16 @@ func getOrderedOrderByColumns( orderby := make([]*protos.ColumnSetting, 0) if tableMapping != nil { for _, col := range tableMapping.Columns { - if col.Ordering > 0 && !slices.Contains(pkeys, col.SourceName) { + if col.Ordering > 0 { orderby = append(orderby, col) } } } + if len(orderby) == 0 { + return pkeys + } + slices.SortStableFunc(orderby, func(a *protos.ColumnSetting, b *protos.ColumnSetting) int { return cmp.Compare(a.Ordering, b.Ordering) }) @@ -215,10 +219,6 @@ func getOrderedOrderByColumns( orderbyColumns[idx] = getColName(colNameMap, col.SourceName) } - // Typically primary keys are not what aggregates are performed on and hence - // having them at the start of the order by clause is not beneficial. - orderbyColumns = append(orderbyColumns, pkeys...) 
- return orderbyColumns } diff --git a/flow/connectors/core.go b/flow/connectors/core.go index ba2524bd3..f55385ff4 100644 --- a/flow/connectors/core.go +++ b/flow/connectors/core.go @@ -294,7 +294,7 @@ func LoadPeer(ctx context.Context, catalogPool *pgxpool.Pool, peerName string) ( return nil, fmt.Errorf("failed to load peer: %w", err) } - peerOptions, err := peerdbenv.Decrypt(encKeyID, encPeerOptions) + peerOptions, err := peerdbenv.Decrypt(ctx, encKeyID, encPeerOptions) if err != nil { return nil, fmt.Errorf("failed to load peer: %w", err) } @@ -382,7 +382,7 @@ func LoadPeer(ctx context.Context, catalogPool *pgxpool.Pool, peerName string) ( func GetConnector(ctx context.Context, env map[string]string, config *protos.Peer) (Connector, error) { switch inner := config.Config.(type) { case *protos.Peer_PostgresConfig: - return connpostgres.NewPostgresConnector(ctx, inner.PostgresConfig) + return connpostgres.NewPostgresConnector(ctx, env, inner.PostgresConfig) case *protos.Peer_BigqueryConfig: return connbigquery.NewBigQueryConnector(ctx, inner.BigqueryConfig) case *protos.Peer_SnowflakeConfig: diff --git a/flow/connectors/postgres/cdc.go b/flow/connectors/postgres/cdc.go index 9f43cd659..b21484ca1 100644 --- a/flow/connectors/postgres/cdc.go +++ b/flow/connectors/postgres/cdc.go @@ -680,7 +680,7 @@ func processMessage[Items model.Items]( }, nil default: - logger.Warn(fmt.Sprintf("%T not supported", msg)) + logger.Debug(fmt.Sprintf("%T not supported", msg)) } return nil, nil diff --git a/flow/connectors/postgres/client.go b/flow/connectors/postgres/client.go index 0ce9c5049..0d163f173 100644 --- a/flow/connectors/postgres/client.go +++ b/flow/connectors/postgres/client.go @@ -370,7 +370,7 @@ func (c *PostgresConnector) createSlotAndPublication( parsedSrcTableName, err := utils.ParseSchemaTable(srcTableName) if err != nil { signal.SlotCreated <- SlotCreationResult{ - Err: fmt.Errorf("[publication-creation]:source table identifier %s is invalid", srcTableName), + Err: fmt.Errorf("[publication-creation] source table identifier %s is invalid", srcTableName), } return } diff --git a/flow/connectors/postgres/postgres.go b/flow/connectors/postgres/postgres.go index 6240991f6..45730a87d 100644 --- a/flow/connectors/postgres/postgres.go +++ b/flow/connectors/postgres/postgres.go @@ -37,7 +37,6 @@ type PostgresConnector struct { config *protos.PostgresConfig ssh *SSHTunnel conn *pgx.Conn - replConfig *pgx.ConnConfig replConn *pgx.Conn replState *ReplState customTypesMapping map[uint32]string @@ -55,18 +54,23 @@ type ReplState struct { LastOffset atomic.Int64 } -func NewPostgresConnector(ctx context.Context, pgConfig *protos.PostgresConfig) (*PostgresConnector, error) { +func NewPostgresConnector(ctx context.Context, env map[string]string, pgConfig *protos.PostgresConfig) (*PostgresConnector, error) { logger := logger.LoggerFromCtx(ctx) - connectionString := shared.GetPGConnectionString(pgConfig) + flowNameInApplicationName, err := peerdbenv.PeerDBApplicationNamePerMirrorName(ctx, nil) + if err != nil { + logger.Error("Failed to get flow name from application name", slog.Any("error", err)) + } + var flowName string + if flowNameInApplicationName { + flowName, _ = ctx.Value(shared.FlowNameKey).(string) + } + connectionString := shared.GetPGConnectionString(pgConfig, flowName) - // create a separate connection pool for non-replication queries as replication connections cannot - // be used for extended query protocol, i.e. 
prepared statements
 	connConfig, err := pgx.ParseConfig(connectionString)
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse connection string: %w", err)
 	}
 
-	replConfig := connConfig.Copy()
 	runtimeParams := connConfig.Config.RuntimeParams
 	runtimeParams["idle_in_transaction_session_timeout"] = "0"
 	runtimeParams["statement_timeout"] = "0"
@@ -83,11 +87,6 @@ func NewPostgresConnector(ctx context.Context, pgConfig *protos.PostgresConfig)
 		return nil, fmt.Errorf("failed to create connection: %w", err)
 	}
 
-	// ensure that replication is set to database
-	replConfig.Config.RuntimeParams["replication"] = "database"
-	replConfig.Config.RuntimeParams["bytea_output"] = "hex"
-	replConfig.Config.RuntimeParams["intervalstyle"] = "postgres"
-
 	customTypeMap, err := shared.GetCustomDataTypes(ctx, conn)
 	if err != nil {
 		logger.Error("failed to get custom type map", slog.Any("error", err))
@@ -104,7 +103,7 @@ func NewPostgresConnector(ctx context.Context, pgConfig *protos.PostgresConfig)
 		config:             pgConfig,
 		ssh:                tunnel,
 		conn:               conn,
-		replConfig:         replConfig,
+		replConn:           nil,
 		replState:          nil,
 		replLock:           sync.Mutex{},
 		customTypesMapping: customTypeMap,
@@ -116,7 +115,22 @@
 }
 
 func (c *PostgresConnector) CreateReplConn(ctx context.Context) (*pgx.Conn, error) {
-	conn, err := c.ssh.NewPostgresConnFromConfig(ctx, c.replConfig)
+	// the replication connection needs its own config, since replication connections cannot
+	// be used for the extended query protocol, i.e. prepared statements
+	replConfig, err := pgx.ParseConfig(c.connStr)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse connection string: %w", err)
+	}
+
+	runtimeParams := replConfig.Config.RuntimeParams
+	runtimeParams["idle_in_transaction_session_timeout"] = "0"
+	runtimeParams["statement_timeout"] = "0"
+	// ensure that replication is set to database
+	replConfig.Config.RuntimeParams["replication"] = "database"
+	replConfig.Config.RuntimeParams["bytea_output"] = "hex"
+	replConfig.Config.RuntimeParams["intervalstyle"] = "postgres"
+
+	conn, err := c.ssh.NewPostgresConnFromConfig(ctx, replConfig)
 	if err != nil {
 		logger.LoggerFromCtx(ctx).Error("failed to create replication connection", "error", err)
 		return nil, fmt.Errorf("failed to create replication connection: %w", err)
@@ -1207,6 +1221,23 @@ func (c *PostgresConnector) HandleSlotInfo(
 		attribute.String(peerdb_gauges.PeerNameKey, alertKeys.PeerName),
 		attribute.String(peerdb_gauges.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()))))
 
+	var intervalSinceLastNormalize *time.Duration
+	err = alerter.CatalogPool.QueryRow(ctx, "SELECT now()-max(end_time) FROM peerdb_stats.cdc_batches WHERE flow_name=$1",
+		alertKeys.FlowName).Scan(&intervalSinceLastNormalize)
+	if err != nil {
+		logger.Warn("failed to get interval since last normalize", slog.Any("error", err))
+	}
+	// the first normalize may not have run yet (or may have errored/hung), leaving no batches to measure against
+	if intervalSinceLastNormalize == nil {
+		logger.Warn("interval since last normalize is nil")
+		return nil
+	}
+	slotMetricGauges.IntervalSinceLastNormalizeGauge.Set(intervalSinceLastNormalize.Seconds(), attribute.NewSet(
+		attribute.String(peerdb_gauges.FlowNameKey, alertKeys.FlowName),
+		attribute.String(peerdb_gauges.PeerNameKey, alertKeys.PeerName),
+		attribute.String(peerdb_gauges.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID())))
+	alerter.AlertIfTooLongSinceLastNormalize(ctx, alertKeys, *intervalSinceLastNormalize)
+
 	return monitoring.AppendSlotSizeInfo(ctx, catalogPool, alertKeys.PeerName, slotInfo[0])
 }
 
@@ -1389,7 +1422,7 @@ func (c *PostgresConnector) RenameTables(
 			c.logger.Info(fmt.Sprintf("handling soft-deletes for table '%s'...", dst))
 
 			_, err = c.execWithLoggingTx(ctx, fmt.Sprintf(
-				"INSERT INTO %s(%s) SELECT %s,true AS %s FROM %s original_table"+
+				"INSERT INTO %s(%s) SELECT %s,true AS %s FROM %s original_table "+
 				"WHERE NOT EXISTS (SELECT 1 FROM %s resync_table WHERE %s)",
 				src, fmt.Sprintf("%s,%s", allCols, QuoteIdentifier(req.SoftDeleteColName)), allCols, req.SoftDeleteColName, dst,
 				src, pkeyColCompareStr), renameTablesTx)
diff --git a/flow/connectors/postgres/postgres_schema_delta_test.go b/flow/connectors/postgres/postgres_schema_delta_test.go
index 3ac0d3922..946b20eb3 100644
--- a/flow/connectors/postgres/postgres_schema_delta_test.go
+++ b/flow/connectors/postgres/postgres_schema_delta_test.go
@@ -25,7 +25,7 @@ type PostgresSchemaDeltaTestSuite struct {
 func SetupSuite(t *testing.T) PostgresSchemaDeltaTestSuite {
 	t.Helper()
 
-	connector, err := NewPostgresConnector(context.Background(), peerdbenv.GetCatalogPostgresConfigFromEnv())
+	connector, err := NewPostgresConnector(context.Background(), nil, peerdbenv.GetCatalogPostgresConfigFromEnv(context.Background()))
 	require.NoError(t, err)
 
 	setupTx, err := connector.conn.Begin(context.Background())
diff --git a/flow/connectors/postgres/qrep_bench_test.go b/flow/connectors/postgres/qrep_bench_test.go
index 252b7520e..d880343f4 100644
--- a/flow/connectors/postgres/qrep_bench_test.go
+++ b/flow/connectors/postgres/qrep_bench_test.go
@@ -11,7 +11,7 @@ func BenchmarkQRepQueryExecutor(b *testing.B) {
 	query := "SELECT * FROM bench.large_table"
 	ctx := context.Background()
 
-	connector, err := NewPostgresConnector(ctx, peerdbenv.GetCatalogPostgresConfigFromEnv())
+	connector, err := NewPostgresConnector(ctx, nil, peerdbenv.GetCatalogPostgresConfigFromEnv(ctx))
 	if err != nil {
 		b.Fatalf("failed to create connection: %v", err)
 	}
diff --git a/flow/connectors/postgres/qrep_partition_test.go b/flow/connectors/postgres/qrep_partition_test.go
index bcafbe2f7..0249b75fc 100644
--- a/flow/connectors/postgres/qrep_partition_test.go
+++ b/flow/connectors/postgres/qrep_partition_test.go
@@ -64,7 +64,7 @@ func newTestCaseForCTID(schema string, name string, rows uint32, expectedNum int
 }
 
 func TestGetQRepPartitions(t *testing.T) {
-	connStr := peerdbenv.GetCatalogConnectionStringFromEnv()
+	connStr := peerdbenv.GetCatalogConnectionStringFromEnv(context.Background())
 	// Setup the DB
 	config, err := pgx.ParseConfig(connStr)
diff --git a/flow/connectors/postgres/qrep_query_executor_test.go b/flow/connectors/postgres/qrep_query_executor_test.go
index 187074907..d7932ba00 100644
--- a/flow/connectors/postgres/qrep_query_executor_test.go
+++ b/flow/connectors/postgres/qrep_query_executor_test.go
@@ -17,7 +17,8 @@ import (
 func setupDB(t *testing.T) (*PostgresConnector, string) {
 	t.Helper()
 
-	connector, err :=
NewPostgresConnector(context.Background(), peerdbenv.GetCatalogPostgresConfigFromEnv()) + connector, err := NewPostgresConnector(context.Background(), + nil, peerdbenv.GetCatalogPostgresConfigFromEnv(context.Background())) if err != nil { t.Fatalf("unable to create connector: %v", err) } diff --git a/flow/connectors/postgres/ssh_wrapped_pool.go b/flow/connectors/postgres/ssh_wrapped_pool.go index 15274272e..4e5868ccd 100644 --- a/flow/connectors/postgres/ssh_wrapped_pool.go +++ b/flow/connectors/postgres/ssh_wrapped_pool.go @@ -14,6 +14,7 @@ import ( "github.com/PeerDB-io/peer-flow/connectors/utils" "github.com/PeerDB-io/peer-flow/generated/protos" "github.com/PeerDB-io/peer-flow/logger" + "github.com/PeerDB-io/peer-flow/peerdbenv" "github.com/PeerDB-io/peer-flow/shared" ) @@ -79,13 +80,21 @@ func (tunnel *SSHTunnel) NewPostgresConnFromPostgresConfig( ctx context.Context, pgConfig *protos.PostgresConfig, ) (*pgx.Conn, error) { - connectionString := shared.GetPGConnectionString(pgConfig) + flowNameInApplicationName, err := peerdbenv.PeerDBApplicationNamePerMirrorName(ctx, nil) + if err != nil { + logger.LoggerFromCtx(ctx).Error("Failed to get flow name from application name", slog.Any("error", err)) + } + + var flowName string + if flowNameInApplicationName { + flowName, _ = ctx.Value(shared.FlowNameKey).(string) + } + connectionString := shared.GetPGConnectionString(pgConfig, flowName) connConfig, err := pgx.ParseConfig(connectionString) if err != nil { return nil, err } - connConfig.RuntimeParams["application_name"] = "peerdb" return tunnel.NewPostgresConnFromConfig(ctx, connConfig) } diff --git a/flow/connectors/postgres/validate.go b/flow/connectors/postgres/validate.go index 0a44f8b25..97adf80af 100644 --- a/flow/connectors/postgres/validate.go +++ b/flow/connectors/postgres/validate.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "strconv" "strings" "github.com/jackc/pgx/v5" @@ -73,7 +72,7 @@ func (c *PostgresConnector) CheckReplicationPermissions(ctx context.Context, use var replicationRes bool err := c.conn.QueryRow(ctx, "SELECT rolreplication FROM pg_roles WHERE rolname = $1", username).Scan(&replicationRes) if err != nil { - if err == pgx.ErrNoRows { + if errors.Is(err, pgx.ErrNoRows) { c.logger.Warn("No rows in pg_roles for user. 
Skipping rolereplication check", "username", username) } else { @@ -85,7 +84,7 @@ func (c *PostgresConnector) CheckReplicationPermissions(ctx context.Context, use // RDS case: check pg_settings for rds.logical_replication var setting string err := c.conn.QueryRow(ctx, "SELECT setting FROM pg_settings WHERE name = 'rds.logical_replication'").Scan(&setting) - if err != pgx.ErrNoRows { + if !errors.Is(err, pgx.ErrNoRows) { if err != nil || setting != "on" { return errors.New("postgres user does not have replication role") } @@ -104,18 +103,14 @@ func (c *PostgresConnector) CheckReplicationPermissions(ctx context.Context, use } // max_wal_senders must be at least 2 - var maxWalSendersRes string - err = c.conn.QueryRow(ctx, "SHOW max_wal_senders").Scan(&maxWalSendersRes) + var insufficientMaxWalSenders bool + err = c.conn.QueryRow(ctx, + "SELECT setting::int<2 FROM pg_settings WHERE name='max_wal_senders'").Scan(&insufficientMaxWalSenders) if err != nil { return err } - maxWalSenders, err := strconv.Atoi(maxWalSendersRes) - if err != nil { - return err - } - - if maxWalSenders < 2 { + if insufficientMaxWalSenders { return errors.New("max_wal_senders must be at least 2") } diff --git a/flow/connectors/utils/peers.go b/flow/connectors/utils/peers.go index 36838c7bc..dd050b50f 100644 --- a/flow/connectors/utils/peers.go +++ b/flow/connectors/utils/peers.go @@ -104,7 +104,7 @@ func CreatePeerNoValidate( return nil, encodingErr } - encryptedConfig, keyID, err := encryptPeerOptions(encodedConfig) + encryptedConfig, keyID, err := encryptPeerOptions(ctx, encodedConfig) if err != nil { return nil, fmt.Errorf("failed to encrypt peer configuration: %w", err) } @@ -134,8 +134,8 @@ func CreatePeerNoValidate( }, nil } -func encryptPeerOptions(peerOptions []byte) ([]byte, string, error) { - key, err := peerdbenv.PeerDBCurrentEncKey() +func encryptPeerOptions(ctx context.Context, peerOptions []byte) ([]byte, string, error) { + key, err := peerdbenv.PeerDBCurrentEncKey(ctx) if err != nil { return nil, "", fmt.Errorf("failed to get current encryption key: %w", err) } diff --git a/flow/e2e/congen.go b/flow/e2e/congen.go index ccdd4a79e..bcb0bf48f 100644 --- a/flow/e2e/congen.go +++ b/flow/e2e/congen.go @@ -107,7 +107,8 @@ func setupPostgresSchema(t *testing.T, conn *pgx.Conn, suffix string) error { func SetupPostgres(t *testing.T, suffix string) (*connpostgres.PostgresConnector, error) { t.Helper() - connector, err := connpostgres.NewPostgresConnector(context.Background(), peerdbenv.GetCatalogPostgresConfigFromEnv()) + connector, err := connpostgres.NewPostgresConnector(context.Background(), + nil, peerdbenv.GetCatalogPostgresConfigFromEnv(context.Background())) if err != nil { return nil, fmt.Errorf("failed to create postgres connection: %w", err) } @@ -155,7 +156,7 @@ func GeneratePostgresPeer(t *testing.T) *protos.Peer { Name: "catalog", Type: protos.DBType_POSTGRES, Config: &protos.Peer_PostgresConfig{ - PostgresConfig: peerdbenv.GetCatalogPostgresConfigFromEnv(), + PostgresConfig: peerdbenv.GetCatalogPostgresConfigFromEnv(context.Background()), }, } CreatePeer(t, peer) diff --git a/flow/go.mod b/flow/go.mod index 935b9fc15..e71c937b3 100644 --- a/flow/go.mod +++ b/flow/go.mod @@ -6,11 +6,11 @@ require ( cloud.google.com/go v0.116.0 cloud.google.com/go/bigquery v1.63.1 cloud.google.com/go/pubsub v1.44.0 - cloud.google.com/go/storage v1.44.0 + cloud.google.com/go/storage v1.45.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 - github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.2 + 
github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.3 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.3.0 - github.com/ClickHouse/clickhouse-go/v2 v2.29.0 + github.com/ClickHouse/clickhouse-go/v2 v2.30.0 github.com/PeerDB-io/glua64 v1.0.1 github.com/PeerDB-io/gluabit32 v1.0.2 github.com/PeerDB-io/gluaflatbuffers v1.0.1 @@ -18,11 +18,11 @@ require ( github.com/PeerDB-io/gluamsgpack v1.0.4 github.com/PeerDB-io/gluautf8 v1.0.0 github.com/aws/aws-sdk-go-v2 v1.32.2 - github.com/aws/aws-sdk-go-v2/config v1.27.43 + github.com/aws/aws-sdk-go-v2/config v1.28.0 github.com/aws/aws-sdk-go-v2/credentials v1.17.41 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.32 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.33 github.com/aws/aws-sdk-go-v2/service/kms v1.37.2 - github.com/aws/aws-sdk-go-v2/service/s3 v1.65.3 + github.com/aws/aws-sdk-go-v2/service/s3 v1.66.0 github.com/aws/aws-sdk-go-v2/service/ses v1.28.2 github.com/aws/aws-sdk-go-v2/service/sns v1.33.2 github.com/aws/smithy-go v1.22.0 @@ -43,11 +43,11 @@ require ( github.com/microsoft/go-mssqldb v1.7.2 github.com/orcaman/concurrent-map/v2 v2.0.1 github.com/shopspring/decimal v1.4.0 - github.com/slack-go/slack v0.14.0 + github.com/slack-go/slack v0.15.0 github.com/snowflakedb/gosnowflake v1.11.2 github.com/stretchr/testify v1.9.0 - github.com/twmb/franz-go v1.17.1 - github.com/twmb/franz-go/pkg/kadm v1.13.0 + github.com/twmb/franz-go v1.18.0 + github.com/twmb/franz-go/pkg/kadm v1.14.0 github.com/twmb/franz-go/plugin/kslog v1.0.0 github.com/twpayne/go-geos v0.19.0 github.com/urfave/cli/v3 v3.0.0-alpha9.1 @@ -63,10 +63,9 @@ require ( go.temporal.io/sdk v1.29.1 go.uber.org/automaxprocs v1.6.0 golang.org/x/crypto v0.28.0 - golang.org/x/mod v0.21.0 golang.org/x/sync v0.8.0 - google.golang.org/api v0.200.0 - google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 + google.golang.org/api v0.201.0 + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 google.golang.org/grpc v1.67.1 google.golang.org/protobuf v1.35.1 ) @@ -78,11 +77,11 @@ require ( cloud.google.com/go/monitoring v1.21.1 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect - github.com/ClickHouse/ch-go v0.62.0 // indirect + github.com/ClickHouse/ch-go v0.63.0 // indirect github.com/DataDog/zstd v1.5.6 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.2 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.2 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.2 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.3 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.3 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/apache/arrow/go/v15 v15.0.2 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 // indirect @@ -104,7 +103,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/dvsekhvalnov/jose2go v1.7.0 // indirect github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect - github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/go-control-plane v0.13.1 // 
indirect github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/gabriel-vasile/mimetype v1.4.6 // indirect @@ -129,27 +128,28 @@ require ( github.com/paulmach/orb v0.11.1 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect - github.com/prometheus/client_golang v1.20.4 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.60.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/twmb/franz-go/pkg/kmsg v1.8.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.30.0 // indirect + github.com/twmb/franz-go/pkg/kmsg v1.9.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.31.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect go.opentelemetry.io/otel/trace v1.31.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/term v0.25.0 // indirect - google.golang.org/grpc/stats/opentelemetry v0.0.0-20241014145745-ad81c20503be // indirect + google.golang.org/grpc/stats/opentelemetry v0.0.0-20241018153737-98959d9a4904 // indirect ) require ( cloud.google.com/go/compute/metadata v0.5.2 // indirect cloud.google.com/go/iam v1.2.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1 // indirect github.com/Azure/go-amqp v1.2.0 // indirect @@ -204,7 +204,7 @@ require ( golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect - google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/flow/go.sum b/flow/go.sum index df6f8b331..4d1ffcc0f 100644 --- a/flow/go.sum +++ b/flow/go.sum @@ -25,8 +25,8 @@ cloud.google.com/go/monitoring v1.21.1 h1:zWtbIoBMnU5LP9A/fz8LmWMGHpk4skdfeiaa66 cloud.google.com/go/monitoring v1.21.1/go.mod h1:Rj++LKrlht9uBi8+Eb530dIrzG/cU/lB8mt+lbeFK1c= cloud.google.com/go/pubsub v1.44.0 h1:pLaMJVDTlnUDIKT5L0k53YyLszfBbGoUBo/IqDK/fEI= cloud.google.com/go/pubsub v1.44.0/go.mod h1:BD4a/kmE8OePyHoa1qAHEw1rMzXX+Pc8Se54T/8mc3I= -cloud.google.com/go/storage v1.44.0 h1:abBzXf4UJKMmQ04xxJf9dYM/fNl24KHoTuBjyJDX2AI= -cloud.google.com/go/storage v1.44.0/go.mod h1:wpPblkIuMP5jCB/E48Pz9zIo2S/zD8g+ITmxKkPCITE= +cloud.google.com/go/storage v1.45.0 h1:5av0QcIVj77t+44mV4gffFC/LscFRUhto6UBMB5SimM= +cloud.google.com/go/storage v1.45.0/go.mod h1:wpPblkIuMP5jCB/E48Pz9zIo2S/zD8g+ITmxKkPCITE= cloud.google.com/go/trace v1.11.1 h1:UNqdP+HYYtnm6lb91aNA5JQ0X14GnxkABGlfz2PzPew= cloud.google.com/go/trace v1.11.1/go.mod h1:IQKNQuBzH72EGaXEodKlNJrWykGZxet2zgjtS60OtjA= 
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= @@ -35,16 +35,16 @@ github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMb github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.2 h1:B+TQ/DzOEn9CsiiosdD/IAyZ5gZiyC+0T19iwxCCnaY= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.2/go.mod h1:qf3s/6aV9ePKYGeEYPsbndK6GGfeS7SrbA6OE/T7NIA= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.3 h1:6bVZts/82H+hax9b3vdmSpi7+Hw9uWvEaJHeKlafnW4= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.3/go.mod h1:qf3s/6aV9ePKYGeEYPsbndK6GGfeS7SrbA6OE/T7NIA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.3.0 h1:4hGvxD72TluuFIXVr8f4XkKZfqAa7Pj61t0jmQ7+kes= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.3.0/go.mod h1:TSH7DcFItwAufy0Lz+Ft2cyopExCpxbOxI5SkH4dRNo= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0 h1:2qsIIvxVT+uE6yrNldntJKlLRgxGbZ85kgtz5SNBhMw= @@ -66,20 +66,20 @@ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mo github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/ClickHouse/ch-go v0.62.0 h1:eXH0hytXeCEEZHgMvOX9IiW7wqBb4w1MJMp9rArbkrc= -github.com/ClickHouse/ch-go v0.62.0/go.mod h1:uzso52/PD9+gZj7tL6XAo8/EYDrx7CIwNF4c6PnO6S0= -github.com/ClickHouse/clickhouse-go/v2 v2.29.0 h1:Dj1w59RssRyLgGHXtYaWU0eIM1pJsu9nGPi/btmvAqw= -github.com/ClickHouse/clickhouse-go/v2 v2.29.0/go.mod h1:bLookq6qZJ4Ush/6tOAnJGh1Sf3Sa/nQoMn71p7ZCUE= +github.com/ClickHouse/ch-go v0.63.0 h1:P038rAdzNo4BIwr/L2VOJ1Wr7nHu6XXvjExBwb1eDj0= +github.com/ClickHouse/ch-go v0.63.0/go.mod h1:I1kJJCL3WJcBMGe1m+HVK0+nREaG+JOYYBWjrDrF3R0= +github.com/ClickHouse/clickhouse-go/v2 v2.30.0 h1:AG4D/hW39qa58+JHQIFOSnxyL46H6h2lrmGGk17dhFo= +github.com/ClickHouse/clickhouse-go/v2 v2.30.0/go.mod 
h1:i9ZQAojcayW3RsdCb3YR+n+wC2h65eJsZCscZ1Z1wyo= github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.2 h1:cZpsGsWTIFKymTA0je7IIvi1O7Es7apb9CF3EQlOcfE= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.2/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.2 h1:RopCq1mZTydpZpWfeYDvsnKR5L8VeaNt5JR5wiMfh7Q= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.2/go.mod h1:tlLrnqq33OLuNnYbqswyI5ckZ0QjuM2DFIuaraxxDEU= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.2 h1:ffI2ensdT33alWXmBDi/7cvCV7K3o7TF5oE44g8tiN0= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.2/go.mod h1:pNP/L2wDlaQnQlFvkDKGSruDoYRpmAxB6drgsskfYwg= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.2 h1:th/AQTVtV5u0WVQln/ks+jxhkZ433MeOevmka55fkeg= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.2/go.mod h1:wRbFgBQUVm1YXrvWKofAEmq9HNJTDphbAaJSSX01KUI= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3 h1:cb3br57K508pQEFgBxn9GDhPS9HefpyMPK1RzmtMNzk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.3 h1:xir5X8TS8UBVPWg2jHL+cSTf0jZgqYQSA54TscSt1/0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.3/go.mod h1:SsdWig2J5PMnfMvfJuEb1uZa8Y+kvNyvrULFo69gTFk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.3 h1:Nl7phYyHjnqofWDpD+6FYdiwtNIxebn0AHLry7Sxb0M= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.3/go.mod h1:pNP/L2wDlaQnQlFvkDKGSruDoYRpmAxB6drgsskfYwg= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.3 h1:2vcVkrNdSMJpoOVAWi9ApsQR5iqNeFGt5Qx8Xlt3IoI= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.3/go.mod h1:wRbFgBQUVm1YXrvWKofAEmq9HNJTDphbAaJSSX01KUI= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/PeerDB-io/glua64 v1.0.1 h1:biXLlFF/L5pnJCwDon7hkWkuQPozC8NjKS3J7Wzi69I= @@ -106,14 +106,14 @@ github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcT github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 h1:pT3hpW0cOHRJx8Y0DfJUEQuqPild8jRGmSFmBgvydr0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6/go.mod h1:j/I2++U0xX+cr44QjHay4Cvxj6FUbnxrgmqN3H1jTZA= -github.com/aws/aws-sdk-go-v2/config v1.27.43 h1:p33fDDihFC390dhhuv8nOmX419wjOSDQRb+USt20RrU= -github.com/aws/aws-sdk-go-v2/config v1.27.43/go.mod h1:pYhbtvg1siOOg8h5an77rXle9tVG8T+BWLWAo7cOukc= +github.com/aws/aws-sdk-go-v2/config v1.28.0 h1:FosVYWcqEtWNxHn8gB/Vs6jOlNwSoyOCA/g/sxyySOQ= +github.com/aws/aws-sdk-go-v2/config v1.28.0/go.mod h1:pYhbtvg1siOOg8h5an77rXle9tVG8T+BWLWAo7cOukc= 
github.com/aws/aws-sdk-go-v2/credentials v1.17.41 h1:7gXo+Axmp+R4Z+AK8YFQO0ZV3L0gizGINCOWxSLY9W8= github.com/aws/aws-sdk-go-v2/credentials v1.17.41/go.mod h1:u4Eb8d3394YLubphT4jLEwN1rLNq2wFOlT6OuxFwPzU= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 h1:TMH3f/SCAWdNtXXVPPu5D6wrr4G5hI1rAxbcocKfC7Q= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17/go.mod h1:1ZRXLdTpzdJb9fwTMXiLipENRxkGMTn1sfKexGllQCw= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.32 h1:C2hE+gJ40Cb4vzhFJ+tTzjvBpPloUq7XP6PD3A2Fk7g= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.32/go.mod h1:0OmMtVNp+10JFBTfmA2AIeqBDm0YthDXmE+N7poaptk= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.33 h1:X+4YY5kZRI/cOoSMVMGTqFXHAMg1bvvay7IBcqHpybQ= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.33/go.mod h1:DPynzu+cn92k5UQ6tZhX+wfTB4ah6QDU/NgdHqatmvk= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s= @@ -132,8 +132,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2 h1:t7iUP9+4wdc5lt github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2/go.mod h1:/niFCtmuQNxqx9v8WAPq5qh7EH25U4BF6tjoyq9bObM= github.com/aws/aws-sdk-go-v2/service/kms v1.37.2 h1:tfBABi5R6aSZlhgTWHxL+opYUDOnIGoNcJLwVYv0jLM= github.com/aws/aws-sdk-go-v2/service/kms v1.37.2/go.mod h1:dZYFcQwuoh+cLOlFnZItijZptmyDhRIkOKWFO1CfzV8= -github.com/aws/aws-sdk-go-v2/service/s3 v1.65.3 h1:xxHGZ+wUgZNACQmxtdvP5tgzfsxGS3vPpTP5Hy3iToE= -github.com/aws/aws-sdk-go-v2/service/s3 v1.65.3/go.mod h1:cB6oAuus7YXRZhWCc1wIwPywwZ1XwweNp2TVAEGYeB8= +github.com/aws/aws-sdk-go-v2/service/s3 v1.66.0 h1:xA6XhTF7PE89BCNHJbQi8VvPzcgMtmGC5dr8S8N7lHk= +github.com/aws/aws-sdk-go-v2/service/s3 v1.66.0/go.mod h1:cB6oAuus7YXRZhWCc1wIwPywwZ1XwweNp2TVAEGYeB8= github.com/aws/aws-sdk-go-v2/service/ses v1.28.2 h1:FtmzF/j5v++pa0tuuE0wwvWckHzad+vl/Dy5as0Ateo= github.com/aws/aws-sdk-go-v2/service/ses v1.28.2/go.mod h1:bSPQlnLDUiQy7XxmKqTBsCVkYrLfnYJbEyAmm/gWcaI= github.com/aws/aws-sdk-go-v2/service/sns v1.33.2 h1:GeVRrB1aJsGdXxdPY6VOv0SWs+pfdeDlKgiBxi0+V6I= @@ -198,8 +198,8 @@ github.com/elastic/go-elasticsearch/v8 v8.15.0/go.mod h1:HCON3zj4btpqs2N1jjsAy4a github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= -github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= @@ -399,8 +399,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= -github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= @@ -422,8 +422,8 @@ github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+D github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/slack-go/slack v0.14.0 h1:6c0UTfbRnvRssZUsZ2qe0Iu07VAMPjRqOa6oX8ewF4k= -github.com/slack-go/slack v0.14.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= +github.com/slack-go/slack v0.15.0 h1:LE2lj2y9vqqiOf+qIIy0GvEoxgF1N5yLGZffmEZykt0= +github.com/slack-go/slack v0.15.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= github.com/snowflakedb/gosnowflake v1.11.2 h1:eAMsxrCiC6ij5wX3dHx1TQCBOdDmCK062Ir8rndUkRg= github.com/snowflakedb/gosnowflake v1.11.2/go.mod h1:WFe+8mpsapDaQjHX6BqJBKtfQCGlGD3lHKeDsKfpx2A= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -444,12 +444,12 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/twmb/franz-go v1.17.1 h1:0LwPsbbJeJ9R91DPUHSEd4su82WJWcTY1Zzbgbg4CeQ= -github.com/twmb/franz-go v1.17.1/go.mod h1:NreRdJ2F7dziDY/m6VyspWd6sNxHKXdMZI42UfQ3GXM= -github.com/twmb/franz-go/pkg/kadm v1.13.0 h1:bJq4C2ZikUE2jh/wl9MtMTQ/kpmnBgVFh8XMQBEC+60= -github.com/twmb/franz-go/pkg/kadm v1.13.0/go.mod h1:VMvpfjz/szpH9WB+vGM+rteTzVv0djyHFimci9qm2C0= -github.com/twmb/franz-go/pkg/kmsg v1.8.0 h1:lAQB9Z3aMrIP9qF9288XcFf/ccaSxEitNA1CDTEIeTA= -github.com/twmb/franz-go/pkg/kmsg v1.8.0/go.mod h1:HzYEb8G3uu5XevZbtU0dVbkphaKTHk0X68N5ka4q6mU= +github.com/twmb/franz-go v1.18.0 h1:25FjMZfdozBywVX+5xrWC2W+W76i0xykKjTdEeD2ejw= +github.com/twmb/franz-go v1.18.0/go.mod h1:zXCGy74M0p5FbXsLeASdyvfLFsBvTubVqctIaa5wQ+I= +github.com/twmb/franz-go/pkg/kadm v1.14.0 h1:nAn1co1lXzJQocpzyIyOFOjUBf4WHWs5/fTprXy2IZs= +github.com/twmb/franz-go/pkg/kadm v1.14.0/go.mod h1:XjOPz6ZaXXjrW2jVCfLuucP8H1w2TvD6y3PT2M+aAM4= +github.com/twmb/franz-go/pkg/kmsg v1.9.0 h1:JojYUph2TKAau6SBtErXpXGC7E3gg4vGZMv9xFU/B6M= +github.com/twmb/franz-go/pkg/kmsg v1.9.0/go.mod h1:CMbfazviCyY6HM0SXuG5t9vOwYDHRCSrJJyBAe5paqg= github.com/twmb/franz-go/plugin/kslog v1.0.0 h1:I64oEmF+0PDvmyLgwrlOtg4mfpSE9GwlcLxM4af2t60= github.com/twmb/franz-go/plugin/kslog v1.0.0/go.mod 
h1:8pMjK3OJJJNNYddBSbnXZkIK5dCKFIk9GcVVCDgvnQc= github.com/twpayne/go-geos v0.19.0 h1:V7vnLe7gY7JOHLTg8+2oykZOw6wpBLHVNlcnzS2FlG0= @@ -478,8 +478,8 @@ go.einride.tech/aip v0.68.0/go.mod h1:7y9FF8VtPWqpxuAxl0KQWqaULxW4zFIesD6zF5RIHH go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/detectors/gcp v1.30.0 h1:GF+YVnUeJwOy+Ag2cTEpVZq+r2Tnci42FIiNwA2gjME= -go.opentelemetry.io/contrib/detectors/gcp v1.30.0/go.mod h1:p5Av42vWKPezk67MQwLYZwlo/z6xLnN/upaIyQNWBGg= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0 h1:G1JQOreVrfhRkner+l4mrGxmfqYCAuy76asTDAo0xsA= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= @@ -599,20 +599,20 @@ golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhS golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= -google.golang.org/api v0.200.0 h1:0ytfNWn101is6e9VBoct2wrGDjOi5vn7jw5KtaQgDrU= -google.golang.org/api v0.200.0/go.mod h1:Tc5u9kcbjO7A8SwGlYj4IiVifJU01UqXtEgDMYmBmV8= +google.golang.org/api v0.201.0 h1:+7AD9JNM3tREtawRMu8sOjSbb8VYcYXJG/2eEOmfDu0= +google.golang.org/api v0.201.0/go.mod h1:HVY0FCHVs89xIW9fzf/pBvOEm+OolHa86G/txFezyq4= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9 h1:nFS3IivktIU5Mk6KQa+v6RKkHUpdQpphqGNLxqNnbEk= -google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:tEzYTYZxbmVNOu0OAFH9HzdJtLn6h4Aj89zzlBCdHms= -google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg= -google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 h1:Df6WuGvthPzc+JiQ/G+m+sNX24kc0aTBqoDN/0yyykE= +google.golang.org/genproto 
v0.0.0-20241015192408-796eee8c2d53/go.mod h1:fheguH3Am2dGp1LfXkrvwqC/KlFq8F0nLq3LryOMrrE= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -621,8 +621,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/grpc/stats/opentelemetry v0.0.0-20241014145745-ad81c20503be h1:2V/TCnE7eaRGA5ZsbhWPzzzcrUNfy3OC9YuUjc4MYII= -google.golang.org/grpc/stats/opentelemetry v0.0.0-20241014145745-ad81c20503be/go.mod h1:xwT0YrcBcgR1ZSSLJtUgCjF5QlvTOhiwA/I9TcYf3Gg= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20241018153737-98959d9a4904 h1:Lplo3VKrYtWeryBkDI4SZ4kJTFaWO4qUGs+xX7N2bFc= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20241018153737-98959d9a4904/go.mod h1:jzYlkSMbKypzuu6xoAEijsNVo9ZeDF1u/zCfFgsx7jg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/flow/otel_metrics/peerdb_gauges/gauges.go b/flow/otel_metrics/peerdb_gauges/gauges.go index 0e29b618e..6f8f4f0c5 100644 --- a/flow/otel_metrics/peerdb_gauges/gauges.go +++ b/flow/otel_metrics/peerdb_gauges/gauges.go @@ -9,12 +9,14 @@ const ( SlotLagGaugeName string = "cdc_slot_lag" OpenConnectionsGaugeName string = "open_connections" OpenReplicationConnectionsGaugeName string = "open_replication_connections" + IntervalSinceLastNormalizeGaugeName string = "interval_since_last_normalize" ) type SlotMetricGauges struct { SlotLagGauge *otel_metrics.Float64SyncGauge OpenConnectionsGauge *otel_metrics.Int64SyncGauge OpenReplicationConnectionsGauge *otel_metrics.Int64SyncGauge + IntervalSinceLastNormalizeGauge *otel_metrics.Float64SyncGauge } func BuildGaugeName(baseGaugeName string) string { diff --git a/flow/peerdbenv/catalog.go b/flow/peerdbenv/catalog.go index 1014e8306..f0d80b50c 100644 --- a/flow/peerdbenv/catalog.go +++ b/flow/peerdbenv/catalog.go @@ -22,7 +22,7 @@ func GetCatalogConnectionPoolFromEnv(ctx context.Context) (*pgxpool.Pool, error) poolMutex.Lock() defer poolMutex.Unlock() if pool == nil { - catalogConnectionString := GetCatalogConnectionStringFromEnv() + catalogConnectionString := GetCatalogConnectionStringFromEnv(ctx) pool, err = pgxpool.New(ctx, catalogConnectionString) if err != nil { return nil, fmt.Errorf("unable to establish connection with catalog: %w", err) @@ -37,16 +37,16 @@ func 
GetCatalogConnectionPoolFromEnv(ctx context.Context) (*pgxpool.Pool, error)
 	return pool, nil
 }
 
-func GetCatalogConnectionStringFromEnv() string {
-	return shared.GetPGConnectionString(GetCatalogPostgresConfigFromEnv())
+func GetCatalogConnectionStringFromEnv(ctx context.Context) string {
+	return shared.GetPGConnectionString(GetCatalogPostgresConfigFromEnv(ctx), "")
 }
 
-func GetCatalogPostgresConfigFromEnv() *protos.PostgresConfig {
+func GetCatalogPostgresConfigFromEnv(ctx context.Context) *protos.PostgresConfig {
 	return &protos.PostgresConfig{
 		Host:     PeerDBCatalogHost(),
 		Port:     uint32(PeerDBCatalogPort()),
 		User:     PeerDBCatalogUser(),
-		Password: PeerDBCatalogPassword(),
+		Password: PeerDBCatalogPassword(ctx),
 		Database: PeerDBCatalogDatabase(),
 	}
 }
diff --git a/flow/peerdbenv/config.go b/flow/peerdbenv/config.go
index 93a0de50a..ecd4b8427 100644
--- a/flow/peerdbenv/config.go
+++ b/flow/peerdbenv/config.go
@@ -1,6 +1,8 @@
 package peerdbenv
 
 import (
+	"context"
+	"encoding/json"
 	"fmt"
 	"log/slog"
 	"strings"
@@ -64,8 +66,8 @@ func PeerDBCatalogUser() string {
 }
 
 // PEERDB_CATALOG_PASSWORD
-func PeerDBCatalogPassword() string {
-	val, err := GetKMSDecryptedEnvString("PEERDB_CATALOG_PASSWORD", "")
+func PeerDBCatalogPassword(ctx context.Context) string {
+	val, err := GetKMSDecryptedEnvString(ctx, "PEERDB_CATALOG_PASSWORD", "")
 	if err != nil {
 		slog.Error("failed to decrypt PEERDB_CATALOG_PASSWORD", "error", err)
 		panic(err)
@@ -105,13 +107,24 @@ func PeerDBCurrentEncKeyID() string {
 	return GetEnvString("PEERDB_CURRENT_ENC_KEY_ID", "")
 }
 
-func PeerDBEncKeys() shared.PeerDBEncKeys {
-	return GetEnvJSON[shared.PeerDBEncKeys]("PEERDB_ENC_KEYS", nil)
+func PeerDBEncKeys(ctx context.Context) shared.PeerDBEncKeys {
+	val, err := GetKMSDecryptedEnvString(ctx, "PEERDB_ENC_KEYS", "")
+	if err != nil {
+		slog.Error("failed to decrypt PEERDB_ENC_KEYS", "error", err)
+		panic(err)
+	}
+
+	var result shared.PeerDBEncKeys
+	if err := json.Unmarshal([]byte(val), &result); err != nil {
+		return nil
+	}
+
+	return result
 }
 
-func PeerDBCurrentEncKey() (shared.PeerDBEncKey, error) {
+func PeerDBCurrentEncKey(ctx context.Context) (shared.PeerDBEncKey, error) {
 	encKeyID := PeerDBCurrentEncKeyID()
-	encKeys := PeerDBEncKeys()
+	encKeys := PeerDBEncKeys(ctx)
 	return encKeys.Get(encKeyID)
 }
 
@@ -128,10 +141,18 @@ func PeerDBTemporalEnableCertAuth() bool {
 	return strings.TrimSpace(cert) != ""
 }
 
-func PeerDBTemporalClientCert() ([]byte, error) {
-	return GetEnvBase64EncodedBytes("TEMPORAL_CLIENT_CERT", nil)
+func PeerDBTemporalClientCert(ctx context.Context) ([]byte, error) {
+	return GetEnvBase64EncodedBytes(ctx, "TEMPORAL_CLIENT_CERT", nil)
+}
+
+func PeerDBTemporalClientKey(ctx context.Context) ([]byte, error) {
+	return GetEnvBase64EncodedBytes(ctx, "TEMPORAL_CLIENT_KEY", nil)
+}
+
+func PeerDBGetIncidentIoUrl() string {
+	return GetEnvString("PEERDB_INCIDENTIO_URL", "")
 }
 
-func PeerDBTemporalClientKey() ([]byte, error) {
-	return GetEnvBase64EncodedBytes("TEMPORAL_CLIENT_KEY", nil)
+func PeerDBGetIncidentIoToken() string {
+	return GetEnvString("PEERDB_INCIDENTIO_TOKEN", "")
 }
diff --git a/flow/peerdbenv/crypt.go b/flow/peerdbenv/crypt.go
index 289298413..34e4d34b3 100644
--- a/flow/peerdbenv/crypt.go
+++ b/flow/peerdbenv/crypt.go
@@ -1,11 +1,15 @@
 package peerdbenv
 
-func Decrypt(encKeyID string, payload []byte) ([]byte, error) {
+import (
+	"context"
+)
+
+func Decrypt(ctx context.Context, encKeyID string, payload []byte) ([]byte, error) {
 	if encKeyID == "" {
 		return payload, nil
 	}
 
-	keys := PeerDBEncKeys()
+	keys := PeerDBEncKeys(ctx)
 	key, err := keys.Get(encKeyID)
 	if err != nil {
 		return nil, err
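For illustration only, a minimal sketch of a caller of the context-aware `Decrypt` above; the helper name, the import path `github.com/PeerDB-io/peer-flow/peerdbenv`, and the catalog-sourced inputs are assumptions, only `peerdbenv.Decrypt(ctx, ...)` comes from this changeset:

```go
package example

import (
	"context"
	"fmt"

	"github.com/PeerDB-io/peer-flow/peerdbenv"
)

// decryptPeerOptions is a hypothetical helper: encKeyID and encPeerOptions are
// assumed to have been read from a catalog row elsewhere.
func decryptPeerOptions(ctx context.Context, encKeyID string, encPeerOptions []byte) ([]byte, error) {
	// Decrypt now takes ctx, so the PEERDB_ENC_KEYS lookup (which may itself be
	// KMS-encrypted) runs under the caller's cancellation and deadline.
	opts, err := peerdbenv.Decrypt(ctx, encKeyID, encPeerOptions)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt peer options: %w", err)
	}
	return opts, nil
}
```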
diff --git a/flow/peerdbenv/dynamicconf.go b/flow/peerdbenv/dynamicconf.go
index 0c36f0c9c..28b01af43 100644
--- a/flow/peerdbenv/dynamicconf.go
+++ b/flow/peerdbenv/dynamicconf.go
@@ -164,6 +164,22 @@ DROP AGGREGATE PEERDB_EPHEMERAL_HEARTBEAT(float4);
 END;`,
 		ApplyMode:        protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
 		TargetForSetting: protos.DynconfTarget_CLICKHOUSE,
 	},
+	{
+		Name:             "PEERDB_INTERVAL_SINCE_LAST_NORMALIZE_THRESHOLD_MINUTES",
+		Description:      "Duration in minutes since last normalize to start alerting, 0 disables all alerting entirely",
+		DefaultValue:     "240",
+		ValueType:        protos.DynconfValueType_UINT,
+		ApplyMode:        protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
+		TargetForSetting: protos.DynconfTarget_ALL,
+	},
+	{
+		Name:             "PEERDB_APPLICATION_NAME_PER_MIRROR_NAME",
+		Description:      "Set Postgres application_name to have mirror name as suffix for each mirror",
+		DefaultValue:     "false",
+		ValueType:        protos.DynconfValueType_BOOL,
+		ApplyMode:        protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
+		TargetForSetting: protos.DynconfTarget_ALL,
+	},
 }
 var DynamicIndex = func() map[string]int {
@@ -329,3 +345,12 @@ func PeerDBClickHouseAWSS3BucketName(ctx context.Context, env map[string]string)
 func PeerDBQueueForceTopicCreation(ctx context.Context, env map[string]string) (bool, error) {
 	return dynamicConfBool(ctx, env, "PEERDB_QUEUE_FORCE_TOPIC_CREATION")
 }
+
+// PEERDB_INTERVAL_SINCE_LAST_NORMALIZE_THRESHOLD_MINUTES, 0 disables normalize gap alerting entirely
+func PeerDBIntervalSinceLastNormalizeThresholdMinutes(ctx context.Context, env map[string]string) (uint32, error) {
+	return dynamicConfUnsigned[uint32](ctx, env, "PEERDB_INTERVAL_SINCE_LAST_NORMALIZE_THRESHOLD_MINUTES")
+}
+
+func PeerDBApplicationNamePerMirrorName(ctx context.Context, env map[string]string) (bool, error) {
+	return dynamicConfBool(ctx, env, "PEERDB_APPLICATION_NAME_PER_MIRROR_NAME")
+}
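For illustration, a sketch of how the new normalize-gap threshold might be consulted before raising an alert; the helper name and the gating logic are assumptions, only `PeerDBIntervalSinceLastNormalizeThresholdMinutes` and the "0 disables alerting" convention come from the change above:

```go
package example

import (
	"context"
	"time"

	"github.com/PeerDB-io/peer-flow/peerdbenv"
)

// shouldAlertOnNormalizeLag is a hypothetical helper: it compares a measured
// gap since the last normalize against the dynamic threshold, treating 0 as
// "normalize gap alerting disabled".
func shouldAlertOnNormalizeLag(ctx context.Context, env map[string]string, sinceLastNormalize time.Duration) (bool, error) {
	thresholdMinutes, err := peerdbenv.PeerDBIntervalSinceLastNormalizeThresholdMinutes(ctx, env)
	if err != nil {
		return false, err
	}
	if thresholdMinutes == 0 {
		return false, nil // 0 disables normalize gap alerting entirely
	}
	return sinceLastNormalize >= time.Duration(thresholdMinutes)*time.Minute, nil
}
```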
diff --git a/flow/peerdbenv/env.go b/flow/peerdbenv/env.go
index 71c766ac5..f8814bc31 100644
--- a/flow/peerdbenv/env.go
+++ b/flow/peerdbenv/env.go
@@ -3,7 +3,6 @@ package peerdbenv
 import (
 	"context"
 	"encoding/base64"
-	"encoding/json"
 	"fmt"
 	"os"
 	"reflect"
@@ -64,20 +63,6 @@ func GetEnvString(name string, defaultValue string) string {
 	return val
 }
 
-func GetEnvJSON[T any](name string, defaultValue T) T {
-	val, ok := os.LookupEnv(name)
-	if !ok {
-		return defaultValue
-	}
-
-	var result T
-	if err := json.Unmarshal([]byte(val), &result); err != nil {
-		return defaultValue
-	}
-
-	return result
-}
-
 func decryptWithKMS(ctx context.Context, data []byte) ([]byte, error) {
 	keyID, exists := os.LookupEnv(KMSKeyIDEnvVar)
 	if !exists {
@@ -101,7 +86,7 @@ func decryptWithKMS(ctx context.Context, data []byte) ([]byte, error) {
 	return decrypted.Plaintext, nil
 }
 
-func GetEnvBase64EncodedBytes(name string, defaultValue []byte) ([]byte, error) {
+func GetEnvBase64EncodedBytes(ctx context.Context, name string, defaultValue []byte) ([]byte, error) {
 	val, ok := os.LookupEnv(name)
 	if !ok {
 		return defaultValue, nil
@@ -113,10 +98,10 @@ func GetEnvBase64EncodedBytes(name string, defaultValue []byte) ([]byte, error)
 		return nil, fmt.Errorf("failed to decode base64 value for %s: %w", name, err)
 	}
 
-	return decryptWithKMS(context.Background(), decoded)
+	return decryptWithKMS(ctx, decoded)
 }
 
-func GetKMSDecryptedEnvString(name string, defaultValue string) (string, error) {
+func GetKMSDecryptedEnvString(ctx context.Context, name string, defaultValue string) (string, error) {
 	val, ok := os.LookupEnv(name)
 	if !ok {
 		return defaultValue, nil
@@ -127,7 +112,7 @@ func GetKMSDecryptedEnvString(name string, defaultValue string) (string, error)
 		return val, nil
 	}
 
-	ret, err := GetEnvBase64EncodedBytes(name, []byte(defaultValue))
+	ret, err := GetEnvBase64EncodedBytes(ctx, name, []byte(defaultValue))
 	if err != nil {
 		return defaultValue, fmt.Errorf("failed to get base64 encoded bytes for %s: %w", name, err)
 	}
diff --git a/flow/shared/constants.go b/flow/shared/constants.go
index 860c541c2..2dc5a8a64 100644
--- a/flow/shared/constants.go
+++ b/flow/shared/constants.go
@@ -1,5 +1,9 @@
 package shared
 
+import (
+	"go.temporal.io/sdk/temporal"
+)
+
 type (
 	ContextKey  string
 	TaskQueueID string
@@ -16,7 +20,11 @@ const (
 	FlowStatusQuery = "q-flow-status"
 )
 
-const MirrorNameSearchAttribute = "MirrorName"
+var MirrorNameSearchAttribute = temporal.NewSearchAttributeKeyString("MirrorName")
+
+func NewSearchAttributes(mirrorName string) temporal.SearchAttributes {
+	return temporal.NewSearchAttributes(MirrorNameSearchAttribute.ValueSet(mirrorName))
+}
 
 const (
 	FlowNameKey ContextKey = "flowName"
diff --git a/flow/shared/postgres.go b/flow/shared/postgres.go
index f0f8e43d4..be3cf7d07 100644
--- a/flow/shared/postgres.go
+++ b/flow/shared/postgres.go
@@ -28,16 +28,22 @@ const (
 	POSTGRES_15 PGVersion = 150000
 )
 
-func GetPGConnectionString(pgConfig *protos.PostgresConfig) string {
+func GetPGConnectionString(pgConfig *protos.PostgresConfig, flowName string) string {
 	passwordEscaped := url.QueryEscape(pgConfig.Password)
+	applicationName := "peerdb"
+	if flowName != "" {
+		applicationName = "peerdb_" + flowName
+	}
+
 	// for a url like postgres://user:password@host:port/dbname
 	connString := fmt.Sprintf(
-		"postgres://%s:%s@%s:%d/%s?application_name=peerdb&client_encoding=UTF8",
+		"postgres://%s:%s@%s:%d/%s?application_name=%s&client_encoding=UTF8",
 		pgConfig.User,
 		passwordEscaped,
 		pgConfig.Host,
 		pgConfig.Port,
 		pgConfig.Database,
+		applicationName,
 	)
 	return connString
 }
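For illustration, the effect of the new `flowName` parameter on the generated DSN; the import paths are assumed from the repo layout and the config values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/PeerDB-io/peer-flow/generated/protos"
	"github.com/PeerDB-io/peer-flow/shared"
)

func main() {
	cfg := &protos.PostgresConfig{
		Host:     "localhost",
		Port:     5432,
		User:     "postgres",
		Password: "postgres",
		Database: "postgres",
	}
	// Empty flowName keeps the old application_name=peerdb; a mirror name is
	// appended as a suffix, e.g. application_name=peerdb_orders_mirror, which
	// makes individual mirrors visible in pg_stat_activity.
	fmt.Println(shared.GetPGConnectionString(cfg, ""))
	fmt.Println(shared.GetPGConnectionString(cfg, "orders_mirror"))
}
```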
diff --git a/flow/shared/schema_helpers.go b/flow/shared/schema_helpers.go
index 08babdd4c..7f48d3cbb 100644
--- a/flow/shared/schema_helpers.go
+++ b/flow/shared/schema_helpers.go
@@ -50,14 +50,19 @@ func BuildProcessedSchemaMapping(
 		dstTableName = mapping.DestinationTableIdentifier
 		if len(mapping.Exclude) != 0 {
 			columns := make([]*protos.FieldDescription, 0, len(tableSchema.Columns))
+			pkeyColumns := make([]string, 0, len(tableSchema.PrimaryKeyColumns))
 			for _, column := range tableSchema.Columns {
 				if !slices.Contains(mapping.Exclude, column.Name) {
 					columns = append(columns, column)
 				}
+				if slices.Contains(tableSchema.PrimaryKeyColumns, column.Name) &&
+					!slices.Contains(mapping.Exclude, column.Name) {
+					pkeyColumns = append(pkeyColumns, column.Name)
+				}
 			}
 			tableSchema = &protos.TableSchema{
 				TableIdentifier:       tableSchema.TableIdentifier,
-				PrimaryKeyColumns:     tableSchema.PrimaryKeyColumns,
+				PrimaryKeyColumns:     pkeyColumns,
 				IsReplicaIdentityFull: tableSchema.IsReplicaIdentityFull,
 				NullableEnabled:       tableSchema.NullableEnabled,
 				System:                tableSchema.System,
diff --git a/flow/shared/telemetry/incidentio_message_sender.go b/flow/shared/telemetry/incidentio_message_sender.go
new file mode 100644
index 000000000..7af302ddc
--- /dev/null
+++ b/flow/shared/telemetry/incidentio_message_sender.go
@@ -0,0 +1,129 @@
+package telemetry
+
+import (
+	"bytes"
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"time"
+
+	"go.temporal.io/sdk/activity"
+)
+
+type IncidentIoAlert struct {
+	Metadata         map[string]string `json:"metadata"`
+	Title            string            `json:"title"`
+	Description      string            `json:"description"`
+	DeduplicationKey string            `json:"deduplication_key"`
+	Status           string            `json:"status"`
+}
+
+type IncidentIoResponse struct {
+	Status           string `json:"status"`
+	Message          string `json:"message"`
+	DeduplicationKey string `json:"deduplication_key"`
+}
+
+type IncidentIoMessageSender struct {
+	Sender
+}
+
+type IncidentIoMessageSenderImpl struct {
+	http   *http.Client
+	config IncidentIoMessageSenderConfig
+}
+
+type IncidentIoMessageSenderConfig struct {
+	URL   string
+	Token string
+}
+
+func (i *IncidentIoMessageSenderImpl) SendMessage(
+	ctx context.Context,
+	subject string,
+	body string,
+	attributes Attributes,
+) (*string, error) {
+	activityInfo := activity.Info{}
+	if activity.IsActivity(ctx) {
+		activityInfo = activity.GetInfo(ctx)
+	}
+
+	deduplicationString := strings.Join([]string{
+		"deployID", attributes.DeploymentUID,
+		"subject", subject,
+		"runID", activityInfo.WorkflowExecution.RunID,
+		"activityName", activityInfo.ActivityType.Name,
+	}, " || ")
+	h := sha256.New()
+	h.Write([]byte(deduplicationString))
+	deduplicationHash := hex.EncodeToString(h.Sum(nil))
+
+	level := ResolveIncidentIoLevels(attributes.Level)
+
+	alert := IncidentIoAlert{
+		Title:            subject,
+		Description:      body,
+		DeduplicationKey: deduplicationHash,
+		Status:           "firing",
+		Metadata: map[string]string{
+			"alias":          deduplicationHash,
+			"deploymentUUID": attributes.DeploymentUID,
+			"entity":         attributes.DeploymentUID,
+			"level":          string(level),
+			"tags":           strings.Join(attributes.Tags, ","),
+			"type":           attributes.Type,
+		},
+	}
+
+	alertJSON, err := json.Marshal(alert)
+	if err != nil {
+		return nil, fmt.Errorf("error serializing alert %w", err)
+	}
+
+	req, err := http.NewRequest("POST", i.config.URL, bytes.NewBuffer(alertJSON))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add("Content-Type", "application/json")
+	req.Header.Add("Authorization", "Bearer "+i.config.Token)
+
+	resp, err := i.http.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("incident.io request failed: %w", err)
+	}
+	defer resp.Body.Close()
+
+	respBody, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("reading incident.io response body failed: %w", err)
+	}
+
+	if resp.StatusCode != http.StatusAccepted {
+		return nil, fmt.Errorf("unexpected response from incident.io. status: %d. body: %s", resp.StatusCode, respBody)
+	}
+
+	var incidentResponse IncidentIoResponse
+	err = json.Unmarshal(respBody, &incidentResponse)
+	if err != nil {
+		return nil, fmt.Errorf("deserializing incident.io failed: %w", err)
+	}
+
+	return &incidentResponse.Status, nil
+}
+
+func NewIncidentIoMessageSender(_ context.Context, config IncidentIoMessageSenderConfig) (Sender, error) {
+	client := &http.Client{
+		Timeout: time.Second * 5,
+	}
+
+	return &IncidentIoMessageSenderImpl{
+		config: config,
+		http:   client,
+	}, nil
+}
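For illustration, a sketch of driving the new sender directly, assuming the `Sender` interface exposes `SendMessage` as implemented above and that the package lives at `github.com/PeerDB-io/peer-flow/shared/telemetry`; the endpoint URL, token, and attribute values are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/PeerDB-io/peer-flow/shared/telemetry"
)

func main() {
	ctx := context.Background()
	sender, err := telemetry.NewIncidentIoMessageSender(ctx, telemetry.IncidentIoMessageSenderConfig{
		URL:   "https://api.incident.io/v2/alert_events/http/ALERT_SOURCE_ID", // placeholder endpoint
		Token: "inc_live_placeholder_token",                                   // placeholder token
	})
	if err != nil {
		log.Fatal(err)
	}

	// SendMessage returns the status reported by incident.io when the alert is
	// accepted (HTTP 202); any other response surfaces as an error.
	status, err := sender.SendMessage(ctx,
		"normalize lag on mirror orders_mirror",
		"interval since last normalize exceeded the configured threshold",
		telemetry.Attributes{
			Level:         telemetry.WARN,
			DeploymentUID: "00000000-0000-0000-0000-000000000000", // placeholder
			Tags:          []string{"flowName:orders_mirror"},     // placeholder tag format
			Type:          "flow",
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("incident.io status:", *status)
}
```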
body: %s", resp.StatusCode, respBody) + } + + var incidentResponse IncidentIoResponse + err = json.Unmarshal(respBody, &incidentResponse) + if err != nil { + return nil, fmt.Errorf("deserializing incident.io failed: %w", err) + } + + return &incidentResponse.Status, nil +} + +func NewIncidentIoMessageSender(_ context.Context, config IncidentIoMessageSenderConfig) (Sender, error) { + client := &http.Client{ + Timeout: time.Second * 5, + } + + return &IncidentIoMessageSenderImpl{ + config: config, + http: client, + }, nil +} diff --git a/flow/shared/telemetry/incidentio_message_sender_test.go b/flow/shared/telemetry/incidentio_message_sender_test.go new file mode 100644 index 000000000..238717b3c --- /dev/null +++ b/flow/shared/telemetry/incidentio_message_sender_test.go @@ -0,0 +1,187 @@ +package telemetry + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestIncidentIoMessageSenderImpl_SendMessage(t *testing.T) { + tests := []struct { + serverResponse IncidentIoResponse + name string + subject string + body string + attributes Attributes + serverResponseCode int + expectError bool + }{ + { + name: "successful send with info alert", + attributes: Attributes{ + DeploymentUID: uuid.New().String(), + Level: INFO, + Tags: []string{"tag1", "tag2"}, + Type: "incident", + }, + subject: "Test Incident", + body: "This is a test incident", + serverResponse: IncidentIoResponse{Status: "success", Message: "Event accepted for processing", DeduplicationKey: "stonik"}, + serverResponseCode: http.StatusAccepted, + expectError: false, + }, + { + name: "successful send with warn alert", + attributes: Attributes{ + DeploymentUID: uuid.New().String(), + Level: WARN, + Tags: []string{"tag1", "tag2"}, + Type: "incident", + }, + subject: "Test Incident", + body: "This is a test incident", + serverResponse: IncidentIoResponse{Status: "success", Message: "Event accepted for processing", DeduplicationKey: "stonik"}, + serverResponseCode: http.StatusAccepted, + expectError: false, + }, + { + name: "successful send with error alert", + attributes: Attributes{ + DeploymentUID: uuid.New().String(), + Level: ERROR, + Tags: []string{"tag1", "tag2"}, + Type: "incident", + }, + subject: "Test Incident", + body: "This is a test incident", + serverResponse: IncidentIoResponse{Status: "success", Message: "Event accepted for processing", DeduplicationKey: "stonik"}, + serverResponseCode: http.StatusAccepted, + expectError: false, + }, + { + name: "successful send with critical alert", + attributes: Attributes{ + DeploymentUID: uuid.New().String(), + Level: CRITICAL, + Tags: []string{"tag1", "tag2"}, + Type: "incident", + }, + subject: "Test Incident", + body: "This is a test incident", + serverResponse: IncidentIoResponse{Status: "success", Message: "Event accepted for processing", DeduplicationKey: "stonik"}, + serverResponseCode: http.StatusAccepted, + expectError: false, + }, + { + name: "unauthenticated", + attributes: Attributes{ + DeploymentUID: uuid.New().String(), + Level: "firing", + Tags: []string{"tag1", "tag2"}, + Type: "incident", + }, + subject: "Test Incident", + body: "This is a test incident", + serverResponse: IncidentIoResponse{Status: "authentication_error"}, + serverResponseCode: http.StatusUnauthorized, + expectError: true, + }, + { + name: "not found", + attributes: Attributes{ + DeploymentUID: uuid.New().String(), + Level: 
"firing", + Tags: []string{"tag1", "tag2"}, + Type: "incident", + }, + subject: "Test Incident", + body: "This is a test incident", + serverResponse: IncidentIoResponse{Status: "not_found"}, + serverResponseCode: http.StatusNotFound, + expectError: true, + }, + { + name: "server error", + attributes: Attributes{ + DeploymentUID: uuid.New().String(), + Level: "firing", + Tags: []string{"tag1", "tag2"}, + Type: "incident", + }, + subject: "Test Incident", + body: "This is a test incident", + serverResponse: IncidentIoResponse{Status: "error", Message: "Failed to create incident", DeduplicationKey: "stonik"}, + serverResponseCode: http.StatusInternalServerError, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeIncidentIoServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, "application/json", r.Header.Get("Content-Type")) //nolint:testifylint + require.Equal(t, "Bearer test-token", r.Header.Get("Authorization")) //nolint:testifylint + + var alert IncidentIoAlert + bodyBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) //nolint:testifylint + + err = json.Unmarshal(bodyBytes, &alert) + require.NoError(t, err) //nolint:testifylint + deduplicationString := strings.Join([]string{ + "deployID", tt.attributes.DeploymentUID, + "subject", tt.subject, + "runID", "", + "activityName", "", + }, " || ") + + h := sha256.New() + h.Write([]byte(deduplicationString)) + deduplicationHash := hex.EncodeToString(h.Sum(nil)) + + // Check deduplication hash was generated correctly + require.Equal(t, deduplicationHash, alert.DeduplicationKey) //nolint:testifylint + + // Check level was successfully mapped + require.Equal(t, string(ResolveIncidentIoLevels(tt.attributes.Level)), alert.Metadata["level"]) //nolint:testifylint + + // mock response + w.WriteHeader(tt.serverResponseCode) + err = json.NewEncoder(w).Encode(tt.serverResponse) + if err != nil { + require.Fail(t, "failed to mock response") //nolint:testifylint + } + })) + defer fakeIncidentIoServer.Close() + + config := IncidentIoMessageSenderConfig{ + URL: fakeIncidentIoServer.URL, + Token: "test-token", + } + sender := &IncidentIoMessageSenderImpl{ + http: &http.Client{Timeout: time.Second * 5}, + config: config, + } + + ctx := context.Background() + status, err := sender.SendMessage(ctx, tt.subject, tt.body, tt.attributes) + + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.serverResponse.Status, *status) + } + }) + } +} diff --git a/flow/shared/telemetry/interface.go b/flow/shared/telemetry/interface.go index 4287a7661..fc186b0c7 100644 --- a/flow/shared/telemetry/interface.go +++ b/flow/shared/telemetry/interface.go @@ -15,11 +15,35 @@ type Attributes struct { Tags []string } -type Level string +type ( + Level string + IncidentIoLevel string +) const ( INFO Level = "INFO" WARN Level = "WARN" ERROR Level = "ERROR" CRITICAL Level = "CRITICAL" + + // ClickHouse (incident.io) mapped alert levels + IncMedium IncidentIoLevel = "medium" + IncWarning IncidentIoLevel = "warning" + IncHigh IncidentIoLevel = "high" + IncCritical IncidentIoLevel = "critical" ) + +func ResolveIncidentIoLevels(level Level) IncidentIoLevel { + switch level { + case INFO: + return IncMedium + case WARN: + return IncWarning + case ERROR: + return IncHigh + case CRITICAL: + return IncCritical + default: + return IncMedium + } +} diff --git a/flow/shared/telemetry/sns_message_sender.go 
diff --git a/flow/shared/telemetry/sns_message_sender.go b/flow/shared/telemetry/sns_message_sender.go
index cf44a5f7a..35c4075c4 100644
--- a/flow/shared/telemetry/sns_message_sender.go
+++ b/flow/shared/telemetry/sns_message_sender.go
@@ -105,13 +105,6 @@ func NewSNSMessageSenderWithNewClient(ctx context.Context, config *SNSMessageSen
 	}, nil
 }
 
-func NewSNSMessageSender(client *sns.Client, config *SNSMessageSenderConfig) SNSMessageSender {
-	return &SNSMessageSenderImpl{
-		client: client,
-		topic:  config.Topic,
-	}
-}
-
 func newSnsClient(ctx context.Context, region *string) (*sns.Client, error) {
 	sdkConfig, err := aws_common.LoadSdkConfig(ctx, region)
 	if err != nil {
diff --git a/flow/workflows/cdc_flow.go b/flow/workflows/cdc_flow.go
index a45ec2879..72e37b01f 100644
--- a/flow/workflows/cdc_flow.go
+++ b/flow/workflows/cdc_flow.go
@@ -87,7 +87,7 @@ func processCDCFlowConfigUpdate(
 	logger log.Logger,
 	cfg *protos.FlowConnectionConfigs,
 	state *CDCFlowWorkflowState,
-	mirrorNameSearch map[string]interface{},
+	mirrorNameSearch temporal.SearchAttributes,
 ) error {
 	flowConfigUpdate := state.FlowConfigUpdate
 
@@ -139,7 +139,7 @@ func processTableAdditions(
 	logger log.Logger,
 	cfg *protos.FlowConnectionConfigs,
 	state *CDCFlowWorkflowState,
-	mirrorNameSearch map[string]interface{},
+	mirrorNameSearch temporal.SearchAttributes,
 ) error {
 	flowConfigUpdate := state.FlowConfigUpdate
 	if len(flowConfigUpdate.AdditionalTables) == 0 {
@@ -181,8 +181,8 @@ func processTableAdditions(
 		RetryPolicy: &temporal.RetryPolicy{
 			MaximumAttempts: 20,
 		},
-		SearchAttributes:    mirrorNameSearch,
-		WaitForCancellation: true,
+		TypedSearchAttributes: mirrorNameSearch,
+		WaitForCancellation:   true,
 	}
 	childAdditionalTablesCDCFlowCtx := workflow.WithChildOptions(ctx, childAdditionalTablesCDCFlowOpts)
 	childAdditionalTablesCDCFlowFuture := workflow.ExecuteChildWorkflow(
@@ -329,9 +329,7 @@ func CDCFlowWorkflow(
 		return state, fmt.Errorf("failed to set `%s` query handler: %w", shared.FlowStatusQuery, err)
 	}
 
-	mirrorNameSearch := map[string]interface{}{
-		shared.MirrorNameSearchAttribute: cfg.FlowJobName,
-	}
+	mirrorNameSearch := shared.NewSearchAttributes(cfg.FlowJobName)
 
 	var syncCountLimit int
 	if state.ActiveSignal == model.PauseSignal {
@@ -416,8 +414,8 @@ func CDCFlowWorkflow(
 		RetryPolicy: &temporal.RetryPolicy{
 			MaximumAttempts: 20,
 		},
-		SearchAttributes:    mirrorNameSearch,
-		WaitForCancellation: true,
+		TypedSearchAttributes: mirrorNameSearch,
+		WaitForCancellation:   true,
 	}
 	setupFlowCtx := workflow.WithChildOptions(ctx, childSetupFlowOpts)
 	setupFlowFuture := workflow.ExecuteChildWorkflow(setupFlowCtx, SetupFlowWorkflow, cfg)
@@ -438,9 +436,9 @@ func CDCFlowWorkflow(
 		RetryPolicy: &temporal.RetryPolicy{
 			MaximumAttempts: 20,
 		},
-		TaskQueue:           taskQueue,
-		SearchAttributes:    mirrorNameSearch,
-		WaitForCancellation: true,
+		TaskQueue:             taskQueue,
+		TypedSearchAttributes: mirrorNameSearch,
+		WaitForCancellation:   true,
 	}
 	snapshotFlowCtx := workflow.WithChildOptions(ctx, childSnapshotFlowOpts)
 	snapshotFlowFuture := workflow.ExecuteChildWorkflow(
@@ -502,8 +500,8 @@ func CDCFlowWorkflow(
 		RetryPolicy: &temporal.RetryPolicy{
 			MaximumAttempts: 20,
 		},
-		SearchAttributes:    mirrorNameSearch,
-		WaitForCancellation: true,
+		TypedSearchAttributes: mirrorNameSearch,
+		WaitForCancellation:   true,
 	}
 
 	syncCtx := workflow.WithChildOptions(ctx, syncFlowOpts)
@@ -513,8 +511,8 @@ func CDCFlowWorkflow(
 		RetryPolicy: &temporal.RetryPolicy{
 			MaximumAttempts: 20,
 		},
-		SearchAttributes:    mirrorNameSearch,
-		WaitForCancellation: true,
+		TypedSearchAttributes: mirrorNameSearch,
+		WaitForCancellation:   true,
 	}
 
 	normCtx := workflow.WithChildOptions(ctx, normalizeFlowOpts)
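For illustration, the client-side counterpart of the typed attribute: `shared.NewSearchAttributes` can also populate `TypedSearchAttributes` when starting a top-level workflow. A sketch assuming the Temporal Go SDK's typed search attribute API (v1.26+); the workflow ID, task queue, and mirror name are placeholders:

```go
package main

import (
	"log"

	"go.temporal.io/sdk/client"

	"github.com/PeerDB-io/peer-flow/shared"
)

func main() {
	c, err := client.Dial(client.Options{}) // assumes a reachable Temporal frontend
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	opts := client.StartWorkflowOptions{
		ID:                    "cdc-orders_mirror",    // placeholder workflow ID
		TaskQueue:             "peer-flow-task-queue", // placeholder task queue
		TypedSearchAttributes: shared.NewSearchAttributes("orders_mirror"),
	}
	// opts would be handed to c.ExecuteWorkflow together with the workflow type
	// and its arguments; MirrorName is now attached through the typed key rather
	// than the untyped map[string]interface{} form.
	_ = opts
}
```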
diff --git a/flow/workflows/drop_flow.go b/flow/workflows/drop_flow.go
index 84de56d53..fe822dd66 100644
--- a/flow/workflows/drop_flow.go
+++ b/flow/workflows/drop_flow.go
@@ -19,8 +19,7 @@ func DropFlowWorkflow(ctx workflow.Context, config *protos.DropFlowInput) error
 		StartToCloseTimeout: 5 * time.Minute,
 	})
 	ctx = workflow.WithValue(ctx, shared.FlowNameKey, config.FlowJobName)
-	ctx = workflow.WithDataConverter(ctx,
-		converter.NewCompositeDataConverter(converter.NewJSONPayloadConverter()))
+	ctx = workflow.WithDataConverter(ctx, converter.NewCompositeDataConverter(converter.NewJSONPayloadConverter()))
 
 	dropStatsCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{
 		StartToCloseTimeout: 5 * time.Minute,
diff --git a/flow/workflows/qrep_flow.go b/flow/workflows/qrep_flow.go
index 9363b30b6..c7348eefa 100644
--- a/flow/workflows/qrep_flow.go
+++ b/flow/workflows/qrep_flow.go
@@ -239,9 +239,7 @@ func (q *QRepFlowExecution) startChildWorkflow(
 		RetryPolicy: &temporal.RetryPolicy{
 			MaximumAttempts: 20,
 		},
-		SearchAttributes: map[string]interface{}{
-			shared.MirrorNameSearchAttribute: q.config.FlowJobName,
-		},
+		TypedSearchAttributes: shared.NewSearchAttributes(q.config.FlowJobName),
 	})
 
 	return workflow.ExecuteChildWorkflow(partFlowCtx, QRepPartitionWorkflow, q.config, partitions, q.runUUID)
@@ -326,10 +324,8 @@ func (q *QRepFlowExecution) waitForNewRows(
 	lastPartition *protos.QRepPartition,
 ) error {
 	ctx = workflow.WithChildOptions(ctx, workflow.ChildWorkflowOptions{
-		ParentClosePolicy: enums.PARENT_CLOSE_POLICY_REQUEST_CANCEL,
-		SearchAttributes: map[string]interface{}{
-			shared.MirrorNameSearchAttribute: q.config.FlowJobName,
-		},
+		ParentClosePolicy:     enums.PARENT_CLOSE_POLICY_REQUEST_CANCEL,
+		TypedSearchAttributes: shared.NewSearchAttributes(q.config.FlowJobName),
 	})
 
 	future := workflow.ExecuteChildWorkflow(ctx, QRepWaitForNewRowsWorkflow, q.config, lastPartition)
diff --git a/nexus/Cargo.lock b/nexus/Cargo.lock
index 90075a856..e55e5c117 100644
--- a/nexus/Cargo.lock
+++ b/nexus/Cargo.lock
@@ -144,9 +144,9 @@ dependencies = [
 
 [[package]]
 name = "anyhow"
-version = "1.0.89"
+version = "1.0.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6"
+checksum = "37bf3594c4c988a53154954629820791dde498571819ae4ca50ca811e060cc95"
 
 [[package]]
 name = "ar"
@@ -168,9 +168,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
 
 [[package]]
 name = "async-compression"
-version = "0.4.15"
+version = "0.4.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e26a9844c659a2a293d239c7910b752f8487fe122c6c8bd1659bf85a6507c302"
+checksum = "0cb8f1d480b0ea3783ab015936d2a55c87e219676f0c0b7dec61494043f21857"
 dependencies = [
  "flate2",
  "futures-core",
@@ -187,7 +187,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.79",
+ "syn 2.0.82",
 ]
 
 [[package]]
@@ -209,7 +209,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.79",
+ "syn 2.0.82",
 ]
 
 [[package]]
@@ -220,7 +220,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.79",
+ "syn 2.0.82",
 ]
 
 [[package]]
@@ -481,7 +481,7 @@ dependencies = [
  "http-body 0.4.6",
  "http-body 1.0.1",
  "httparse",
- "hyper 0.14.30",
+ "hyper
0.14.31", "hyper-rustls 0.24.2", "once_cell", "pin-project-lite", @@ -672,7 +672,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -731,7 +731,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", "syn_derive", ] @@ -865,9 +865,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.30" +version = "1.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16803a61b81d9eabb7eae2588776c4c1e584b738ede45fdbb4c972cec1e9945" +checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" dependencies = [ "jobserver", "libc", @@ -987,7 +987,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -1172,10 +1172,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", + "der_derive", + "flagset", "pem-rfc7468", "zeroize", ] +[[package]] +name = "der_derive" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.82", +] + [[package]] name = "deranged" version = "0.3.11" @@ -1194,7 +1207,7 @@ checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -1310,6 +1323,12 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +[[package]] +name = "flagset" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3ea1ec5f8307826a5b71094dd91fc04d4ae75d5709b20ad351c7fb4815c86ec" + [[package]] name = "flate2" version = "1.0.34" @@ -1415,7 +1434,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -1458,7 +1477,7 @@ dependencies = [ "async-trait", "dyn-clone", "flate2", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "log", "prost", @@ -1687,9 +1706,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.30" +version = "0.14.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" dependencies = [ "bytes", "futures-channel", @@ -1711,9 +1730,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", "futures-channel", @@ -1738,7 +1757,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.30", + "hyper 0.14.31", "log", "rustls 0.21.12", "rustls-native-certs 0.6.3", @@ -1754,9 +1773,9 @@ checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", 
- "rustls 0.23.14", + "rustls 0.23.15", "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", @@ -1771,7 +1790,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "pin-project-lite", "tokio", @@ -1789,7 +1808,7 @@ dependencies = [ "futures-util", "http 1.1.0", "http-body 1.0.1", - "hyper 1.4.1", + "hyper 1.5.0", "pin-project-lite", "socket2", "tokio", @@ -1940,9 +1959,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.159" +version = "0.2.161" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" [[package]] name = "libloading" @@ -2686,7 +2705,7 @@ checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -2776,7 +2795,7 @@ version = "0.1.0" dependencies = [ "anyhow", "pt", - "rustls 0.23.14", + "rustls 0.23.15", "tokio", "tokio-postgres", "tokio-postgres-rustls", @@ -2845,12 +2864,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.22" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" +checksum = "910d41a655dac3b764f1ade94821093d3610248694320cd072303a8eedcf221d" dependencies = [ "proc-macro2", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -2887,9 +2906,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.87" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" +checksum = "7c3a7fc5db1e57d5a779a352c8cdb57b29aa4c40cc69c3a68a7fedc815fbf2f9" dependencies = [ "unicode-ident", ] @@ -2923,7 +2942,7 @@ dependencies = [ "pulldown-cmark", "pulldown-cmark-to-cmark", "regex", - "syn 2.0.79", + "syn 2.0.82", "tempfile", ] @@ -2937,7 +2956,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -2987,9 +3006,9 @@ dependencies = [ [[package]] name = "pulldown-cmark" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "666f0f59e259aea2d72e6012290c09877a780935cc3c18b1ceded41f3890d59c" +checksum = "f86ba2052aebccc42cbbb3ed234b8b13ce76f75c3551a303cb2bcffcff12bb14" dependencies = [ "bitflags", "memchr", @@ -3022,7 +3041,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.14", + "rustls 0.23.15", "socket2", "thiserror", "tokio", @@ -3039,7 +3058,7 @@ dependencies = [ "rand", "ring", "rustc-hash 2.0.0", - "rustls 0.23.14", + "rustls 0.23.15", "slab", "thiserror", "tinyvec", @@ -3173,7 +3192,7 @@ dependencies = [ "quote", "refinery-core", "regex", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -3249,7 +3268,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-rustls 0.27.3", "hyper-util", "ipnet", @@ -3260,7 +3279,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.14", + "rustls 0.23.15", "rustls-pemfile 2.2.0", "rustls-pki-types", "serde", @@ -3428,9 +3447,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.14" +version = 
"0.23.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" +checksum = "5fbb44d7acc4e873d613422379f69f237a1b141928c02f6bc6ccfddddc2d7993" dependencies = [ "log", "once_cell", @@ -3486,9 +3505,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" [[package]] name = "rustls-webpki" @@ -3513,9 +3532,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "ryu" @@ -3653,14 +3672,14 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "itoa", "memchr", @@ -3844,7 +3863,7 @@ source = "git+https://github.com/peerdb-io/sqlparser-rs.git?branch=main#8c341b80 dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -3899,9 +3918,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.79" +version = "2.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "83540f837a8afc019423a8edb95b52a8effe46957ee402287f4292fae35be021" dependencies = [ "proc-macro2", "quote", @@ -3917,7 +3936,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -3981,7 +4000,7 @@ checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -4042,6 +4061,27 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" +[[package]] +name = "tls_codec" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e78c9c330f8c85b2bae7c8368f2739157db9991235123aa1b15ef9502bfb6a" +dependencies = [ + "tls_codec_derive", + "zeroize", +] + +[[package]] +name = "tls_codec_derive" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9ef545650e79f30233c0003bcc2504d7efac6dad25fca40744de773fe2049c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.82", +] + [[package]] name = "tokio" version = "1.40.0" @@ -4068,7 +4108,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -4099,16 +4139,17 @@ dependencies = [ [[package]] name = "tokio-postgres-rustls" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"04fb792ccd6bbcd4bba408eb8a292f70fc4a3589e5d793626f45190e6454b6ab" +checksum = "27d684bad428a0f2481f42241f821db42c54e2dc81d8c00db8536c506b0a0144" dependencies = [ + "const-oid", "ring", - "rustls 0.23.14", + "rustls 0.23.15", "tokio", "tokio-postgres", "tokio-rustls 0.26.0", - "x509-certificate", + "x509-cert", ] [[package]] @@ -4138,7 +4179,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.14", + "rustls 0.23.15", "rustls-pki-types", "tokio", ] @@ -4216,7 +4257,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-timeout", "hyper-util", "percent-encoding", @@ -4245,7 +4286,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -4351,7 +4392,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -4418,12 +4459,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "unicase" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" -dependencies = [ - "version_check", -] +checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" [[package]] name = "unicode-bidi" @@ -4485,7 +4523,7 @@ dependencies = [ "flate2", "log", "once_cell", - "rustls 0.23.14", + "rustls 0.23.15", "rustls-pki-types", "serde", "serde_json", @@ -4518,9 +4556,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" dependencies = [ "getrandom", "serde", @@ -4615,7 +4653,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", "wasm-bindgen-shared", ] @@ -4649,7 +4687,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4936,6 +4974,18 @@ dependencies = [ "tap", ] +[[package]] +name = "x509-cert" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1301e935010a701ae5f8655edc0ad17c44bad3ac5ce8c39185f75453b720ae94" +dependencies = [ + "const-oid", + "der", + "spki", + "tls_codec", +] + [[package]] name = "x509-certificate" version = "0.23.1" @@ -4982,12 +5032,12 @@ dependencies = [ "futures", "http 1.1.0", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-rustls 0.27.3", "hyper-util", "log", "percent-encoding", - "rustls 0.23.14", + "rustls 0.23.15", "rustls-pemfile 2.2.0", "seahash", "serde", @@ -5015,7 +5065,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -5035,7 +5085,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] diff --git a/nexus/postgres-connection/Cargo.toml 
b/nexus/postgres-connection/Cargo.toml index cb8647201..93da58109 100644 --- a/nexus/postgres-connection/Cargo.toml +++ b/nexus/postgres-connection/Cargo.toml @@ -11,6 +11,6 @@ pt = { path = "../pt" } rustls = { version = "0.23", default-features = false, features = ["ring"] } urlencoding = "2" tokio-postgres = "0.7.2" -tokio-postgres-rustls = "0.12" +tokio-postgres-rustls = "0.13" tokio = { version = "1", features = ["full"] } tracing.workspace = true diff --git a/ui/app/mirrors/create/cdc/columnbox.tsx b/ui/app/mirrors/create/cdc/columnbox.tsx index dbc417fdd..066b1cdfc 100644 --- a/ui/app/mirrors/create/cdc/columnbox.tsx +++ b/ui/app/mirrors/create/cdc/columnbox.tsx @@ -3,7 +3,6 @@ import { TableMapRow } from '@/app/dto/MirrorsDTO'; import { Checkbox } from '@/lib/Checkbox'; import { Label } from '@/lib/Label'; import { RowWithCheckbox } from '@/lib/Layout'; -import { TextField } from '@/lib/TextField'; import { Dispatch, SetStateAction } from 'react'; interface ColumnProps { @@ -20,7 +19,6 @@ export default function ColumnBox({ rows, setRows, disabled, - showOrdering, }: ColumnProps) { const handleColumnExclusion = (column: string, include: boolean) => { const source = tableRow.source; @@ -41,36 +39,15 @@ export default function ColumnBox({ setRows(currRows); } }; - const handleColumnOrdering = (column: string, ordering: number) => { - const source = tableRow.source; - const currRows = [...rows]; - const rowIndex = currRows.findIndex((row) => row.source === source); - if (rowIndex !== -1) { - const sourceRow = currRows[rowIndex]; - const columns = [...sourceRow.columns]; - const colIndex = columns.findIndex((col) => col.sourceName === column); - if (colIndex !== -1) { - columns[colIndex] = { ...columns[colIndex], ordering }; - } else { - columns.push({ - sourceName: column, - destinationName: '', - destinationType: '', - nullableEnabled: false, - ordering, - }); - } - currRows[rowIndex] = { - ...sourceRow, - columns, - }; - setRows(currRows); - } - }; return columns.map((column) => { const [columnName, columnType, isPkeyStr] = column.split(':'); const isPkey = isPkeyStr === 'true'; + const partOfOrderingKey = rows + .find((row) => row.source == tableRow.source) + ?.columns.some( + (col) => col.sourceName === columnName && col.ordering <= 0 + ); return ( {columnType}
- {showOrdering && !disabled && !isPkey && ( - col.sourceName === columnName) - ?.ordering ?? 0 - } - onChange={(e: React.ChangeEvent) => - handleColumnOrdering(columnName, +e.target.value) - } - /> - )} } action={ handleColumnExclusion(columnName, state) diff --git a/ui/app/mirrors/create/cdc/guide.tsx b/ui/app/mirrors/create/cdc/guide.tsx index 5855af697..d0303e570 100644 --- a/ui/app/mirrors/create/cdc/guide.tsx +++ b/ui/app/mirrors/create/cdc/guide.tsx @@ -14,14 +14,15 @@ export default function GuideForDestinationSetup({ case 'BIGQUERY': return 'https://docs.peerdb.io/connect/bigquery'; case 'RDS POSTGRESQL': + return 'https://docs.peerdb.io/connect/postgres/rds_postgres'; case 'POSTGRESQL': - return 'https://docs.peerdb.io/connect/rds_postgres'; + return 'https://docs.peerdb.io/connect/postgres/generic_postgres'; case 'AZURE FLEXIBLE POSTGRESQL': - return 'https://docs.peerdb.io/connect/azure_flexible_server_postgres'; + return 'https://docs.peerdb.io/connect/postgres/azure_flexible_server_postgres'; case 'GOOGLE CLOUD POSTGRESQL': - return 'https://docs.peerdb.io/connect/cloudsql_postgres'; + return 'https://docs.peerdb.io/connect/postgres/cloudsql_postgres'; case 'CRUNCHY POSTGRES': - return 'https://docs.peerdb.io/connect/crunchy_bridge'; + return 'https://docs.peerdb.io/connect/postgres/crunchy_bridge'; case 'NEON': return 'https://docs.peerdb.io/connect/postgres/neon_postgres'; case 'CONFLUENT': diff --git a/ui/app/mirrors/create/cdc/schemabox.tsx b/ui/app/mirrors/create/cdc/schemabox.tsx index 0aa8252a9..04206854d 100644 --- a/ui/app/mirrors/create/cdc/schemabox.tsx +++ b/ui/app/mirrors/create/cdc/schemabox.tsx @@ -37,6 +37,7 @@ import { import { Divider } from '@tremor/react'; import ReactSelect from 'react-select'; +import SelectSortingKeys from './sortingkey'; interface SchemaBoxProps { sourcePeer: string; @@ -69,7 +70,6 @@ export default function SchemaBox({ const [tableQuery, setTableQuery] = useState(''); const [defaultTargetSchema, setDefaultTargetSchema] = useState(schema); - const searchedTables = useMemo(() => { const tableQueryLower = tableQuery.toLowerCase(); return rows @@ -389,25 +389,70 @@ export default function SchemaBox({ {row.selected && (
- + +
{columns ? ( - + <> + + {peerType?.toString() === + DBType[DBType.CLICKHOUSE].toString() && ( +
+ + { + const [ + columnName, + columnType, + isPkeyStr, + ] = column.split(':'); + return columnName; + }) ?? [] + } + loading={columnsLoading} + tableRow={row} + setRows={setRows} + /> +
+ )} + ) : columnsLoading ? ( ) : ( diff --git a/ui/app/mirrors/create/cdc/sortingkey.tsx b/ui/app/mirrors/create/cdc/sortingkey.tsx new file mode 100644 index 000000000..48e7be039 --- /dev/null +++ b/ui/app/mirrors/create/cdc/sortingkey.tsx @@ -0,0 +1,192 @@ +'use client'; +import { + Dispatch, + SetStateAction, + useCallback, + useEffect, + useState, +} from 'react'; +import ReactSelect from 'react-select'; + +import { TableMapRow } from '@/app/dto/MirrorsDTO'; +import SelectTheme from '@/app/styles/select'; +import { notifySortingKey } from '@/app/utils/notify'; +import { Button } from '@/lib/Button'; +import { Checkbox } from '@/lib/Checkbox'; +import { Icon } from '@/lib/Icon'; +import { Label } from '@/lib/Label'; +import { RowWithCheckbox } from '@/lib/Layout'; +import { ToastContainer } from 'react-toastify'; +import 'react-toastify/dist/ReactToastify.css'; +import { engineOptionStyles } from './styles'; + +interface SortingKeysProps { + columns: string[]; + tableRow: TableMapRow; + loading: boolean; + setRows: Dispatch>; +} + +const SelectSortingKeys = ({ + columns, + loading, + tableRow, + setRows, +}: SortingKeysProps) => { + const [sortingKeysSelections, setSortingKeysSelections] = useState( + [] + ); + const [showSortingKey, setShowSortingKey] = useState(false); + + const handleSortingKey = useCallback( + (col: string, action: 'add' | 'remove') => { + setSortingKeysSelections((prev) => { + if (action === 'add' && !prev.some((key) => key === col)) { + return [col, ...prev]; + } else if (action === 'remove') { + return prev.filter((prevCol) => prevCol !== col); + } + return prev; + }); + }, + [] + ); + + const registerSortingKeys = useCallback(() => { + setRows((prevRows) => { + const rowIndex = prevRows.findIndex( + (row) => row.source === tableRow.source + ); + if (rowIndex !== -1) { + const newColumns = prevRows[rowIndex].columns.map((col) => ({ + ...col, + ordering: + sortingKeysSelections.findIndex((key) => key === col.sourceName) + + 1, + })); + sortingKeysSelections.forEach((sortingKeyCol, orderingIndex) => { + if (!newColumns.some((col) => col.sourceName === sortingKeyCol)) { + newColumns.push({ + sourceName: sortingKeyCol, + destinationName: '', + destinationType: '', + ordering: orderingIndex + 1, + nullableEnabled: false, + }); + } + }); + const newRows = [...prevRows]; + newRows[rowIndex].columns = newColumns; + return newRows; + } + return prevRows; + }); + }, [sortingKeysSelections, setRows, tableRow.source]); + + const handleShowSortingKey = useCallback( + (state: boolean) => { + setShowSortingKey(state); + if (!state) { + setSortingKeysSelections([]); + registerSortingKeys(); + } else { + notifySortingKey(); + } + }, + [registerSortingKeys] + ); + + useEffect(() => { + if (showSortingKey) { + registerSortingKeys(); + } + }, [registerSortingKeys, showSortingKey]); + + return ( +
+ + + Use a custom sorting key + + } + action={ + + } + /> + {showSortingKey && ( +
+ { + val && handleSortingKey(val.value, 'add'); + }} + isLoading={loading} + value={null} + styles={engineOptionStyles} + options={columns.map((col) => ({ value: col, label: col }))} + theme={SelectTheme} + isClearable + /> +
+ {sortingKeysSelections.map((col: string) => { + return ( +
+ {col} + + + ); + })} + + + )} +
+ ); +}; + +export default SelectSortingKeys; diff --git a/ui/app/mirrors/tables.tsx b/ui/app/mirrors/tables.tsx index 3a862d976..110eea407 100644 --- a/ui/app/mirrors/tables.tsx +++ b/ui/app/mirrors/tables.tsx @@ -1,5 +1,5 @@ 'use client'; -import { DropDialog } from '@/components/DropDialog'; +import DropDialog from '@/components/DropDialog'; import MirrorLink from '@/components/MirrorLink'; import NewButton from '@/components/NewButton'; import PeerButton from '@/components/PeerComponent'; diff --git a/ui/app/peers/peersTable.tsx b/ui/app/peers/peersTable.tsx index 5145617c0..6fb1895f6 100644 --- a/ui/app/peers/peersTable.tsx +++ b/ui/app/peers/peersTable.tsx @@ -1,5 +1,5 @@ 'use client'; -import { DropDialog } from '@/components/DropDialog'; +import DropDialog from '@/components/DropDialog'; import PeerButton from '@/components/PeerComponent'; import PeerTypeLabel, { DBTypeToGoodText, diff --git a/ui/app/scripts/list.tsx b/ui/app/scripts/list.tsx index 9dc2b8b48..dc16392c1 100644 --- a/ui/app/scripts/list.tsx +++ b/ui/app/scripts/list.tsx @@ -1,5 +1,5 @@ 'use client'; -import { DropDialog } from '@/components/DropDialog'; +import DropDialog from '@/components/DropDialog'; import { Script } from '@/grpc_generated/route'; import { Button } from '@/lib/Button/Button'; import { Label } from '@/lib/Label/Label'; diff --git a/ui/app/utils/notify.tsx b/ui/app/utils/notify.tsx index 7306a2460..80ade57c1 100644 --- a/ui/app/utils/notify.tsx +++ b/ui/app/utils/notify.tsx @@ -1,3 +1,4 @@ +import Link from 'next/link'; import { toast } from 'react-toastify'; export const notifyErr = (msg: string, ok?: boolean) => { @@ -11,3 +12,30 @@ export const notifyErr = (msg: string, ok?: boolean) => { }); } }; + +const SortingKeyToast = () => { + const orderingKeyDoc = 'https://docs.peerdb.io/mirror/ordering-key-different'; + + return ( +
+ + Using ordering keys in ClickHouse that differ from the primary key in + Postgres has some caveats. Please read{' '} + + this doc + {' '} + carefully. + +
+ ); +}; + +export const notifySortingKey = () => { + toast.warn(SortingKeyToast, { + position: 'bottom-center', + autoClose: false, + closeOnClick: false, + closeButton: true, + toastId: 'sorting_key_warning', + }); +}; diff --git a/ui/components/AlertDropdown.tsx b/ui/components/AlertDropdown.tsx index 56155bf8c..badf5fa25 100644 --- a/ui/components/AlertDropdown.tsx +++ b/ui/components/AlertDropdown.tsx @@ -2,7 +2,7 @@ import { Button } from '@/lib/Button/Button'; import { Icon } from '@/lib/Icon'; import * as DropdownMenu from '@radix-ui/react-dropdown-menu'; import { useState } from 'react'; -import { DropDialog } from './DropDialog'; +import DropDialog from './DropDialog'; const AlertDropdown = ({ disable, alertId, diff --git a/ui/components/DropDialog.tsx b/ui/components/DropDialog.tsx index 221260d03..b3063f3fa 100644 --- a/ui/components/DropDialog.tsx +++ b/ui/components/DropDialog.tsx @@ -28,12 +28,12 @@ interface deleteScriptArgs { scriptId: number; } -export const handleDropMirror = async ( +async function handleDropMirror( dropArgs: dropMirrorArgs, setLoading: Dispatch>, setMsg: Dispatch>, dropStats: boolean -) => { +) { setLoading(true); const res = await changeFlowState( dropArgs.flowJobName, @@ -50,15 +50,15 @@ export const handleDropMirror = async ( window.location.reload(); return true; -}; +} -export const DropDialog = ({ +export default function DropDialog({ mode, dropArgs, }: { mode: 'PEER' | 'MIRROR' | 'ALERT' | 'SCRIPT'; dropArgs: dropMirrorArgs | dropPeerArgs | deleteAlertArgs | deleteScriptArgs; -}) => { +}) { const [loading, setLoading] = useState(false); const [msg, setMsg] = useState(''); const [dropStats, setDropStats] = useState(true); @@ -209,4 +209,4 @@ export const DropDialog = ({ ); -}; +} diff --git a/ui/package-lock.json b/ui/package-lock.json index 3c3466fa0..00613c421 100644 --- a/ui/package-lock.json +++ b/ui/package-lock.json @@ -29,8 +29,7 @@ "@types/react": "^18.3.11", "@types/react-dom": "^18.3.0", "classnames": "^2.5.1", - "long": "^5.2.3", - "lucide-react": "^0.452.0", + "lucide-react": "^0.453.0", "material-symbols": "^0.25.0", "moment": "^2.30.1", "moment-timezone": "^0.5.46", @@ -2316,9 +2315,9 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "22.7.5", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.7.5.tgz", - "integrity": "sha512-jML7s2NAzMWc//QSJ1a3prpk78cOPchGvXJsC3C6R6PSMoooztvRVQEz89gmBTBY1SPMaqo5teB4uNHPdetShQ==", + "version": "22.7.7", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.7.7.tgz", + "integrity": "sha512-SRxCrrg9CL/y54aiMCG3edPKdprgMVGDXjA3gB8UmmBW5TcXzRUYAh8EWzTnSJFAd1rgImPELza+A3bJ+qxz8Q==", "license": "MIT", "dependencies": { "undici-types": "~6.19.2" @@ -2371,17 +2370,17 @@ "license": "MIT" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "8.8.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.8.1.tgz", - "integrity": "sha512-xfvdgA8AP/vxHgtgU310+WBnLB4uJQ9XdyP17RebG26rLtDrQJV3ZYrcopX91GrHmMoH8bdSwMRh2a//TiJ1jQ==", + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.10.0.tgz", + "integrity": "sha512-phuB3hoP7FFKbRXxjl+DRlQDuJqhpOnm5MmtROXyWi3uS/Xg2ZXqiQfcG2BJHiN4QKyzdOJi3NEn/qTnjUlkmQ==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "8.8.1", - "@typescript-eslint/type-utils": "8.8.1", - "@typescript-eslint/utils": "8.8.1", - "@typescript-eslint/visitor-keys": 
"8.8.1", + "@typescript-eslint/scope-manager": "8.10.0", + "@typescript-eslint/type-utils": "8.10.0", + "@typescript-eslint/utils": "8.10.0", + "@typescript-eslint/visitor-keys": "8.10.0", "graphemer": "^1.4.0", "ignore": "^5.3.1", "natural-compare": "^1.4.0", @@ -2405,16 +2404,16 @@ } }, "node_modules/@typescript-eslint/parser": { - "version": "8.8.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.8.1.tgz", - "integrity": "sha512-hQUVn2Lij2NAxVFEdvIGxT9gP1tq2yM83m+by3whWFsWC+1y8pxxxHUFE1UqDu2VsGi2i6RLcv4QvouM84U+ow==", + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.10.0.tgz", + "integrity": "sha512-E24l90SxuJhytWJ0pTQydFT46Nk0Z+bsLKo/L8rtQSL93rQ6byd1V/QbDpHUTdLPOMsBCcYXZweADNCfOCmOAg==", "dev": true, "license": "BSD-2-Clause", "dependencies": { - "@typescript-eslint/scope-manager": "8.8.1", - "@typescript-eslint/types": "8.8.1", - "@typescript-eslint/typescript-estree": "8.8.1", - "@typescript-eslint/visitor-keys": "8.8.1", + "@typescript-eslint/scope-manager": "8.10.0", + "@typescript-eslint/types": "8.10.0", + "@typescript-eslint/typescript-estree": "8.10.0", + "@typescript-eslint/visitor-keys": "8.10.0", "debug": "^4.3.4" }, "engines": { @@ -2434,14 +2433,14 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "8.8.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.8.1.tgz", - "integrity": "sha512-X4JdU+66Mazev/J0gfXlcC/dV6JI37h+93W9BRYXrSn0hrE64IoWgVkO9MSJgEzoWkxONgaQpICWg8vAN74wlA==", + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.10.0.tgz", + "integrity": "sha512-AgCaEjhfql9MDKjMUxWvH7HjLeBqMCBfIaBbzzIcBbQPZE7CPh1m6FF+L75NUMJFMLYhCywJXIDEMa3//1A0dw==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.8.1", - "@typescript-eslint/visitor-keys": "8.8.1" + "@typescript-eslint/types": "8.10.0", + "@typescript-eslint/visitor-keys": "8.10.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2452,14 +2451,14 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "8.8.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.8.1.tgz", - "integrity": "sha512-qSVnpcbLP8CALORf0za+vjLYj1Wp8HSoiI8zYU5tHxRVj30702Z1Yw4cLwfNKhTPWp5+P+k1pjmD5Zd1nhxiZA==", + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.10.0.tgz", + "integrity": "sha512-PCpUOpyQSpxBn230yIcK+LeCQaXuxrgCm2Zk1S+PTIRJsEfU6nJ0TtwyH8pIwPK/vJoA+7TZtzyAJSGBz+s/dg==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/typescript-estree": "8.8.1", - "@typescript-eslint/utils": "8.8.1", + "@typescript-eslint/typescript-estree": "8.10.0", + "@typescript-eslint/utils": "8.10.0", "debug": "^4.3.4", "ts-api-utils": "^1.3.0" }, @@ -2477,9 +2476,9 @@ } }, "node_modules/@typescript-eslint/types": { - "version": "8.8.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.8.1.tgz", - "integrity": "sha512-WCcTP4SDXzMd23N27u66zTKMuEevH4uzU8C9jf0RO4E04yVHgQgW+r+TeVTNnO1KIfrL8ebgVVYYMMO3+jC55Q==", + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.10.0.tgz", + "integrity": "sha512-k/E48uzsfJCRRbGLapdZgrX52csmWJ2rcowwPvOZ8lwPUv3xW6CcFeJAXgx4uJm+Ge4+a4tFOkdYvSpxhRhg1w==", "dev": true, "license": "MIT", "engines": { @@ -2491,14 +2490,14 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { 
- "version": "8.8.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.8.1.tgz", - "integrity": "sha512-A5d1R9p+X+1js4JogdNilDuuq+EHZdsH9MjTVxXOdVFfTJXunKJR/v+fNNyO4TnoOn5HqobzfRlc70NC6HTcdg==", + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.10.0.tgz", + "integrity": "sha512-3OE0nlcOHaMvQ8Xu5gAfME3/tWVDpb/HxtpUZ1WeOAksZ/h/gwrBzCklaGzwZT97/lBbbxJ16dMA98JMEngW4w==", "dev": true, "license": "BSD-2-Clause", "dependencies": { - "@typescript-eslint/types": "8.8.1", - "@typescript-eslint/visitor-keys": "8.8.1", + "@typescript-eslint/types": "8.10.0", + "@typescript-eslint/visitor-keys": "8.10.0", "debug": "^4.3.4", "fast-glob": "^3.3.2", "is-glob": "^4.0.3", @@ -2546,16 +2545,16 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "8.8.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.8.1.tgz", - "integrity": "sha512-/QkNJDbV0bdL7H7d0/y0qBbV2HTtf0TIyjSDTvvmQEzeVx8jEImEbLuOA4EsvE8gIgqMitns0ifb5uQhMj8d9w==", + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.10.0.tgz", + "integrity": "sha512-Oq4uZ7JFr9d1ZunE/QKy5egcDRXT/FrS2z/nlxzPua2VHFtmMvFNDvpq1m/hq0ra+T52aUezfcjGRIB7vNJF9w==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "8.8.1", - "@typescript-eslint/types": "8.8.1", - "@typescript-eslint/typescript-estree": "8.8.1" + "@typescript-eslint/scope-manager": "8.10.0", + "@typescript-eslint/types": "8.10.0", + "@typescript-eslint/typescript-estree": "8.10.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2569,13 +2568,13 @@ } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "8.8.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.8.1.tgz", - "integrity": "sha512-0/TdC3aeRAsW7MDvYRwEc1Uwm0TIBfzjPFgg60UU2Haj5qsCs9cc3zNgY71edqE3LbWfF/WoZQd3lJoDXFQpag==", + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.10.0.tgz", + "integrity": "sha512-k8nekgqwr7FadWk548Lfph6V3r9OVqjzAIVskE7orMZR23cGJjAOVazsZSJW+ElyjfTM4wx/1g88Mi70DDtG9A==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.8.1", + "@typescript-eslint/types": "8.10.0", "eslint-visitor-keys": "^3.4.3" }, "engines": { @@ -2769,9 +2768,9 @@ "license": "Apache-2.0" }, "node_modules/acorn": { - "version": "8.12.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.12.1.tgz", - "integrity": "sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.13.0.tgz", + "integrity": "sha512-8zSiw54Oxrdym50NlZ9sUusyO1Z1ZchgRLWRaK6c86XJFClyCgFKetdowBg5bKxyp/u+CDBJG4Mpp0m3HLZl9w==", "dev": true, "license": "MIT", "bin": { @@ -2939,13 +2938,13 @@ } }, "node_modules/aria-query": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.1.3.tgz", - "integrity": "sha512-R5iJ5lkuHybztUfuOAznmboyjWq8O6sqNqtK7CLOqdydi54VNbORp49mb14KbWgG1QD3JFO9hJdZ+y4KutfdOQ==", + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz", + "integrity": "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==", "dev": true, "license": "Apache-2.0", - "dependencies": { - "deep-equal": 
"^2.0.5" + "engines": { + "node": ">= 0.4" } }, "node_modules/array-buffer-byte-length": { @@ -3168,9 +3167,9 @@ } }, "node_modules/axe-core": { - "version": "4.10.0", - "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.10.0.tgz", - "integrity": "sha512-Mr2ZakwQ7XUAjp7pAwQWRhhK8mQQ6JAaNWSjmjxil0R8BPioMtQsTLOolGYkji1rcL++3dCqZA3zWqpT+9Ew6g==", + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.10.1.tgz", + "integrity": "sha512-qPC9o+kD8Tir0lzNGLeghbOrWMr3ZJpaRlCIb6Uobt/7N4FiEDvqUMnxzCHRHmg8vOg14kr5gVNyScRmbMaJ9g==", "dev": true, "license": "MPL-2.0", "engines": { @@ -3342,9 +3341,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001668", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001668.tgz", - "integrity": "sha512-nWLrdxqCdblixUO+27JtGJJE/txpJlyUy5YN1u53wLZkP0emYCo5zgS6QYft7VUYR42LGgi/S5hdLZTrnyIddw==", + "version": "1.0.30001669", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001669.tgz", + "integrity": "sha512-DlWzFDJqstqtIVx1zeSpIMLjunf5SmwOw0N2Ck/QSQdS8PLS4+9HrLaYei4w8BIAL7IB/UEDu889d8vhCTPA0w==", "funding": [ { "type": "opencollective", @@ -3463,6 +3462,15 @@ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", "license": "MIT" }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", @@ -3815,39 +3823,6 @@ "integrity": "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==", "license": "MIT" }, - "node_modules/deep-equal": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-2.2.3.tgz", - "integrity": "sha512-ZIwpnevOurS8bpT4192sqAowWM76JDKSHYzMLty3BZGSswgq6pBaH3DhCSW5xVAZICZyKdOBPjwww5wfgT/6PA==", - "dev": true, - "license": "MIT", - "dependencies": { - "array-buffer-byte-length": "^1.0.0", - "call-bind": "^1.0.5", - "es-get-iterator": "^1.1.3", - "get-intrinsic": "^1.2.2", - "is-arguments": "^1.1.1", - "is-array-buffer": "^3.0.2", - "is-date-object": "^1.0.5", - "is-regex": "^1.1.4", - "is-shared-array-buffer": "^1.0.2", - "isarray": "^2.0.5", - "object-is": "^1.1.5", - "object-keys": "^1.1.1", - "object.assign": "^4.1.4", - "regexp.prototype.flags": "^1.5.1", - "side-channel": "^1.0.4", - "which-boxed-primitive": "^1.0.2", - "which-collection": "^1.0.1", - "which-typed-array": "^1.1.13" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", @@ -3939,9 +3914,9 @@ "license": "MIT" }, "node_modules/electron-to-chromium": { - "version": "1.5.36", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.36.tgz", - "integrity": "sha512-HYTX8tKge/VNp6FGO+f/uVDmUkq+cEfcxYhKf15Akc4M5yxt5YmorwlAitKWjWhWQnKcDRBAQKXkhqqXMqcrjw==", + "version": "1.5.41", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.41.tgz", + "integrity": "sha512-dfdv/2xNjX0P8Vzme4cfzHqnPm5xsZXwsolTYr0eyW18IUmNyG08vL+fttvinTfhKfIKdRoqkDIC9e9iWQCNYQ==", 
"dev": true, "license": "ISC" }, @@ -4072,27 +4047,6 @@ "node": ">= 0.4" } }, - "node_modules/es-get-iterator": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/es-get-iterator/-/es-get-iterator-1.1.3.tgz", - "integrity": "sha512-sPZmqHBe6JIiTfN5q2pEi//TwxmAFHwj/XEuYjTuse78i8KxaqMTTzxPoFKuzRpDpTJ+0NAbpfenkmH2rePtuw==", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.3", - "has-symbols": "^1.0.3", - "is-arguments": "^1.1.1", - "is-map": "^2.0.2", - "is-set": "^2.0.2", - "is-string": "^1.0.7", - "isarray": "^2.0.5", - "stop-iteration-iterator": "^1.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/es-iterator-helpers": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.1.0.tgz", @@ -4456,13 +4410,13 @@ } }, "node_modules/eslint-plugin-jsx-a11y": { - "version": "6.10.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.10.0.tgz", - "integrity": "sha512-ySOHvXX8eSN6zz8Bywacm7CvGNhUtdjvqfQDVe6020TUK34Cywkw7m0KsCCk1Qtm9G1FayfTN1/7mMYnYO2Bhg==", + "version": "6.10.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.10.1.tgz", + "integrity": "sha512-zHByM9WTUMnfsDTafGXRiqxp6lFtNoSOWBY6FonVRn3A+BUwN1L/tdBXT40BcBJi0cZjOGTXZ0eD/rTG9fEJ0g==", "dev": true, "license": "MIT", "dependencies": { - "aria-query": "~5.1.3", + "aria-query": "^5.3.2", "array-includes": "^3.1.8", "array.prototype.flatmap": "^1.3.2", "ast-types-flow": "^0.0.8", @@ -4470,14 +4424,14 @@ "axobject-query": "^4.1.0", "damerau-levenshtein": "^1.0.8", "emoji-regex": "^9.2.2", - "es-iterator-helpers": "^1.0.19", + "es-iterator-helpers": "^1.1.0", "hasown": "^2.0.2", "jsx-ast-utils": "^3.3.5", "language-tags": "^1.0.9", "minimatch": "^3.1.2", "object.fromentries": "^2.0.8", "safe-regex-test": "^1.0.3", - "string.prototype.includes": "^2.0.0" + "string.prototype.includes": "^2.0.1" }, "engines": { "node": ">=4.0" @@ -4742,11 +4696,11 @@ "license": "MIT" }, "node_modules/fast-uri": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.0.2.tgz", - "integrity": "sha512-GR6f0hD7XXyNJa25Tb9BuIdN0tdr+0BMi6/CJPH3wJO1JjNG3n/VsSw38AwRdKZABm8lGbPfakLRkYzx2V9row==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.0.3.tgz", + "integrity": "sha512-aLrHthzCjH5He4Z2H9YZ+v6Ujb9ocRuW6ZzkJQOrTxleEijANq4v1TsaPaVG1PZcuurEzrLcWRyYBYXD5cEiaw==", "dev": true, - "license": "MIT" + "license": "BSD-3-Clause" }, "node_modules/fastq": { "version": "1.17.1", @@ -5356,23 +5310,6 @@ "loose-envify": "^1.0.0" } }, - "node_modules/is-arguments": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz", - "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/is-array-buffer": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.4.tgz", @@ -6029,43 +5966,6 @@ "source-map": "~0.6.0" } }, - "node_modules/less/node_modules/make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": 
"sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dev": true, - "license": "MIT", - "optional": true, - "dependencies": { - "pify": "^4.0.1", - "semver": "^5.6.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/less/node_modules/pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true, - "license": "MIT", - "optional": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/less/node_modules/semver": { - "version": "5.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", - "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", - "dev": true, - "license": "ISC", - "optional": true, - "bin": { - "semver": "bin/semver" - } - }, "node_modules/levn": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", @@ -6171,18 +6071,44 @@ } }, "node_modules/lucide-react": { - "version": "0.452.0", - "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.452.0.tgz", - "integrity": "sha512-kNefjOUOGm+Mu3KDiryONyPba9r+nhcrz5oJs3N6JDzGboQNEXw5GB3yB8rnV9/FA4bPyggNU6CRSihZm9MvSw==", + "version": "0.453.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.453.0.tgz", + "integrity": "sha512-kL+RGZCcJi9BvJtzg2kshO192Ddy9hv3ij+cPrVPWSRzgCWCVazoQJxOjAwgK53NomL07HB7GPHW120FimjNhQ==", "license": "ISC", "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0-rc" } }, + "node_modules/make-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", + "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "pify": "^4.0.1", + "semver": "^5.6.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/make-dir/node_modules/semver": { + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "dev": true, + "license": "ISC", + "optional": true, + "bin": { + "semver": "bin/semver" + } + }, "node_modules/material-symbols": { - "version": "0.25.0", - "resolved": "https://registry.npmjs.org/material-symbols/-/material-symbols-0.25.0.tgz", - "integrity": "sha512-YYT13NN+EO3UUVMnSnzdVTwonNa1OvohavS6rUQ/Bk8ShWjlZEL0HekCd11aWW1n6bI+r4Q+po3Ti+9gmkpinw==", + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/material-symbols/-/material-symbols-0.25.1.tgz", + "integrity": "sha512-0HopmXLjRs4H99LWajFWIXAt8DpaVMf9lyhKp35HQ+ocb7JJ3eXJTJNkOwccfbJ34qIuwYDwLJQtlzheMFmizw==", "license": "Apache-2.0" }, "node_modules/memoize-one": { @@ -6557,23 +6483,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/object-is": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.6.tgz", - "integrity": "sha512-F8cZ+KfGlSGi09lJT7/Nd6KJZ9ygtvYC0/UYYLI9nmQKLMnydpB9yvbv9K1uSkEu7FU9vYPmVwLg328tX+ot3Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.7", - "define-properties": "^1.2.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/object-keys": { "version": "1.1.1", 
"resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", @@ -6865,9 +6774,9 @@ } }, "node_modules/picocolors": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz", - "integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", "license": "ISC" }, "node_modules/picomatch": { @@ -6883,12 +6792,14 @@ } }, "node_modules/pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", + "dev": true, "license": "MIT", + "optional": true, "engines": { - "node": ">=0.10.0" + "node": ">=6" } }, "node_modules/pirates": { @@ -7422,9 +7333,9 @@ } }, "node_modules/react-transition-state": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/react-transition-state/-/react-transition-state-2.1.2.tgz", - "integrity": "sha512-RkDYBkj1V1ZqBA5AwQPrMt2Uagwsx6b//GVJdRDhs/t0o66w2nhQiyHyFGQEI60mgtbaIdLm8yhBRCvhA+FxEg==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/react-transition-state/-/react-transition-state-2.1.3.tgz", + "integrity": "sha512-3RyB6Qw1q3xYLhq1tU8q/l61sdAy6/qQVdz62puT7Fe7T2h4EinNyLEKuI8prI/LGKjUT7DBcVP1RTEFReUS9A==", "license": "MIT", "peerDependencies": { "react": ">=16.8.0", @@ -7440,6 +7351,15 @@ "pify": "^2.3.0" } }, + "node_modules/read-cache/node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/readdirp": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", @@ -7959,19 +7879,6 @@ "integrity": "sha512-HTEHMNieakEnoe33shBYcZ7NX83ACUjCu8c40iOGEZsngj9zRnkqS9j1pqQPXwobB0ZcVTk27REb7COQ0UR59w==", "license": "MIT" }, - "node_modules/stop-iteration-iterator": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.0.0.tgz", - "integrity": "sha512-iCGQj+0l0HOdZ2AEeBADlsRC+vsnDsZsbdSiH1yNSjcfKM7fdpCMfqAL/dwF5BLiw/XhRft/Wax6zQbhq2BcjQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "internal-slot": "^1.0.4" - }, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/streamsearch": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", @@ -8056,14 +7963,18 @@ } }, "node_modules/string.prototype.includes": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/string.prototype.includes/-/string.prototype.includes-2.0.0.tgz", - "integrity": "sha512-E34CkBgyeqNDcrbU76cDjL5JLcVrtSdYq0MEh/B10r17pRP4ciHLwTgnuLV8Ay6cgEMLkcBkFCKyFZ43YldYzg==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/string.prototype.includes/-/string.prototype.includes-2.0.1.tgz", + "integrity": "sha512-o7+c9bW6zpAdJHTtujeePODAhkuicdAryFsfVKwA+wGw89wJ4GTY484WTucM9hLtDEOpOvI+aHnzqnC5lHp4Rg==", "dev": 
true, "license": "MIT", "dependencies": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.5" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3" + }, + "engines": { + "node": ">= 0.4" } }, "node_modules/string.prototype.matchall": { @@ -8329,15 +8240,6 @@ "node": ">=16 || 14 >=14.17" } }, - "node_modules/sucrase/node_modules/commander": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", - "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", - "license": "MIT", - "engines": { - "node": ">= 6" - } - }, "node_modules/supports-color": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", @@ -8393,9 +8295,9 @@ } }, "node_modules/tailwindcss": { - "version": "3.4.13", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.13.tgz", - "integrity": "sha512-KqjHOJKogOUt5Bs752ykCeiwvi0fKVkr5oqsFNt/8px/tA8scFPIlkygsf6jXrfCqGHz7VflA6+yytWuM+XhFw==", + "version": "3.4.14", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.14.tgz", + "integrity": "sha512-IcSvOcTRcUtQQ7ILQL5quRDg7Xs93PdJEk1ZLbhhvJc7uj/OAhYOnruEiwnGgBvUtaUAJ8/mhSw1o8L2jCiENA==", "license": "MIT", "dependencies": { "@alloc/quick-lru": "^5.2.0", @@ -8459,9 +8361,9 @@ } }, "node_modules/terser": { - "version": "5.34.1", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.34.1.tgz", - "integrity": "sha512-FsJZ7iZLd/BXkz+4xrRTGJ26o/6VTjQytUk8b8OxkwcD2I+79VPJlz7qss1+zE7h8GNIScFqXcDyJ/KqBYZFVA==", + "version": "5.36.0", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.36.0.tgz", + "integrity": "sha512-IYV9eNMuFAV4THUspIRXkLakHnV6XO7FEdtKjf/mDyrnqUg9LnlOn6/RwRvM9SZjR4GUq8Nk8zj67FzVARr74w==", "dev": true, "license": "BSD-2-Clause", "dependencies": { @@ -8626,9 +8528,9 @@ } }, "node_modules/tslib": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.7.0.tgz", - "integrity": "sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==", + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.0.tgz", + "integrity": "sha512-jWVzBLplnCmoaTr13V9dYbiQ99wvZRd0vNWaDRg+aVYRcjDF3nDksxFDE/+fkXnKhpnUUkmx5pK/v8mCtLVqZA==", "license": "0BSD" }, "node_modules/type-check": { diff --git a/ui/package.json b/ui/package.json index c25ab8167..816cbac3a 100644 --- a/ui/package.json +++ b/ui/package.json @@ -31,8 +31,7 @@ "@types/react": "^18.3.11", "@types/react-dom": "^18.3.0", "classnames": "^2.5.1", - "long": "^5.2.3", - "lucide-react": "^0.452.0", + "lucide-react": "^0.453.0", "material-symbols": "^0.25.0", "moment": "^2.30.1", "moment-timezone": "^0.5.46",