diff --git a/tool/common/common.go b/tool/common/common.go index 456a17f4992d3..0a7a13946e554 100644 --- a/tool/common/common.go +++ b/tool/common/common.go @@ -23,11 +23,11 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "sort" "strings" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/constants" @@ -36,6 +36,7 @@ import ( "github.com/gravitational/teleport/lib/asciitable" libevents "github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/utils" + logutils "github.com/gravitational/teleport/lib/utils/log" ) // ExitCodeError wraps an exit code as an error. @@ -85,7 +86,7 @@ func (e *SessionsCollection) WriteText(w io.Writer) error { target = session.DatabaseName timestamp = session.GetTime().Format(constants.HumanDateFormatSeconds) default: - log.Warn(trace.BadParameter("unsupported event type: expected SessionEnd, WindowsDesktopSessionEnd or DatabaseSessionEnd: got: %T", event)) + slog.WarnContext(context.Background(), "unsupported event type: expected SessionEnd, WindowsDesktopSessionEnd or DatabaseSessionEnd", "event_type", logutils.TypeAttr(event)) continue } diff --git a/tool/common/fido2/fido2.go b/tool/common/fido2/fido2.go index f9899224ac8b4..4a9b227d30617 100644 --- a/tool/common/fido2/fido2.go +++ b/tool/common/fido2/fido2.go @@ -25,6 +25,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "os" "github.com/alecthomas/kingpin/v2" @@ -32,7 +33,6 @@ import ( "github.com/go-webauthn/webauthn/protocol/webauthncbor" "github.com/go-webauthn/webauthn/protocol/webauthncose" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" wancli "github.com/gravitational/teleport/lib/auth/webauthncli" ) @@ -161,7 +161,10 @@ func (c *AttobjCommand) Run() error { cert, err := x509.ParseCertificate(certDER) if err != nil { - log.WithError(err).Warnf("Failed to parse X.509 from x5c[%v], continuing", i) + 
slog.WarnContext(context.Background(), "Failed to parse X.509 from x5c, continuing", + "index", i, + "error", err, + ) continue } diff --git a/tool/tctl/common/auth_command.go b/tool/tctl/common/auth_command.go index 8800f3aca7f72..ef290ce83ab8b 100644 --- a/tool/tctl/common/auth_command.go +++ b/tool/tctl/common/auth_command.go @@ -22,6 +22,7 @@ import ( "context" "fmt" "io" + "log/slog" "net/url" "os" "strings" @@ -31,7 +32,6 @@ import ( "github.com/alecthomas/kingpin/v2" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - log "github.com/sirupsen/logrus" "google.golang.org/protobuf/types/known/durationpb" "github.com/gravitational/teleport" @@ -996,7 +996,7 @@ func (a *AuthCommand) generateUserKeys(ctx context.Context, clusterAPI certifica // If we're in multiplexed mode get SNI name for kube from single multiplexed proxy addr kubeTLSServerName := "" if proxyListenerMode == types.ProxyListenerMode_Multiplex { - log.Debug("Using Proxy SNI for kube TLS server name") + slog.DebugContext(ctx, "Using Proxy SNI for kube TLS server name") u, err := parseURL(a.proxyAddr) if err != nil { return trace.Wrap(err) @@ -1008,7 +1008,7 @@ func (a *AuthCommand) generateUserKeys(ctx context.Context, clusterAPI certifica expires, err := keyRing.TeleportTLSCertValidBefore() if err != nil { - log.WithError(err).Warn("Failed to check TTL validity") + slog.WarnContext(ctx, "Failed to check TTL validity", "error", err) // err swallowed on purpose } else if reqExpiry.Sub(expires) > time.Minute { maxAllowedTTL := time.Until(expires).Round(time.Second) @@ -1165,7 +1165,11 @@ func (a *AuthCommand) checkProxyAddr(ctx context.Context, clusterAPI certificate _, err := utils.ParseAddr(addr) if err != nil { - log.Warningf("Invalid public address on the proxy %q: %q: %v.", p.GetName(), addr, err) + slog.WarnContext(ctx, "Invalid public address on the proxy", + "proxy", p.GetName(), + "public_address", addr, + "error", err, + ) continue } @@ -1178,7 +1182,11 @@ func (a 
*AuthCommand) checkProxyAddr(ctx context.Context, clusterAPI certificate }, ) if err != nil { - log.Warningf("Unable to ping proxy public address on the proxy %q: %q: %v.", p.GetName(), addr, err) + slog.WarnContext(ctx, "Unable to ping proxy public address on the proxy", + "proxy", p.GetName(), + "public_address", addr, + "error", err, + ) continue } diff --git a/tool/tctl/common/auth_rotate_command.go b/tool/tctl/common/auth_rotate_command.go index b0c5f3b31f5c4..d63a6ad914c68 100644 --- a/tool/tctl/common/auth_rotate_command.go +++ b/tool/tctl/common/auth_rotate_command.go @@ -37,7 +37,6 @@ import ( "github.com/charmbracelet/huh" "github.com/charmbracelet/lipgloss" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "golang.org/x/term" apiclient "github.com/gravitational/teleport/api/client" @@ -1288,7 +1287,6 @@ func setupLoggers(logWriter io.Writer) { logWriter, logutils.SlogTextHandlerConfig{EnableColors: true}, ))) - logrus.StandardLogger().SetOutput(logWriter) } func setupMFAPrompt(client *authclient.Client, pingResp proto.PingResponse, promptWriter io.Writer) { diff --git a/tool/tctl/common/bots_command.go b/tool/tctl/common/bots_command.go index 6a5de45f5afb5..1cd290cb1bcd2 100644 --- a/tool/tctl/common/bots_command.go +++ b/tool/tctl/common/bots_command.go @@ -25,6 +25,7 @@ import ( "errors" "fmt" "io" + "log/slog" "maps" "os" "strings" @@ -34,7 +35,6 @@ import ( "github.com/alecthomas/kingpin/v2" "github.com/google/uuid" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "google.golang.org/protobuf/types/known/fieldmaskpb" "github.com/gravitational/teleport" @@ -260,7 +260,7 @@ func (c *BotsCommand) AddBot(ctx context.Context, client *authclient.Client) err roles := splitEntries(c.botRoles) if len(roles) == 0 { - log.Warning("No roles specified. 
The bot will not be able to produce outputs until a role is added to the bot.") + slog.WarnContext(ctx, "No roles specified - the bot will not be able to produce outputs until a role is added to the bot") } var token types.ProvisionToken if c.tokenID == "" { @@ -386,7 +386,7 @@ func (c *BotsCommand) LockBot(ctx context.Context, client *authclient.Client) er // updateBotLogins applies updates from CLI arguments to a bot's logins trait, // updating the field mask if any updates were made. -func (c *BotsCommand) updateBotLogins(bot *machineidv1pb.Bot, mask *fieldmaskpb.FieldMask) error { +func (c *BotsCommand) updateBotLogins(ctx context.Context, bot *machineidv1pb.Bot, mask *fieldmaskpb.FieldMask) error { traits := map[string][]string{} for _, t := range bot.Spec.GetTraits() { traits[t.Name] = t.Values @@ -419,15 +419,15 @@ func (c *BotsCommand) updateBotLogins(bot *machineidv1pb.Bot, mask *fieldmaskpb. desiredLoginsArray := utils.StringsSliceFromSet(desiredLogins) if maps.Equal(currentLogins, desiredLogins) { - log.Infof("Logins will be left unchanged: %+v", desiredLoginsArray) + slog.InfoContext(ctx, "Logins will be left unchanged", "logins", desiredLoginsArray) return nil } - log.Infof("Desired logins for bot %q: %+v", c.botName, desiredLoginsArray) + slog.InfoContext(ctx, "Desired logins for bot", "bot", c.botName, "logins", desiredLoginsArray) if len(desiredLogins) == 0 { delete(traits, constants.TraitLogins) - log.Infof("Removing logins trait from bot user") + slog.InfoContext(ctx, "Removing logins trait from bot user") } else { traits[constants.TraitLogins] = desiredLoginsArray } @@ -477,11 +477,11 @@ func (c *BotsCommand) updateBotRoles(ctx context.Context, client clientRoleGette desiredRolesArray := utils.StringsSliceFromSet(desiredRoles) if maps.Equal(currentRoles, desiredRoles) { - log.Infof("Roles will be left unchanged: %+v", desiredRolesArray) + slog.InfoContext(ctx, "Roles will be left unchanged", "roles", desiredRolesArray) return nil } - 
log.Infof("Desired roles for bot %q: %+v", c.botName, desiredRolesArray) + slog.InfoContext(ctx, "Desired roles for bot", "bot", c.botName, "roles", desiredRolesArray) // Validate roles (server does not do this yet). for roleName := range desiredRoles { @@ -510,7 +510,7 @@ func (c *BotsCommand) UpdateBot(ctx context.Context, client *authclient.Client) } if c.setLogins != "" || c.addLogins != "" { - if err := c.updateBotLogins(bot, fieldMask); err != nil { + if err := c.updateBotLogins(ctx, bot, fieldMask); err != nil { return trace.Wrap(err) } } @@ -522,7 +522,7 @@ func (c *BotsCommand) UpdateBot(ctx context.Context, client *authclient.Client) } if len(fieldMask.Paths) == 0 { - log.Infof("No changes requested, nothing to do.") + slog.InfoContext(ctx, "No changes requested, nothing to do") return nil } @@ -534,7 +534,7 @@ func (c *BotsCommand) UpdateBot(ctx context.Context, client *authclient.Client) return trace.Wrap(err) } - log.Infof("Bot %q has been updated. Roles will take effect on its next renewal.", c.botName) + slog.InfoContext(ctx, "Bot has been updated, roles will take effect on its next renewal", "bot", c.botName) return nil } diff --git a/tool/tctl/common/bots_command_test.go b/tool/tctl/common/bots_command_test.go index 5d31e84e80961..ee8fba1cba112 100644 --- a/tool/tctl/common/bots_command_test.go +++ b/tool/tctl/common/bots_command_test.go @@ -123,7 +123,7 @@ func TestUpdateBotLogins(t *testing.T) { setLogins: tt.set, } - err = cmd.updateBotLogins(bot, fieldMask) + err = cmd.updateBotLogins(context.Background(), bot, fieldMask) tt.assert(t, bot, fieldMask, err) }) } diff --git a/tool/tctl/common/config/global.go b/tool/tctl/common/config/global.go index 458905d752e96..0b357db8e848b 100644 --- a/tool/tctl/common/config/global.go +++ b/tool/tctl/common/config/global.go @@ -19,6 +19,7 @@ package config import ( + "context" "errors" "io/fs" "log/slog" @@ -26,7 +27,6 @@ import ( "runtime" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" 
"github.com/gravitational/teleport" "github.com/gravitational/teleport/api/constants" @@ -65,13 +65,13 @@ type GlobalCLIFlags struct { // The returned authclient.Config has the credentials needed to dial the auth // server. func ApplyConfig(ccf *GlobalCLIFlags, cfg *servicecfg.Config) (*authclient.Config, error) { + ctx := context.TODO() // --debug flag if ccf.Debug { cfg.Debug = ccf.Debug utils.InitLogger(utils.LoggingForCLI, slog.LevelDebug) - log.Debugf("Debug logging has been enabled.") + slog.DebugContext(ctx, "Debug logging has been enabled") } - cfg.Log = log.StandardLogger() cfg.Logger = slog.Default() if cfg.Version == "" { @@ -126,9 +126,9 @@ func ApplyConfig(ccf *GlobalCLIFlags, cfg *servicecfg.Config) (*authclient.Confi if !localAuthSvcConf { // Try profile or identity file. if fileConf == nil { - log.Debug("no config file, loading auth config via extension") + slog.DebugContext(ctx, "no config file, loading auth config via extension") } else { - log.Debug("auth_service disabled in config file, loading auth config via extension") + slog.DebugContext(ctx, "auth_service disabled in config file, loading auth config via extension") } authConfig, err := LoadConfigFromProfile(ccf, cfg) if err == nil { diff --git a/tool/tctl/common/config/profile.go b/tool/tctl/common/config/profile.go index f39d85bf859fc..6e5da64ba1f84 100644 --- a/tool/tctl/common/config/profile.go +++ b/tool/tctl/common/config/profile.go @@ -19,11 +19,12 @@ package config import ( + "context" "errors" + "log/slog" "time" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/metadata" @@ -33,10 +34,12 @@ import ( "github.com/gravitational/teleport/lib/client/identityfile" "github.com/gravitational/teleport/lib/service/servicecfg" "github.com/gravitational/teleport/lib/utils" + logutils "github.com/gravitational/teleport/lib/utils/log" ) // LoadConfigFromProfile applies config from ~/.tsh/ profile if it's 
present func LoadConfigFromProfile(ccf *GlobalCLIFlags, cfg *servicecfg.Config) (*authclient.Config, error) { + ctx := context.TODO() proxyAddr := "" if len(ccf.AuthServerAddr) != 0 { proxyAddr = ccf.AuthServerAddr[0] @@ -68,7 +71,10 @@ func LoadConfigFromProfile(ccf *GlobalCLIFlags, cfg *servicecfg.Config) (*authcl } c := client.MakeDefaultConfig() - log.WithFields(log.Fields{"proxy": profile.ProxyURL.String(), "user": profile.Username}).Debugf("Found profile.") + slog.DebugContext(ctx, "Found profile", + "proxy", logutils.StringerAttr(&profile.ProxyURL), + "user", profile.Username, + ) if err := c.LoadProfile(clientStore, proxyAddr); err != nil { return nil, trace.Wrap(err) } @@ -106,7 +112,7 @@ func LoadConfigFromProfile(ccf *GlobalCLIFlags, cfg *servicecfg.Config) (*authcl if err != nil { return nil, trace.Wrap(err) } - log.Debugf("Setting auth server to web proxy %v.", webProxyAddr) + slog.DebugContext(ctx, "Setting auth server to web proxy", "web_proxy_addr", webProxyAddr) cfg.SetAuthServerAddress(*webProxyAddr) } authConfig.AuthServers = cfg.AuthServerAddresses() diff --git a/tool/tctl/common/devices.go b/tool/tctl/common/devices.go index f8bfc9412ab60..3924c4a798b32 100644 --- a/tool/tctl/common/devices.go +++ b/tool/tctl/common/devices.go @@ -21,6 +21,7 @@ package common import ( "context" "fmt" + "log/slog" "sort" "time" @@ -28,7 +29,6 @@ import ( "github.com/google/uuid" "github.com/gravitational/trace" "github.com/gravitational/trace/trail" - log "github.com/sirupsen/logrus" "google.golang.org/protobuf/types/known/timestamppb" devicepb "github.com/gravitational/teleport/api/gen/proto/go/teleport/devicetrust/v1" @@ -490,10 +490,11 @@ func (c *canOperateOnCurrentDevice) setCurrentDevice() (bool, error) { c.osType = cdd.OsType c.assetTag = cdd.SerialNumber - log.Debugf( - "Running device command against current device: %q/%v", - c.assetTag, - devicetrust.FriendlyOSType(c.osType), + slog.DebugContext( + context.Background(), + "Running device command 
against current device", + "asset_tag", c.assetTag, + "os_type", devicetrust.FriendlyOSType(c.osType), ) return true, nil } diff --git a/tool/tctl/common/loadtest_command.go b/tool/tctl/common/loadtest_command.go index fb9075af180fd..3fa9f58063f90 100644 --- a/tool/tctl/common/loadtest_command.go +++ b/tool/tctl/common/loadtest_command.go @@ -33,7 +33,6 @@ import ( "github.com/alecthomas/kingpin/v2" "github.com/google/uuid" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport" auditlogpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/auditlog/v1" @@ -186,7 +185,7 @@ func (c *LoadtestCommand) NodeHeartbeats(ctx context.Context, client *authclient return } if err != nil { - log.Debugf("Failed to upsert node: %v", err) + slog.DebugContext(ctx, "Failed to upsert node", "error", err) select { case errch <- err: default: diff --git a/tool/tctl/common/loginrule/command.go b/tool/tctl/common/loginrule/command.go index 022c389d88dd7..15a7f9521db54 100644 --- a/tool/tctl/common/loginrule/command.go +++ b/tool/tctl/common/loginrule/command.go @@ -22,11 +22,11 @@ import ( "context" "errors" "io" + "log/slog" "os" "github.com/alecthomas/kingpin/v2" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" kyaml "k8s.io/apimachinery/pkg/util/yaml" "github.com/gravitational/teleport" @@ -141,7 +141,7 @@ func (t *testCommand) run(ctx context.Context, c *authclient.Client) error { } if len(t.inputResourceFiles) > 0 { - logrus.Debugf("Loaded %d login rule(s) from input resource files", len(loginRules)) + slog.DebugContext(ctx, "Loaded login rule(s) from input resource files", "login_rule_count", len(loginRules)) } traits, err := parseTraitsFile(t.inputTraitsFile) diff --git a/tool/tctl/common/node_command.go b/tool/tctl/common/node_command.go index f07021e2daa1e..0be12463b8184 100644 --- a/tool/tctl/common/node_command.go +++ b/tool/tctl/common/node_command.go @@ -22,6 +22,7 @@ import ( "context" "encoding/json" 
"fmt" + "log/slog" "os" "strings" "text/template" @@ -29,7 +30,6 @@ import ( "github.com/alecthomas/kingpin/v2" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/client" @@ -202,7 +202,7 @@ func (c *NodeCommand) Invite(ctx context.Context, client *authclient.Client) err pingResponse, err := client.Ping(ctx) if err != nil { - log.Debugf("unable to ping auth client: %s.", err.Error()) + slog.DebugContext(ctx, "unable to ping auth client", "error", err) } if err == nil && pingResponse.GetServerFeatures().Cloud { diff --git a/tool/tctl/common/resource_command.go b/tool/tctl/common/resource_command.go index cdb8115e9b9a6..23749bc14c528 100644 --- a/tool/tctl/common/resource_command.go +++ b/tool/tctl/common/resource_command.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "os" "slices" @@ -34,7 +35,6 @@ import ( "github.com/crewjam/saml/samlsp" "github.com/gravitational/trace" "github.com/gravitational/trace/trail" - log "github.com/sirupsen/logrus" "google.golang.org/protobuf/encoding/protojson" kyaml "k8s.io/apimachinery/pkg/util/yaml" @@ -71,6 +71,7 @@ import ( "github.com/gravitational/teleport/lib/service/servicecfg" "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/utils" + logutils "github.com/gravitational/teleport/lib/utils/log" commonclient "github.com/gravitational/teleport/tool/tctl/common/client" clusterconfigrec "github.com/gravitational/teleport/tool/tctl/common/clusterconfig" tctlcfg "github.com/gravitational/teleport/tool/tctl/common/config" @@ -1368,7 +1369,10 @@ func (rc *ResourceCommand) createSAMLIdPServiceProvider(ctx context.Context, cli // issue warning about unsupported ACS bindings. 
if err := services.FilterSAMLEntityDescriptor(ed, false /* quiet */); err != nil { - log.Warnf("Entity descriptor for SAML IdP service provider %q contains unsupported ACS bindings: %v", sp.GetEntityID(), err) + slog.WarnContext(ctx, "Entity descriptor for SAML IdP service provider contains unsupported ACS bindings", + "entity_id", sp.GetEntityID(), + "error", err, + ) } } @@ -2249,7 +2253,7 @@ func (rc *ResourceCommand) getCollection(ctx context.Context, client *authclient authorities, err := client.GetCertAuthorities(ctx, caType, rc.withSecrets) if err != nil { if trace.IsBadParameter(err) { - log.Warnf("failed to get certificate authority: %v; skipping", err) + slog.WarnContext(ctx, "failed to get certificate authority; skipping", "error", err) continue } return nil, trace.Wrap(err) @@ -2287,7 +2291,7 @@ func (rc *ResourceCommand) getCollection(ctx context.Context, client *authclient for _, r := range page { srv, ok := r.ResourceWithLabels.(types.Server) if !ok { - log.Warnf("expected types.Server but received unexpected type %T", r) + slog.WarnContext(ctx, "expected types.Server but received unexpected type", "resource_type", logutils.TypeAttr(r)) continue } diff --git a/tool/tctl/common/token_command.go b/tool/tctl/common/token_command.go index 5ac5a6225b126..c08ee1c122602 100644 --- a/tool/tctl/common/token_command.go +++ b/tool/tctl/common/token_command.go @@ -23,6 +23,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "os" "slices" "sort" @@ -33,7 +34,6 @@ import ( "github.com/alecthomas/kingpin/v2" "github.com/ghodss/yaml" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" @@ -348,7 +348,7 @@ func (c *TokensCommand) Add(ctx context.Context, client *authclient.Client) erro pingResponse, err := client.Ping(ctx) if err != nil { - log.Debugf("unable to ping auth client: %s.", err.Error()) + slog.DebugContext(ctx, "unable to ping auth client", "error", err) } 
if err == nil && pingResponse.GetServerFeatures().Cloud { diff --git a/tool/tctl/common/user_command.go b/tool/tctl/common/user_command.go index 83fe2f7e56643..9325335b9b4aa 100644 --- a/tool/tctl/common/user_command.go +++ b/tool/tctl/common/user_command.go @@ -23,6 +23,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/url" "os" "strconv" @@ -31,7 +32,6 @@ import ( "github.com/alecthomas/kingpin/v2" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/constants" @@ -482,7 +482,11 @@ func (u *UserCommand) Update(ctx context.Context, client *authclient.Client) err for _, roleName := range user.GetRoles() { if _, err := client.GetRole(ctx, roleName); err != nil { - log.Warnf("Error checking role %q when upserting user %q: %v", roleName, user.GetName(), err) + slog.WarnContext(ctx, "Error checking role when upserting user", + "role", roleName, + "user", user.GetName(), + "error", err, + ) } } if _, err := client.UpsertUser(ctx, user); err != nil { diff --git a/tool/tctl/sso/configure/command.go b/tool/tctl/sso/configure/command.go index 77e4a75e6aa9e..18e11c337ccdc 100644 --- a/tool/tctl/sso/configure/command.go +++ b/tool/tctl/sso/configure/command.go @@ -20,11 +20,11 @@ package configure import ( "context" + "log/slog" "os" "github.com/alecthomas/kingpin/v2" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/lib/auth/authclient" @@ -40,7 +40,7 @@ type SSOConfigureCommand struct { Config *servicecfg.Config ConfigureCmd *kingpin.CmdClause AuthCommands []*AuthKindCommand - Logger *logrus.Entry + Logger *slog.Logger } type AuthKindCommand struct { @@ -52,7 +52,7 @@ type AuthKindCommand struct { // argument parsing func (cmd *SSOConfigureCommand) Initialize(app *kingpin.Application, flags *tctlcfg.GlobalCLIFlags, cfg *servicecfg.Config) { cmd.Config = cfg - cmd.Logger = 
cfg.Log.WithField(teleport.ComponentKey, teleport.ComponentClient) + cmd.Logger = cfg.Logger.With(teleport.ComponentKey, teleport.ComponentClient) sso := app.Command("sso", "A family of commands for configuring and testing auth connectors (SSO).") cmd.ConfigureCmd = sso.Command("configure", "Create auth connector configuration.") @@ -67,10 +67,12 @@ func (cmd *SSOConfigureCommand) TryRun(ctx context.Context, selectedCommand stri // the default tctl logging behavior is to ignore all logs, unless --debug is present. // we want different behavior: log messages as normal, but with compact format (no time, no caller info). if !cmd.Config.Debug { - formatter := logutils.NewDefaultTextFormatter(utils.IsTerminal(os.Stderr)) - formatter.FormatCaller = func() (caller string) { return "" } - cmd.Logger.Logger.SetFormatter(formatter) - cmd.Logger.Logger.SetOutput(os.Stderr) + cmd.Logger = slog.New(logutils.NewSlogTextHandler(os.Stderr, logutils.SlogTextHandlerConfig{ + Level: cmd.Config.GetLogLevel(), + EnableColors: utils.IsTerminal(os.Stderr), + ConfiguredFields: []string{logutils.LevelField, logutils.ComponentField}, + })) + } client, closeFn, err := clientFunc(ctx) if err != nil { diff --git a/tool/tctl/sso/configure/github.go b/tool/tctl/sso/configure/github.go index 8369e7fe93fbd..09287ea6ebf7b 100644 --- a/tool/tctl/sso/configure/github.go +++ b/tool/tctl/sso/configure/github.go @@ -21,11 +21,11 @@ package configure import ( "context" "fmt" + "log/slog" "os" "github.com/alecthomas/kingpin/v2" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/auth/authclient" @@ -83,7 +83,7 @@ Examples: The values for --secret and --id are provided by GitHub. > tctl sso configure gh ... 
| tctl sso test - + Generate the configuration and immediately test it using "tctl sso test" command.`) preset := &AuthKindCommand{ @@ -104,7 +104,7 @@ func ghRunFunc(ctx context.Context, cmd *SSOConfigureCommand, spec *types.Github } if spec.RedirectURL == "" { - spec.RedirectURL = ResolveCallbackURL(cmd.Logger, clt, "RedirectURL", "https://%v/v1/webapi/github/callback") + spec.RedirectURL = ResolveCallbackURL(ctx, cmd.Logger, clt, "RedirectURL", "https://%v/v1/webapi/github/callback") } connector, err := types.NewGithubConnector(flags.connectorName, *spec) @@ -115,13 +115,13 @@ func ghRunFunc(ctx context.Context, cmd *SSOConfigureCommand, spec *types.Github } // ResolveCallbackURL deals with common pattern of resolving callback URL for IdP to use. -func ResolveCallbackURL(logger *logrus.Entry, clt *authclient.Client, fieldName string, callbackPattern string) string { +func ResolveCallbackURL(ctx context.Context, logger *slog.Logger, clt *authclient.Client, fieldName string, callbackPattern string) string { var callbackURL string - logger.Infof("%v empty, resolving automatically.", fieldName) + logger.InfoContext(ctx, "resolving callback url automatically", "field_name", fieldName) proxies, err := clt.GetProxies() if err != nil { - logger.WithError(err).Error("unable to get proxy list.") + logger.ErrorContext(ctx, "unable to get proxy list", "error", err) } // find first proxy with public addr @@ -135,17 +135,17 @@ func ResolveCallbackURL(logger *logrus.Entry, clt *authclient.Client, fieldName // check if successfully set. 
if callbackURL == "" { - logger.Warnf("Unable to fill %v automatically, cluster's public address unknown.", fieldName) + logger.WarnContext(ctx, "Unable to resolve callback url automatically, cluster's public address unknown", "field_name", fieldName) } else { - logger.Infof("%v set to %q", fieldName, callbackURL) + logger.InfoContext(ctx, "resolved callback url", "field_name", fieldName, "callback_url", callbackURL) } return callbackURL } -func specCheckRoles(ctx context.Context, logger *logrus.Entry, spec *types.GithubConnectorSpecV3, ignoreMissingRoles bool, clt *authclient.Client) error { +func specCheckRoles(ctx context.Context, logger *slog.Logger, spec *types.GithubConnectorSpecV3, ignoreMissingRoles bool, clt *authclient.Client) error { allRoles, err := clt.GetRoles(ctx) if err != nil { - logger.WithError(err).Warn("Unable to get roles list. Skipping teams-to-roles sanity checks.") + logger.WarnContext(ctx, "Unable to get roles list, skipping teams-to-roles sanity checks", "error", err) return nil } @@ -161,7 +161,10 @@ func specCheckRoles(ctx context.Context, logger *logrus.Entry, spec *types.Githu _, found := roleMap[role] if !found { if ignoreMissingRoles { - logger.Warnf("teams-to-roles references non-existing role: %q. Available roles: %v.", role, roleNames) + logger.WarnContext(ctx, "teams-to-roles references non-existing role", + "non_existent_role", role, + "available_roles", roleNames, + ) } else { return trace.BadParameter("teams-to-roles references non-existing role: %v. Correct the mapping, or add --ignore-missing-roles to ignore this error. 
Available roles: %v.", role, roleNames) } diff --git a/tool/tctl/sso/configure/oidc.go b/tool/tctl/sso/configure/oidc.go index c783d29192381..b846f87f1d1fc 100644 --- a/tool/tctl/sso/configure/oidc.go +++ b/tool/tctl/sso/configure/oidc.go @@ -19,6 +19,7 @@ package configure import ( "context" "fmt" + "log/slog" "net/url" "os" "strings" @@ -26,7 +27,6 @@ import ( "github.com/alecthomas/kingpin/v2" "github.com/coreos/go-oidc/v3/oidc" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" @@ -43,7 +43,7 @@ type oidcPreset struct { description string display string issuerURL string - modifySpec func(logger *logrus.Entry, spec *types.OIDCConnectorSpecV3) error + modifySpec func(ctx context.Context, logger *slog.Logger, spec *types.OIDCConnectorSpecV3) error } type oidcPresetList []oidcPreset @@ -73,7 +73,7 @@ var oidcPresets = oidcPresetList([]oidcPreset{ description: "Google Workspace", display: "Google", issuerURL: "https://accounts.google.com", - modifySpec: func(logger *logrus.Entry, spec *types.OIDCConnectorSpecV3) error { + modifySpec: func(ctx context.Context, logger *slog.Logger, spec *types.OIDCConnectorSpecV3) error { if !strings.HasSuffix(spec.ClientID, ".apps.googleusercontent.com") { return trace.BadParameter(`For Google Workspace the client ID have to use format ".apps.googleusercontent.com", got %q instead. Set full value with --id=... 
or shorthand with --google-id=...`, spec.ClientID) } @@ -95,14 +95,14 @@ var oidcPresets = oidcPresetList([]oidcPreset{ description: "GitLab", display: "GitLab", issuerURL: "https://gitlab.com", - modifySpec: func(logger *logrus.Entry, spec *types.OIDCConnectorSpecV3) error { + modifySpec: func(ctx context.Context, logger *slog.Logger, spec *types.OIDCConnectorSpecV3) error { switch spec.Prompt { case "none": break case "": spec.Prompt = "none" default: - logger.Warnf("GitLab requires the 'prompt' parameter to be set to 'none', but %q found", spec.Prompt) + logger.WarnContext(ctx, "GitLab 'prompt' parameter was not set to required value of 'none'", "prompt", spec.Prompt) } return nil @@ -114,13 +114,13 @@ var oidcPresets = oidcPresetList([]oidcPreset{ description: "Okta", display: "Okta", issuerURL: "https://oktaice.okta.com", - modifySpec: func(logger *logrus.Entry, spec *types.OIDCConnectorSpecV3) error { + modifySpec: func(ctx context.Context, logger *slog.Logger, spec *types.OIDCConnectorSpecV3) error { if spec.Provider == "" { spec.Provider = teleport.Okta } if spec.Provider != teleport.Okta { - logger.Warnf("Okta requires %q provider, but %q found.", teleport.Okta, spec.Provider) + logger.WarnContext(ctx, "Configured provider was not okta", "provider", spec.Provider) } return nil @@ -186,12 +186,12 @@ Examples: > tctl sso configure oidc -n myauth -r groups,admin,access,editor,auditor -r group,developer,access --secret IDP_SECRET --id CLIENT_ID --issuer-url https://idp.example.com - Generate OIDC auth connector configuration called 'myauth'. Two mappings from OIDC claims to roles are defined: + Generate OIDC auth connector configuration called 'myauth'. Two mappings from OIDC claims to roles are defined: - members of 'admin' group will receive 'access', 'editor' and 'auditor' roles. - members of 'developer' group will receive 'access' role. The values for --secret, --id and --issuer-url are provided by IdP. 
- + > tctl sso configure oidc --preset okta --scope groups -r groups,okta-admin,access,editor,auditor --secret IDP_SECRET --id CLIENT_ID --issuer-url dev-123456.oktapreview.com Generate OIDC auth connector with Okta preset, enabled 'groups' scope, mapping group 'okta-admin' to roles 'access', 'editor', 'auditor'. @@ -202,7 +202,7 @@ Examples: Generate OIDC auth connector with Google preset. Service account credentials are set to be loaded from /var/lib/teleport/gacc.json with --google-acc-uri. > tctl sso configure oidc ... | tctl sso test - + Generate the configuration and immediately test it using "tctl sso test" command.`, presets)) preset := &AuthKindCommand{ @@ -237,7 +237,7 @@ func oidcRunFunc(ctx context.Context, cmd *SSOConfigureCommand, spec *types.OIDC // automatically switch to 'google' preset if google-specific flags are set. if flags.chosenPreset == "" { if spec.GoogleAdminEmail != "" || spec.GoogleServiceAccount != "" || spec.GoogleServiceAccountURI != "" { - cmd.Logger.Infof("Google-specific flags detected, enabling preset %q.", presetGoogle) + cmd.Logger.InfoContext(ctx, "Google-specific flags detected, enabling google preset") flags.chosenPreset = presetGoogle } } @@ -258,7 +258,7 @@ func oidcRunFunc(ctx context.Context, cmd *SSOConfigureCommand, spec *types.OIDC } if p.modifySpec != nil { - if err := p.modifySpec(cmd.Logger, spec); err != nil { + if err := p.modifySpec(ctx, cmd.Logger, spec); err != nil { return trace.Wrap(err) } } @@ -280,7 +280,7 @@ func oidcRunFunc(ctx context.Context, cmd *SSOConfigureCommand, spec *types.OIDC switch strings.ToLower(parse.Scheme) { case "": spec.IssuerURL = "https://" + spec.IssuerURL - cmd.Logger.Infof("Missing scheme for issuer URL. Defaulting to %q. 
New value: %q", "https://", spec.IssuerURL) + cmd.Logger.InfoContext(ctx, "Missing scheme for issuer URL, using https", "issuer_url", spec.IssuerURL) case "https": break default: @@ -290,7 +290,7 @@ func oidcRunFunc(ctx context.Context, cmd *SSOConfigureCommand, spec *types.OIDC // verify .well-known/openid-configuration is reachable if _, err := oidc.NewProvider(ctx, spec.IssuerURL); err != nil { if cmd.Config.Debug { - cmd.Logger.WithError(err).Warnf("Failed to load .well-known/openid-configuration for issuer URL %q", spec.IssuerURL) + cmd.Logger.WarnContext(ctx, "Failed to load .well-known/openid-configuration for issuer URL", "issuer_url", spec.IssuerURL, "error", err) } return trace.BadParameter("Failed to load .well-known/openid-configuration for issuer URL %q. Check expected --issuer-url against IdP configuration. Rerun with --debug to see the error.", spec.IssuerURL) } @@ -301,7 +301,7 @@ func oidcRunFunc(ctx context.Context, cmd *SSOConfigureCommand, spec *types.OIDC allRoles, err := clt.GetRoles(ctx) if err != nil { - cmd.Logger.WithError(err).Warn("unable to get roles list. Skipping attributes_to_roles sanity checks.") + cmd.Logger.WarnContext(ctx, "unable to get roles list, skipping attributes_to_roles sanity checks", "error", err) } else { roleMap := map[string]bool{} var roleNames []string @@ -315,7 +315,7 @@ func oidcRunFunc(ctx context.Context, cmd *SSOConfigureCommand, spec *types.OIDC _, found := roleMap[role] if !found { if flags.ignoreMissingRoles { - cmd.Logger.Warnf("claims-to-roles references non-existing role: %q. Available roles: %v.", role, roleNames) + cmd.Logger.WarnContext(ctx, "claims-to-roles references non-existing role", "role", role, "available_roles", roleNames) } else { return trace.BadParameter("claims-to-roles references non-existing role: %v. Correct the mapping, or add --ignore-missing-roles to ignore this error. 
Available roles: %v.", role, roleNames) } @@ -325,7 +325,7 @@ func oidcRunFunc(ctx context.Context, cmd *SSOConfigureCommand, spec *types.OIDC } if len(spec.RedirectURLs) == 0 { - spec.RedirectURLs = []string{ResolveCallbackURL(cmd.Logger, clt, "RedirectURLs", "https://%v/v1/webapi/oidc/callback")} + spec.RedirectURLs = []string{ResolveCallbackURL(ctx, cmd.Logger, clt, "RedirectURLs", "https://%v/v1/webapi/oidc/callback")} } connector, err := types.NewOIDCConnector(flags.connectorName, *spec) diff --git a/tool/tctl/sso/configure/saml.go b/tool/tctl/sso/configure/saml.go index fa9fa26b8e044..ee48f76885859 100644 --- a/tool/tctl/sso/configure/saml.go +++ b/tool/tctl/sso/configure/saml.go @@ -19,12 +19,12 @@ package configure import ( "context" "fmt" + "log/slog" "net/url" "os" "github.com/alecthomas/kingpin/v2" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" @@ -132,9 +132,9 @@ Presets: Examples: - > tctl sso configure saml -n myauth -r groups,admin,access,editor,auditor -r groups,developer,access -e entity-desc.xml + > tctl sso configure saml -n myauth -r groups,admin,access,editor,auditor -r groups,developer,access -e entity-desc.xml - Generate SAML auth connector configuration called 'myauth'. Two mappings from SAML attributes to roles are defined: + Generate SAML auth connector configuration called 'myauth'. Two mappings from SAML attributes to roles are defined: - members of 'admin' group will receive 'access', 'editor' and 'auditor' roles. - members of 'developer' group will receive 'access' role. The IdP metadata will be read from 'entity-desc.xml' file. @@ -147,7 +147,7 @@ Examples: > tctl sso configure saml -p okta -r groups,developer,access -e entity-desc.xml | tctl sso test - + Generate the configuration and immediately test it using "tctl sso test" command. 
`, presets)) @@ -197,7 +197,7 @@ func samlRunFunc( allRoles, err := clt.GetRoles(ctx) if err != nil { - cmd.Logger.WithError(err).Warn("unable to get roles list. Skipping attributes-to-roles sanity checks.") + cmd.Logger.WarnContext(ctx, "unable to get roles list, skipping attributes-to-roles sanity checks", "error", err) } else { roleMap := map[string]bool{} var roleNames []string @@ -211,7 +211,7 @@ func samlRunFunc( _, found := roleMap[role] if !found { if flags.ignoreMissingRoles { - cmd.Logger.Warnf("attributes-to-roles references non-existing role: %q. Available roles: %v.", role, roleNames) + cmd.Logger.WarnContext(ctx, "attributes-to-roles references non-existing role", "role", role, "available_roles", roleNames) } else { return trace.BadParameter("attributes-to-roles references non-existing role: %v. Correct the mapping, or add --ignore-missing-roles to ignore this error. Available roles: %v.", role, roleNames) } @@ -241,12 +241,12 @@ func samlRunFunc( } if spec.AssertionConsumerService == "" { - spec.AssertionConsumerService = ResolveCallbackURL(cmd.Logger, clt, "ACS", "https://%v/v1/webapi/saml/acs/"+flags.connectorName) + spec.AssertionConsumerService = ResolveCallbackURL(ctx, cmd.Logger, clt, "ACS", "https://%v/v1/webapi/saml/acs/"+flags.connectorName) } // figure out the actual meaning of entityDescriptorFlag. Can be: URL, file, plain XML. 
if flags.entityDescriptorFlag != "" { - if err = processEntityDescriptorFlag(spec, flags.entityDescriptorFlag, cmd.Logger); err != nil { + if err = processEntityDescriptorFlag(ctx, spec, flags.entityDescriptorFlag, cmd.Logger); err != nil { return trace.Wrap(err) } } @@ -278,20 +278,20 @@ func keyPairFromFlags(flags types.AsymmetricKeyPair) *types.AsymmetricKeyPair { return &flags } -func processEntityDescriptorFlag(spec *types.SAMLConnectorSpecV2, entityDescriptorFlag string, log *logrus.Entry) error { +func processEntityDescriptorFlag(ctx context.Context, spec *types.SAMLConnectorSpecV2, entityDescriptorFlag string, log *slog.Logger) error { var err error // case: URL var parsedURL *url.URL if parsedURL, err = url.Parse(entityDescriptorFlag); err == nil && parsedURL.Scheme != "" { spec.EntityDescriptorURL = entityDescriptorFlag - log.Infof("Entity descriptor looks like URL, entity_descriptor_url set to %q.", spec.EntityDescriptorURL) + log.InfoContext(ctx, "Using entity descriptor URL", "entity_descriptor_url", spec.EntityDescriptorURL) return nil } if parsedURL.Scheme == "" { - log.Infof("Cannot parse entity descriptor as URL, missing scheme: %q.", entityDescriptorFlag) + log.InfoContext(ctx, "entity descriptor URL missing scheme", "entity_descriptor", entityDescriptorFlag) } else { - log.WithError(err).Infof("Cannot parse entity descriptor as URL: %q.", entityDescriptorFlag) + log.InfoContext(ctx, "invalid entity descriptor URL", "entity_descriptor", entityDescriptorFlag, "error", err) } // case: file @@ -301,18 +301,18 @@ func processEntityDescriptorFlag(spec *types.SAMLConnectorSpecV2, entityDescript return trace.WrapWithMessage(err, "Validating entity descriptor from file %q failed. 
Check that XML is valid or download the file directly.", entityDescriptorFlag) } spec.EntityDescriptor = string(bytes) - log.Infof("Entity descriptor read from file %q.", entityDescriptorFlag) + log.InfoContext(ctx, "Entity descriptor read from file", "file", entityDescriptorFlag) return nil } - log.WithError(err).Infof("Cannot read entity descriptor from file: %q.", entityDescriptorFlag) + log.InfoContext(ctx, "Cannot read entity descriptor from file", "file", entityDescriptorFlag, "error", err) // case: verbatim XML if err = validateEntityDescriptor([]byte(entityDescriptorFlag), spec.Cert); err == nil { spec.EntityDescriptor = entityDescriptorFlag - log.Infof("Entity descriptor is valid XML, EntityDescriptor set to flag value.") + log.InfoContext(ctx, "Entity descriptor is valid XML, EntityDescriptor set to flag value") return nil } - log.WithError(trace.Unwrap(err)).Infof("Cannot parse entity descriptor as verbatim XML: %q.", entityDescriptorFlag) + log.InfoContext(ctx, "Cannot parse entity descriptor as verbatim XML", "entity_descriptor", entityDescriptorFlag, "error", err) return trace.Errorf("failed to process -e/--entity-descriptor flag. 
Valid values: XML file, URL, verbatim XML") } diff --git a/tool/tctl/sso/configure/saml_test.go b/tool/tctl/sso/configure/saml_test.go index ec9e6be0ca9f3..5187efbe4d5bd 100644 --- a/tool/tctl/sso/configure/saml_test.go +++ b/tool/tctl/sso/configure/saml_test.go @@ -17,15 +17,16 @@ package configure import ( - "io" + "context" + "log/slog" "os" "path/filepath" "testing" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/types" + logutils "github.com/gravitational/teleport/lib/utils/log" ) func Test_processEntityDescriptorFlag(t *testing.T) { @@ -59,7 +60,6 @@ V115UGOwvjOOxmOFbYBn865SHgMndFtr tests := []struct { name string entityDescriptor string - log *logrus.Entry wantErr bool want types.SAMLConnectorSpecV2 @@ -99,13 +99,10 @@ V115UGOwvjOOxmOFbYBn865SHgMndFtr }, } - log := logrus.New() - log.Out = io.Discard - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { spec := types.SAMLConnectorSpecV2{} - err := processEntityDescriptorFlag(&spec, tt.entityDescriptor, logrus.NewEntry(log)) + err := processEntityDescriptorFlag(context.Background(), &spec, tt.entityDescriptor, slog.New(logutils.DiscardHandler{})) if tt.wantErr { require.Error(t, err) } else { diff --git a/tool/teleport/common/sftp.go b/tool/teleport/common/sftp.go index 4d3e46f4decf7..2f81da0466c15 100644 --- a/tool/teleport/common/sftp.go +++ b/tool/teleport/common/sftp.go @@ -21,11 +21,13 @@ package common import ( "bufio" "bytes" + "context" "encoding/json" "errors" "fmt" "io" "io/fs" + "log/slog" "os" "os/user" "path" @@ -37,7 +39,6 @@ import ( "github.com/gogo/protobuf/jsonpb" "github.com/gravitational/trace" "github.com/pkg/sftp" - log "github.com/sirupsen/logrus" "golang.org/x/sys/unix" "github.com/gravitational/teleport" @@ -45,7 +46,6 @@ import ( "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/srv" - "github.com/gravitational/teleport/lib/utils" 
) const ( @@ -89,7 +89,7 @@ type allowedOps struct { // sftpHandler provides handlers for a SFTP server. type sftpHandler struct { - logger *log.Entry + logger *slog.Logger allowed *allowedOps // mtx protects files @@ -121,7 +121,7 @@ func (t *trackedFile) Close() error { return t.file.Close() } -func newSFTPHandler(logger *log.Entry, req *srv.FileTransferRequest, events chan<- apievents.AuditEvent) (*sftpHandler, error) { +func newSFTPHandler(logger *slog.Logger, req *srv.FileTransferRequest, events chan<- apievents.AuditEvent) (*sftpHandler, error) { var allowed *allowedOps if req != nil { allowed = &allowedOps{ @@ -460,6 +460,7 @@ func (s *sftpHandler) Lstat(req *sftp.Request) (sftp.ListerAt, error) { } func (s *sftpHandler) sendSFTPEvent(req *sftp.Request, reqErr error) { + ctx := context.TODO() event := &apievents.SFTP{ Metadata: apievents.Metadata{ Type: events.SFTPEvent, @@ -532,13 +533,13 @@ func (s *sftpHandler) sendSFTPEvent(req *sftp.Request, reqErr error) { } event.Action = apievents.SFTPAction_LINK default: - s.logger.Warnf("Unknown SFTP request %q", req.Method) + s.logger.WarnContext(ctx, "Unknown SFTP request", "request", req.Method) return } wd, err := os.Getwd() if err != nil { - s.logger.WithError(err).Warn("Failed to get working dir.") + s.logger.WarnContext(ctx, "Failed to get working dir", "error", err) } event.WorkingDirectory = wd @@ -569,7 +570,7 @@ func (s *sftpHandler) sendSFTPEvent(req *sftp.Request, reqErr error) { } } if reqErr != nil { - s.logger.Debugf("%s: %v", req.Method, reqErr) + s.logger.DebugContext(ctx, "failed handling SFTP request", "request", req.Method, "error", reqErr) // If possible, strip the filename from the error message. The // path will be included in audit events already, no need to // make the error message longer than it needs to be. 
@@ -605,8 +606,7 @@ func onSFTP() error { defer auditFile.Close() // Ensure the parent process will receive log messages from us - l := utils.NewLogger() - logger := l.WithField(teleport.ComponentKey, teleport.ComponentSubsystemSFTP) + logger := slog.With(teleport.ComponentKey, teleport.ComponentSubsystemSFTP) currentUser, err := user.Current() if err != nil { @@ -653,6 +653,7 @@ func onSFTP() error { } sftpSrv := sftp.NewRequestServer(ch, handler, sftp.WithStartDirectory(currentUser.HomeDir)) + ctx := context.TODO() // Start a goroutine to marshal and send audit events to the parent // process to avoid blocking the SFTP connection on event handling done := make(chan struct{}) @@ -662,13 +663,13 @@ func onSFTP() error { for event := range sftpEvents { oneOfEvent, err := apievents.ToOneOf(event) if err != nil { - logger.WithError(err).Warn("Failed to convert SFTP event to OneOf.") + logger.WarnContext(ctx, "Failed to convert SFTP event to OneOf", "error", err) continue } buf.Reset() if err := m.Marshal(&buf, oneOfEvent); err != nil { - logger.WithError(err).Warn("Failed to marshal SFTP event.") + logger.WarnContext(ctx, "Failed to marshal SFTP event", "error", err) continue } @@ -677,7 +678,7 @@ func onSFTP() error { buf.WriteByte(0x0) _, err = io.Copy(auditFile, &buf) if err != nil { - logger.WithError(err).Warn("Failed to send SFTP event to parent.") + logger.WarnContext(ctx, "Failed to send SFTP event to parent", "error", err) } } diff --git a/tool/teleport/common/teleport_test.go b/tool/teleport/common/teleport_test.go index 62f77dc4eccca..7b1292f1e625c 100644 --- a/tool/teleport/common/teleport_test.go +++ b/tool/teleport/common/teleport_test.go @@ -20,13 +20,14 @@ package common import ( "bytes" + "context" "fmt" + "log/slog" "os" "path/filepath" "testing" "github.com/gravitational/trace" - log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/types" @@ -84,7 +85,7 @@ func TestTeleportMain(t *testing.T) { 
require.True(t, conf.SSH.Enabled) require.True(t, conf.Proxy.Enabled) require.Equal(t, os.Stdout, conf.Console) - require.Equal(t, log.ErrorLevel, log.GetLevel()) + require.True(t, slog.Default().Handler().Enabled(context.Background(), slog.LevelError)) }) t.Run("RolesFlag", func(t *testing.T) { @@ -125,7 +126,7 @@ func TestTeleportMain(t *testing.T) { require.True(t, conf.SSH.Enabled) require.False(t, conf.Auth.Enabled) require.False(t, conf.Proxy.Enabled) - require.Equal(t, log.DebugLevel, conf.Log.GetLevel()) + require.True(t, slog.Default().Handler().Enabled(context.Background(), slog.LevelDebug)) require.Equal(t, "hvostongo.example.org", conf.Hostname) token, err := conf.Token() diff --git a/tool/tsh/common/aliases.go b/tool/tsh/common/aliases.go index 946842bbb28ba..a6cce4e892287 100644 --- a/tool/tsh/common/aliases.go +++ b/tool/tsh/common/aliases.go @@ -29,6 +29,8 @@ import ( "github.com/google/shlex" "github.com/gravitational/trace" + + logutils "github.com/gravitational/teleport/lib/utils/log" ) // tshAliasEnvKey is an env variable storing the aliases that, so far, has been expanded, and should not be expanded again. @@ -205,7 +207,7 @@ func (ar *aliasRunner) runAliasCommand(ctx context.Context, currentExecPath, exe // if execPath is our path, skip re-execution and run main directly instead. // this makes for better error messages in case of failures. 
if execPath == currentExecPath { - log.Debugf("Self re-exec command: tsh %v.", arguments) + logger.DebugContext(ctx, "tsh re-exec command", "arguments", arguments) return trace.Wrap(ar.runTshMain(ctx, arguments)) } @@ -214,7 +216,7 @@ func (ar *aliasRunner) runAliasCommand(ctx context.Context, currentExecPath, exe cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr - log.Debugf("Running external command: %v", cmd) + logger.DebugContext(ctx, "Running external command", "command", logutils.StringerAttr(cmd)) err = ar.runExternalCommand(cmd) if err == nil { return nil diff --git a/tool/tsh/common/app.go b/tool/tsh/common/app.go index be4b56cadf74d..e01935cb05212 100644 --- a/tool/tsh/common/app.go +++ b/tool/tsh/common/app.go @@ -40,6 +40,7 @@ import ( "github.com/gravitational/teleport/lib/client" "github.com/gravitational/teleport/lib/tlsca" "github.com/gravitational/teleport/lib/utils" + logutils "github.com/gravitational/teleport/lib/utils/log" ) // onAppLogin implements "tsh apps login" command. @@ -160,7 +161,7 @@ func printAppCommand(cf *CLIConf, tc *client.TeleportClient, app types.Applicati cmd.Stderr = cf.Stderr() cmd.Stdout = output - log.Debugf("Running automatic az login: %v", cmd.String()) + logger.DebugContext(cf.Context, "Running automatic az login", "command", logutils.StringerAttr(cmd)) err := cf.RunCommand(cmd) if err != nil { return trace.Wrap(err, "failed to automatically login with `az login` using identity %q; run with --debug for details", routeToApp.AzureIdentity) @@ -334,7 +335,9 @@ func onAppLogout(cf *CLIConf) error { // remove generated local files for the provided app. 
err := utils.RemoveFileIfExist(profile.AppLocalCAPath(tc.SiteName, app.Name)) if err != nil { - log.WithError(err).Warnf("Failed to remove %v", profile.AppLocalCAPath(tc.SiteName, app.Name)) + logger.WarnContext(cf.Context, "Failed to clean up app session", + "error", err, + "profile", profile.AppLocalCAPath(tc.SiteName, app.Name)) } } @@ -592,7 +595,7 @@ func (a *appInfo) pickCloudAppLogin(cf *CLIConf, logins []string) error { if err != nil { return trace.Wrap(err) } - log.Debugf("Azure identity is %q", azureIdentity) + logger.DebugContext(cf.Context, "Retrieved azure identity", "azure_identity", azureIdentity) a.AzureIdentity = azureIdentity case a.app.IsGCP(): @@ -600,7 +603,7 @@ func (a *appInfo) pickCloudAppLogin(cf *CLIConf, logins []string) error { if err != nil { return trace.Wrap(err) } - log.Debugf("GCP service account is %q", gcpServiceAccount) + logger.DebugContext(cf.Context, "Retrieved GCP service account", "service_account", gcpServiceAccount) a.GCPServiceAccount = gcpServiceAccount } @@ -660,7 +663,7 @@ func getApp(ctx context.Context, clt apiclient.GetResourcesClient, name string) appServer, ok := res.Resources[0].ResourceWithLabels.(types.AppServer) if !ok { - log.Warnf("expected types.AppServer but received unexpected type %T", res.Resources[0].ResourceWithLabels) + logger.WarnContext(ctx, "expected types.AppServer but received unexpected type", "resource_type", logutils.TypeAttr(res.Resources[0].ResourceWithLabels)) return nil, nil, trace.NotFound("app %q not found, use `tsh apps ls` to see registered apps", name) } diff --git a/tool/tsh/common/app_aws.go b/tool/tsh/common/app_aws.go index c09f1a69039db..74d575fb3ab0f 100644 --- a/tool/tsh/common/app_aws.go +++ b/tool/tsh/common/app_aws.go @@ -39,6 +39,7 @@ import ( "github.com/gravitational/teleport/lib/srv/alpnproxy" "github.com/gravitational/teleport/lib/tlsca" awsutils "github.com/gravitational/teleport/lib/utils/aws" + logutils "github.com/gravitational/teleport/lib/utils/log" ) const ( @@ 
-58,7 +59,7 @@ func onAWS(cf *CLIConf) error { defer func() { if err := awsApp.Close(); err != nil { - log.WithError(err).Error("Failed to close AWS app.") + logger.ErrorContext(cf.Context, "Failed to close AWS app", "error", err) } }() @@ -207,7 +208,7 @@ func (a *awsApp) RunCommand(cmd *exec.Cmd) error { return trace.Wrap(err) } - log.Debugf("Running command: %q", cmd) + logger.DebugContext(a.cf.Context, "Running AWS command", "command", logutils.StringerAttr(cmd)) cmd.Stdout = a.cf.Stdout() cmd.Stderr = a.cf.Stderr() @@ -246,7 +247,7 @@ func getARNFromFlags(cf *CLIConf, app types.Application, logins []string) (strin if cf.AWSRole == "" { if len(roles) == 1 { - log.Infof("AWS Role %v is selected by default as it is the only role configured for this AWS app.", roles[0].Display) + logger.InfoContext(cf.Context, "AWS Role is selected by default as it is the only role configured for this AWS app", "role", roles[0].Display) return roles[0].ARN, nil } @@ -289,13 +290,13 @@ func getARNFromFlags(cf *CLIConf, app types.Application, logins []string) (strin func getARNFromRoles(cf *CLIConf, roleGetter services.CurrentUserRoleGetter, profile *client.ProfileStatus, siteName string, app types.Application) []string { accessChecker, err := services.NewAccessCheckerForRemoteCluster(cf.Context, profile.AccessInfo(), siteName, roleGetter) if err != nil { - log.WithError(err).Debugf("Failed to fetch user roles.") + logger.DebugContext(cf.Context, "Failed to fetch user roles", "error", err) return profile.AWSRolesARNs } logins, err := accessChecker.GetAllowedLoginsForResource(app) if err != nil { - log.WithError(err).Debugf("Failed to fetch app logins.") + logger.DebugContext(cf.Context, "Failed to fetch app logins", "error", err) return profile.AWSRolesARNs } diff --git a/tool/tsh/common/app_azure.go b/tool/tsh/common/app_azure.go index d61ea04a8d072..74a70e770115d 100644 --- a/tool/tsh/common/app_azure.go +++ b/tool/tsh/common/app_azure.go @@ -39,6 +39,7 @@ import ( 
"github.com/gravitational/teleport/lib/srv/alpnproxy" "github.com/gravitational/teleport/lib/tlsca" "github.com/gravitational/teleport/lib/utils" + logutils "github.com/gravitational/teleport/lib/utils/log" ) const ( @@ -58,7 +59,7 @@ func onAzure(cf *CLIConf) error { defer func() { if err := app.Close(); err != nil { - log.WithError(err).Error("Failed to close Azure app.") + logger.ErrorContext(cf.Context, "Failed to close Azure app", "error", err) } }() @@ -149,7 +150,7 @@ func (a *azureApp) StartLocalProxies(ctx context.Context) error { if ok { azureMiddleware.SetPrivateKey(signer) } else { - log.Warn("Provided tls.Certificate has no valid private key.") + logger.WarnContext(ctx, "Provided tls.Certificate has no valid private key") } }), ) @@ -190,7 +191,7 @@ func (a *azureApp) RunCommand(cmd *exec.Cmd) error { return trace.Wrap(err) } - log.Debugf("Running command: %q", cmd) + logger.DebugContext(a.cf.Context, "Running azure command", "command", logutils.StringerAttr(cmd)) cmd.Stdout = a.cf.Stdout() cmd.Stderr = a.cf.Stderr() @@ -236,7 +237,7 @@ func getAzureIdentityFromFlags(cf *CLIConf, profile *client.ProfileStatus) (stri // if flag is missing, try to find singleton identity; failing that, print available options. 
if reqIdentity == "" { if len(identities) == 1 { - log.Infof("Azure identity %v is selected by default as it is the only identity available for this Azure app.", identities[0]) + logger.InfoContext(cf.Context, "Azure identity is selected by default as it is the only identity available for this Azure app", "identity", identities[0]) return identities[0], nil } diff --git a/tool/tsh/common/app_gcp.go b/tool/tsh/common/app_gcp.go index aa30c445085d4..3e3cea00575b3 100644 --- a/tool/tsh/common/app_gcp.go +++ b/tool/tsh/common/app_gcp.go @@ -39,6 +39,7 @@ import ( "github.com/gravitational/teleport/lib/tlsca" "github.com/gravitational/teleport/lib/utils" "github.com/gravitational/teleport/lib/utils/gcp" + logutils "github.com/gravitational/teleport/lib/utils/log" ) const ( @@ -59,7 +60,7 @@ func onGcloud(cf *CLIConf) error { defer func() { if err := app.Close(); err != nil { - log.WithError(err).Error("Failed to close GCP app.") + logger.ErrorContext(cf.Context, "Failed to close GCP app", "error", err) } }() @@ -82,7 +83,7 @@ func onGsutil(cf *CLIConf) error { defer func() { if err := app.Close(); err != nil { - log.WithError(err).Error("Failed to close GCP app.") + logger.ErrorContext(cf.Context, "Failed to close GCP app", "error", err) } }() @@ -269,7 +270,7 @@ func (a *gcpApp) RunCommand(cmd *exec.Cmd) error { return trace.Wrap(err) } - log.Debugf("Running command: %q", cmd) + logger.DebugContext(a.cf.Context, "Running gcp command", "command", logutils.StringerAttr(cmd)) cmd.Stdout = a.cf.Stdout() cmd.Stderr = a.cf.Stderr() @@ -326,7 +327,7 @@ func getGCPServiceAccountFromFlags(cf *CLIConf, profile *client.ProfileStatus) ( // if flag is missing, try to find singleton service account; failing that, print available options. 
if reqAccount == "" { if len(accounts) == 1 { - log.Infof("GCP service account %v is selected by default as it is the only one available for this GCP app.", accounts[0]) + logger.InfoContext(cf.Context, "GCP service account is selected by default as it is the only one available for this GCP app", "service_account", accounts[0]) return validate(accounts[0]) } diff --git a/tool/tsh/common/app_local_proxy.go b/tool/tsh/common/app_local_proxy.go index d613e2750012a..34dd16ec2ea97 100644 --- a/tool/tsh/common/app_local_proxy.go +++ b/tool/tsh/common/app_local_proxy.go @@ -209,7 +209,7 @@ func (a *localProxyApp) startLocalALPNProxy(ctx context.Context, portMapping cli go func() { if err = a.localALPNProxy.Start(ctx); err != nil { - log.WithError(err).Errorf("Failed to start local ALPN proxy.") + logger.ErrorContext(ctx, "Failed to start local ALPN proxy", "error", err) } }() return nil @@ -252,7 +252,7 @@ func (a *localProxyApp) startLocalForwardProxy(ctx context.Context, port int, fo go func() { if err := a.localForwardProxy.Start(); err != nil { - log.WithError(err).Errorf("Failed to start local forward proxy.") + logger.ErrorContext(ctx, "Failed to start local forward proxy", "error", err) } }() return nil diff --git a/tool/tsh/common/db.go b/tool/tsh/common/db.go index 10d039d83d62d..bc888ba9c98c0 100644 --- a/tool/tsh/common/db.go +++ b/tool/tsh/common/db.go @@ -55,6 +55,7 @@ import ( "github.com/gravitational/teleport/lib/srv/db/common/role" "github.com/gravitational/teleport/lib/tlsca" "github.com/gravitational/teleport/lib/utils" + logutils "github.com/gravitational/teleport/lib/utils/log" ) // onListDatabases implements "tsh db ls" command. 
@@ -90,7 +91,7 @@ func onListDatabases(cf *CLIConf) error { accessChecker, err := services.NewAccessCheckerForRemoteCluster(cf.Context, profile.AccessInfo(), tc.SiteName, clusterClient.AuthClient) if err != nil { - log.Debugf("Failed to fetch user roles: %v.", err) + logger.DebugContext(cf.Context, "Failed to fetch user roles", "error", err) } activeDatabases, err := profile.DatabasesForCluster(tc.SiteName) @@ -170,10 +171,10 @@ func listDatabasesAllClusters(cf *CLIConf) error { oteltrace.WithAttributes(attribute.String("cluster", cluster.name))) defer span.End() - logger := log.WithField("cluster", cluster.name) + logger := logger.With("cluster", cluster.name) databases, err := apiclient.GetAllResources[types.DatabaseServer](ctx, cluster.auth, &cluster.req) if err != nil { - logger.Errorf("Failed to get databases: %v.", err) + logger.ErrorContext(ctx, "Failed to get databases", "error", err) mu.Lock() errors = append(errors, trace.ConnectionProblem(err, "failed to list databases for cluster %s: %v", cluster.name, err)) @@ -183,7 +184,7 @@ func listDatabasesAllClusters(cf *CLIConf) error { accessChecker, err := services.NewAccessCheckerForRemoteCluster(ctx, cluster.profile.AccessInfo(), cluster.name, cluster.auth) if err != nil { - log.Debugf("Failed to fetch user roles: %v.", err) + logger.DebugContext(ctx, "Failed to fetch user roles", "error", err) } localDBListings := make(databaseListings, 0, len(databases)) @@ -296,7 +297,10 @@ func protocolSupportsInteractiveMode(dbProtocol string) bool { } func databaseLogin(cf *CLIConf, tc *client.TeleportClient, dbInfo *databaseInfo) error { - log.Debugf("Fetching database access certificate for %s on cluster %v.", dbInfo.RouteToDatabase, tc.SiteName) + logger.DebugContext(cf.Context, "Fetching database access certificate", + "database", dbInfo.RouteToDatabase, + "cluster", tc.SiteName, + ) profile, err := tc.ProfileStatus() if err != nil { @@ -307,7 +311,7 @@ func databaseLogin(cf *CLIConf, tc *client.TeleportClient, 
dbInfo *databaseInfo) // Identity files themselves act as the database credentials (if any), so // don't bother fetching new certs. if profile.IsVirtual { - log.Info("Note: already logged in due to an identity file (`-i ...`); will only update database config files.") + logger.InfoContext(cf.Context, "Note: already logged in due to an identity file (`-i ...`); will only update database config files") } else { if err = client.RetryWithRelogin(cf.Context, tc, func() error { keyRing, err = tc.IssueUserCertsWithMFA(cf.Context, client.ReissueParams{ @@ -378,7 +382,7 @@ func onDatabaseLogout(cf *CLIConf) error { } if profile.IsVirtual { - log.Info("Note: an identity file is in use (`-i ...`); will only update database config files.") + logger.InfoContext(cf.Context, "Note: an identity file is in use (`-i ...`); will only update database config files.") } for _, db := range databases { @@ -611,9 +615,9 @@ func maybeStartLocalProxy(ctx context.Context, cf *CLIConf, return nil, nil } if requires.tunnel { - log.Debugf("Starting local proxy tunnel because: %v", strings.Join(requires.tunnelReasons, ", ")) + logger.DebugContext(ctx, "Starting local proxy tunnel", "reasons", requires.tunnelReasons) } else { - log.Debugf("Starting local proxy because: %v", strings.Join(requires.localProxyReasons, ", ")) + logger.DebugContext(ctx, "Starting local proxy", "reasons", requires.localProxyReasons) } listener, err := createLocalProxyListener("localhost:0", dbInfo.RouteToDatabase, profile) @@ -640,7 +644,7 @@ func maybeStartLocalProxy(ctx context.Context, cf *CLIConf, go func() { defer listener.Close() if err := lp.Start(ctx); err != nil { - log.WithError(err).Errorf("Failed to start local proxy") + logger.ErrorContext(cf.Context, "Failed to start local proxy", "error", err) } }() @@ -802,7 +806,7 @@ func onDatabaseConnect(cf *CLIConf) error { if err != nil { return trace.Wrap(err) } - log.Debug(cmd.String()) + logger.DebugContext(ctx, "executing command", "command", 
logutils.StringerAttr(cmd)) cmd.Stdout = os.Stdout cmd.Stdin = os.Stdin @@ -1001,7 +1005,7 @@ func (d *databaseInfo) checkAndSetDefaults(cf *CLIConf, tc *client.TeleportClien if err != nil { return trace.Wrap(err) } - log.Debugf("Defaulting to the allowed database user %q\n", dbUser) + logger.DebugContext(cf.Context, "Defaulting to the allowed database user", "database_user", dbUser) d.Username = dbUser } if needDBName { @@ -1009,7 +1013,7 @@ func (d *databaseInfo) checkAndSetDefaults(cf *CLIConf, tc *client.TeleportClien if err != nil { return trace.Wrap(err) } - log.Debugf("Defaulting to the allowed database name %q\n", dbName) + logger.DebugContext(cf.Context, "Defaulting to the allowed database name", "database_name", dbName) d.Database = dbName } return nil @@ -1062,7 +1066,7 @@ func chooseOneDatabase(cf *CLIConf, databases types.Databases) (types.Database, // that database over any others. for _, db := range databases { if db.GetName() == selectors.name { - log.Debugf("Selected database %q by exact name match", db.GetName()) + logger.DebugContext(cf.Context, "Selected database by exact name match", "database", db.GetName()) return db, nil } } @@ -1072,11 +1076,11 @@ func chooseOneDatabase(cf *CLIConf, databases types.Databases) (types.Database, for _, db := range dbs { names = append(names, db.GetName()) } - log.Debugf("Choosing amongst databases (%v) by discovered name", names) + logger.DebugContext(cf.Context, "Choosing amongst databases by discovered name", "databases", names) databases = dbs } if len(databases) == 1 { - log.Debugf("Selected database %q", databases[0].GetName()) + logger.DebugContext(cf.Context, "Selected database", "database", databases[0].GetName()) return databases[0], nil } @@ -1124,7 +1128,7 @@ func getDatabaseServers(ctx context.Context, tc *client.TeleportClient, name str var err error predicate := makePredicateConjunction(matchName, tc.PredicateExpression) - log.Debugf("Listing databases with predicate (%v) and labels %v", 
predicate, tc.Labels) + logger.DebugContext(ctx, "Listing databases with predicate and labels", "predicate", predicate, "labels", tc.Labels) databases, err = tc.ListDatabaseServersWithFilters(ctx, &proto.ListResourcesRequest{ Namespace: tc.Namespace, @@ -1152,7 +1156,7 @@ func getDatabaseByNameOrDiscoveredName(cf *CLIConf, tc *client.TeleportClient, a for _, db := range activeDBs { names = append(names, db.GetName()) } - log.Debugf("Choosing a database amongst active databases (%v)", names) + logger.DebugContext(cf.Context, "Choosing a database amongst active databases", "databases", names) // preferentially choose from active databases if any of them match. return chooseOneDatabase(cf, activeDBs) } @@ -1187,7 +1191,7 @@ func listDatabasesWithPredicate(ctx context.Context, tc *client.TeleportClient, err := client.RetryWithRelogin(ctx, tc, func() error { var err error predicate := makePredicateConjunction(predicate, tc.PredicateExpression) - log.Debugf("Listing databases with predicate (%v) and labels %v", predicate, tc.Labels) + logger.DebugContext(ctx, "Listing databases with predicate and labels", "predicate", predicate, "labels", tc.Labels) databases, err = tc.ListDatabases(ctx, &proto.ListResourcesRequest{ Namespace: tc.Namespace, ResourceType: types.KindDatabaseServer, @@ -1421,16 +1425,25 @@ func dbInfoHasChanged(cf *CLIConf, certPath string) (bool, error) { } if cf.DatabaseUser != "" && cf.DatabaseUser != identity.RouteToDatabase.Username { - log.Debugf("Will reissue database certificate for user %s (was %s)", cf.DatabaseUser, identity.RouteToDatabase.Username) + logger.DebugContext(cf.Context, "Will reissue database certificate for user", + "current_user", cf.DatabaseUser, + "previous_user", identity.RouteToDatabase.Username, + ) return true, nil } if cf.DatabaseName != "" && cf.DatabaseName != identity.RouteToDatabase.Database { - log.Debugf("Will reissue database certificate for database name %s (was %s)", cf.DatabaseName, 
identity.RouteToDatabase.Database) + logger.DebugContext(cf.Context, "Will reissue database certificate for database name", + "current_database", cf.DatabaseName, + "previous_database", identity.RouteToDatabase.Database, + ) return true, nil } if !apiutils.ContainSameUniqueElements(dbRoles, identity.RouteToDatabase.Roles) { - log.Debugf("Will reissue database certificate for database roles %v (was %v)", dbRoles, identity.RouteToDatabase.Roles) + logger.DebugContext(cf.Context, "Will reissue database certificate for database roles", + "current_roles", dbRoles, + "previous_roles", identity.RouteToDatabase.Roles, + ) return true, nil } return false, nil @@ -1502,14 +1515,14 @@ func maybePickActiveDatabase(cf *CLIConf, activeRoutes []tlsca.RouteToDatabase) case 0: return nil, trace.NotFound(formatDBNotLoggedIn(cf.SiteName, selectors)) case 1: - log.Debugf("Auto-selecting the only active database %q", activeRoutes[0].ServiceName) + logger.DebugContext(cf.Context, "Auto-selecting the only active database", "database", activeRoutes[0].ServiceName) return &activeRoutes[0], nil default: return nil, trace.BadParameter(formatChooseActiveDB(activeRoutes)) } } if route, ok := findActiveDatabase(selectors.name, activeRoutes); ok { - log.Debugf("Selected active database %q by name", route.ServiceName) + logger.DebugContext(cf.Context, "Selected active database by name", "database", route.ServiceName) return &route, nil } } @@ -1733,8 +1746,10 @@ func getDBConnectLocalProxyRequirement(ctx context.Context, tc *client.TeleportC // Call API and check if a user needs to use MFA to connect to the database. 
mfaRequired, err := isMFADatabaseAccessRequired(ctx, tc, route) if err != nil { - log.WithError(err).Debugf("error getting MFA requirement for database %v", - route.ServiceName) + logger.DebugContext(ctx, "error getting MFA requirement for database", + "database", route.ServiceName, + "error", err, + ) } else if mfaRequired { // When MFA is required, we should require a local proxy tunnel, // because the local proxy tunnel can hold database MFA certs in-memory diff --git a/tool/tsh/common/db_print.go b/tool/tsh/common/db_print.go index 68bd9012688a1..a840f92bdcdba 100644 --- a/tool/tsh/common/db_print.go +++ b/tool/tsh/common/db_print.go @@ -19,6 +19,7 @@ package common import ( + "context" "fmt" "io" "reflect" @@ -126,7 +127,10 @@ func formatDatabaseRolesForDB(database types.Database, accessChecker services.Ac autoUser, err := accessChecker.DatabaseAutoUserMode(database) if err != nil { - log.Warnf("Failed to get DatabaseAutoUserMode for database %v: %v.", database.GetName(), err) + logger.WarnContext(context.Background(), "Failed to get DatabaseAutoUserMode for database", + "database", database.GetName(), + "error", err, + ) return "" } else if !autoUser.IsEnabled() { return "" @@ -134,7 +138,10 @@ func formatDatabaseRolesForDB(database types.Database, accessChecker services.Ac roles, err := accessChecker.CheckDatabaseRoles(database, nil) if err != nil { - log.Warnf("Failed to CheckDatabaseRoles for database %v: %v.", database.GetName(), err) + logger.WarnContext(context.Background(), "Failed to CheckDatabaseRoles for database", + "database", database.GetName(), + "error", err, + ) return "" } return fmt.Sprintf("%v", roles) diff --git a/tool/tsh/common/device.go b/tool/tsh/common/device.go index ac9ec7eea4f12..28ee9907bb1e6 100644 --- a/tool/tsh/common/device.go +++ b/tool/tsh/common/device.go @@ -27,6 +27,7 @@ import ( "github.com/gravitational/trace" "google.golang.org/protobuf/encoding/protojson" + "github.com/gravitational/teleport" devicepb 
"github.com/gravitational/teleport/api/gen/proto/go/teleport/devicetrust/v1" "github.com/gravitational/teleport/lib/client" "github.com/gravitational/teleport/lib/devicetrust" @@ -247,7 +248,7 @@ func (c *deviceActivateCredentialCommand) run(cf *CLIConf) error { // On error, wait for user input before executing. This is because this // opens in a second window. If we return the error immediately, then // this window closes before the user can inspect it. - log.WithError(err).Error("An error occurred during credential activation. Press enter to close this window.") + logger.ErrorContext(cf.Context, "An error occurred during credential activation, press enter to close this window", "error", err) _, _ = fmt.Scanln() } return trace.Wrap(err) @@ -260,7 +261,10 @@ type deviceDMIReadCommand struct { func (c *deviceDMIReadCommand) run(cf *CLIConf) error { dmiInfo, err := linux.DMIInfoFromSysfs() if err != nil { - log.WithError(err).Warn("Device Trust: Failed to read DMI information") + logger.WarnContext(cf.Context, "Failed to read DMI information", + teleport.ComponentKey, "DeviceTrust", + "error", err, + ) // err swallowed on purpose. 
} if dmiInfo != nil { diff --git a/tool/tsh/common/kube.go b/tool/tsh/common/kube.go index 9ba2606260cfe..f14ef5b420b23 100644 --- a/tool/tsh/common/kube.go +++ b/tool/tsh/common/kube.go @@ -37,7 +37,6 @@ import ( "github.com/ghodss/yaml" "github.com/gravitational/trace" dockerterm "github.com/moby/term" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -163,7 +162,7 @@ func (c *kubeJoinCommand) run(cf *CLIConf) error { return trace.Wrap(err) } if crt != nil && time.Until(crt.NotAfter) > time.Minute { - log.Debugf("Re-using existing TLS cert for kubernetes cluster %q", kubeCluster) + logger.DebugContext(cf.Context, "Re-using existing TLS cert for kubernetes cluster", "cluster", kubeCluster) } else { err = client.RetryWithRelogin(cf.Context, tc, func() error { var err error @@ -585,7 +584,7 @@ func takeKubeCredLock(ctx context.Context, homePath, proxy string, lockTimeout t // If kube credentials lockfile already exists, it means last time kube credentials was called // we had an error while trying to issue certificate, return an error asking user to login manually. 
if _, err := os.Stat(kubeCredLockfilePath); err == nil { - log.Debugf("Kube credentials lockfile was found at %q, aborting.", kubeCredLockfilePath) + logger.DebugContext(ctx, "Kube credentials lock file was found, aborting", "lock_file", kubeCredLockfilePath) return nil, trace.Wrap(errKubeCredLockfileFound) } @@ -595,7 +594,7 @@ func takeKubeCredLock(ctx context.Context, homePath, proxy string, lockTimeout t // Take a lock while we're trying to issue certificate and possibly relogin unlock, err := utils.FSTryWriteLockTimeout(ctx, kubeCredLockfilePath, lockTimeout) if err != nil { - log.Debugf("could not take kube credentials lock: %v", err.Error()) + logger.DebugContext(ctx, "could not take kube credentials lock", "error", err) return nil, trace.Wrap(errKubeCredLockfileFound) } @@ -603,14 +602,17 @@ func takeKubeCredLock(ctx context.Context, homePath, proxy string, lockTimeout t // We must unlock the lockfile before removing it, otherwise unlock operation will fail // on Windows. if err := unlock(); err != nil { - log.WithError(err).Warnf("could not unlock kube credentials lock") + logger.WarnContext(ctx, "could not unlock kube credentials lock", "error", err) } if !removeFile { return } // Remove kube credentials lockfile. 
if err = os.Remove(kubeCredLockfilePath); err != nil && !os.IsNotExist(err) { - log.WithError(err).Warnf("could not remove kube credentials lockfile %q", kubeCredLockfilePath) + logger.WarnContext(ctx, "could not remove kube credentials lock file", + "lock_file", kubeCredLockfilePath, + "error", err, + ) } }, nil } @@ -646,7 +648,7 @@ func (c *kubeCredentialsCommand) run(cf *CLIConf) error { ); err == nil { crt, _ := tlsca.ParseCertificatePEM(certPEM) if crt != nil && time.Until(crt.NotAfter) > time.Minute { - log.Debugf("Re-using existing TLS cert for Kubernetes cluster %q", c.kubeCluster) + logger.DebugContext(cf.Context, "Re-using existing TLS cert for Kubernetes cluster", "cluster", c.kubeCluster) return c.writeByteResponse(cf.Stdout(), certPEM, keyPEM, crt.NotAfter) } } @@ -681,7 +683,7 @@ func (c *kubeCredentialsCommand) issueCert(cf *CLIConf) error { return trace.Wrap(err) } if crt != nil && time.Until(crt.NotAfter) > time.Minute { - log.Debugf("Re-using existing TLS cert for Kubernetes cluster %q", c.kubeCluster) + logger.DebugContext(cf.Context, "Re-using existing TLS cert for Kubernetes cluster", "cluster", c.kubeCluster) return c.writeKeyResponse(cf.Stdout(), k, c.kubeCluster) } @@ -689,7 +691,7 @@ func (c *kubeCredentialsCommand) issueCert(cf *CLIConf) error { // a new one. 
} - log.Debugf("Requesting TLS cert for Kubernetes cluster %q", c.kubeCluster) + logger.DebugContext(cf.Context, "Requesting TLS cert for Kubernetes cluster", "cluster", c.kubeCluster) var unlockKubeCred func(bool) deleteKubeCredsLock := false defer func() { @@ -1050,7 +1052,7 @@ func (c *kubeLSCommand) runAllClusters(cf *CLIConf) error { group.Go(func() error { kc, err := kubeutils.ListKubeClustersWithFilters(groupCtx, cluster.auth, cluster.req) if err != nil { - logrus.Errorf("Failed to get kube clusters: %v.", err) + logger.ErrorContext(groupCtx, "Failed to get kube clusters", "error", err) mu.Lock() errors = append(errors, trace.ConnectionProblem(err, "failed to list kube clusters for cluster %s: %v", cluster.name, err)) mu.Unlock() @@ -1498,7 +1500,7 @@ func buildKubeConfigUpdate(cf *CLIConf, kubeStatus *kubernetesStatus, overrideCo if len(kubeStatus.kubeClusters) == 0 { // If there are no registered k8s clusters, we may have an older teleport cluster. // Fall back to the old kubeconfig, with static credentials from v.Credentials. 
- log.Debug("Disabling exec plugin mode for kubeconfig because this Teleport cluster has no Kubernetes clusters.") + logger.DebugContext(cf.Context, "Disabling exec plugin mode for kubeconfig because this Teleport cluster has no Kubernetes clusters") return v, nil } diff --git a/tool/tsh/common/kube_proxy.go b/tool/tsh/common/kube_proxy.go index 75c8c120b223e..5e627a6303dbf 100644 --- a/tool/tsh/common/kube_proxy.go +++ b/tool/tsh/common/kube_proxy.go @@ -24,7 +24,6 @@ import ( "encoding/pem" "fmt" "io" - "log/slog" "net" "os" "os/exec" @@ -265,7 +264,7 @@ func (c *proxyKubeCommand) printPrepare(cf *CLIConf, title string, clusters kube for _, cluster := range clusters { contextName, err := kubeconfig.ContextNameFromTemplate(c.overrideContextName, cluster.TeleportCluster, cluster.KubeCluster) if err != nil { - slog.WarnContext(cf.Context, "Failed to generate context name.", "error", err) + logger.WarnContext(cf.Context, "Failed to generate context name", "error", err) contextName = kubeconfig.ContextName(cluster.TeleportCluster, cluster.KubeCluster) } table.AddRow([]string{cluster.TeleportCluster, cluster.KubeCluster, contextName}) @@ -495,7 +494,7 @@ func loadKubeUserCerts(ctx context.Context, tc *client.TeleportClient, clusters if key := kubeKeys[cluster.TeleportCluster]; key != nil { cert, err := kubeCertFromKeyRing(key, cluster.KubeCluster) if err == nil { - log.Debugf("Client cert loaded from keystore for %v.", cluster) + logger.DebugContext(ctx, "Client cert loaded from keystore for cluster", "cluster", cluster) certs.Add(cluster.TeleportCluster, cluster.KubeCluster, cert) continue } @@ -510,7 +509,7 @@ func loadKubeUserCerts(ctx context.Context, tc *client.TeleportClient, clusters return nil, trace.Wrap(err) } - log.Debugf("Client cert issued for %v.", cluster) + logger.DebugContext(ctx, "Client cert issued for cluster", "cluster", cluster) certs.Add(cluster.TeleportCluster, cluster.KubeCluster, cert) } return certs, nil diff --git 
a/tool/tsh/common/kubectl.go b/tool/tsh/common/kubectl.go index 69604a19700a5..55efc7a0d2b57 100644 --- a/tool/tsh/common/kubectl.go +++ b/tool/tsh/common/kubectl.go @@ -240,7 +240,7 @@ func runKubectlAndCollectRun(cf *CLIConf, fullArgs, args []string) error { writer.CloseWithError(io.EOF) if scanErr := group.Wait(); scanErr != nil { - log.WithError(scanErr).Warn("unable to scan stderr payload") + logger.WarnContext(cf.Context, "unable to scan stderr payload", "error", scanErr) } if err == nil { diff --git a/tool/tsh/common/mfa.go b/tool/tsh/common/mfa.go index 404be014e1c0f..31c6cb48a5195 100644 --- a/tool/tsh/common/mfa.go +++ b/tool/tsh/common/mfa.go @@ -233,7 +233,7 @@ func (c *mfaAddCommand) run(cf *CLIConf) error { if !slices.Contains(defaultDeviceTypes, touchIDDeviceType) { diag, err := touchid.Diag() if err == nil && diag.IsClamshellFailure() { - log.Warn("Touch ID support disabled, is your MacBook lid closed?") + logger.WarnContext(ctx, "Touch ID support disabled, is your MacBook lid closed?") } } @@ -280,7 +280,7 @@ func (c *mfaAddCommand) run(cf *CLIConf) error { // Touch ID is always a resident key/passwordless c.allowPasswordless = true } - log.Debugf("tsh using passwordless registration? 
%v", c.allowPasswordless) + logger.DebugContext(ctx, "tsh using passwordless registration?", "allow_passwordless", c.allowPasswordless) dev, err := c.addDeviceRPC(ctx, tc) if err != nil { @@ -468,7 +468,7 @@ func promptTOTPRegisterChallenge(ctx context.Context, c *proto.TOTPRegisterChall var showingQRCode bool closeQR, err := showOTPQRCode(otpKey) if err != nil { - log.WithError(err).Debug("Failed to show QR code") + logger.DebugContext(ctx, "Failed to show QR code", "error", err) } else { showingQRCode = true defer closeQR() @@ -515,7 +515,10 @@ func promptTOTPRegisterChallenge(ctx context.Context, c *proto.TOTPRegisterChall } func promptWebauthnRegisterChallenge(ctx context.Context, origin string, cc *wantypes.CredentialCreation) (*proto.MFARegisterResponse, error) { - log.Debugf("WebAuthn: prompting MFA devices with origin %q", origin) + logger.DebugContext(ctx, "prompting MFA devices with origin", + teleport.ComponentKey, "WebAuthn", + "origin", origin, + ) prompt := wancli.NewDefaultPrompt(ctx, os.Stdout) prompt.PINMessage = "Enter your *new* security key PIN" @@ -527,7 +530,10 @@ func promptWebauthnRegisterChallenge(ctx context.Context, origin string, cc *wan } func promptTouchIDRegisterChallenge(origin string, cc *wantypes.CredentialCreation) (*proto.MFARegisterResponse, registerCallback, error) { - log.Debugf("Touch ID: prompting registration with origin %q", origin) + logger.DebugContext(context.TODO(), "prompting registration with origin", + teleport.ComponentKey, "TouchID", + "origin", origin, + ) reg, err := touchid.Register(origin, cc) if err != nil { @@ -662,19 +668,23 @@ func showOTPQRCode(k *otp.Key) (cleanup func(), retErr error) { if err := imageFile.Close(); err != nil { return nil, trace.ConvertSystemError(err) } - log.Debugf("Wrote OTP QR code to %s", imageFile.Name()) + ctx := context.TODO() + logger.DebugContext(ctx, "Wrote OTP QR code to file", "file", imageFile.Name()) cmd := exec.Command(imageViewer, append(imageViewerArgs, 
imageFile.Name())...) if err := cmd.Start(); err != nil { return nil, trace.ConvertSystemError(err) } - log.Debugf("Opened QR code via %q", imageViewer) + logger.DebugContext(ctx, "Opened QR code via image viewer", "image_viewer", imageViewer) return func() { if err := utils.RemoveSecure(imageFile.Name()); err != nil { - log.WithError(err).Debugf("Failed to clean up temporary QR code file %q", imageFile.Name()) + logger.DebugContext(ctx, "Failed to clean up temporary QR code file", + "file", imageFile.Name(), + "error", err, + ) } if err := cmd.Process.Kill(); err != nil { - log.WithError(err).Debug("Failed to stop the QR code image viewer") + logger.DebugContext(ctx, "Failed to stop the QR code image viewer", "error", err) } }, nil } @@ -684,6 +694,9 @@ func deleteTouchIDCredentialIfApplicable(credentialID string) { case errors.Is(err, &touchid.ErrAttemptFailed{}): // Nothing to do here, just proceed. case err != nil: - log.WithError(err).Errorf("Failed to delete credential: %s\n", credentialID) + logger.ErrorContext(context.Background(), "Failed to delete credential", + "error", err, + "credential", credentialID, + ) } } diff --git a/tool/tsh/common/play.go b/tool/tsh/common/play.go index 64d6ea97ceb47..6ff68eb495413 100644 --- a/tool/tsh/common/play.go +++ b/tool/tsh/common/play.go @@ -58,7 +58,7 @@ func onPlay(cf *CLIConf) error { return playSession(cf) } if cf.PlaySpeed != "1x" { - log.Warn("--speed is not applicable for formats other than pty") + logger.WarnContext(cf.Context, "--speed is not applicable for formats other than pty") } return exportSession(cf) } @@ -94,7 +94,7 @@ func playSession(cf *CLIConf) error { if err := tc.Play(cf.Context, cf.SessionID, speed, cf.NoWait); err != nil { if trace.IsNotFound(err) { - log.WithError(err).Debug("error playing session") + logger.DebugContext(cf.Context, "error playing session", "error", err) return trace.NotFound("Recording for session %s not found.", cf.SessionID) } return trace.Wrap(err) diff --git 
a/tool/tsh/common/proxy.go b/tool/tsh/common/proxy.go index 1fa5cc735ab08..dc357467e65e2 100644 --- a/tool/tsh/common/proxy.go +++ b/tool/tsh/common/proxy.go @@ -187,7 +187,7 @@ func onProxyCommandDB(cf *CLIConf) error { defer func() { if err := listener.Close(); err != nil { - log.WithError(err).Warnf("Failed to close listener.") + logger.WarnContext(cf.Context, "Failed to close listener", "error", err) } }() @@ -353,7 +353,7 @@ func maybeAddOracleOptions(ctx context.Context, tc *libclient.TeleportClient, db dbServers, err := getDatabaseServers(ctx, tc, dbInfo.ServiceName) if err != nil { // log, but treat this error as non-fatal. - log.Warnf("Error getting database servers: %s", err.Error()) + logger.WarnContext(ctx, "Error getting database servers", "error", err) return opts } @@ -362,7 +362,7 @@ func maybeAddOracleOptions(ctx context.Context, tc *libclient.TeleportClient, db for _, server := range dbServers { ver, err := semver.NewVersion(server.GetTeleportVersion()) if err != nil { - log.Debugf("Failed to parse teleport version %q: %v", server.GetTeleportVersion(), err) + logger.DebugContext(ctx, "Failed to parse teleport version", "version", server.GetTeleportVersion(), "error", err) continue } @@ -377,10 +377,17 @@ func maybeAddOracleOptions(ctx context.Context, tc *libclient.TeleportClient, db } } - log.Debugf("Agents for database %q with Oracle support: total %v, old %v, new %v.", dbInfo.ServiceName, len(dbServers), oldServers, newServers) + logger.DebugContext(ctx, "Retrieved agents for database with Oracle support", + "database", dbInfo.ServiceName, + "total", len(dbServers), + "old_count", oldServers, + "new_count", newServers, + ) if oldServers > 0 { - log.Warnf("Detected database agents older than %v. 
For improved client support upgrade all database agents in your cluster to a newer version.", cutoffVersion) + logger.WarnContext(ctx, "Detected outdated database agent, for improved client support upgrade all database agents in your cluster to a newer version", + "lowest_supported_version", cutoffVersion, + ) } opts = append(opts, dbcmd.WithOracleOpts(oldServers == 0, newServers > 0)) @@ -504,7 +511,7 @@ func onProxyCommandApp(cf *CLIConf) error { defer func() { if err := proxyApp.Close(); err != nil { - log.WithError(err).Error("Failed to close app proxy.") + logger.ErrorContext(cf.Context, "Failed to close app proxy", "error", err) } }() @@ -531,7 +538,7 @@ func onProxyCommandAWS(cf *CLIConf) error { defer func() { if err := awsApp.Close(); err != nil { - log.WithError(err).Error("Failed to close AWS app.") + logger.ErrorContext(cf.Context, "Failed to close AWS app", "error", err) } }() @@ -624,7 +631,7 @@ func onProxyCommandAzure(cf *CLIConf) error { defer func() { if err := azApp.Close(); err != nil { - log.WithError(err).Error("Failed to close Azure app.") + logger.ErrorContext(cf.Context, "Failed to close Azure app", "error", err) } }() @@ -655,7 +662,7 @@ func onProxyCommandGCloud(cf *CLIConf) error { defer func() { if err := gcpApp.Close(); err != nil { - log.WithError(err).Error("Failed to close GCP app.") + logger.ErrorContext(cf.Context, "Failed to close GCP app", "error", err) } }() @@ -795,7 +802,7 @@ Use the following command to connect to the Oracle database server using CLI: $ {{.command}} {{if .canUseTCP }}Other clients can use: - - a direct connection to {{.address}} without a username and password + - a direct connection to {{.address}} without a username and password - a custom JDBC connection string: {{.jdbcConnectionString}} {{else }}You can also connect using Oracle JDBC connection string: diff --git a/tool/tsh/common/recording_export.go b/tool/tsh/common/recording_export.go index 6d868557fca30..5a55447352a1d 100644 --- 
a/tool/tsh/common/recording_export.go +++ b/tool/tsh/common/recording_export.go @@ -37,6 +37,7 @@ import ( "github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/session" "github.com/gravitational/teleport/lib/srv/desktop/tdp" + logutils "github.com/gravitational/teleport/lib/utils/log" ) const ( @@ -112,7 +113,7 @@ loop: return frameCount, ctx.Err() case evt, more := <-evts: if !more { - log.Warnln("reached end of stream before seeing session end event") + logger.WarnContext(ctx, "reached end of stream before seeing session end event") break loop } @@ -137,7 +138,7 @@ loop: case *apievents.DesktopRecording: msg, err := tdp.Decode(evt.Message) if err != nil { - log.Warnf("failed to decode desktop recording message: %v", err) + logger.WarnContext(ctx, "failed to decode desktop recording message", "error", err) break loop } @@ -153,7 +154,7 @@ loop: // Note: this works because we don't currently support resizing // the window during a session. If this changes, we'd have to // find the maximum window size first. 
- log.Debugf("allocating %dx%d screen", msg.Width, msg.Height) + logger.DebugContext(ctx, "allocating screen size", "width", msg.Width, "height", msg.Height) width, height = int32(msg.Width), int32(msg.Height) screen = image.NewNRGBA(image.Rectangle{ Min: image.Pt(0, 0), @@ -196,7 +197,10 @@ loop: delta := evt.DelayMilliseconds - lastEmitted framesToEmit := int64(float64(delta) / frameDelayMillis) if framesToEmit > 0 { - log.Debugf("%dms since last frame, emitting %d frames", delta, framesToEmit) + logger.DebugContext(ctx, "emitting frames", + "last_event_ms", delta, + "frames_to_emit", framesToEmit, + ) buf.Reset() if err := jpeg.Encode(buf, screen, nil); err != nil { return frameCount, trace.Wrap(err) @@ -231,7 +235,7 @@ loop: } default: - log.Debugf("got unexpected audit event %T", evt) + logger.DebugContext(ctx, "got unexpected audit event", "event", logutils.TypeAttr(evt)) } } } diff --git a/tool/tsh/common/resolve_default_addr.go b/tool/tsh/common/resolve_default_addr.go index 5767015e0dc72..fb2a87f384cb7 100644 --- a/tool/tsh/common/resolve_default_addr.go +++ b/tool/tsh/common/resolve_default_addr.go @@ -62,7 +62,7 @@ func raceRequest(ctx context.Context, cli *http.Client, addr string, waitgroup * rsp, err := cli.Do(request) if err != nil { - log.WithError(err).Debug("Proxy address test failed") + logger.DebugContext(ctx, "Proxy address test failed", "error", err) results <- raceResult{addr: addr, err: err} return } @@ -75,16 +75,18 @@ func raceRequest(ctx context.Context, cli *http.Client, addr string, waitgroup * resBody, err := io.ReadAll(io.LimitReader(rsp.Body, maxPingBodySize)) if err != nil { // Log but do not return. We could receive HTTP OK, and we should not fail on error here. 
- log.Debugf("Failed to read whole response body: %v", err) + logger.DebugContext(ctx, "Failed to read whole response body", "error", err) } // If the request returned a non-OK response then we're still going // to treat this as a failure and return an error to the race // aggregator. if rsp.StatusCode != http.StatusOK { + logger.DebugContext(ctx, "Proxy address test received non-OK response", + "status_code", rsp.StatusCode, + "response_body", string(resBody), + ) err = trace.BadParameter("Proxy address test received non-OK response: %03d", rsp.StatusCode) - log.Debugf("%v, response body: %s ", err, string(resBody)) - results <- raceResult{addr: addr, err: err} return } @@ -98,7 +100,7 @@ func raceRequest(ctx context.Context, cli *http.Client, addr string, waitgroup * func startRacer(ctx context.Context, cli *http.Client, host string, candidates []int, waitGroup *sync.WaitGroup, results chan<- raceResult) []int { port, tail := candidates[0], candidates[1:] addr := net.JoinHostPort(host, strconv.Itoa(port)) - log.Debugf("Trying %s...", addr) + logger.DebugContext(ctx, "Trying request", "addr", addr) waitGroup.Add(1) go raceRequest(ctx, cli, addr, waitGroup, results) return tail @@ -110,7 +112,7 @@ func startRacer(ctx context.Context, cli *http.Client, host string, candidates [ // 2. races the requests against one another, and finally // 3. selects the first to respond as the canonical proxy func pickDefaultAddr(ctx context.Context, insecure bool, host string, ports []int) (string, error) { - log.Debugf("Resolving default proxy port (insecure: %v)", insecure) + logger.DebugContext(ctx, "Resolving default proxy port", "insecure_mode", insecure) if len(ports) == 0 { return "", trace.BadParameter("port list may not be empty") @@ -138,7 +140,7 @@ func pickDefaultAddr(ctx context.Context, insecure bool, host string, ports []in // properly in error conditions. 
var racersInFlight sync.WaitGroup defer func() { - log.Debug("Waiting for all in-flight proxy address tests to finish") + logger.DebugContext(ctx, "Waiting for all in-flight proxy address tests to finish") racersInFlight.Wait() }() @@ -190,7 +192,7 @@ func pickDefaultAddr(ctx context.Context, insecure bool, host string, ports []in // Note that returning will implicitly cancel the inner context, telling // any outstanding racers that there is no point trying anymore, and they // should exit. - log.Debugf("Address %s succeeded. Selected as canonical proxy address", r.addr) + logger.DebugContext(ctx, "Request to address succeeded, selected as canonical proxy address", "address", r.addr) return r.addr, nil } errors = append(errors, r.err) diff --git a/tool/tsh/common/resolve_default_addr_test.go b/tool/tsh/common/resolve_default_addr_test.go index 7e45e727a2c98..27f728044f86e 100644 --- a/tool/tsh/common/resolve_default_addr_test.go +++ b/tool/tsh/common/resolve_default_addr_test.go @@ -31,17 +31,13 @@ import ( "github.com/stretchr/testify/require" - "github.com/gravitational/teleport" apihelpers "github.com/gravitational/teleport/api/testhelpers" "github.com/gravitational/teleport/integration/helpers" ) -var testLog = log.WithField(teleport.ComponentKey, "test") - func newWaitForeverHandler() (http.Handler, chan struct{}) { doneChannel := make(chan struct{}) handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - testLog.Debug("Waiting forever...") <-doneChannel }) @@ -50,7 +46,6 @@ func newWaitForeverHandler() (http.Handler, chan struct{}) { func newRespondingHandlerWithStatus(status int) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - testLog.Debug("Responding") w.Header().Add("Content-Type", "text/plain; charset=utf-8") w.WriteHeader(status) io.WriteString(w, "Hello, world") @@ -201,11 +196,9 @@ func TestResolveUndeliveredBodyDoesNotBlockForever(t *testing.T) { handler := http.HandlerFunc(func(w 
http.ResponseWriter, _ *http.Request) { f, ok := w.(http.Flusher) if !ok { - testLog.Error("ResponseWriter must also be a Flusher, or the test is invalid") t.Fatal() } - testLog.Debugf("Writing response header to %T", w) w.Header().Set("Content-Length", "1048576") w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.WriteHeader(http.StatusTeapot) @@ -213,10 +206,7 @@ func TestResolveUndeliveredBodyDoesNotBlockForever(t *testing.T) { w.Write([]byte("I'm a little teapot, short and stout.")) f.Flush() - testLog.Debug("Waiting forever instead of sending response body") <-doneChannel - - testLog.Debug("Exiting handler") }) servers := []*httptest.Server{apihelpers.MakeTestServer(t, handler)} diff --git a/tool/tsh/common/tlsmuxlistener.go b/tool/tsh/common/tlsmuxlistener.go index 2e9d43634c293..ec1c1c2e7ac27 100644 --- a/tool/tsh/common/tlsmuxlistener.go +++ b/tool/tsh/common/tlsmuxlistener.go @@ -18,6 +18,7 @@ package common import ( "bufio" + "context" "crypto/tls" "net" @@ -70,7 +71,7 @@ func (m *tlsMuxListener) Accept() (net.Conn, error) { switch buf[0] { case tlsFirstByte: - log.Debugf("Read 0x%x as first byte, assuming TLS connection.", buf[0]) + logger.DebugContext(context.Background(), "Read first byte, assuming TLS connection") return tls.Server(bufConn, m.tlsConfig), nil default: return bufConn, nil diff --git a/tool/tsh/common/tsh.go b/tool/tsh/common/tsh.go index ebe4cb66c027a..d65af0b7247db 100644 --- a/tool/tsh/common/tsh.go +++ b/tool/tsh/common/tsh.go @@ -49,7 +49,6 @@ import ( "github.com/google/uuid" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - "github.com/sirupsen/logrus" "go.opentelemetry.io/otel/attribute" oteltrace "go.opentelemetry.io/otel/trace" "golang.org/x/crypto/ssh" @@ -103,7 +102,6 @@ import ( ) var ( - log = logrus.WithField(teleport.ComponentKey, teleport.ComponentTSH) logger = logutils.NewPackageLogger(teleport.ComponentKey, teleport.ComponentTSH) ) @@ -1286,7 +1284,10 @@ func Run(ctx context.Context, args 
[]string, opts ...CliOption) error { command, err := app.Parse(args) if errors.Is(err, kingpin.ErrExpectedCommand) { if _, ok := cf.TSHConfig.Aliases[aliasCommand]; ok { - log.Debugf("Failing due to recursive alias %q. Aliases seen: %v", aliasCommand, ar.getSeenAliases()) + logger.DebugContext(ctx, "Failing due to recursive alias", + "alias", aliasCommand, + "aliases_seen", ar.getSeenAliases(), + ) return trace.BadParameter("recursive alias %q; correct alias definition and try again", aliasCommand) } } @@ -1357,7 +1358,7 @@ func Run(ctx context.Context, args []string, opts ...CliOption) error { } if cpuProfile != "" { - log.Debugf("writing CPU profile to %v", cpuProfile) + logger.DebugContext(ctx, "writing CPU profile", "file", cpuProfile) f, err := os.Create(cpuProfile) if err != nil { return trace.Wrap(err) @@ -1370,24 +1371,24 @@ func Run(ctx context.Context, args []string, opts ...CliOption) error { } if memProfile != "" { - log.Debugf("writing memory profile to %v", memProfile) + logger.DebugContext(ctx, "writing memory profile", "file", memProfile) defer func() { f, err := os.Create(memProfile) if err != nil { - log.Errorf("could not open memory profile: %v", err) + logger.ErrorContext(ctx, "could not open memory profile", "error", err) return } defer f.Close() runtime.GC() if err := pprof.WriteHeapProfile(f); err != nil { - log.Errorf("could not write memory profile: %v", err) + logger.ErrorContext(ctx, "could not write memory profile", "error", err) return } }() } if traceProfile != "" { - log.Debugf("writing trace profile to %v", traceProfile) + logger.DebugContext(ctx, "writing trace profile", "file", traceProfile) f, err := os.Create(traceProfile) if err != nil { return trace.Wrap(err) @@ -1667,7 +1668,7 @@ func initializeTracing(cf *CLIConf) func() { defer cancel() err := provider.Shutdown(shutdownCtx) if err != nil && !errors.Is(err, context.DeadlineExceeded) { - log.WithError(err).Debug("failed to shutdown trace provider") + 
logger.DebugContext(shutdownCtx, "failed to shutdown trace provider", "error", err) } } } @@ -1698,7 +1699,10 @@ func initializeTracing(cf *CLIConf) func() { SamplingRate: samplingRate, }) if err != nil { - log.WithError(err).Debugf("failed to connect to trace exporter %s", cf.TraceExporter) + logger.DebugContext(cf.Context, "failed to connect to trace exporter", + "error", err, + "exporter", cf.TraceExporter, + ) return func() {} } @@ -1723,7 +1727,7 @@ func initializeTracing(cf *CLIConf) func() { // to get a handle to an Auth client. tc, err := makeClient(cf) if err != nil { - log.WithError(err).Debug("failed to set up span forwarding.") + logger.DebugContext(cf.Context, "failed to set up span forwarding", "error", err) return func() {} } @@ -1747,7 +1751,7 @@ func initializeTracing(cf *CLIConf) func() { provider = p return nil }); err != nil { - log.WithError(err).Debug("failed to set up span forwarding.") + logger.DebugContext(cf.Context, "failed to set up span forwarding", "error", err) return func() {} } @@ -2070,7 +2074,7 @@ func onLogin(cf *CLIConf) error { // If we're in multiplexed mode get SNI name for kube from single multiplexed proxy addr kubeTLSServerName := "" if tc.TLSRoutingEnabled { - log.Debug("Using Proxy SNI for kube TLS server name") + logger.DebugContext(cf.Context, "Using Proxy SNI for kube TLS server name") kubeHost, _ := tc.KubeProxyHostPort() kubeTLSServerName = client.GetKubeTLSServerName(kubeHost) } @@ -2162,7 +2166,7 @@ func onLogin(cf *CLIConf) error { if err := common.ShowClusterAlerts(cf.Context, clusterClient.CurrentCluster(), os.Stderr, map[string]string{ types.AlertOnLogin: "yes", }, types.AlertSeverity_LOW); err != nil { - log.WithError(err).Warn("Failed to display cluster alerts.") + logger.WarnContext(cf.Context, "Failed to display cluster alerts", "error", err) } return nil @@ -2209,7 +2213,10 @@ func onLogout(cf *CLIConf) error { // Log out user from the databases. 
if profile != nil { for _, db := range profile.Databases { - log.Debugf("Logging %v out of database %v.", profile.Name, db) + logger.DebugContext(cf.Context, "Logging user out of database", + "user", profile.Name, + "database", db, + ) err = dbprofile.Delete(tc, db) if err != nil { return trace.Wrap(err) @@ -2228,7 +2235,7 @@ func onLogout(cf *CLIConf) error { } // Remove Teleport related entries from kubeconfig. - log.Debugf("Removing Teleport related entries with server '%v' from kubeconfig.", tc.KubeClusterAddr()) + logger.DebugContext(cf.Context, "Removing Teleport related entries from kubeconfig", "cluster_addr", tc.KubeClusterAddr()) err = kubeconfig.RemoveByServerAddr("", tc.KubeClusterAddr()) if err != nil { return trace.Wrap(err) @@ -2241,14 +2248,14 @@ func onLogout(cf *CLIConf) error { if err != nil { return trace.Wrap(err) } - log.Debugf("Removing Teleport related entries with server '%v' from kubeconfig.", tc.KubeClusterAddr()) + logger.DebugContext(cf.Context, "Removing Teleport related entries from kubeconfig", "cluster_addr", tc.KubeClusterAddr()) if err = kubeconfig.RemoveByServerAddr("", tc.KubeClusterAddr()); err != nil { return trace.Wrap(err) } // Remove Teleport related entries from kubeconfig for all clusters. for _, profile := range profiles { - log.Debugf("Removing Teleport related entries for cluster '%v' from kubeconfig.", profile.Cluster) + logger.DebugContext(cf.Context, "Removing Teleport related entries from kubeconfig", "cluster", profile.Cluster) err = kubeconfig.RemoveByClusterName("", profile.Cluster) if err != nil { return trace.Wrap(err) @@ -2259,7 +2266,10 @@ func onLogout(cf *CLIConf) error { // connection service file. 
for _, profile := range profiles { for _, db := range profile.Databases { - log.Debugf("Logging %v out of database %v.", profile.Name, db) + logger.DebugContext(cf.Context, "Logging user out of database", + "user", profile.Name, + "database", db, + ) err = dbprofile.Delete(tc, db) if err != nil { return trace.Wrap(err) @@ -2374,14 +2384,14 @@ func getClusterClients(cf *CLIConf, resource string) ([]*clusterClient, error) { ) defer span.End() - logger := log.WithField("cluster", profile.Cluster) + logger := logger.With("cluster", profile.Cluster) - logger.Debug("Creating client...") + logger.DebugContext(ctx, "Creating client") clt, err := tc.ConnectToCluster(ctx) if err != nil { // log error and return nil so that results may still be retrieved // for other clusters. - logger.Errorf("Failed connecting to proxy: %v", err) + logger.ErrorContext(ctx, "Failed connecting to proxy", "error", err) mu.Lock() clusters = append(clusters, &clusterClient{ @@ -2408,7 +2418,7 @@ func getClusterClients(cf *CLIConf, resource string) ([]*clusterClient, error) { if err != nil { // Log that an error happened but do not return an error to // prevent results from other clusters from being retrieved. 
- logger.Errorf("Failed to lookup leaf clusters: %v", err) + logger.ErrorContext(ctx, "Failed to lookup leaf clusters", "error", err) return nil } @@ -2509,10 +2519,10 @@ func listNodesAllClusters(cf *CLIConf) error { oteltrace.WithAttributes(attribute.String("cluster", cluster.name))) defer span.End() - logger := log.WithField("cluster", cluster.name) + logger := logger.With("cluster", cluster.name) nodes, err := apiclient.GetAllResources[types.Server](ctx, cluster.auth, &cluster.req) if err != nil { - logger.Errorf("Failed to get nodes: %v.", err) + logger.ErrorContext(ctx, "Failed to get nodes", "error", err) mu.Lock() errors = append(errors, trace.ConnectionProblem(err, "failed to list nodes for cluster %s: %v", cluster.name, err)) @@ -3070,7 +3080,10 @@ type databaseWithUsers struct { func getDBUsers(db types.Database, accessChecker services.AccessChecker) *dbUsers { users, err := accessChecker.EnumerateDatabaseUsers(db) if err != nil { - log.Warnf("Failed to EnumerateDatabaseUsers for database %v: %v.", db.GetName(), err) + logger.WarnContext(context.Background(), "Failed to EnumerateDatabaseUsers for database", + "database", db.GetName(), + "error", err, + ) return &dbUsers{} } var denied []string @@ -3101,7 +3114,10 @@ func newDatabaseWithUsers(db types.Database, accessChecker services.AccessChecke if db.SupportsAutoUsers() && db.GetAdminUser().Name != "" { roles, err := accessChecker.CheckDatabaseRoles(db, nil) if err != nil { - log.Warnf("Failed to CheckDatabaseRoles for database %v: %v.", db.GetName(), err) + logger.WarnContext(context.Background(), "Failed to CheckDatabaseRoles for database", + "database", db.GetName(), + "error", err, + ) } else { dbWithUsers.DatabaseRoles = roles } @@ -3150,7 +3166,10 @@ func formatUsersForDB(database types.Database, accessChecker services.AccessChec if database.SupportsAutoUsers() && database.GetAdminUser().Name != "" { autoUser, err := accessChecker.DatabaseAutoUserMode(database) if err != nil { - log.Warnf("Failed 
to get DatabaseAutoUserMode for database %v: %v.", database.GetName(), err)
+		logger.WarnContext(context.Background(), "Failed to get DatabaseAutoUserMode for database",
+			"database", database.GetName(),
+			"error", err,
+		)
	} else if autoUser.IsEnabled() {
		defer func() {
			users = users + " (Auto-provisioned)"
		}()
	}
}
@@ -3620,7 +3639,7 @@ func retryWithAccessRequest(
	// a short debug message in case this is unexpected, but return the
	// original AccessDenied error from the ssh attempt which is likely to
	// be far more relevant to the user.
-	log.Debugf("Not attempting to automatically request access, reason: %v", err)
+	logger.DebugContext(cf.Context, "Not attempting to automatically request access", "error", err)
	return trace.Wrap(origErr)
}
@@ -4118,14 +4137,17 @@ func makeClientForProxy(cf *CLIConf, proxy string) (*client.TeleportClient, erro
		if !trace.IsNotFound(err) && !trace.IsConnectionProblem(err) && !trace.IsCompareFailed(err) {
			return nil, trace.Wrap(err)
		}
-		log.WithError(err).Infof("Could not load key for %s into the local agent.", cf.SiteName)
+		logger.InfoContext(ctx, "Could not load key for cluster into the local agent",
+			"cluster", cf.SiteName,
+			"error", err,
+		)
	}
}

// If we are missing client profile information, ping the webproxy
// for proxy info and load it into the client config.
if profileError != nil || profile.MissingClusterDetails {
-	log.Debug("Pinging the proxy to fetch listening addresses for non-web ports.")
+	logger.DebugContext(cf.Context, "Pinging the proxy to fetch listening addresses for non-web ports")
	_, err := tc.Ping(cf.Context)
	if err != nil {
		return nil, trace.Wrap(err)
	}
@@ -4249,7 +4271,7 @@ func loadClientConfigFromCLIConf(cf *CLIConf, proxy string) (*client.Config, err
	} else if tMatched {
		if expanded.Host != "" {
			c.Host = expanded.Host
-			log.Debugf("Will connect to host %q according to proxy template.", expanded.Host)
+			logger.DebugContext(ctx, "Will connect to host as dictated by proxy template", "host", expanded.Host)

			if host, port, err := net.SplitHostPort(c.Host); err == nil {
				c.Host = host
@@ -4259,12 +4281,12 @@ func loadClientConfigFromCLIConf(cf *CLIConf, proxy string) (*client.Config, err
			}
		}
	} else if expanded.Query != "" {
-		log.Debugf("Will query for hosts via %q according to proxy template.", expanded.Query)
+		logger.DebugContext(cf.Context, "Will query for hosts as dictated by proxy template", "query", expanded.Query)
		cf.PredicateExpression = expanded.Query
		// The PredicateExpression is ignored if the Host is populated.
		c.Host = ""
	} else if expanded.Search != "" {
-		log.Debugf("Will search for hosts via %q according to proxy template.", expanded.Search)
+		logger.DebugContext(cf.Context, "Will search for hosts as dictated by proxy template", "search", expanded.Search)
		cf.SearchKeywords = expanded.Search
		// The SearchKeywords are ignored if the Host is populated.
c.Host = "" @@ -4273,12 +4295,12 @@ func loadClientConfigFromCLIConf(cf *CLIConf, proxy string) (*client.Config, err // Don't overwrite proxy jump if explicitly provided if cf.ProxyJump == "" && expanded.Proxy != "" { cf.ProxyJump = expanded.Proxy - log.Debugf("Will connect to proxy %q according to proxy template.", expanded.Proxy) + logger.DebugContext(cf.Context, "Will connect to proxy as dictated by proxy template", "proxy", expanded.Proxy) } if expanded.Cluster != "" { cf.SiteName = expanded.Cluster - log.Debugf("Will connect to cluster %q according to proxy template.", expanded.Cluster) + logger.DebugContext(cf.Context, "Will connect to cluster as dictated by proxy template", "cluster", expanded.Cluster) } } @@ -4436,7 +4458,7 @@ func loadClientConfigFromCLIConf(cf *CLIConf, proxy string) (*client.Config, err } if err := setX11Config(c, cf, options); err != nil { - log.WithError(err).Info("X11 forwarding is not properly configured, continuing without it.") + logger.InfoContext(ctx, "X11 forwarding is not properly configured, continuing without it", "error", err) } // If the caller does not want to check host keys, pass in a insecure host @@ -4475,7 +4497,7 @@ func loadClientConfigFromCLIConf(cf *CLIConf, proxy string) (*client.Config, err // headless login produces short-lived MFA-verifed certs, which should never be added to the agent. if cf.AuthConnector == constants.HeadlessConnector { if cf.AddKeysToAgent == client.AddKeysToAgentYes || cf.AddKeysToAgent == client.AddKeysToAgentOnly { - log.Info("Skipping adding keys to agent for headless login") + logger.InfoContext(ctx, "Skipping adding keys to agent for headless login") } c.AddKeysToAgent = client.AddKeysToAgentNo } @@ -4699,7 +4721,7 @@ func setClientWebProxyAddr(ctx context.Context, cf *CLIConf, c *client.Config) e proxyAddress := parsedAddrs.WebProxyAddr if parsedAddrs.UsingDefaultWebProxyPort { - log.Debug("Web proxy port was not set. 
Attempting to detect port number to use.") + logger.DebugContext(ctx, "Web proxy port was not set, attempting to detect port number to use") timeout, cancel := context.WithTimeout(ctx, proxyDefaultResolutionTimeout) defer cancel() @@ -4850,7 +4872,7 @@ func printStatus(debug bool, p *profileInfo, env map[string]string, isActive boo if len(p.AllowedResourceIDs) > 0 { allowedResourcesStr, err := types.ResourceIDsToString(p.AllowedResourceIDs) if err != nil { - log.Warnf("failed to marshal allowed resource IDs to string: %v", err) + logger.WarnContext(context.Background(), "failed to marshal allowed resource IDs to string", "error", err) } else { fmt.Printf(" Allowed Resources: %s\n", allowedResourcesStr) } @@ -4980,7 +5002,7 @@ func onStatus(cf *CLIConf) error { // make the teleport client and retrieve the certificate from the proxy: tc, err := makeClient(cf) if err != nil { - log.WithError(err).Warn("Failed to make client for retrieving cluster alerts.") + logger.WarnContext(cf.Context, "Failed to make client for retrieving cluster alerts", "error", err) return trace.Wrap(err) } @@ -4991,7 +5013,7 @@ func onStatus(cf *CLIConf) error { var accessListsToReview []*accesslist.AccessList if hardwareKeyInteractionRequired { - log.Debug("Skipping fetching access lists to review due to Hardware Key PIN/Touch requirement.") + logger.DebugContext(cf.Context, "Skipping fetching access lists to review due to Hardware Key PIN/Touch requirement") } else { accessListsToReview = cf.getAccessListsToReview(tc) } @@ -5009,11 +5031,11 @@ func onStatus(cf *CLIConf) error { } if hardwareKeyInteractionRequired { - log.Debug("Skipping cluster alerts due to Hardware Key PIN/Touch requirement.") + logger.DebugContext(cf.Context, "Skipping cluster alerts due to Hardware Key PIN/Touch requirement") } else { if err := common.ShowClusterAlerts(cf.Context, tc, os.Stderr, nil, types.AlertSeverity_HIGH); err != nil { - log.WithError(err).Warn("Failed to display cluster alerts.") + 
logger.WarnContext(cf.Context, "Failed to display cluster alerts", "error", err) } } @@ -5237,7 +5259,7 @@ func awaitRequestResolution(ctx context.Context, clt authclient.ClientI, req typ case types.OpDelete: return nil, trace.Errorf("request %s has expired or been deleted...", event.Resource.GetName()) default: - log.Warnf("Skipping unknown event type %s", event.Type) + logger.WarnContext(ctx, "Skipping unknown event type", "event_type", event.Type) } case <-watcher.Done(): return nil, trace.Wrap(watcher.Error()) @@ -5399,11 +5421,11 @@ func listAppsAllClusters(cf *CLIConf) error { continue } - logger := log.WithField("cluster", cluster.name) + logger := logger.With("cluster", cluster.name) group.Go(func() error { servers, err := apiclient.GetAllResources[types.AppServer](groupCtx, cluster.auth, &cluster.req) if err != nil { - logger.Errorf("Failed to get app servers: %v.", err) + logger.ErrorContext(groupCtx, "Failed to get app servers", "error", err) mu.Lock() errors = append(errors, trace.ConnectionProblem(err, "failed to list app serves for cluster %s: %v", cluster.name, err)) @@ -5610,12 +5632,12 @@ func handleUnimplementedError(ctx context.Context, perr error, cf CLIConf) error ) tc, err := makeClient(&cf) if err != nil { - log.WithError(err).Warning("Failed to create client.") + logger.WarnContext(ctx, "Failed to create client", "error", err) return trace.WrapWithMessage(perr, errMsgFormat, unknownServerVersion, teleport.Version) } pr, err := tc.Ping(ctx) if err != nil { - log.WithError(err).Warning("Failed to call ping.") + logger.WarnContext(ctx, "Failed to call ping", "error", err) return trace.WrapWithMessage(perr, errMsgFormat, unknownServerVersion, teleport.Version) } return trace.WrapWithMessage(perr, errMsgFormat, pr.ServerVersion, teleport.Version) @@ -5703,7 +5725,7 @@ func onHeadlessApprove(cf *CLIConf) error { func (cf *CLIConf) getAccessListsToReview(tc *client.TeleportClient) []*accesslist.AccessList { clusterClient, err := 
tc.ConnectToCluster(cf.Context) if err != nil { - log.WithError(err).Debug("Error connecting to the cluster") + logger.DebugContext(cf.Context, "Error connecting to the cluster", "error", err) return nil } defer func() { @@ -5714,7 +5736,7 @@ func (cf *CLIConf) getAccessListsToReview(tc *client.TeleportClient) []*accessli // server, which does not support access lists. accessListsToReview, err := clusterClient.AuthClient.AccessListClient().GetAccessListsToReview(cf.Context) if err != nil && !trace.IsNotImplemented(err) { - log.WithError(err).Debug("Error getting access lists to review") + logger.DebugContext(cf.Context, "Error getting access lists to review", "error", err) } return accessListsToReview @@ -5761,7 +5783,7 @@ func tryLockMemory(cf *CLIConf) error { return trace.Wrap(err, mlockFailureMessage) case mlockModeBestEffort: err := mlock.LockMemory() - log.WithError(err).Warning(mlockFailureMessage) + logger.WarnContext(cf.Context, mlockFailureMessage, "error", err) return nil default: return trace.BadParameter("unexpected value for --mlock, expected one of (%v)", strings.Join(mlockModes, ", ")) diff --git a/tool/tsh/common/tsh_test.go b/tool/tsh/common/tsh_test.go index 9302a62ce29ae..2ffa313d42cb1 100644 --- a/tool/tsh/common/tsh_test.go +++ b/tool/tsh/common/tsh_test.go @@ -2542,18 +2542,15 @@ func TestSSHCommands(t *testing.T) { func tryCreateTrustedCluster(t *testing.T, authServer *auth.Server, trustedCluster types.TrustedCluster) { ctx := context.TODO() for i := 0; i < 10; i++ { - log.Debugf("Will create trusted cluster %v, attempt %v.", trustedCluster, i) _, err := authServer.UpsertTrustedClusterV2(ctx, trustedCluster) if err == nil { return } if trace.IsConnectionProblem(err) { - log.Debugf("Retrying on connection problem: %v.", err) time.Sleep(500 * time.Millisecond) continue } if trace.IsAccessDenied(err) { - log.Debugf("Retrying on access denied: %v.", err) time.Sleep(500 * time.Millisecond) continue }