diff --git a/cmd/kubectl-testkube/commands/scripts/common.go b/cmd/kubectl-testkube/commands/scripts/common.go index ab882f1d3de..2743335c942 100644 --- a/cmd/kubectl-testkube/commands/scripts/common.go +++ b/cmd/kubectl-testkube/commands/scripts/common.go @@ -75,11 +75,11 @@ func watchLogs(id string, client client.Client) { if execution.ExecutionResult.IsCompleted() { fmt.Println() - uiShellCommandBlock(id) + uiShellGetExecution(id) return } } - uiShellCommandBlock(id) + uiShellGetExecution(id) } diff --git a/cmd/kubectl-testkube/commands/scripts/get_script_execution.go b/cmd/kubectl-testkube/commands/scripts/execution.go similarity index 100% rename from cmd/kubectl-testkube/commands/scripts/get_script_execution.go rename to cmd/kubectl-testkube/commands/scripts/execution.go diff --git a/cmd/kubectl-testkube/commands/scripts/execution_renderer.go b/cmd/kubectl-testkube/commands/scripts/execution_renderer.go index 48154560a79..5a519d66e0a 100644 --- a/cmd/kubectl-testkube/commands/scripts/execution_renderer.go +++ b/cmd/kubectl-testkube/commands/scripts/execution_renderer.go @@ -7,6 +7,7 @@ import ( "text/template" "github.com/kubeshop/testkube/pkg/api/v1/testkube" + "github.com/kubeshop/testkube/pkg/ui" "github.com/spf13/cobra" ) @@ -77,14 +78,14 @@ func (r ExecutionRawRenderer) Watch(execution testkube.Execution, writer io.Writ return err } +// TODO fix this - introduce some common data interface for rendering such objects +// renderers need to be simplified and render Execution should be in one place (not many as now) +// - move all logic from execution, start, watch here to show final execution func (r ExecutionRawRenderer) renderDetails(execution testkube.Execution, writer io.Writer) error { - _, err := fmt.Fprintf(writer, "Name: %s, Status: %s, Duration: %s\n", - execution.Name, - *execution.ExecutionResult.Status, - execution.CalculateDuration(), - ) - - return err + ui.Writer = writer + uiPrintStatus(execution) + uiShellGetExecution(execution.Id) + return 
nil } func GetExecutionRenderer(cmd *cobra.Command) ExecutionRenderer { diff --git a/cmd/kubectl-testkube/commands/scripts/start.go b/cmd/kubectl-testkube/commands/scripts/start.go index 4095d942339..e46b090f52b 100644 --- a/cmd/kubectl-testkube/commands/scripts/start.go +++ b/cmd/kubectl-testkube/commands/scripts/start.go @@ -55,7 +55,8 @@ func NewStartScriptCmd() *cobra.Command { DownloadArtifacts(execution.Id, downloadDir, client) } - uiShellCommandBlock(execution.Id) + uiShellGetExecution(execution.Id) + uiShellWatchExecution(execution.Id) }, } @@ -71,6 +72,8 @@ func NewStartScriptCmd() *cobra.Command { func uiPrintStatus(execution testkube.Execution) { result := execution.ExecutionResult + ui.NL() + switch true { case result.IsQueued(): ui.Warn("Script queued for execution") @@ -79,12 +82,13 @@ func uiPrintStatus(execution testkube.Execution) { ui.Warn("Script execution started") case result.IsSuccesful(): - fmt.Println(result.Output) + ui.Info(result.Output) duration := execution.EndTime.Sub(execution.StartTime) ui.Success("Script execution completed with sucess in " + duration.String()) case result.IsFailed(): - fmt.Println(result.ErrorMessage) + ui.Warn("Test script execution failed:") + ui.Info(result.ErrorMessage) ui.Errf("Script execution failed") os.Exit(1) } @@ -92,13 +96,18 @@ func uiPrintStatus(execution testkube.Execution) { ui.NL() } -func uiShellCommandBlock(id string) { +func uiShellGetExecution(id string) { ui.ShellCommand( "Use following command to get script execution details", "kubectl testkube scripts execution "+id, ) + + ui.NL() +} + +func uiShellWatchExecution(id string) { ui.ShellCommand( - "or watch script execution until complete", + "Watch script execution until complete", "kubectl testkube scripts watch "+id, ) diff --git a/internal/app/api/v1/executions.go b/internal/app/api/v1/executions.go index 40512b17ed1..5a0170a305b 100644 --- a/internal/app/api/v1/executions.go +++ b/internal/app/api/v1/executions.go @@ -226,7 +226,6 @@ func 
(s TestKubeAPI) GetExecutionHandler() fiber.Handler { } } - s.Log.Infow("get script execution request", "id", scriptID, "executionID", executionID) s.Log.Debugw("get script execution request - debug", "execution", execution) return c.JSON(execution) diff --git a/pkg/jobs/jobclient.go b/pkg/jobs/jobclient.go index 1d47970579f..ee4831fa4c8 100644 --- a/pkg/jobs/jobclient.go +++ b/pkg/jobs/jobclient.go @@ -19,12 +19,11 @@ import ( "go.uber.org/zap" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - pods "k8s.io/client-go/kubernetes/typed/core/v1" + tcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) const ( @@ -92,8 +91,8 @@ func (c *JobClient) LaunchK8sJobSync(image string, repo result.Repository, execu // get job pod and for _, pod := range pods.Items { - if pod.Status.Phase != v1.PodRunning && pod.Labels["job-name"] == execution.Id { - l := c.Log.With("pod", pod.Name, "namespace", pod.Namespace) + if pod.Status.Phase != corev1.PodRunning && pod.Labels["job-name"] == execution.Id { + l := c.Log.With("pod", pod.Name, "namespace", pod.Namespace, "func", "LaunchK8sJobSync") // save stop time defer func() { @@ -102,20 +101,18 @@ func (c *JobClient) LaunchK8sJobSync(image string, repo result.Repository, execu }() // wait for complete - l.Debugw("waiting for pod complete error", "error", err) - + l.Debug("poll immediate waiting for pod to succeed") if err := wait.PollImmediate(pollInterval, pollTimeout, IsPodReady(c.ClientSet, pod.Name, c.Namespace)); err != nil { l.Errorw("waiting for pod complete error", "error", err) - repo.UpdateResult(ctx, execution.Id, result.Err(err)) - return result, err } + l.Debug("poll immediate end") var logs []byte logs, err = c.GetPodLogs(pod.Name) if err != nil { l.Errorw("get pod logs error", "error", err) repo.UpdateResult(ctx, execution.Id, 
result.Err(err)) - return + return result, err } // parse job ouput log (JSON stream) @@ -145,6 +142,9 @@ func (c *JobClient) LaunchK8sJob(image string, repo result.Repository, execution podsClient := c.ClientSet.CoreV1().Pods(c.Namespace) ctx := context.Background() + // init result + result = testkube.NewPendingExecutionResult() + jsn, err := json.Marshal(execution) if err != nil { return result.Err(err), err @@ -164,25 +164,29 @@ func (c *JobClient) LaunchK8sJob(image string, repo result.Repository, execution // get job pod and for _, pod := range pods.Items { - if pod.Status.Phase != v1.PodRunning && pod.Labels["job-name"] == execution.Id { + if pod.Status.Phase != corev1.PodRunning && pod.Labels["job-name"] == execution.Id { // async wait for complete status or error go func() { + l := c.Log.With("executionID", execution.Id, "func", "LaunchK8sJob") // save stop time defer func() { + l.Debug("stopping execution") execution.Stop() repo.EndExecution(ctx, execution.Id, execution.EndTime, execution.CalculateDuration()) }() // wait for complete - if err := wait.PollImmediate(time.Second, time.Duration(0)*time.Second, k8sclient.HasPodSucceeded(c.ClientSet, pod.Name, c.Namespace)); err != nil { + l.Debug("poll immediate waiting for pod to succeed") + if err := wait.PollImmediate(pollInterval, pollTimeout, IsPodReady(c.ClientSet, pod.Name, c.Namespace)); err != nil { // continue on poll err and try to get logs later - c.Log.Errorw("poll immediate error", "error", err) + l.Errorw("poll immediate error", "error", err) } + l.Debug("poll immediate end") var logs []byte logs, err = c.GetPodLogs(pod.Name) if err != nil { - c.Log.Errorw("get pod logs error", "error", err) + l.Errorw("get pod logs error", "error", err) repo.UpdateResult(ctx, execution.Id, result.Err(err)) return } @@ -190,12 +194,12 @@ func (c *JobClient) LaunchK8sJob(image string, repo result.Repository, execution // parse job ouput log (JSON stream) result, _, err := output.ParseRunnerOutput(logs) if err != 
nil { - c.Log.Errorw("parse ouput error", "error", err) + l.Errorw("parse output error", "error", err) repo.UpdateResult(ctx, execution.Id, result.Err(err)) return } - c.Log.Infow("execution completed saving result", "executionId", execution.Id, "status", result.Status) + l.Infow("execution completed saving result", "status", result.Status) repo.UpdateResult(ctx, execution.Id, result) }() } @@ -204,7 +208,7 @@ func (c *JobClient) LaunchK8sJob(image string, repo result.Repository, execution return testkube.NewPendingExecutionResult(), nil } -func (c *JobClient) GetJobPods(podsClient pods.PodInterface, jobName string, retryNr, retryCount int) (*v1.PodList, error) { +func (c *JobClient) GetJobPods(podsClient tcorev1.PodInterface, jobName string, retryNr, retryCount int) (*corev1.PodList, error) { pods, err := podsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "job-name=" + jobName}) if err != nil { return nil, err @@ -234,39 +238,27 @@ func (c *JobClient) TailJobLogs(id string, logs chan []byte) (err error) { for _, pod := range pods.Items { if pod.Labels["job-name"] == id { - l := c.Log.With("namespace", pod.Namespace, "pod", pod.Name) + l := c.Log.With("podNamespace", pod.Namespace, "podName", pod.Name, "podStatus", pod.Status) switch pod.Status.Phase { - case v1.PodRunning: - l.Debug("Tailing pod logs immediately") + case corev1.PodRunning: + l.Debug("tailing pod logs: immediately") return c.TailPodLogs(ctx, pod.Name, logs) - case v1.PodFailed: + case corev1.PodFailed: err := fmt.Errorf("can't get pod logs, pod failed: %s/%s", pod.Namespace, pod.Name) l.Errorw(err.Error()) - return err + return c.GetLastLogLineError(ctx, pod.Namespace, pod.Name) default: - l.Debugw("Waiting for pod to be ready") + l.Debugw("tailing job logs: waiting for pod to be ready") if err = wait.PollImmediate(pollInterval, pollTimeout, IsPodReady(c.ClientSet, pod.Name, c.Namespace)); err != nil { l.Errorw("poll immediate error when tailing logs", "error", err) - log, err := 
c.GetPodLogError(ctx, pod.Name) - if err != nil { - return fmt.Errorf("GetPodLogs error: %w", err) - } - - l.Debugw("poll immediete log", "log", string(log)) - entry, err := output.GetLogEntry(log) - if err != nil { - return fmt.Errorf("GetLogEntry error: %w", err) - } - close(logs) - - return fmt.Errorf("last log entry: %s", entry.String()) + return c.GetLastLogLineError(ctx, pod.Namespace, pod.Name) } - l.Debug("Tailing pod logs") + l.Debug("tailing pod logs") return c.TailPodLogs(ctx, pod.Name, logs) } } @@ -275,13 +267,30 @@ func (c *JobClient) TailJobLogs(id string, logs chan []byte) (err error) { return } +func (c *JobClient) GetLastLogLineError(ctx context.Context, podNamespace, podName string) error { + l := c.Log.With("pod", podName, "namespace", podNamespace) + log, err := c.GetPodLogError(ctx, podName) + if err != nil { + return fmt.Errorf("getPodLogs error: %w", err) + } + + l.Debugw("log", "got last log bytes", string(log)) // in case distorted log bytes + entry, err := output.GetLogEntry(log) + if err != nil { + return fmt.Errorf("GetLogEntry error: %w", err) + } + + c.Log.Errorw("got last log entry", "log", entry.String()) + return fmt.Errorf("error from last log entry: %s", entry.String()) +} + func (c *JobClient) GetPodLogs(podName string, logLinesCount ...int64) (logs []byte, err error) { count := int64(100) if len(logLinesCount) > 0 { count = logLinesCount[0] } - podLogOptions := v1.PodLogOptions{ + podLogOptions := corev1.PodLogOptions{ Follow: false, TailLines: &count, } @@ -314,7 +323,7 @@ func (c *JobClient) GetPodLogError(ctx context.Context, podName string) (logsByt func (c *JobClient) TailPodLogs(ctx context.Context, podName string, logs chan []byte) (err error) { count := int64(1) - podLogOptions := v1.PodLogOptions{ + podLogOptions := corev1.PodLogOptions{ Follow: true, TailLines: &count, } @@ -373,16 +382,16 @@ func (c *JobClient) CreatePersistentVolume(name string) error { if err != nil { return err } - pv := &v1.PersistentVolume{ + pv 
:= &corev1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{"type": "local"}, }, - Spec: v1.PersistentVolumeSpec{ - Capacity: v1.ResourceList{"storage": quantity}, - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, - PersistentVolumeSource: v1.PersistentVolumeSource{ - HostPath: &v1.HostPathVolumeSource{ + Spec: corev1.PersistentVolumeSpec{ + Capacity: corev1.ResourceList{"storage": quantity}, + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, + PersistentVolumeSource: corev1.PersistentVolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ Path: fmt.Sprintf("/mnt/data/%s", name), }, }, @@ -404,15 +413,15 @@ func (c *JobClient) CreatePersistentVolumeClaim(name string) error { return err } - pvc := &v1.PersistentVolumeClaim{ + pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, - Spec: v1.PersistentVolumeClaimSpec{ + Spec: corev1.PersistentVolumeClaimSpec{ StorageClassName: &storageClassName, - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{"storage": quantity}, + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{"storage": quantity}, }, }, } @@ -429,14 +438,14 @@ func NewJobSpec(id, namespace, image, jsn, scriptName string, hasSecrets bool) * // TODO backOff need to be handled correctly by Logs and by Running job spec - currently we can get unexpected results var backOffLimit int32 = 0 - var secretEnvVars []v1.EnvVar + var secretEnvVars []corev1.EnvVar if hasSecrets { - secretEnvVars = []v1.EnvVar{ + secretEnvVars = []corev1.EnvVar{ { Name: GitUsernameEnvVarName, - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: 
corev1.LocalObjectReference{ Name: secret.GetMetadataName(scriptName), }, Key: GitUsernameSecretName, @@ -445,9 +454,9 @@ func NewJobSpec(id, namespace, image, jsn, scriptName string, hasSecrets bool) * }, { Name: GitTokenEnvVarName, - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ Name: secret.GetMetadataName(scriptName), }, Key: GitTokenSecretName, @@ -464,18 +473,18 @@ func NewJobSpec(id, namespace, image, jsn, scriptName string, hasSecrets bool) * }, Spec: batchv1.JobSpec{ TTLSecondsAfterFinished: &TTLSecondsAfterFinished, - Template: v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { Name: id, Image: image, Command: []string{"/bin/runner", jsn}, - ImagePullPolicy: v1.PullAlways, + ImagePullPolicy: corev1.PullAlways, Env: append(envVars, secretEnvVars...), }, }, - RestartPolicy: v1.RestartPolicyNever, + RestartPolicy: corev1.RestartPolicyNever, }, }, BackoffLimit: &backOffLimit, @@ -483,7 +492,7 @@ func NewJobSpec(id, namespace, image, jsn, scriptName string, hasSecrets bool) * } } -var envVars = []v1.EnvVar{ +var envVars = []corev1.EnvVar{ { Name: "RUNNER_ENDPOINT", Value: os.Getenv("STORAGE_ENDPOINT"), diff --git a/pkg/runner/output/parser.go b/pkg/runner/output/parser.go index 6ed75b6c40c..b9ab6ff3fb6 100644 --- a/pkg/runner/output/parser.go +++ b/pkg/runner/output/parser.go @@ -57,7 +57,7 @@ func ParseRunnerOutput(b []byte) (result testkube.ExecutionResult, logs []string } case TypeError: - result = testkube.ExecutionResult{ErrorMessage: log.Content} + result = testkube.NewErrorExecutionResult(fmt.Errorf(log.Content)) case TypeLogEvent, TypeLogLine: logs = append(logs, log.Content) diff --git a/pkg/ui/errors.go b/pkg/ui/errors.go index 
e8efc66a93e..2ac6820cf27 100644 --- a/pkg/ui/errors.go +++ b/pkg/ui/errors.go @@ -17,7 +17,7 @@ func printAndExit(item string, exitOnError bool, errors ...error) { if len(errors) > 0 && hasErrors(errors...) { for _, err := range errors { if err != nil { - fmt.Printf("%s %s (error: %s)\n\n", LightRed("⨯"), Red(item), err) + fmt.Fprintf(Writer, "%s %s (error: %s)\n\n", LightRed("⨯"), Red(item), err) if exitOnError { os.Exit(1) } @@ -26,7 +26,7 @@ func printAndExit(item string, exitOnError bool, errors ...error) { } if Verbose { - fmt.Printf("%s %s\n", Blue("\xE2\x9C\x94"), Green(item)) + fmt.Fprintf(Writer, "%s %s\n", Blue("\xE2\x9C\x94"), Green(item)) } } @@ -34,14 +34,14 @@ func WarnOnError(item string, errors ...error) { if len(errors) > 0 && hasErrors(errors...) { for _, err := range errors { if err != nil { - fmt.Printf("%s %s (error: %s)\n\n", LightYellow("⨯"), Yellow(item), err) + fmt.Fprintf(Writer, "%s %s (error: %s)\n\n", LightYellow("⨯"), Yellow(item), err) return } } } if Verbose { - fmt.Printf("%s %s\n", Blue("\xE2\x9C\x94"), Green(item)) + fmt.Fprintf(Writer, "%s %s\n", Blue("\xE2\x9C\x94"), Green(item)) } } diff --git a/pkg/ui/logo.go b/pkg/ui/logo.go index f3d6d95d506..f50a27d4917 100644 --- a/pkg/ui/logo.go +++ b/pkg/ui/logo.go @@ -15,11 +15,11 @@ var logo = ` ` func Logo() { - fmt.Print(Blue(logo)) - fmt.Println() + fmt.Fprint(Writer, Blue(logo)) + fmt.Fprintln(Writer) } func LogoNoColor() { - fmt.Print(logo) - fmt.Println() + fmt.Fprint(Writer, logo) + fmt.Fprintln(Writer) } diff --git a/pkg/ui/printers.go b/pkg/ui/printers.go index c54b1f22880..4758eeb8a7c 100644 --- a/pkg/ui/printers.go +++ b/pkg/ui/printers.go @@ -3,10 +3,13 @@ package ui import ( "fmt" + "io" "os" "strings" ) +var Writer io.Writer = os.Stdout + // IconMedal emoji const IconMedal = "🥇" @@ -14,74 +17,74 @@ const IconMedal = "🥇" const IconError = "💔" func NL() { - fmt.Println() + fmt.Fprintln(Writer) } // Warn shows warning in terminal func Success(message string, subMessages 
...string) { - fmt.Printf("%s", LightYellow(message)) + fmt.Fprintf(Writer, "%s", LightYellow(message)) for _, sub := range subMessages { - fmt.Printf(" %s", LightCyan(sub)) + fmt.Fprintf(Writer, " %s", LightCyan(sub)) } - fmt.Printf(" " + IconMedal) - fmt.Println() + fmt.Fprintf(Writer, " "+IconMedal) + fmt.Fprintln(Writer) } // Warn shows warning in terminal func Warn(message string, subMessages ...string) { - fmt.Printf("%s", LightYellow(message)) + fmt.Fprintf(Writer, "%s", LightYellow(message)) for _, sub := range subMessages { - fmt.Printf(" %s", LightCyan(sub)) + fmt.Fprintf(Writer, " %s", LightCyan(sub)) } - fmt.Println() + fmt.Fprintln(Writer) } func LogLine(message string) { - fmt.Printf("%s\n", DarkGray(message)) + fmt.Fprintf(Writer, "%s\n", DarkGray(message)) } func Debug(message string, subMessages ...string) { if !Verbose { return } - fmt.Printf("%s", DarkGray(message)) + fmt.Fprintf(Writer, "%s", DarkGray(message)) for _, sub := range subMessages { - fmt.Printf(" %s", LightGray(sub)) + fmt.Fprintf(Writer, " %s", LightGray(sub)) } - fmt.Println() + fmt.Fprintln(Writer) } func Info(message string, subMessages ...string) { - fmt.Printf("%s", DarkGray(message)) + fmt.Fprintf(Writer, "%s", DarkGray(message)) for _, sub := range subMessages { - fmt.Printf(" %s", LightGray(sub)) + fmt.Fprintf(Writer, " %s", LightGray(sub)) } - fmt.Println() + fmt.Fprintln(Writer) } func Err(err error) { - fmt.Printf("%s %s %s\n", LightRed("⨯"), Red(err.Error()), IconError) + fmt.Fprintf(Writer, "%s %s %s\n", LightRed("⨯"), Red(err.Error()), IconError) } func Errf(err string, params ...interface{}) { - fmt.Printf("%s %s\n", LightRed("⨯"), Red(fmt.Sprintf(err, params...))) + fmt.Fprintf(Writer, "%s %s\n", LightRed("⨯"), Red(fmt.Sprintf(err, params...))) } func Fail(err error) { Err(err) - fmt.Println() + fmt.Fprintln(Writer) os.Exit(1) } func Failf(err string, params ...interface{}) { Errf(err, params...) 
- fmt.Println() + fmt.Fprintln(Writer) os.Exit(1) } func CommandOutput(output []byte, command string, params ...string) { fullCommand := fmt.Sprintf("%s %s", LightCyan(command), DarkGray(strings.Join(params, " "))) - fmt.Printf("command: %s\noutput:\n%s\n", LightGray(fullCommand), DarkGray(string(output))) + fmt.Fprintf(Writer, "command: %s\noutput:\n%s\n", LightGray(fullCommand), DarkGray(string(output))) } func Medal() { @@ -89,45 +92,45 @@ func Medal() { } func Completed(main string, sub ...string) { - fmt.Println() + fmt.Fprintln(Writer) if len(sub) == 1 { - fmt.Printf("%s: %s\n", LightGray(main), LightBlue(sub[0])) + fmt.Fprintf(Writer, "%s: %s\n", LightGray(main), LightBlue(sub[0])) } else { - fmt.Println(LightGray(main), LightBlue(strings.Join(sub, " "))) + fmt.Fprintln(Writer, LightGray(main), LightBlue(strings.Join(sub, " "))) } } func GroupCompleted(main string, sub ...string) { - fmt.Println() + fmt.Fprintln(Writer) line := strings.Repeat("=", calculateMessageLength(main, sub...)) - fmt.Println(LightBlue(line)) + fmt.Fprintln(Writer, LightBlue(line)) if len(sub) == 1 { - fmt.Printf("%s: %s", LightGray(main), LightBlue(sub[0])) + fmt.Fprintf(Writer, "%s: %s", LightGray(main), LightBlue(sub[0])) } else { - fmt.Println(LightGray(main)) + fmt.Fprintln(Writer, LightGray(main)) } } func InfoGrid(table map[string]string) { for k, v := range table { - fmt.Printf(" %s: %s\n", DarkGray(k), LightBlue(v)) + fmt.Fprintf(Writer, " %s: %s\n", DarkGray(k), LightBlue(v)) } - fmt.Println() + fmt.Fprintln(Writer) } func Vector(table []string) { for _, v := range table { - fmt.Printf(" %s\n", DarkGray(v)) + fmt.Fprintf(Writer, " %s\n", DarkGray(v)) } } // Warn shows warning in terminal func ShellCommand(title string, commands ...string) { - fmt.Printf("%s:\n", White(title)) + fmt.Fprintf(Writer, "%s:\n", White(title)) for _, sub := range commands { - fmt.Printf("$ %s\n", LightGray(sub)) + fmt.Fprintf(Writer, "$ %s\n", LightGray(sub)) } - fmt.Println() + fmt.Fprintln(Writer) 
} func calculateMessageLength(message string, subMessages ...string) int {