From 2b68a7c950b14ba6a67875a23725ffe475d41ebe Mon Sep 17 00:00:00 2001
From: Laura Brehm
Date: Fri, 13 Dec 2024 12:23:25 +0000
Subject: [PATCH 1/8] run/cleanup: use specific/clearer name for context

Signed-off-by: Laura Brehm
---
 cli/command/container/run.go | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/cli/command/container/run.go b/cli/command/container/run.go
index 5ef3e8b7bf7b..a730793f4d58 100644
--- a/cli/command/container/run.go
+++ b/cli/command/container/run.go
@@ -154,8 +154,8 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 		defer signal.StopCatch(sigc)
 	}
 
-	ctx, cancelFun := context.WithCancel(context.WithoutCancel(ctx))
-	defer cancelFun()
+	attachStartCtx, attachStartCancel := context.WithCancel(context.WithoutCancel(ctx))
+	defer attachStartCancel()
 
 	var (
 		waitDisplayID chan struct{}
@@ -179,7 +179,7 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 		// ctx should not be cancellable here, as this would kill the stream to the container
 		// and we want to keep the stream open until the process in the container exits or until
 		// the user forcefully terminates the CLI.
-		closeFn, err := attachContainer(ctx, dockerCli, containerID, &errCh, config, container.AttachOptions{
+		closeFn, err := attachContainer(attachStartCtx, dockerCli, containerID, &errCh, config, container.AttachOptions{
 			Stream:     true,
 			Stdin:      config.AttachStdin,
 			Stdout:     config.AttachStdout,
@@ -194,17 +194,17 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 
 	// New context here because we don't want to cancel waiting on container exit/remove
 	// when we cancel attach, etc.
-	statusCtx, cancelStatusCtx := context.WithCancel(context.WithoutCancel(ctx))
+	statusCtx, cancelStatusCtx := context.WithCancel(context.WithoutCancel(attachStartCtx))
 	defer cancelStatusCtx()
 	statusChan := waitExitOrRemoved(statusCtx, apiClient, containerID, copts.autoRemove)
 
 	// start the container
-	if err := apiClient.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil {
+	if err := apiClient.ContainerStart(attachStartCtx, containerID, container.StartOptions{}); err != nil {
 		// If we have hijackedIOStreamer, we should notify
 		// hijackedIOStreamer we are going to exit and wait
 		// to avoid the terminal not being restored.
 		if attach {
-			cancelFun()
+			attachStartCancel()
 			<-errCh
 		}
 
@@ -223,7 +223,7 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 	}
 
 	if config.Tty && dockerCli.Out().IsTerminal() {
-		if err := MonitorTtySize(ctx, dockerCli, containerID, false); err != nil {
+		if err := MonitorTtySize(attachStartCtx, dockerCli, containerID, false); err != nil {
 			_, _ = fmt.Fprintln(stderr, "Error monitoring TTY size:", err)
 		}
 	}
@@ -246,7 +246,7 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 	case status := <-statusChan:
 		// notify hijackedIOStreamer that we're exiting and wait
 		// so that the terminal can be restored.
-		cancelFun()
+		attachStartCancel()
 		<-errCh
 		if status != 0 {
 			return cli.StatusError{StatusCode: status}

From 7f257507a46712b582104cab2ef6ff56655af34b Mon Sep 17 00:00:00 2001
From: Laura Brehm
Date: Fri, 13 Dec 2024 12:27:31 +0000
Subject: [PATCH 2/8] run/cleanup: use main ctx as parent for statusCtx

Signed-off-by: Laura Brehm
---
 cli/command/container/run.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cli/command/container/run.go b/cli/command/container/run.go
index a730793f4d58..0b20a4c5c44e 100644
--- a/cli/command/container/run.go
+++ b/cli/command/container/run.go
@@ -194,7 +194,7 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 
 	// New context here because we don't want to cancel waiting on container exit/remove
 	// when we cancel attach, etc.
-	statusCtx, cancelStatusCtx := context.WithCancel(context.WithoutCancel(attachStartCtx))
+	statusCtx, cancelStatusCtx := context.WithCancel(ctx)
 	defer cancelStatusCtx()
 	statusChan := waitExitOrRemoved(statusCtx, apiClient, containerID, copts.autoRemove)
 

From daba434f52d46d2b33628fc8e526d2ee39b8cced Mon Sep 17 00:00:00 2001
From: Laura Brehm
Date: Fri, 13 Dec 2024 12:29:45 +0000
Subject: [PATCH 3/8] run/refactor: move waitExitOrRemoved call up

Signed-off-by: Laura Brehm
---
 cli/command/container/run.go | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/cli/command/container/run.go b/cli/command/container/run.go
index 0b20a4c5c44e..ace2c6e15a96 100644
--- a/cli/command/container/run.go
+++ b/cli/command/container/run.go
@@ -154,6 +154,12 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 		defer signal.StopCatch(sigc)
 	}
 
+	// New context here because we don't want to cancel waiting on container exit/remove
+	// when we cancel attach, etc.
+	statusCtx, cancelStatusCtx := context.WithCancel(context.WithoutCancel(ctx))
+	defer cancelStatusCtx()
+	statusChan := waitExitOrRemoved(statusCtx, apiClient, containerID, copts.autoRemove)
+
 	attachStartCtx, attachStartCancel := context.WithCancel(context.WithoutCancel(ctx))
 	defer attachStartCancel()
 
@@ -192,12 +198,6 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 		defer closeFn()
 	}
 
-	// New context here because we don't want to cancel waiting on container exit/remove
-	// when we cancel attach, etc.
-	statusCtx, cancelStatusCtx := context.WithCancel(ctx)
-	defer cancelStatusCtx()
-	statusChan := waitExitOrRemoved(statusCtx, apiClient, containerID, copts.autoRemove)
-
 	// start the container
 	if err := apiClient.ContainerStart(attachStartCtx, containerID, container.StartOptions{}); err != nil {
 		// If we have hijackedIOStreamer, we should notify

From 055011c86a73a421811d3ab90a7566080d19fc0b Mon Sep 17 00:00:00 2001
From: Laura Brehm
Date: Fri, 13 Dec 2024 12:33:25 +0000
Subject: [PATCH 4/8] run/refactor: move sigproxy setup closer to start

The signal proxy forwards received signals to the container. There's no
need to start it so long before starting the container, so move it
closer to the `ContainerStart` call.

Signed-off-by: Laura Brehm
---
 cli/command/container/run.go | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/cli/command/container/run.go b/cli/command/container/run.go
index ace2c6e15a96..32c04c12993d 100644
--- a/cli/command/container/run.go
+++ b/cli/command/container/run.go
@@ -143,16 +143,6 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 	if err != nil {
 		return toStatusError(err)
 	}
-	if runOpts.sigProxy {
-		sigc := notifyAllSignals()
-		// since we're explicitly setting up signal handling here, and the daemon will
-		// get notified independently of the client's ctx cancellation, we use this context
-		// but without cancellation to avoid ForwardAllSignals from returning
-		// before all signals are forwarded.
-		bgCtx := context.WithoutCancel(ctx)
-		go ForwardAllSignals(bgCtx, apiClient, containerID, sigc)
-		defer signal.StopCatch(sigc)
-	}
 
 	// New context here because we don't want to cancel waiting on container exit/remove
 	// when we cancel attach, etc.
@@ -188,6 +188,17 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 		defer closeFn()
 	}
 
+	if runOpts.sigProxy {
+		sigc := notifyAllSignals()
+		// since we're explicitly setting up signal handling here, and the daemon will
+		// get notified independently of the client's ctx cancellation, we use this context
+		// but without cancellation to avoid ForwardAllSignals from returning
+		// before all signals are forwarded.
+		bgCtx := context.WithoutCancel(ctx)
+		go ForwardAllSignals(bgCtx, apiClient, containerID, sigc)
+		defer signal.StopCatch(sigc)
+	}
+
 	// start the container
 	if err := apiClient.ContainerStart(attachStartCtx, containerID, container.StartOptions{}); err != nil {
 		// If we have hijackedIOStreamer, we should notify

From 1ed858a5b47736110545a339d9eb9b92886aa5ca Mon Sep 17 00:00:00 2001
From: Laura Brehm
Date: Fri, 13 Dec 2024 12:42:33 +0000
Subject: [PATCH 5/8] run/cleanup: split attachStartCtx into separate ctxs

Signed-off-by: Laura Brehm
---
 cli/command/container/run.go | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/cli/command/container/run.go b/cli/command/container/run.go
index 32c04c12993d..a4d231d663bf 100644
--- a/cli/command/container/run.go
+++ b/cli/command/container/run.go
@@ -150,9 +150,6 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 	defer cancelStatusCtx()
 	statusChan := waitExitOrRemoved(statusCtx, apiClient, containerID, copts.autoRemove)
 
-	attachStartCtx, attachStartCancel := context.WithCancel(context.WithoutCancel(ctx))
-	defer attachStartCancel()
-
 	var (
 		waitDisplayID chan struct{}
 		errCh         chan error
@@ -166,6 +163,9 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 			_, _ = fmt.Fprintln(stdout, containerID)
 		}()
 	}
+
+	attachCtx, attachCancel := context.WithCancel(context.WithoutCancel(ctx))
+	defer attachCancel()
 	if attach {
 		detachKeys := dockerCli.ConfigFile().DetachKeys
 		if runOpts.detachKeys != "" {
@@ -175,7 +175,7 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 		// ctx should not be cancellable here, as this would kill the stream to the container
 		// and we want to keep the stream open until the process in the container exits or until
 		// the user forcefully terminates the CLI.
-		closeFn, err := attachContainer(attachStartCtx, dockerCli, containerID, &errCh, config, container.AttachOptions{
+		closeFn, err := attachContainer(attachCtx, dockerCli, containerID, &errCh, config, container.AttachOptions{
 			Stream:     true,
 			Stdin:      config.AttachStdin,
 			Stdout:     config.AttachStdout,
@@ -199,13 +199,15 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 		defer signal.StopCatch(sigc)
 	}
 
+	startCtx, startCancel := context.WithCancel(context.WithoutCancel(ctx))
+	defer startCancel()
 	// start the container
-	if err := apiClient.ContainerStart(attachStartCtx, containerID, container.StartOptions{}); err != nil {
+	if err := apiClient.ContainerStart(startCtx, containerID, container.StartOptions{}); err != nil {
 		// If we have hijackedIOStreamer, we should notify
 		// hijackedIOStreamer we are going to exit and wait
 		// to avoid the terminal not being restored.
 		if attach {
-			attachStartCancel()
+			attachCancel()
 			<-errCh
 		}
 
@@ -224,7 +226,7 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 	}
 
 	if config.Tty && dockerCli.Out().IsTerminal() {
-		if err := MonitorTtySize(attachStartCtx, dockerCli, containerID, false); err != nil {
+		if err := MonitorTtySize(attachCtx, dockerCli, containerID, false); err != nil {
 			_, _ = fmt.Fprintln(stderr, "Error monitoring TTY size:", err)
 		}
 	}
@@ -247,7 +249,7 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 	case status := <-statusChan:
 		// notify hijackedIOStreamer that we're exiting and wait
 		// so that the terminal can be restored.
-		attachStartCancel()
+		attachCancel()
 		<-errCh
 		if status != 0 {
 			return cli.StatusError{StatusCode: status}

From a0cd512773f68b51b2a862a6f85f65a4c928bc52 Mon Sep 17 00:00:00 2001
From: Laura Brehm
Date: Fri, 13 Dec 2024 12:45:48 +0000
Subject: [PATCH 6/8] run: use main (cancellable) ctx for ContainerStart
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There's no reason to use a non-cancellable context for the
`ContainerStart` call – this was previously only incidentally
non-cancellable, but in reality we probably want to be able to cancel
the start call.

Signed-off-by: Laura Brehm
---
 cli/command/container/run.go | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/cli/command/container/run.go b/cli/command/container/run.go
index a4d231d663bf..47dc9127955a 100644
--- a/cli/command/container/run.go
+++ b/cli/command/container/run.go
@@ -199,10 +199,8 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 		defer signal.StopCatch(sigc)
 	}
 
-	startCtx, startCancel := context.WithCancel(context.WithoutCancel(ctx))
-	defer startCancel()
 	// start the container
-	if err := apiClient.ContainerStart(startCtx, containerID, container.StartOptions{}); err != nil {
+	if err := apiClient.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil {
 		// If we have hijackedIOStreamer, we should notify
 		// hijackedIOStreamer we are going to exit and wait
 		// to avoid the terminal not being restored.

From ad297241b962f6ce59d8999fa7784f31470218b6 Mon Sep 17 00:00:00 2001
From: Laura Brehm
Date: Fri, 13 Dec 2024 17:16:59 +0000
Subject: [PATCH 7/8] run/tests: fix flaky RunAttachTermination test

During an attached `docker run`, the CLI starts capturing signals so
that they can be forwarded to the container. The CLI stops capturing
signals once the container is no longer running/its streams are closed.

This test had two issues:
- it would close the streams early, causing the CLI to think the
  container had exited and stop signal handling (so the SIGINT was not
  captured and interrupted the test instead), and
- it would send immediately on the status channel returned by WaitFunc,
  which would also signal that the container had exited and cause the
  CLI to stop signal handling.

This patch addresses both of these issues and makes this test less
flaky.

Signed-off-by: Laura Brehm
---
 cli/command/container/run_test.go | 29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/cli/command/container/run_test.go b/cli/command/container/run_test.go
index 81b176d904c9..7e5f96d168b9 100644
--- a/cli/command/container/run_test.go
+++ b/cli/command/container/run_test.go
@@ -150,6 +150,8 @@ func TestRunAttachTermination(t *testing.T) {
 	var conn net.Conn
 	killCh := make(chan struct{})
 	attachCh := make(chan struct{})
+	startCh := make(chan struct{})
+	containerExitC := make(chan struct{})
 	fakeCLI := test.NewFakeCli(&fakeClient{
 		createContainerFunc: func(_ *container.Config, _ *container.HostConfig, _ *network.NetworkingConfig, _ *specs.Platform, _ string) (container.CreateResponse, error) {
 			return container.CreateResponse{
@@ -158,6 +160,7 @@ func TestRunAttachTermination(t *testing.T) {
 		},
 		containerKillFunc: func(ctx context.Context, containerID, signal string) error {
 			killCh <- struct{}{}
+			containerExitC <- struct{}{}
 			return nil
 		},
 		containerAttachFunc: func(ctx context.Context, containerID string, options container.AttachOptions) (types.HijackedResponse, error) {
@@ -173,11 +176,19 @@ func TestRunAttachTermination(t *testing.T) {
 			responseChan := make(chan container.WaitResponse, 1)
 			errChan := make(chan error)
 
-			responseChan <- container.WaitResponse{
-				StatusCode: 130,
-			}
+			go func() {
+				<-containerExitC
+				responseChan <- container.WaitResponse{
+					StatusCode: 130,
+				}
+			}()
 			return responseChan, errChan
 		},
+		containerStartFunc: func(containerID string, options container.StartOptions) error {
+			startCh <- struct{}{}
+			return nil
+		},
+
 		// use new (non-legacy) wait API
 		// see: 38591f20d07795aaef45d400df89ca12f29c603b
 		Version: "1.30",
@@ -201,9 +212,14 @@ func TestRunAttachTermination(t *testing.T) {
 	case <-attachCh:
 	}
 
+	// run command should attempt to start the container
+	select {
+	case <-time.After(5 * time.Second):
+		t.Fatal("containerStartCh was not called before the timeout")
+	case <-startCh:
+	}
+
 	assert.NilError(t, syscall.Kill(syscall.Getpid(), syscall.SIGINT))
-	// end stream from "container" so that we'll detach
-	conn.Close()
 
 	select {
 	case <-killCh:
@@ -211,6 +227,9 @@ func TestRunAttachTermination(t *testing.T) {
 	case <-time.After(5 * time.Second):
 		t.Fatal("containerKillFunc was not called before the timeout")
 	}
 
+	// end stream from "container" so that we'll detach
+	conn.Close()
+
 	select {
 	case cmdErr := <-cmdErrC:
 		assert.Equal(t, cmdErr, cli.StatusError{

From 3379da2ec4691d7a585caf8ce4546d535255dd0d Mon Sep 17 00:00:00 2001
From: Laura Brehm
Date: Fri, 13 Dec 2024 15:48:36 +0000
Subject: [PATCH 8/8] run/refactor: WIP extract attach logic from run

Signed-off-by: Laura Brehm
---
 cli/command/container/run.go | 170 +++++++++++++++++++----------------
 1 file changed, 91 insertions(+), 79 deletions(-)

diff --git a/cli/command/container/run.go b/cli/command/container/run.go
index 47dc9127955a..77f5bf5c32d5 100644
--- a/cli/command/container/run.go
+++ b/cli/command/container/run.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"os"
 	"strings"
 	"syscall"
 
@@ -116,12 +117,8 @@ func runRun(ctx context.Context, dockerCli command.Cli, flags *pflag.FlagSet, ro
 	return runContainer(ctx, dockerCli, ropts, copts, containerCfg)
 }
 
-//nolint:gocyclo
 func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOptions, copts *containerOptions, containerCfg *containerConfig) error {
 	config := containerCfg.Config
-	stdout, stderr := dockerCli.Out(), dockerCli.Err()
-	apiClient := dockerCli.Client()
-
 	config.ArgsEscaped = false
 
 	if !runOpts.detach {
@@ -144,71 +141,39 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 		return toStatusError(err)
 	}
 
+	apiClient := dockerCli.Client()
+
 	// New context here because we don't want to cancel waiting on container exit/remove
 	// when we cancel attach, etc.
 	statusCtx, cancelStatusCtx := context.WithCancel(context.WithoutCancel(ctx))
 	defer cancelStatusCtx()
 	statusChan := waitExitOrRemoved(statusCtx, apiClient, containerID, copts.autoRemove)
 
-	var (
-		waitDisplayID chan struct{}
-		errCh         chan error
-	)
+	var waitDisplayID chan struct{}
 	attach := config.AttachStdin || config.AttachStdout || config.AttachStderr
 	if !attach {
 		// Make this asynchronous to allow the client to write to stdin before having to read the ID
 		waitDisplayID = make(chan struct{})
 		go func() {
 			defer close(waitDisplayID)
-			_, _ = fmt.Fprintln(stdout, containerID)
+			_, _ = fmt.Fprintln(dockerCli.Out(), containerID)
 		}()
 	}
 
-	attachCtx, attachCancel := context.WithCancel(context.WithoutCancel(ctx))
-	defer attachCancel()
+	var attachWait func(<-chan int, error) error
 	if attach {
-		detachKeys := dockerCli.ConfigFile().DetachKeys
-		if runOpts.detachKeys != "" {
-			detachKeys = runOpts.detachKeys
-		}
-
-		// ctx should not be cancellable here, as this would kill the stream to the container
-		// and we want to keep the stream open until the process in the container exits or until
-		// the user forcefully terminates the CLI.
-		closeFn, err := attachContainer(attachCtx, dockerCli, containerID, &errCh, config, container.AttachOptions{
-			Stream:     true,
-			Stdin:      config.AttachStdin,
-			Stdout:     config.AttachStdout,
-			Stderr:     config.AttachStderr,
-			DetachKeys: detachKeys,
-		})
+		attachWait, err = setupContainerAttach(ctx, dockerCli, containerID, runOpts, config)
 		if err != nil {
 			return err
 		}
-		defer closeFn()
-	}
-
-	if runOpts.sigProxy {
-		sigc := notifyAllSignals()
-		// since we're explicitly setting up signal handling here, and the daemon will
-		// get notified independently of the client's ctx cancellation, we use this context
-		// but without cancellation to avoid ForwardAllSignals from returning
-		// before all signals are forwarded.
-		bgCtx := context.WithoutCancel(ctx)
-		go ForwardAllSignals(bgCtx, apiClient, containerID, sigc)
-		defer signal.StopCatch(sigc)
 	}
 
 	// start the container
-	if err := apiClient.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil {
-		// If we have hijackedIOStreamer, we should notify
-		// hijackedIOStreamer we are going to exit and wait
-		// to avoid the terminal not being restored.
+	err = apiClient.ContainerStart(ctx, containerID, container.StartOptions{})
+	if err != nil {
 		if attach {
-			attachCancel()
-			<-errCh
+			attachWait(statusChan, err)
 		}
-
 		if copts.autoRemove {
 			// wait container to be removed
 			<-statusChan
@@ -216,51 +181,100 @@ func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOption
 		return toStatusError(err)
 	}
 
+	if attach {
+		return attachWait(statusChan, nil)
+	}
+
+	// Detached mode: wait for the id to be displayed and return.
-	if !attach {
-		// Detached mode
-		<-waitDisplayID
-		return nil
+	<-waitDisplayID
+	return nil
+}
+
+func setupContainerAttach(ctx context.Context, dockerCli command.Cli, containerID string, runOpts *runOptions, config *container.Config) (func(<-chan int, error) error, error) {
+	detachKeys := dockerCli.ConfigFile().DetachKeys
+	if runOpts.detachKeys != "" {
+		detachKeys = runOpts.detachKeys
 	}
 
-	if config.Tty && dockerCli.Out().IsTerminal() {
-		if err := MonitorTtySize(attachCtx, dockerCli, containerID, false); err != nil {
-			_, _ = fmt.Fprintln(stderr, "Error monitoring TTY size:", err)
-		}
+	// ctx should not be cancellable here, as this would kill the stream to the container
+	// and we want to keep the stream open until the process in the container exits or until
+	// the user forcefully terminates the CLI.
+	attachCtx, attachCancel := context.WithCancel(context.WithoutCancel(ctx))
+	errCh, closeFn, err := attachContainer(attachCtx, dockerCli, containerID, config, container.AttachOptions{
+		Stream:     true,
+		Stdin:      config.AttachStdin,
+		Stdout:     config.AttachStdout,
+		Stderr:     config.AttachStderr,
+		DetachKeys: detachKeys,
+	})
+	if err != nil {
+		attachCancel()
+		return nil, err
 	}
 
-	select {
-	case err := <-errCh:
-		if err != nil {
-			if _, ok := err.(term.EscapeError); ok {
-				// The user entered the detach escape sequence.
-				return nil
-			}
+	var sigc chan os.Signal
+	if runOpts.sigProxy {
+		sigc = notifyAllSignals()
+		// since we're explicitly setting up signal handling here, and the daemon will
+		// get notified independently of the client's ctx cancellation, we use this context
+		// but without cancellation to avoid ForwardAllSignals from returning
+		// before all signals are forwarded.
+		bgCtx := context.WithoutCancel(ctx)
+		go ForwardAllSignals(bgCtx, dockerCli.Client(), containerID, sigc)
+	}
 
-			logrus.Debugf("Error hijack: %s", err)
-			return err
+	return func(statusC <-chan int, err error) error {
+		defer closeFn()
+		if runOpts.sigProxy {
+			defer signal.StopCatch(sigc)
 		}
-		status := <-statusChan
-		if status != 0 {
-			return cli.StatusError{StatusCode: status}
+
+		// if the container failed to start, just cancel the streamer
+		// and wait for the terminal to be restored
+		if err != nil {
+			attachCancel()
+			<-errCh
+			return nil
 		}
-	case status := <-statusChan:
-		// notify hijackedIOStreamer that we're exiting and wait
-		// so that the terminal can be restored.
+
+		if config.Tty && dockerCli.Out().IsTerminal() {
+			if err := MonitorTtySize(attachCtx, dockerCli, containerID, false); err != nil {
+				_, _ = fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err)
+			}
+		}
+
+		select {
+		case err := <-errCh:
+			if err != nil {
+				if _, ok := err.(term.EscapeError); ok {
+					// The user entered the detach escape sequence.
+					return nil
+				}
+
+				logrus.Debugf("Error hijack: %s", err)
+				return err
+			}
+			status := <-statusC
+			if status != 0 {
+				return cli.StatusError{StatusCode: status}
+			}
+		case status := <-statusC:
+			// notify hijackedIOStreamer that we're exiting and wait
+			// so that the terminal can be restored.
+ attachCancel() + <-errCh + if status != 0 { + return cli.StatusError{StatusCode: status} + } + } + return nil + }, nil } -func attachContainer(ctx context.Context, dockerCli command.Cli, containerID string, errCh *chan error, config *container.Config, options container.AttachOptions) (func(), error) { +func attachContainer(ctx context.Context, dockerCli command.Cli, containerID string, config *container.Config, options container.AttachOptions) (chan error, func(), error) { resp, errAttach := dockerCli.Client().ContainerAttach(ctx, containerID, options) if errAttach != nil { - return nil, errAttach + return nil, nil, errAttach } var ( @@ -282,8 +296,6 @@ func attachContainer(ctx context.Context, dockerCli command.Cli, containerID str } ch := make(chan error, 1) - *errCh = ch - go func() { ch <- func() error { streamer := hijackedIOStreamer{ @@ -302,7 +314,7 @@ func attachContainer(ctx context.Context, dockerCli command.Cli, containerID str return errAttach }() }() - return resp.Close, nil + return ch, resp.Close, nil } // withHelp decorates the error with a suggestion to use "--help".