From 405886d647c5563a7b999df3640560239807b2f0 Mon Sep 17 00:00:00 2001 From: Josh Wolf Date: Fri, 17 Jan 2025 15:56:45 -0500 Subject: [PATCH] add tests_resource --- docs/resources/tests.md | 104 +++ examples/tests_resource/main.tf | 32 + examples/tests_resource/tests/foo.sh | 11 + internal/bundler/append.go | 99 +++ internal/docker/docker.go | 54 +- internal/drivers/docker_in_docker/doc.go | 12 + internal/drivers/docker_in_docker/driver.go | 205 ++++++ internal/drivers/docker_in_docker/opts.go | 71 ++ internal/drivers/drivers.go | 17 + internal/drivers/k3s_in_docker/doc.go | 10 + internal/drivers/k3s_in_docker/driver.go | 604 ++++++++++++++++++ internal/drivers/k3s_in_docker/opts.go | 114 ++++ internal/entrypoint/entrypoint.go | 2 +- internal/provider/drivers.go | 194 ++++++ internal/provider/provider.go | 4 + internal/provider/store.go | 51 +- .../k3s-in-docker-basic.sh | 5 + .../k3s-in-docker-fails-with-bad-command.sh | 3 + ...s-in-docker-fails-with-proper-exit-code.sh | 3 + .../k3s-in-docker-non-executable.sh | 3 + internal/provider/tests_resource.go | 443 +++++++++++++ internal/provider/tests_resource_test.go | 71 ++ 22 files changed, 2097 insertions(+), 15 deletions(-) create mode 100644 docs/resources/tests.md create mode 100644 examples/tests_resource/main.tf create mode 100755 examples/tests_resource/tests/foo.sh create mode 100644 internal/drivers/docker_in_docker/doc.go create mode 100644 internal/drivers/docker_in_docker/driver.go create mode 100644 internal/drivers/docker_in_docker/opts.go create mode 100644 internal/drivers/drivers.go create mode 100644 internal/drivers/k3s_in_docker/doc.go create mode 100644 internal/drivers/k3s_in_docker/driver.go create mode 100644 internal/drivers/k3s_in_docker/opts.go create mode 100644 internal/provider/drivers.go create mode 100755 internal/provider/testdata/TestAccTestsResource/k3s-in-docker-basic.sh create mode 100755 internal/provider/testdata/TestAccTestsResource/k3s-in-docker-fails-with-bad-command.sh create mode 100755 internal/provider/testdata/TestAccTestsResource/k3s-in-docker-fails-with-proper-exit-code.sh create mode 100644 internal/provider/testdata/TestAccTestsResource/k3s-in-docker-non-executable.sh create mode 100644 internal/provider/tests_resource.go create mode 100644 internal/provider/tests_resource_test.go diff --git a/docs/resources/tests.md b/docs/resources/tests.md new file mode 100644 index 00000000..ddbd52f8 --- /dev/null +++ b/docs/resources/tests.md @@ -0,0 +1,104 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "imagetest_tests Resource - terraform-provider-imagetest" +subcategory: "" +description: |- + +--- + +# imagetest_tests (Resource) + + + + + + +## Schema + +### Required + +- `driver` (String) The driver to use for the test suite. Only one driver can be used at a time. +- `images` (Map of String) Images to use for the test suite. + +### Optional + +- `drivers` (Attributes) The resource specific driver configuration. This is merged with the provider scoped drivers configuration. (see [below for nested schema](#nestedatt--drivers)) +- `name` (String) The name of the test. If one is not provided, a random name will be generated. +- `tests` (Attributes List) An ordered list of test suites to run (see [below for nested schema](#nestedatt--tests)) +- `timeout` (String) The maximum amount of time to wait for all tests to complete. This includes the time it takes to start and destroy the driver. + +### Read-Only + +- `id` (String) The unique identifier for the test. 
If a name is provided, this will be the name appended with a random suffix. + + +### Nested Schema for `drivers` + +Optional: + +- `docker_in_docker` (Attributes) The docker_in_docker driver (see [below for nested schema](#nestedatt--drivers--docker_in_docker)) +- `k3s_in_docker` (Attributes) The k3s_in_docker driver (see [below for nested schema](#nestedatt--drivers--k3s_in_docker)) + + +### Nested Schema for `drivers.docker_in_docker` + +Optional: + +- `image` (String) The image reference to use for the docker-in-docker driver + + + +### Nested Schema for `drivers.k3s_in_docker` + +Optional: + +- `cni` (Boolean) Enable the CNI plugin +- `image` (String) The image reference to use for the k3s_in_docker driver +- `metrics_server` (Boolean) Enable the metrics server +- `network_policy` (Boolean) Enable the network policy +- `registries` (Attributes Map) A map of registries containing configuration for optional auth, tls, and mirror configuration. (see [below for nested schema](#nestedatt--drivers--k3s_in_docker--registries)) +- `traefik` (Boolean) Enable the traefik ingress controller + + +### Nested Schema for `drivers.k3s_in_docker.registries` + +Optional: + +- `mirrors` (Attributes) A map of registries containing configuration for optional auth, tls, and mirror configuration. (see [below for nested schema](#nestedatt--drivers--k3s_in_docker--registries--mirrors)) + + +### Nested Schema for `drivers.k3s_in_docker.registries.mirrors` + +Optional: + +- `endpoints` (List of String) + + + + + + +### Nested Schema for `tests` + +Required: + +- `image` (String) The image reference to use as the base image for the test. +- `name` (String) The name of the test + +Optional: + +- `cmd` (String) When specified, will override the sandbox image's CMD (oci config). +- `content` (Attributes List) The content to use for the test (see [below for nested schema](#nestedatt--tests--content)) +- `envs` (Map of String) Environment variables to set on the test container. These will overwrite the environment variables set in the image's config on conflicts. +- `timeout` (String) The maximum amount of time to wait for the individual test to complete. This is encompassed by the overall timeout of the parent tests resource. 
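+
+For example, a minimal `tests` entry might look like the following (values are
+illustrative; note that images must be referenced by digest, truncated here):
+
+```terraform
+tests = [{
+  name    = "smoke"
+  image   = "cgr.dev/chainguard/kubectl:latest-dev@sha256:…"
+  content = [{ source = "${path.module}/tests" }] # defaults to target /imagetest
+  cmd     = "/imagetest/smoke.sh"
+  timeout = "10m"
+}]
+```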
+ + +### Nested Schema for `tests.content` + +Required: + +- `source` (String) The source path to use for the test + +Optional: + +- `target` (String) The target path to use for the test diff --git a/examples/tests_resource/main.tf b/examples/tests_resource/main.tf new file mode 100644 index 00000000..77dc9862 --- /dev/null +++ b/examples/tests_resource/main.tf @@ -0,0 +1,32 @@ +terraform { + required_providers { + imagetest = { + source = "registry.terraform.io/chainguard-dev/imagetest" + } + } + backend "inmem" {} +} + +locals { repo = "localhost:55232/foo" } + +provider "imagetest" { + repo = local.repo +} + +resource "imagetest_tests" "foo" { + name = "foo" + driver = "k3s_in_docker" + + images = { + foo = "cgr.dev/chainguard/busybox:latest@sha256:b7fc3eef4303188eb295aaf8e02d888ced307d2a45090d6f673b95a41bfc033d" + } + + tests = [ + { + name = "sample" + image = "cgr.dev/chainguard/kubectl:latest-dev@sha256:5751a1672a7debcc5e847bc1cc6ebfc8899aad188ff90f0445bfef194a9fa512" + content = [{ source = "${path.module}/tests" }] + cmd = "/imagetest/foo.sh" + } + ] +} diff --git a/examples/tests_resource/tests/foo.sh b/examples/tests_resource/tests/foo.sh new file mode 100755 index 00000000..9594920a --- /dev/null +++ b/examples/tests_resource/tests/foo.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +# Test sandbox _always_ has these set via the entrypoint wrapper +# set -eux -o pipefail + +# Test sandbox environment is based on wolfi +apk add jq + +# Test sandbox always has $IMAGES, which are the terraform images parsed into +# their constituent parts +echo "$IMAGES" | jq '.' diff --git a/internal/bundler/append.go b/internal/bundler/append.go index 8351147f..9e85e3b2 100644 --- a/internal/bundler/append.go +++ b/internal/bundler/append.go @@ -19,6 +19,105 @@ type AppendOpts struct { Entrypoint []string } +type MutateOpts struct { + RemoteOptions []remote.Option + ImageMutators []func(base v1.Image) (v1.Image, error) +} + +func Mutate(ctx context.Context, base name.Reference, target name.Repository, opts MutateOpts) (name.Reference, error) { + desc, err := remote.Get(base, opts.RemoteOptions...) 
+ if err != nil { + return nil, fmt.Errorf("failed to get image: %w", err) + } + + if desc.MediaType.IsIndex() { + baseidx, err := desc.ImageIndex() + if err != nil { + return nil, fmt.Errorf("failed to get image index: %w", err) + } + + var midx v1.ImageIndex = empty.Index + + mfst, err := baseidx.IndexManifest() + if err != nil { + return nil, fmt.Errorf("failed to get index manifest: %w", err) + } + + for _, m := range mfst.Manifests { + img, err := baseidx.Image(m.Digest) + if err != nil { + return nil, fmt.Errorf("failed to load image: %w", err) + } + + for _, mutator := range opts.ImageMutators { + img, err = mutator(img) + if err != nil { + return nil, fmt.Errorf("failed to mutate image: %w", err) + } + } + + dig, err := img.Digest() + if err != nil { + return nil, fmt.Errorf("failed to get digest: %w", err) + } + + if err := remote.Write(target.Digest(dig.String()), img, opts.RemoteOptions...); err != nil { + return nil, fmt.Errorf("failed to push image: %w", err) + } + + midx = mutate.AppendManifests(midx, mutate.IndexAddendum{ + Add: img, + Descriptor: v1.Descriptor{ + MediaType: m.MediaType, + URLs: m.URLs, + Annotations: m.Annotations, + Platform: m.Platform, + ArtifactType: m.ArtifactType, + }, + }) + } + + dig, err := midx.Digest() + if err != nil { + return nil, fmt.Errorf("failed to get index digest: %w", err) + } + + ref := target.Digest(dig.String()) + if err := remote.WriteIndex(ref, midx, opts.RemoteOptions...); err != nil { + return nil, fmt.Errorf("failed to push index: %w", err) + } + + return ref, nil + + } else if desc.MediaType.IsImage() { + img, err := remote.Image(base, opts.RemoteOptions...) + if err != nil { + return nil, fmt.Errorf("failed to get image: %w", err) + } + + for _, mutator := range opts.ImageMutators { + img, err = mutator(img) + if err != nil { + return nil, fmt.Errorf("failed to mutate image: %w", err) + } + } + + mdig, err := img.Digest() + if err != nil { + return nil, fmt.Errorf("failed to get digest: %w", err) + } + + ref := target.Digest(mdig.String()) + if err := remote.Write(ref, img, opts.RemoteOptions...); err != nil { + return nil, fmt.Errorf("failed to push image: %w", err) + } + + return ref, nil + } + + return nil, fmt.Errorf("reference [%s] uses an unsupported media type: [%s]", base.String(), desc.MediaType) +} + // Append mutates the source Image or ImageIndex with the provided append // options, and pushes it to the target repository via its digest. func Append(ctx context.Context, base name.Reference, target name.Repository, opts AppendOpts) (name.Reference, error) { diff --git a/internal/docker/docker.go b/internal/docker/docker.go index 2e26cd26..7051c64d 100644 --- a/internal/docker/docker.go +++ b/internal/docker/docker.go @@ -96,7 +96,6 @@ func New(opts ...Option) (*Client, error) { } func (d *Client) Run(ctx context.Context, req *Request) (string, error) { - req.AutoRemove = true cid, err := d.start(ctx, req) if err != nil { return "", fmt.Errorf("starting container: %w", err) @@ -109,18 +108,18 @@ func (d *Client) Run(ctx context.Context, req *Request) (string, error) { // adding this to Start(), but its unclear how useful those logs would be, // and how to even surface them without being overly verbose. 
 if req.Logger != nil {
-		defer func() {
-			logs, err := d.cli.ContainerLogs(ctx, cid, container.LogsOptions{
-				ShowStdout: true,
-				ShowStderr: true,
-				Follow:     true,
-			})
-			if err != nil {
-				fmt.Fprintf(req.Logger, "failed to get logs: %v\n", err)
-				return
-			}
-			defer logs.Close()
+		logs, err := d.cli.ContainerLogs(ctx, cid, container.LogsOptions{
+			ShowStdout: true,
+			ShowStderr: true,
+			Follow:     true,
+		})
+		if err != nil {
+			return "", fmt.Errorf("failed to get logs: %w", err)
+		}
+		// Closed when Run returns; this also unblocks the copy goroutine below.
+		defer logs.Close()

+		go func() {
 			_, err = stdcopy.StdCopy(req.Logger, req.Logger, logs)
 			if err != nil {
 				fmt.Fprintf(req.Logger, "error copying logs: %v", err)
@@ -128,6 +127,34 @@
 		}()
 	}

+	// If a health check is present, set up a poller to watch the health
+	// status. The channel is buffered so the poller never blocks if the
+	// container exits before anything receives the error.
+	unhealthyCh := make(chan error, 1)
+	if req.HealthCheck != nil {
+		go func() {
+			for {
+				select {
+				case <-ctx.Done():
+					return
+				default:
+					inspect, err := d.cli.ContainerInspect(ctx, cid)
+					if err != nil {
+						unhealthyCh <- fmt.Errorf("inspecting container: %w", err)
+						return
+					}
+
+					if inspect.State != nil && inspect.State.Health != nil {
+						if inspect.State.Health.Status == "unhealthy" {
+							status := "unknown"
+							if n := len(inspect.State.Health.Log); n > 0 {
+								status = inspect.State.Health.Log[n-1].Output
+							}
+							unhealthyCh <- fmt.Errorf("container became unhealthy, last status: %s", status)
+							return
+						}
+					}
+					time.Sleep(time.Second)
+				}
+			}
+		}()
+	}
+
 	select {
 	case <-ctx.Done():
 		return "", fmt.Errorf("context cancelled while waiting for container to exit: %w", ctx.Err())
@@ -143,6 +170,9 @@
 		if status.StatusCode != 0 {
 			return "", fmt.Errorf("container exited with non-zero status code: %d", status.StatusCode)
 		}
+
+	case err := <-unhealthyCh:
+		return "", err
 	}

 	return cid, nil
diff --git a/internal/drivers/docker_in_docker/doc.go b/internal/drivers/docker_in_docker/doc.go
new file mode 100644
index 00000000..ad94f70b
--- /dev/null
+++ b/internal/drivers/docker_in_docker/doc.go
@@ -0,0 +1,12 @@
+// dockerindocker is a driver that runs each test container in its _own_ dind
+// sandbox. Each test container is created as a new image, with the base layer
+// containing the dind image, and subsequent layers containing the test
+// container. Mapped out, the layers look like:
+//
+//	0: cgr.dev/chainguard-private/docker-dind:latest
+//	1: imagetest-created layer, with the appropriate test content and apk dependencies
+//
+// Things are done this way to ensure the tests that run _feel_ like they are
+// simply in an environment with docker installed, while also ensuring they are
+// portable to other drivers, such as docker-in-a-vm.
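+//
+// A minimal sketch of how this driver is constructed and used (names are from
+// this package; ctx and the bundled test image reference are assumed to be
+// provided by the caller):
+//
+//	d, err := NewDriver("example", WithImageRef("docker:dind"))
+//	if err != nil { ... }
+//	if err := d.Setup(ctx); err != nil { ... }
+//	defer d.Teardown(ctx)
+//	err = d.Run(ctx, testRef)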
+package dockerindocker diff --git a/internal/drivers/docker_in_docker/driver.go b/internal/drivers/docker_in_docker/driver.go new file mode 100644 index 00000000..93741d26 --- /dev/null +++ b/internal/drivers/docker_in_docker/driver.go @@ -0,0 +1,205 @@ +package dockerindocker + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "runtime" + "time" + + "github.com/chainguard-dev/clog" + "github.com/chainguard-dev/terraform-provider-imagetest/internal/bundler" + "github.com/chainguard-dev/terraform-provider-imagetest/internal/docker" + "github.com/chainguard-dev/terraform-provider-imagetest/internal/drivers" + "github.com/chainguard-dev/terraform-provider-imagetest/internal/entrypoint" + "github.com/chainguard-dev/terraform-provider-imagetest/internal/harness" + "github.com/google/go-containerregistry/pkg/name" + ggcrv1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/remote" + v1 "github.com/moby/docker-image-spec/specs-go/v1" +) + +type driver struct { + ImageRef name.Reference // The image to use for docker-in-docker + + name string + stack *harness.Stack + cli *docker.Client + config *dockerConfig + ropts []remote.Option +} + +func NewDriver(n string, opts ...DriverOpts) (drivers.Tester, error) { + d := &driver{ + ImageRef: name.MustParseReference("docker:dind"), + name: n, + stack: harness.NewStack(), + ropts: []remote.Option{ + remote.WithPlatform(ggcrv1.Platform{ + OS: "linux", + Architecture: runtime.GOARCH, + }), + }, + config: &dockerConfig{}, + } + + for _, opt := range opts { + if err := opt(d); err != nil { + return nil, err + } + } + + return d, nil +} + +// Setup implements drivers.TestDriver. +func (d *driver) Setup(ctx context.Context) error { + cli, err := docker.New() + if err != nil { + return err + } + d.cli = cli + + return nil +} + +// Teardown implements drivers.TestDriver. +func (d *driver) Teardown(ctx context.Context) error { + return d.stack.Teardown(ctx) +} + +// Run implements drivers.TestDriver. +func (d *driver) Run(ctx context.Context, ref name.Reference) error { + // Build the driver image, uses the provided dind image appended with the ref + tref, err := bundler.Mutate(ctx, d.ImageRef, ref.Context(), bundler.MutateOpts{ + RemoteOptions: d.ropts, + ImageMutators: []func(ggcrv1.Image) (ggcrv1.Image, error){ + func(base ggcrv1.Image) (ggcrv1.Image, error) { + timg, err := remote.Image(ref, d.ropts...) + if err != nil { + return nil, fmt.Errorf("failed to load test image: %w", err) + } + + layers, err := timg.Layers() + if err != nil { + return nil, fmt.Errorf("failed to get layers: %w", err) + } + + mutated, err := mutate.AppendLayers(base, layers...) + if err != nil { + return nil, fmt.Errorf("failed to append layers: %w", err) + } + + mcfgf, err := mutated.ConfigFile() + if err != nil { + return nil, fmt.Errorf("failed to get config file: %w", err) + } + + tcfgf, err := timg.ConfigFile() + if err != nil { + return nil, fmt.Errorf("failed to get config file: %w", err) + } + + // Ensure we preserve things we want from the original image + mcfgf.Config.Entrypoint = tcfgf.Config.Entrypoint + mcfgf.Config.Cmd = tcfgf.Config.Cmd + mcfgf.Config.WorkingDir = tcfgf.Config.WorkingDir + + // Append any environment vars + mcfgf.Config.Env = append(mcfgf.Config.Env, tcfgf.Config.Env...) 
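+				// Note: the test image's env entries are appended after the
+				// base image's, so for any duplicated variable the test
+				// image's value is the one that takes effect at runtime.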
+ + return mutate.ConfigFile(mutated, mcfgf) + }, + }, + }) + if err != nil { + return fmt.Errorf("failed to build driver image: %w", err) + } + + nw, err := d.cli.CreateNetwork(ctx, &docker.NetworkRequest{}) + if err != nil { + return err + } + + if err := d.stack.Add(func(ctx context.Context) error { + return d.cli.RemoveNetwork(ctx, nw) + }); err != nil { + return err + } + + content := []*docker.Content{} + cfg, err := d.config.Content() + if err != nil { + return err + } + content = append(content, cfg) + + r, w := io.Pipe() + defer w.Close() + + go func() { + defer r.Close() + scanner := bufio.NewScanner(r) + for scanner.Scan() { + clog.InfoContext(ctx, scanner.Text()) + } + }() + + clog.InfoContext(ctx, "running docker-in-docker test", "image_ref", tref.String()) + cid, err := d.cli.Run(ctx, &docker.Request{ + Name: d.name, + Ref: tref, + Privileged: true, // Required for dind + User: "0:0", + Networks: []docker.NetworkAttachment{{ + Name: nw.Name, + ID: nw.ID, + }}, + AutoRemove: false, + HealthCheck: &v1.HealthcheckConfig{ + Test: append([]string{"CMD"}, entrypoint.DefaultHealthCheckCommand...), + Interval: 1 * time.Second, + Timeout: 5 * time.Second, + Retries: 1, + StartPeriod: 1 * time.Second, + }, + ExtraHosts: []string{"host.docker.internal:host-gateway"}, + Contents: content, + Logger: w, + }) + if err != nil { + return err + } + + if err := d.stack.Add(func(ctx context.Context) error { + return d.cli.Remove(ctx, &docker.Response{ + ID: cid, + }) + }); err != nil { + return err + } + + return nil +} + +type dockerConfig struct { + Auths map[string]dockerAuthEntry `json:"auths,omitempty"` +} + +type dockerAuthEntry struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` +} + +func (c dockerConfig) Content() (*docker.Content, error) { + data, err := json.Marshal(c) + if err != nil { + return nil, err + } + + return docker.NewContentFromString(string(data), "/root/.docker/config.json"), nil +} diff --git a/internal/drivers/docker_in_docker/opts.go b/internal/drivers/docker_in_docker/opts.go new file mode 100644 index 00000000..6200bcac --- /dev/null +++ b/internal/drivers/docker_in_docker/opts.go @@ -0,0 +1,71 @@ +package dockerindocker + +import ( + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +type DriverOpts func(*driver) error + +func WithImageRef(rawRef string) DriverOpts { + return func(d *driver) error { + ref, err := name.ParseReference(rawRef) + if err != nil { + return err + } + d.ImageRef = ref + return nil + } +} + +func WithRemoteOptions(opts ...remote.Option) DriverOpts { + return func(d *driver) error { + if d.ropts == nil { + d.ropts = make([]remote.Option, 0) + } + d.ropts = append(d.ropts, opts...) + return nil + } +} + +// WithRegistryAuth invokes the docker-credential-helper to exchange static +// creds that are mounted in the container. This current implementation will +// fail if the tests take longer than the tokens ttl. 
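+// The resolved credentials are serialized into the sandbox at
+// /root/.docker/config.json (see dockerConfig.Content).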
+// TODO: Replace this with Jon's cred proxy: https://gist.github.com/jonjohnsonjr/6d20148edca0f187cfed050cee669685
+func WithRegistryAuth(registry string) DriverOpts {
+	return func(d *driver) error {
+		if d.config == nil {
+			d.config = &dockerConfig{
+				Auths: make(map[string]dockerAuthEntry),
+			}
+		}
+
+		if d.config.Auths == nil {
+			d.config.Auths = make(map[string]dockerAuthEntry)
+		}
+
+		r, err := name.NewRegistry(registry)
+		if err != nil {
+			return err
+		}
+
+		a, err := authn.DefaultKeychain.Resolve(r)
+		if err != nil {
+			return err
+		}
+
+		acfg, err := a.Authorization()
+		if err != nil {
+			return err
+		}
+
+		d.config.Auths[registry] = dockerAuthEntry{
+			Username: acfg.Username,
+			Password: acfg.Password,
+			Auth:     acfg.Auth,
+		}
+
+		return nil
+	}
+}
diff --git a/internal/drivers/drivers.go b/internal/drivers/drivers.go
new file mode 100644
index 00000000..d766788f
--- /dev/null
+++ b/internal/drivers/drivers.go
@@ -0,0 +1,17 @@
+package drivers
+
+import (
+	"context"
+
+	"github.com/google/go-containerregistry/pkg/name"
+)
+
+type Tester interface {
+	// Setup creates the driver's resources; it must be run before Run() is
+	// available.
+	Setup(context.Context) error
+	// Teardown destroys the driver's resources.
+	Teardown(context.Context) error
+	// Run takes a built test image and runs it.
+	Run(context.Context, name.Reference) error
+}
diff --git a/internal/drivers/k3s_in_docker/doc.go b/internal/drivers/k3s_in_docker/doc.go
new file mode 100644
index 00000000..5591a88e
--- /dev/null
+++ b/internal/drivers/k3s_in_docker/doc.go
@@ -0,0 +1,10 @@
+// k3sindocker is a driver that runs each test in a pod within a k3s cluster
+// run in docker. Each test pod is run to completion depending on the
+// entrypoint/cmd combination of the test image. The test pod effectively has
+// cluster-admin on the cluster it runs in: tests authored with this driver
+// operate inside the network boundary of a pod within the cluster, and the
+// default KUBECONFIG is pre-wired with cluster-admin scope. This means
+// network endpoints can be reached directly by addressing their in-cluster
+// names (*.svc.cluster.local), without any port-forwarding or external
+// exposure.
+package k3sindocker
diff --git a/internal/drivers/k3s_in_docker/driver.go b/internal/drivers/k3s_in_docker/driver.go
new file mode 100644
index 00000000..aeb5f4bd
--- /dev/null
+++ b/internal/drivers/k3s_in_docker/driver.go
@@ -0,0 +1,604 @@
+package k3sindocker
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strconv"
+	"text/template"
+	"time"
+
+	"github.com/chainguard-dev/clog"
+	"github.com/chainguard-dev/terraform-provider-imagetest/internal/docker"
+	"github.com/chainguard-dev/terraform-provider-imagetest/internal/drivers"
+	"github.com/chainguard-dev/terraform-provider-imagetest/internal/entrypoint"
+	"github.com/chainguard-dev/terraform-provider-imagetest/internal/harness"
+	"github.com/docker/docker/api/types/mount"
+	"github.com/docker/go-connections/nat"
+	"github.com/google/go-containerregistry/pkg/name"
+	v1 "github.com/moby/docker-image-spec/specs-go/v1"
+	authv1 "k8s.io/api/authorization/v1"
+	corev1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+// driver is a k8s driver that spins up a k3s cluster in docker alongside a
+// network-attached sandbox.
+type driver struct { + ImageRef name.Reference // The image reference to use for the k3s cluster + CNI bool // Toggles whether the default k3s CNI is enabled + Traefik bool // Toggles whether the default k3s traefik ingress controller is enabled + MetricsServer bool // Toggles whether the default k3s metrics server is enabled + NetworkPolicy bool // Toggles whether the default k3s network policy controller is enabled + Snapshotter string // The containerd snapshotter to use + Registries map[string]*K3sRegistryConfig + Namespace string // The namespace to use for the test pods + + kubeconfigWritePath string // When set, the generated kubeconfig will be written to this path on the host + + name string + stack *harness.Stack + kcli kubernetes.Interface +} + +type K3sRegistryConfig struct { + Auth *K3sRegistryAuthConfig + Mirrors *K3sRegistryMirrorConfig +} + +type K3sRegistryAuthConfig struct { + Username string + Password string + Auth string +} + +type K3sRegistryMirrorConfig struct { + Endpoints []string +} + +func NewDriver(n string, opts ...DriverOpts) (drivers.Tester, error) { + k := &driver{ + ImageRef: name.MustParseReference("cgr.dev/chainguard/k3s:latest-dev"), + CNI: true, + Traefik: false, + MetricsServer: false, + NetworkPolicy: false, + Namespace: "imagetest", + + name: n, + stack: harness.NewStack(), + } + + for _, opt := range opts { + if err := opt(k); err != nil { + return nil, err + } + } + + return k, nil +} + +func (k *driver) Setup(ctx context.Context) error { + cli, err := docker.New() + if err != nil { + return fmt.Errorf("creating docker client: %w", err) + } + + contents := []*docker.Content{} + + ktpl := fmt.Sprintf(` +tls-san: "%[1]s" +disable: +{{- if not .Traefik }} + - traefik +{{- end }} +{{- if not .MetricsServer }} + - metrics-server +{{- end }} +{{- if not .NetworkPolicy }} + - network-policy +{{- end }} +{{- if not .CNI }} +flannel-backend: none +{{- end }} +snapshotter: "{{ .Snapshotter }}" +`, k.name) + + var tplo bytes.Buffer + t := template.Must(template.New("k3s-config").Parse(ktpl)) + if err := t.Execute(&tplo, k); err != nil { + return fmt.Errorf("executing template: %w", err) + } + + rtpl := ` +mirrors: + {{- range $k, $v := .Registries }} + {{- if $v.Mirrors }} + "{{ $k }}": + endpoint: + {{- range $v.Mirrors.Endpoints }} + - "{{ . 
}}" + {{- end }} + {{- end }} + {{- end}} + +configs: + {{- range $k, $v := .Registries }} + {{- if $v.Auth }} + "{{ $k }}": + auth: + username: "{{ $v.Auth.Username }}" + password: "{{ $v.Auth.Password }}" + auth: "{{ $v.Auth.Auth }}" + {{- end }} + {{- end }} +` + + var rto bytes.Buffer + t = template.Must(template.New("k3s-registries").Parse(rtpl)) + if err := t.Execute(&rto, k); err != nil { + return fmt.Errorf("executing template: %w", err) + } + + contents = append(contents, + docker.NewContentFromString(tplo.String(), "/etc/rancher/k3s/config.yaml"), + docker.NewContentFromString(rto.String(), "/etc/rancher/k3s/registries.yaml"), + ) + + nw, err := cli.CreateNetwork(ctx, &docker.NetworkRequest{}) + if err != nil { + return fmt.Errorf("creating docker network: %w", err) + } + + if err := k.stack.Add(func(ctx context.Context) error { + return cli.RemoveNetwork(ctx, nw) + }); err != nil { + return fmt.Errorf("adding network teardown to stack: %w", err) + } + + clog.InfoContext(ctx, "starting k3s in docker", + "image_ref", k.ImageRef.String(), + "network_id", nw.ID, + ) + + resp, err := cli.Start(ctx, &docker.Request{ + Name: k.name, + Ref: k.ImageRef, + Cmd: []string{"server"}, + Privileged: true, // This doesn't work without privilege, so don't make it configurable + Networks: []docker.NetworkAttachment{{ + Name: nw.Name, + ID: nw.ID, + }}, + Labels: map[string]string{ + "dev.chainguard.imagetest/kubeconfig-path": k.kubeconfigWritePath, + }, + Mounts: []mount.Mount{{ + Type: mount.TypeTmpfs, + Target: "/run", + }, { + Type: mount.TypeTmpfs, + Target: "/tmp", + }}, + HealthCheck: &v1.HealthcheckConfig{ + Test: []string{"CMD", "/bin/sh", "-c", "kubectl get --raw='/healthz'"}, + Interval: 2 * time.Second, + Timeout: 5 * time.Second, + Retries: 10, + StartInterval: 1 * time.Second, + }, + PortBindings: nat.PortMap{ + nat.Port(strconv.Itoa(6443)): []nat.PortBinding{{ + HostIP: "127.0.0.1", + HostPort: "", // Lets the docker daemon pick a random port + }}, + }, + ExtraHosts: []string{"host.docker.internal:host-gateway"}, + Contents: contents, + }) + if err != nil { + return fmt.Errorf("starting k3s: %w", err) + } + + if err := k.stack.Add(func(ctx context.Context) error { + return cli.Remove(ctx, resp) + }); err != nil { + return err + } + + kcfgraw, err := resp.ReadFile(ctx, "/etc/rancher/k3s/k3s.yaml") + if err != nil { + return fmt.Errorf("getting kubeconfig: %w", err) + } + + config, err := clientcmd.RESTConfigFromKubeConfig(kcfgraw) + if err != nil { + return fmt.Errorf("creating kubernetes config: %w", err) + } + + config.Host = fmt.Sprintf("https://127.0.0.1:%s", resp.NetworkSettings.Ports["6443/tcp"][0].HostPort) + + kcli, err := kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("creating kubernetes client: %w", err) + } + k.kcli = kcli + + if k.kubeconfigWritePath != "" { + kcfg, err := clientcmd.Load(kcfgraw) + if err != nil { + return fmt.Errorf("loading kubeconfig: %w", err) + } + + for _, cluster := range kcfg.Clusters { + cluster.Server = config.Host + } + + if err := os.MkdirAll(filepath.Dir(k.kubeconfigWritePath), 0755); err != nil { + return fmt.Errorf("failed to create kubeconfig directory: %w", err) + } + + clog.InfoContext(ctx, "writing kubeconfig to file", "path", k.kubeconfigWritePath) + if err := clientcmd.WriteToFile(*kcfg, k.kubeconfigWritePath); err != nil { + return fmt.Errorf("writing kubeconfig: %w", err) + } + } + + return k.preflight(ctx) +} + +func (k *driver) Teardown(ctx context.Context) error { + return k.stack.Teardown(ctx) +} + +func 
(k *driver) Run(ctx context.Context, ref name.Reference) error { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "imagetest-", + Namespace: k.Namespace, + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: "imagetest", + SecurityContext: &corev1.PodSecurityContext{}, + RestartPolicy: corev1.RestartPolicyNever, + Volumes: []corev1.Volume{ + { + Name: "kube-api-access", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + ServiceAccountToken: &corev1.ServiceAccountTokenProjection{ + Path: "token", + ExpirationSeconds: &[]int64{3600}[0], + }, + }, + { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "kube-root-ca.crt", + }, + Items: []corev1.KeyToPath{ + { + Key: "ca.crt", + Path: "ca.crt", + }, + }, + }, + }, + { + DownwardAPI: &corev1.DownwardAPIProjection{ + Items: []corev1.DownwardAPIVolumeFile{ + { + Path: "namespace", + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Containers: []corev1.Container{ + // The primary test workspace + { + Name: "sandbox", + Image: ref.String(), + SecurityContext: &corev1.SecurityContext{ + Privileged: &[]bool{true}[0], + RunAsUser: &[]int64{0}[0], + RunAsGroup: &[]int64{0}[0], + }, + Env: []corev1.EnvVar{ + { + Name: "IMAGETEST", + Value: "true", + }, + { + Name: "IMAGETEST_DRIVER", + Value: "k3s_in_docker", + }, + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + }, + WorkingDir: "/imagetest", + TerminationMessagePath: entrypoint.DefaultStderrLogPath, + StartupProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: entrypoint.DefaultHealthCheckCommand, + }, + }, + InitialDelaySeconds: 0, + PeriodSeconds: 1, + FailureThreshold: 60, // Allow the pod ample time to start + TimeoutSeconds: 1, + SuccessThreshold: 1, + }, + // Once running, any failure should be captured by probe and considered a stop + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: entrypoint.DefaultHealthCheckCommand, + }, + }, + PeriodSeconds: 1, + FailureThreshold: 1, + TimeoutSeconds: 1, + SuccessThreshold: 1, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "kube-api-access", + MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", + ReadOnly: true, + }, + }, + }, + }, + }, + } + + clog.InfoContext(ctx, "creating k3s_in_docker test sandbox pod", "pod_name", pod.Name, "pod_namespace", pod.Namespace) + pobj, err := k.kcli.CoreV1().Pods(k.Namespace).Create(ctx, pod, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create pod: %w", err) + } + + // watch the pod status + pw, err := k.kcli.CoreV1().Pods(pobj.Namespace).Watch(ctx, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.name=%s", pobj.Name), + }) + if err != nil { + return fmt.Errorf("failed to watch pod: %w", err) + } + defer pw.Stop() + + running := false + for !running { + select { + case <-ctx.Done(): + return ctx.Err() + case event, ok := <-pw.ResultChan(): + if !ok { + return fmt.Errorf("channel closed") + } + + pod, ok := event.Object.(*corev1.Pod) + if 
!ok {
+				return fmt.Errorf("unexpected watch event type: %T", event.Object)
+			}
+
+			if event.Type == watch.Deleted {
+				return fmt.Errorf("pod was deleted before becoming ready")
+			}
+
+			if pod.Status.Phase == corev1.PodFailed || pod.Status.Phase == corev1.PodUnknown {
+				return fmt.Errorf("pod failed to start")
+			}
+
+			for _, status := range pod.Status.ContainerStatuses {
+				if status.Name == "sandbox" {
+					if status.State.Waiting == nil {
+						running = true
+						clog.InfoContext(ctx, "test sandbox pod scheduled", "pod_name", pobj.Name, "pod_namespace", pobj.Namespace, "status", pod.Status.Phase)
+						break
+					}
+				}
+			}
+
+			clog.InfoContext(ctx, "waiting for test sandbox pod to schedule", "pod_name", pobj.Name, "pod_namespace", pobj.Namespace, "status", pod.Status.Phase)
+		}
+	}
+
+	lreq := k.kcli.CoreV1().Pods(k.Namespace).GetLogs(pobj.Name, &corev1.PodLogOptions{Follow: true, Container: "sandbox"})
+	logs, err := lreq.Stream(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to stream logs: %w", err)
+	}
+	defer logs.Close()
+
+	logsDoneCh := make(chan error)
+
+	go func() {
+		defer close(logsDoneCh)
+		r := bufio.NewReader(logs)
+		for {
+			line, err := r.ReadBytes('\n')
+			if err != nil {
+				if err == io.EOF {
+					return
+				}
+				logsDoneCh <- fmt.Errorf("streaming logs: %w", err)
+				// Bail out rather than spinning on a broken reader.
+				return
+			}
+			clog.InfoContext(ctx, string(line), "pod", pobj.Name)
+		}
+	}()
+
+	started := false
+	for {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("context cancelled while waiting for pod completion: %w", ctx.Err())
+		case event, ok := <-pw.ResultChan():
+			if !ok {
+				return fmt.Errorf("pod watch channel closed unexpectedly")
+			}
+			pod, ok := event.Object.(*corev1.Pod)
+			if !ok {
+				continue
+			}
+
+			// We can return from here (where we're already scheduled) in one of
+			// three ways (plus context cancellation, handled above in this select):
+			// 1. The pod starts, and finishes successfully (0/1 Completed)
+			// 2. The pod starts, and fails without debug mode (0/1 Failed)
+			// 3. The pod starts, and fails with debug mode (0/1 Running)
+
+			// Case 1
+			if pod.Status.Phase == corev1.PodSucceeded {
+				clog.InfoContext(ctx, "pod successfully completed", "pod", pobj.Name)
+				return nil
+			}
+
+			// Case 2
+			if pod.Status.Phase == corev1.PodFailed {
+				for _, cs := range pod.Status.ContainerStatuses {
+					if cs.Name == "sandbox" {
+						if cs.State.Terminated != nil {
+							termMsg := fmt.Sprintf("exit code: %d, reason: %s, message: %s",
+								cs.State.Terminated.ExitCode,
+								cs.State.Terminated.Reason,
+								cs.State.Terminated.Message,
+							)
+							return fmt.Errorf("pod %s/%s exited with failure\n\n%s", pobj.Namespace, pobj.Name, termMsg)
+						}
+					}
+				}
+				return fmt.Errorf("pod %s/%s exited with failure", pobj.Namespace, pobj.Name)
+			}
+
+			// Case 3
+			if pod.Status.Phase == corev1.PodRunning {
+				for _, cs := range pod.Status.ContainerStatuses {
+					if cs.Name == "sandbox" {
+						// We always see this event once on startup, so only error out
+						// if we go into an unready state _after_ the first startup
+						if !cs.Ready && cs.Started != nil && *cs.Started && started {
+							events, err := k.kcli.CoreV1().Events(pobj.Namespace).List(ctx, metav1.ListOptions{
+								FieldSelector: fmt.Sprintf("involvedObject.name=%s", pobj.Name),
+							})
+							var readinessMsg string
+							if err == nil && len(events.Items) > 0 {
+								lastEvent := events.Items[len(events.Items)-1]
+								readinessMsg = lastEvent.Message
+							}
+							return fmt.Errorf("pod %s/%s failed and is paused\n\n%s", pobj.Namespace, pobj.Name, readinessMsg)
+						}
+
+						started = true
+					}
+				}
+			}
+
+		case err, ok := <-logsDoneCh:
+			if ok && err != nil {
+				return fmt.Errorf("failed to stream logs: %w", err)
+			}
+			// The channel is closed once the log stream ends; nil it out so
+			// this case stops firing in a busy loop.
+			logsDoneCh = nil
+		}
+	}
+}
+
+// preflight creates the necessary k8s resources to run the tests in pods.
+func (k *driver) preflight(ctx context.Context) error {
+	// Check that we can actually do things with the client
+	resp, err := k.kcli.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, &authv1.SelfSubjectAccessReview{
+		Spec: authv1.SelfSubjectAccessReviewSpec{
+			ResourceAttributes: &authv1.ResourceAttributes{
+				Namespace: k.Namespace,
+				Verb:      "create",
+				Group:     "", // pods live in the core ("") API group
+				Resource:  "pods",
+			},
+		},
+	}, metav1.CreateOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to create authorization review: %w", err)
+	}
+
+	if !resp.Status.Allowed {
+		return fmt.Errorf("user does not have permission to create pods")
+	}
+
+	// Create the namespace
+	ns, err := k.kcli.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: k.Namespace,
+		},
+	}, metav1.CreateOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to create namespace: %w", err)
+	}
+
+	// Create the relevant rbac
+	sa, err := k.kcli.CoreV1().ServiceAccounts(ns.Name).Create(ctx, &corev1.ServiceAccount{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "imagetest",
+			Namespace: ns.Name,
+		},
+	}, metav1.CreateOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to create service account: %w", err)
+	}
+
+	// Create the cluster role binding (cluster-scoped, so no namespace is set)
+	_, err = k.kcli.RbacV1().ClusterRoleBindings().Create(ctx, &rbacv1.ClusterRoleBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "imagetest",
+		},
+		Subjects: []rbacv1.Subject{
+			{
+				Kind:      rbacv1.ServiceAccountKind,
+				Name:      sa.Name,
+				Namespace: sa.Namespace,
+			},
+		},
+		RoleRef: rbacv1.RoleRef{
+			APIGroup: rbacv1.GroupName,
+			Kind:     "ClusterRole",
+			Name:     "cluster-admin",
+		},
+	}, metav1.CreateOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to create cluster role binding: %w", err)
+	}
+
+	return nil
+}
diff --git a/internal/drivers/k3s_in_docker/opts.go
b/internal/drivers/k3s_in_docker/opts.go new file mode 100644 index 00000000..abe56568 --- /dev/null +++ b/internal/drivers/k3s_in_docker/opts.go @@ -0,0 +1,114 @@ +package k3sindocker + +import ( + "fmt" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" +) + +type DriverOpts func(*driver) error + +func WithImageRef(rawRef string) DriverOpts { + return func(k *driver) error { + ref, err := name.ParseReference(rawRef) + if err != nil { + return err + } + k.ImageRef = ref + return nil + } +} + +func WithCNI(enabled bool) DriverOpts { + return func(k *driver) error { + k.CNI = enabled + return nil + } +} + +func WithTraefik(enabled bool) DriverOpts { + return func(k *driver) error { + k.Traefik = enabled + return nil + } +} + +func WithMetricsServer(enabled bool) DriverOpts { + return func(k *driver) error { + k.MetricsServer = enabled + return nil + } +} + +func WithNetworkPolicy(enabled bool) DriverOpts { + return func(k *driver) error { + k.NetworkPolicy = enabled + return nil + } +} + +func WithSnapshotter(snapshotter string) DriverOpts { + return func(k *driver) error { + k.Snapshotter = snapshotter + return nil + } +} + +func WithRegistry(registry string) DriverOpts { + return func(k *driver) error { + if k.Registries == nil { + k.Registries = make(map[string]*K3sRegistryConfig) + } + + r, err := name.NewRegistry(registry) + if err != nil { + return fmt.Errorf("invalid registry name: %w", err) + } + + a, err := authn.DefaultKeychain.Resolve(r) + if err != nil { + return fmt.Errorf("resolving keychain for registry %s: %w", r.String(), err) + } + + acfg, err := a.Authorization() + if err != nil { + return fmt.Errorf("getting authorization for registry %s: %w", r.String(), err) + } + + k.Registries[registry] = &K3sRegistryConfig{ + Auth: &K3sRegistryAuthConfig{ + Username: acfg.Username, + Password: acfg.Password, + Auth: acfg.Auth, + }, + } + + return nil + } +} + +func WithWriteKubeconfig(path string) DriverOpts { + return func(k *driver) error { + k.kubeconfigWritePath = path + return nil + } +} + +func WithRegistryMirror(registry string, endpoints ...string) DriverOpts { + return func(k *driver) error { + if k.Registries == nil { + k.Registries = make(map[string]*K3sRegistryConfig) + } + + if _, ok := k.Registries[registry]; !ok { + k.Registries[registry] = &K3sRegistryConfig{} + } + + k.Registries[registry].Mirrors = &K3sRegistryMirrorConfig{ + Endpoints: endpoints, + } + + return nil + } +} diff --git a/internal/entrypoint/entrypoint.go b/internal/entrypoint/entrypoint.go index 694b4d86..abaa696b 100644 --- a/internal/entrypoint/entrypoint.go +++ b/internal/entrypoint/entrypoint.go @@ -2,7 +2,7 @@ package entrypoint // ImageRef is replaced at provider build time (ldflag) with the :tag@digest of // the ./cmd/entrypoint binary. 
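+// For example (illustrative invocation):
+//
+//	go build -ldflags "-X github.com/chainguard-dev/terraform-provider-imagetest/internal/entrypoint.ImageRef=<tag@digest>" ./...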
-var ImageRef = "gcr.io/wolf-chainguard/entrypoint@sha256:d0d087f258b646f8d52edd6aecd9c72a99f38ab75ff1994799a427a30206f89e" +var ImageRef = "ghcr.io/chainguard-dev/terraform-provider-imagetest/entrypoint:latest" const ( BinaryPath = "/ko-app/entrypoint" diff --git a/internal/provider/drivers.go b/internal/provider/drivers.go new file mode 100644 index 00000000..fa3b857f --- /dev/null +++ b/internal/provider/drivers.go @@ -0,0 +1,194 @@ +package provider + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/chainguard-dev/terraform-provider-imagetest/internal/drivers" + dockerindocker "github.com/chainguard-dev/terraform-provider-imagetest/internal/drivers/docker_in_docker" + k3sindocker "github.com/chainguard-dev/terraform-provider-imagetest/internal/drivers/k3s_in_docker" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DriverResourceModel string + +const ( + DriverK3sInDocker DriverResourceModel = "k3s_in_docker" + DriverDockerInDocker DriverResourceModel = "docker_in_docker" +) + +type TestsDriversResourceModel struct { + K3sInDocker *K3sInDockerDriverResourceModel `tfsdk:"k3s_in_docker"` + DockerInDocker *DockerInDockerDriverResourceModel `tfsdk:"docker_in_docker"` +} + +type K3sInDockerDriverResourceModel struct { + Image types.String `tfsdk:"image"` + Cni types.Bool `tfsdk:"cni"` + NetworkPolicy types.Bool `tfsdk:"network_policy"` + Traefik types.Bool `tfsdk:"traefik"` + MetricsServer types.Bool `tfsdk:"metrics_server"` + Registries map[string]*K3sInDockerDriverRegistriesResourceModel `tfsdk:"registries"` +} + +type K3sInDockerDriverRegistriesResourceModel struct { + Mirrors *K3sInDockerDriverRegistriesMirrorResourceModel `tfsdk:"mirrors"` +} + +type K3sInDockerDriverRegistriesMirrorResourceModel struct { + Endpoints []string `tfsdk:"endpoints"` +} + +type DockerInDockerDriverResourceModel struct { + Image types.String `tfsdk:"image"` +} + +func (t TestsResource) LoadDriver(ctx context.Context, drivers *TestsDriversResourceModel, driver DriverResourceModel, id string) (drivers.Tester, error) { + if drivers == nil { + drivers = &TestsDriversResourceModel{} + } + + switch driver { + case DriverK3sInDocker: + cfg := drivers.K3sInDocker + if cfg == nil { + cfg = &K3sInDockerDriverResourceModel{} + } + + opts := []k3sindocker.DriverOpts{ + k3sindocker.WithRegistry(t.repo.RegistryStr()), + } + + tf, err := os.CreateTemp("", "imagetest-k3s-in-docker") + if err != nil { + return nil, err + } + opts = append(opts, k3sindocker.WithWriteKubeconfig(tf.Name())) + + if cfg.Image.ValueString() != "" { + opts = append(opts, k3sindocker.WithImageRef(cfg.Image.ValueString())) + } + + if cfg.Cni.ValueBool() { + opts = append(opts, k3sindocker.WithCNI(true)) + } + + if cfg.NetworkPolicy.ValueBool() { + opts = append(opts, k3sindocker.WithNetworkPolicy(true)) + } + + if cfg.Traefik.ValueBool() { + opts = append(opts, k3sindocker.WithTraefik(true)) + } + + if cfg.MetricsServer.ValueBool() { + opts = append(opts, k3sindocker.WithMetricsServer(true)) + } + + if registries := cfg.Registries; registries != nil { + for k, v := range registries { + if v.Mirrors != nil { + for _, mirror := range v.Mirrors.Endpoints { + opts = append(opts, k3sindocker.WithRegistryMirror(k, mirror)) + } + } + } + } + + // If the user specified registry is "localhost:#", set a mirror to "host.docker.internal:#" + if strings.HasPrefix(t.repo.RegistryStr(), "localhost") { + parts := strings.Split(t.repo.RegistryStr(), ":") + if 
len(parts) != 2 { + return nil, fmt.Errorf("invalid registry: %s", t.repo.RegistryStr()) + } + opts = append(opts, k3sindocker.WithRegistryMirror(t.repo.RegistryStr(), fmt.Sprintf("http://host.docker.internal:%s", parts[1]))) + } + + return k3sindocker.NewDriver(id, opts...) + + case DriverDockerInDocker: + cfg := drivers.DockerInDocker + if cfg == nil { + cfg = &DockerInDockerDriverResourceModel{} + } + + opts := []dockerindocker.DriverOpts{ + dockerindocker.WithRemoteOptions(t.ropts...), + dockerindocker.WithRegistryAuth(t.repo.RegistryStr()), + } + + if cfg.Image.ValueString() != "" { + opts = append(opts, dockerindocker.WithImageRef(cfg.Image.ValueString())) + } + + return dockerindocker.NewDriver(id, opts...) + default: + return nil, fmt.Errorf("no matching driver: %s", driver) + } +} + +func DriverResourceSchema(ctx context.Context) schema.SingleNestedAttribute { + return schema.SingleNestedAttribute{ + Description: "The resource specific driver configuration. This is merged with the provider scoped drivers configuration.", + Optional: true, + Attributes: map[string]schema.Attribute{ + "k3s_in_docker": schema.SingleNestedAttribute{ + Description: "The k3s_in_docker driver", + Optional: true, + Attributes: map[string]schema.Attribute{ + "image": schema.StringAttribute{ + Description: "The image reference to use for the k3s_in_docker driver", + Optional: true, + }, + "cni": schema.BoolAttribute{ + Description: "Enable the CNI plugin", + Optional: true, + }, + "network_policy": schema.BoolAttribute{ + Description: "Enable the network policy", + Optional: true, + }, + "traefik": schema.BoolAttribute{ + Description: "Enable the traefik ingress controller", + Optional: true, + }, + "metrics_server": schema.BoolAttribute{ + Description: "Enable the metrics server", + Optional: true, + }, + "registries": schema.MapNestedAttribute{ + Description: "A map of registries containing configuration for optional auth, tls, and mirror configuration.", + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "mirrors": schema.SingleNestedAttribute{ + Description: "A map of registries containing configuration for optional auth, tls, and mirror configuration.", + Optional: true, + Attributes: map[string]schema.Attribute{ + "endpoints": schema.ListAttribute{ + ElementType: types.StringType, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + }, + "docker_in_docker": schema.SingleNestedAttribute{ + Description: "The docker_in_docker driver", + Optional: true, + Attributes: map[string]schema.Attribute{ + "image": schema.StringAttribute{ + Description: "The image reference to use for the docker-in-docker driver", + Optional: true, + }, + }, + }, + }, + } +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 96f07837..00a0550e 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -302,6 +302,7 @@ func (p *ImageTestProvider) Schema(ctx context.Context, req provider.SchemaReque }, }, }, + // "drivers": DriverProviderSchema(ctx), }, } } @@ -390,6 +391,9 @@ func (p *ImageTestProvider) Resources(_ context.Context) []func() resource.Resou NewHarnessPterraformResource, // Tests NewTestDockerRunResource, + + // Tests Resources + NewTestsResource, } } diff --git a/internal/provider/store.go b/internal/provider/store.go index a48acc9a..a497e4e2 100644 --- a/internal/provider/store.go +++ b/internal/provider/store.go @@ -11,11 +11,13 @@ import ( "sync" "github.com/chainguard-dev/clog" + 
"github.com/chainguard-dev/terraform-provider-imagetest/internal/entrypoint" "github.com/chainguard-dev/terraform-provider-imagetest/internal/harness" "github.com/chainguard-dev/terraform-provider-imagetest/internal/inventory" ilog "github.com/chainguard-dev/terraform-provider-imagetest/internal/log" "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/google" "github.com/google/go-containerregistry/pkg/v1/remote" slogmulti "github.com/samber/slog-multi" @@ -41,6 +43,7 @@ type ProviderStore struct { providerResourceData ImageTestProviderModel repo name.Repository ropts []remote.Option + entrypointLayers map[string][]v1.Layer } func NewProviderStore(repo name.Repository) (*ProviderStore, error) { @@ -56,6 +59,11 @@ func NewProviderStore(repo name.Repository) (*ProviderStore, error) { } ropts = append(ropts, remote.Reuse(pusher)) + el, err := getEntrypointLayers(ropts...) + if err != nil { + return nil, fmt.Errorf("failed to get entrypoint layers: %w", err) + } + return &ProviderStore{ inv: &mmap[string, *inventory.Inventory]{ store: make(map[string]*inventory.Inventory), @@ -67,8 +75,9 @@ func NewProviderStore(repo name.Repository) (*ProviderStore, error) { store: make(map[string]harness.Harness), mu: sync.Mutex{}, }, - repo: repo, - ropts: ropts, + repo: repo, + ropts: ropts, + entrypointLayers: el, }, nil } @@ -138,6 +147,44 @@ func (s *ProviderStore) SkipTeardown() bool { return s.skipTeardown } +func getEntrypointLayers(opts ...remote.Option) (map[string][]v1.Layer, error) { + eref, err := name.ParseReference(entrypoint.ImageRef) + if err != nil { + return nil, fmt.Errorf("failed to parse entrypoint reference: %w", err) + } + + eidx, err := remote.Index(eref, opts...) + if err != nil { + return nil, fmt.Errorf("failed to get entrypoint index: %w", err) + } + + emfst, err := eidx.IndexManifest() + if err != nil { + return nil, fmt.Errorf("failed to get entrypoint index manifest: %w", err) + } + + players := make(map[string][]v1.Layer) + for _, m := range emfst.Manifests { + img, err := eidx.Image(m.Digest) + if err != nil { + return nil, fmt.Errorf("failed to load entrypoint image: %w", err) + } + + l, err := img.Layers() + if err != nil { + return nil, fmt.Errorf("failed to get entrypoint layers: %w", err) + } + + players[m.Platform.Architecture] = l + } + + if len(players) == 0 { + return nil, fmt.Errorf("no entrypoint layers found") + } + + return players, nil +} + // mmap is a generic thread-safe map implementation. 
type mmap[K comparable, V any] struct { mu sync.Mutex diff --git a/internal/provider/testdata/TestAccTestsResource/k3s-in-docker-basic.sh b/internal/provider/testdata/TestAccTestsResource/k3s-in-docker-basic.sh new file mode 100755 index 00000000..2f220cb3 --- /dev/null +++ b/internal/provider/testdata/TestAccTestsResource/k3s-in-docker-basic.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +echo "Hello from foo" + +kubectl get po -A diff --git a/internal/provider/testdata/TestAccTestsResource/k3s-in-docker-fails-with-bad-command.sh b/internal/provider/testdata/TestAccTestsResource/k3s-in-docker-fails-with-bad-command.sh new file mode 100755 index 00000000..1537c5d7 --- /dev/null +++ b/internal/provider/testdata/TestAccTestsResource/k3s-in-docker-fails-with-bad-command.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +cat LOOKMANOHANDS diff --git a/internal/provider/testdata/TestAccTestsResource/k3s-in-docker-fails-with-proper-exit-code.sh b/internal/provider/testdata/TestAccTestsResource/k3s-in-docker-fails-with-proper-exit-code.sh new file mode 100755 index 00000000..d2f63122 --- /dev/null +++ b/internal/provider/testdata/TestAccTestsResource/k3s-in-docker-fails-with-proper-exit-code.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +exit 213 diff --git a/internal/provider/testdata/TestAccTestsResource/k3s-in-docker-non-executable.sh b/internal/provider/testdata/TestAccTestsResource/k3s-in-docker-non-executable.sh new file mode 100644 index 00000000..02bc9c76 --- /dev/null +++ b/internal/provider/testdata/TestAccTestsResource/k3s-in-docker-non-executable.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +echo "still run!" diff --git a/internal/provider/tests_resource.go b/internal/provider/tests_resource.go new file mode 100644 index 00000000..ec2546ac --- /dev/null +++ b/internal/provider/tests_resource.go @@ -0,0 +1,443 @@ +package provider + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + "os" + "strings" + "time" + + "github.com/chainguard-dev/clog" + "github.com/chainguard-dev/terraform-provider-imagetest/internal/bundler" + "github.com/chainguard-dev/terraform-provider-imagetest/internal/drivers" + "github.com/chainguard-dev/terraform-provider-imagetest/internal/entrypoint" + "github.com/chainguard-dev/terraform-provider-imagetest/internal/provider/framework" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +const ( + TestsResourceDefaultTimeout = "30m" + TestResourceDefaultTimeout = "15m" +) + +var _ resource.ResourceWithConfigure = &TestsResource{} + +func NewTestsResource() resource.Resource { + return &TestsResource{WithTypeName: "tests"} +} + +type TestsResource struct { + framework.WithTypeName + framework.WithNoOpDelete + framework.WithNoOpRead + + repo name.Repository + ropts []remote.Option + entrypointLayers map[string][]v1.Layer +} + +type TestsResourceModel struct { + Id types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Driver DriverResourceModel `tfsdk:"driver"` + Drivers *TestsDriversResourceModel `tfsdk:"drivers"` + Images TestsImageResource `tfsdk:"images"` + Tests []TestResourceModel 
`tfsdk:"tests"` + Timeout types.String `tfsdk:"timeout"` +} + +type TestsImageResource map[string]string + +func (t TestsImageResource) Resolve() (map[string]TestsImagesParsed, error) { + pimgs := make(map[string]TestsImagesParsed) + for k, v := range t { + ref, err := name.ParseReference(v) + if err != nil { + return nil, fmt.Errorf("failed to parse reference: %w", err) + } + + if _, ok := ref.(name.Tag); ok { + return nil, fmt.Errorf("tag references are not supported") + } + + pimgs[k] = TestsImagesParsed{ + Registry: ref.Context().RegistryStr(), + Repo: ref.Context().RepositoryStr(), + RegistryRepo: ref.Context().RegistryStr() + "/" + ref.Context().RepositoryStr(), + Digest: ref.Identifier(), + PseudoTag: fmt.Sprintf("unused@%s", ref.Identifier()), + Ref: ref.String(), + } + } + return pimgs, nil +} + +type TestResourceModel struct { + Name types.String `tfsdk:"name"` + Image types.String `tfsdk:"image"` + Content []TestContentResourceModel `tfsdk:"content"` + Envs map[string]string `tfsdk:"envs"` + Cmd types.String `tfsdk:"cmd"` + Timeout types.String `tfsdk:"timeout"` +} + +type TestContentResourceModel struct { + Source types.String `tfsdk:"source"` + Target types.String `tfsdk:"target"` +} + +func (t *TestsResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + MarkdownDescription: ``, + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "The unique identifier for the test. If a name is provided, this will be the name appended with a random suffix.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "The name of the test. If one is not provided, a random name will be generated.", + Optional: true, + Computed: true, + Default: stringdefault.StaticString("test"), + }, + "driver": schema.StringAttribute{ + Description: "The driver to use for the test suite. Only one driver can be used at a time.", + Required: true, + }, + "drivers": DriverResourceSchema(ctx), + "images": schema.MapAttribute{ + ElementType: types.StringType, + Required: true, + Description: "Images to use for the test suite.", + }, + "tests": schema.ListNestedAttribute{ + Description: "An ordered list of test suites to run", + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Description: "The name of the test", + Required: true, + }, + "image": schema.StringAttribute{ + Description: "The image reference to use as the base image for the test.", + Required: true, + }, + "content": schema.ListNestedAttribute{ + Description: "The content to use for the test", + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "source": schema.StringAttribute{ + Description: "The source path to use for the test", + Required: true, + }, + "target": schema.StringAttribute{ + Description: "The target path to use for the test", + Optional: true, + }, + }, + }, + }, + "cmd": schema.StringAttribute{ + Description: "When specified, will override the sandbox image's CMD (oci config).", + Optional: true, + }, + "envs": schema.MapAttribute{ + Description: "Environment variables to set on the test container. These will overwrite the environment variables set in the image's config on conflicts.", + Optional: true, + ElementType: types.StringType, + }, + "timeout": schema.StringAttribute{ + Description: "The maximum amount of time to wait for the individual test to complete. 
+func (t TestsImageResource) Resolve() (map[string]TestsImagesParsed, error) { + pimgs := make(map[string]TestsImagesParsed) + for k, v := range t { + ref, err := name.ParseReference(v) + if err != nil { + return nil, fmt.Errorf("failed to parse reference: %w", err) + } + + if _, ok := ref.(name.Tag); ok { + return nil, fmt.Errorf("tag references are not supported") + } + + pimgs[k] = TestsImagesParsed{ + Registry: ref.Context().RegistryStr(), + Repo: ref.Context().RepositoryStr(), + RegistryRepo: ref.Context().RegistryStr() + "/" + ref.Context().RepositoryStr(), + Digest: ref.Identifier(), + PseudoTag: fmt.Sprintf("unused@%s", ref.Identifier()), + Ref: ref.String(), + } + } + return pimgs, nil +} + +type TestResourceModel struct { + Name types.String `tfsdk:"name"` + Image types.String `tfsdk:"image"` + Content []TestContentResourceModel `tfsdk:"content"` + Envs map[string]string `tfsdk:"envs"` + Cmd types.String `tfsdk:"cmd"` + Timeout types.String `tfsdk:"timeout"` +} + +type TestContentResourceModel struct { + Source types.String `tfsdk:"source"` + Target types.String `tfsdk:"target"` +} + +func (t *TestsResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + MarkdownDescription: ``, + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "The unique identifier for the test. If a name is provided, this will be the name appended with a random suffix.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "The name of the test. If one is not provided, a random name will be generated.", + Optional: true, + Computed: true, + Default: stringdefault.StaticString("test"), + }, + "driver": schema.StringAttribute{ + Description: "The driver to use for the test suite. Only one driver can be used at a time.", + Required: true, + }, + "drivers": DriverResourceSchema(ctx), + "images": schema.MapAttribute{ + ElementType: types.StringType, + Required: true, + Description: "Images to use for the test suite.", + }, + "tests": schema.ListNestedAttribute{ + Description: "An ordered list of test suites to run", + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Description: "The name of the test", + Required: true, + }, + "image": schema.StringAttribute{ + Description: "The image reference to use as the base image for the test.", + Required: true, + }, + "content": schema.ListNestedAttribute{ + Description: "The content to use for the test", + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "source": schema.StringAttribute{ + Description: "The source path to use for the test", + Required: true, + }, + "target": schema.StringAttribute{ + Description: "The target path to use for the test", + Optional: true, + }, + }, + }, + }, + "cmd": schema.StringAttribute{ + Description: "When specified, will override the sandbox image's CMD (oci config).", + Optional: true, + }, + "envs": schema.MapAttribute{ + Description: "Environment variables to set on the test container. These will overwrite the environment variables set in the image's config on conflicts.", + Optional: true, + ElementType: types.StringType, + }, + "timeout": schema.StringAttribute{ + Description: "The maximum amount of time to wait for the individual test to complete. This is encompassed by the overall timeout of the parent tests resource.", + Optional: true, + }, + }, + }, + }, + "timeout": schema.StringAttribute{ + Description: "The maximum amount of time to wait for all tests to complete. This includes the time it takes to start and destroy the driver.", + Optional: true, + Computed: true, + Default: stringdefault.StaticString(TestsResourceDefaultTimeout), + }, + }, + } +} + +func (t *TestsResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + store, ok := req.ProviderData.(*ProviderStore) + if !ok { + resp.Diagnostics.AddError("invalid provider data", fmt.Sprintf("expected *ProviderStore, got: %T", req.ProviderData)) + return + } + + t.repo = store.repo + t.ropts = store.ropts + t.entrypointLayers = store.entrypointLayers +} + +func (t *TestsResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var data TestsResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(t.do(ctx, &data)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (t *TestsResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var data TestsResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(t.do(ctx, &data)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (t *TestsResource) do(ctx context.Context, data *TestsResourceModel) (ds diag.Diagnostics) { + ctx = clog.WithLogger(ctx, clog.New(slog.Default().Handler())) + + timeout, err := time.ParseDuration(data.Timeout.ValueString()) + if err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to parse timeout", err.Error())} + } + + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + t.ropts = append(t.ropts, remote.WithContext(ctx)) + + // lightly sanitize the name; this likely needs some revision + id := strings.ReplaceAll(fmt.Sprintf("%s-%s-%s", data.Name.ValueString(), data.Driver, uuid.New().String()[:4]), " ", "_") + data.Id = types.StringValue(id) + + l := clog.FromContext(ctx).With( + "test_id", id, + "driver_name", data.Driver, + ) + + imgsResolved, err := data.Images.Resolve() + if err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to resolve images", err.Error())} + } + + imgsResolvedData, err := json.Marshal(imgsResolved) + if err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to marshal resolved images", err.Error())} + } + l.InfoContext(ctx, "resolved images", "images", string(imgsResolvedData)) + + // we should never get here, but just in case + if t.entrypointLayers == nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("invalid entrypoint image provided", "no entrypoint layers were configured by the provider")} + } + + trepo, err := name.NewRepository(fmt.Sprintf("%s/%s", t.repo.String(), "imagetest")) + if err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to create target repository", err.Error())} + } + + trefs := make([]name.Reference, 0, len(data.Tests)) + for _, test := range data.Tests { + l := l.With("test_name", test.Name.ValueString(), "test_id", id) + l.InfoContext(ctx, "starting test", "driver", data.Driver) + + // for each test, we build the test image. The test image is assembled + // using a combination of the user-provided "base" image, the entrypoint + // image, and the user-provided test contents. Fully assembled, the layers + // look something like: + // + // 0: The test image + // 1: The entrypoint image + // 2: The test content + // + // The entrypoint image supports linux/arm64 and linux/amd64 architectures. + // This accommodates either single-arch or multi-arch test images, + // but there must be at _least_ one linux/arm64 or linux/amd64 variant. The + // test content is assumed to be architecture independent (source files), + // but we do not check. This may lead to runtime errors if a user is + // attempting to assemble runtime tools, but for now we'll combat that with + // documentation. + // + // The resulting name.Reference will depend on whether the base image is an + // index or an image. + + baseref, err := name.ParseReference(test.Image.ValueString()) + if err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to parse base image reference", err.Error())} + } + + // We assume, but do not check, that the test contents are architecture independent + sls := make([]v1.Layer, 0, len(test.Content)) + for _, c := range test.Content { + target := c.Target.ValueString() + if target == "" { + target = "/imagetest" + } + + layer, err := bundler.NewLayerFromPath(c.Source.ValueString(), target) + if err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to create layer", err.Error())} + } + sls = append(sls, layer) + } +
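+ // For example, content = [{ source = "./tests" }] becomes a single layer rooted + // at the default target, so ./tests/foo.sh lands at /imagetest/foo.sh inside the + // assembled test image (paths shown are illustrative).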
+ tref, err := bundler.Mutate(ctx, baseref, trepo, bundler.MutateOpts{ + RemoteOptions: t.ropts, + ImageMutators: []func(v1.Image) (v1.Image, error){ + // Mutator to append the arch-specific entrypoint layers + func(base v1.Image) (v1.Image, error) { + cfg, err := base.ConfigFile() + if err != nil { + return nil, fmt.Errorf("failed to get config file: %w", err) + } + + el, ok := t.entrypointLayers[cfg.Platform().Architecture] + if !ok { + return base, nil + } + + return mutate.AppendLayers(base, el...) + }, + // Mutator to append the test source layers + func(base v1.Image) (v1.Image, error) { + return mutate.AppendLayers(base, sls...) + },
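+ // The config mutator below wires up the test's runtime contract. For example, a + // test running under the k3s_in_docker driver ends up with IMAGETEST_DRIVER=k3s_in_docker + // and IMAGES={"foo":{"registry":"cgr.dev",...}} in its environment (values shown are + // illustrative, keyed by the resource's images map).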
+ // Mutator to rejigger the final image config + func(img v1.Image) (v1.Image, error) { + cfgf, err := img.ConfigFile() + if err != nil { + return nil, fmt.Errorf("failed to get config file: %w", err) + } + + envs := make(map[string]string) + for k, v := range test.Envs { + envs[k] = v + } + envs["IMAGES"] = string(imgsResolvedData) + envs["IMAGETEST_DRIVER"] = string(data.Driver) + + if os.Getenv("IMAGETEST_SKIP_TEARDOWN_ON_FAILURE") != "" || os.Getenv("IMAGETEST_SKIP_TEARDOWN") != "" { + envs["IMAGETEST_PAUSE_ON_ERROR"] = "true" + } + + if cfgf.Config.Env == nil { + cfgf.Config.Env = make([]string, 0) + } + + for k, v := range envs { + cfgf.Config.Env = append(cfgf.Config.Env, fmt.Sprintf("%s=%s", k, v)) + } + + // Use a standard entrypoint + cfgf.Config.Entrypoint = entrypoint.DefaultEntrypoint + + // Only override the base image's CMD when one was specified + if !test.Cmd.IsNull() { + cfgf.Config.Cmd = []string{test.Cmd.ValueString()} + } + + if cfgf.Config.WorkingDir == "" { + cfgf.Config.WorkingDir = "/imagetest" + } + + cfgf.Config.User = "0:0" + + return mutate.ConfigFile(img, cfgf) + }, + }, + }) + if err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to mutate test image", err.Error())} + } + + clog.InfoContext(ctx, fmt.Sprintf("built test image [%s]", tref.String()), "test_name", test.Name.ValueString(), "test_id", id) + trefs = append(trefs, tref) + } + + dr, err := t.LoadDriver(ctx, data.Drivers, data.Driver, data.Id.ValueString()) + if err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to load driver", err.Error())} + } + + defer func() { + if teardownErr := t.maybeTeardown(ctx, dr, ds.HasError()); teardownErr != nil { + ds = append(ds, teardownErr) + } + }() + + l.InfoContext(ctx, "setting up driver") + if err := dr.Setup(ctx); err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to setup driver", err.Error())} + } + + for i, tref := range trefs { + l.InfoContext(ctx, "running test image", "test_ref", tref.String()) + + timeoutStr := data.Tests[i].Timeout.ValueString() + if timeoutStr == "" { + timeoutStr = TestResourceDefaultTimeout + } + + ttimeout, err := time.ParseDuration(timeoutStr) + if err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to parse timeout", err.Error())} + } + + tctx, tcancel := context.WithTimeout(ctx, ttimeout) + defer tcancel() + + if err := dr.Run(tctx, tref); err != nil { + return []diag.Diagnostic{ + diag.NewErrorDiagnostic( + fmt.Sprintf("test [%s/%s] (%s) failed", data.Id.ValueString(), data.Tests[i].Name.ValueString(), tref.String()), + err.Error(), + ), + } + } + } + + return +} + +func (t *TestsResource) maybeTeardown(ctx context.Context, d drivers.Tester, failed bool) diag.Diagnostic { + if v := os.Getenv("IMAGETEST_SKIP_TEARDOWN"); v != "" { + return diag.NewWarningDiagnostic("skipping teardown", "IMAGETEST_SKIP_TEARDOWN is set, skipping teardown") + } + + if v := os.Getenv("IMAGETEST_SKIP_TEARDOWN_ON_FAILURE"); v != "" && failed { + return diag.NewWarningDiagnostic("skipping teardown", "IMAGETEST_SKIP_TEARDOWN_ON_FAILURE is set and test failed, skipping teardown") + } + + if err := d.Teardown(ctx); err != nil { + return diag.NewErrorDiagnostic("failed to teardown test driver", err.Error()) + } + + return nil +} + +type TestsImagesParsed struct { + Registry string `json:"registry"` + Repo string `json:"repo"` + RegistryRepo string `json:"registry_repo"` + Digest string `json:"digest"` + PseudoTag string `json:"pseudo_tag"` + Ref string `json:"ref"` +}
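+ +// Serialized into the IMAGES environment variable, a single entry looks roughly +// like this (hypothetical digest, truncated): +// +// {"foo":{"registry":"cgr.dev","repo":"chainguard/busybox","registry_repo":"cgr.dev/chainguard/busybox","digest":"sha256:abc...","pseudo_tag":"unused@sha256:abc...","ref":"cgr.dev/chainguard/busybox@sha256:abc..."}}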
diff --git a/internal/provider/tests_resource_test.go b/internal/provider/tests_resource_test.go new file mode 100644 index 00000000..8eb27802 --- /dev/null +++ b/internal/provider/tests_resource_test.go @@ -0,0 +1,71 @@ +package provider + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" +) + +func TestAccTestsResource(t *testing.T) { + repo := testRegistry(t, context.Background()) + + tpl := ` +resource "imagetest_tests" "foo" { + name = "foo" + driver = "k3s_in_docker" + + images = { + foo = "cgr.dev/chainguard/busybox:latest@sha256:b7fc3eef4303188eb295aaf8e02d888ced307d2a45090d6f673b95a41bfc033d" + } + + tests = [ + { + name = "sample" + image = "cgr.dev/chainguard/kubectl:latest-dev@sha256:5751a1672a7debcc5e847bc1cc6ebfc8899aad188ff90f0445bfef194a9fa512" + content = [{ source = "${path.module}/testdata/TestAccTestsResource" }] + cmd = "/imagetest/%s" + } + ] + + // Keep this comfortably below GHA job timeouts + timeout = "5m" +} + ` + + testCases := map[string][]resource.TestStep{ + "basic": {{Config: fmt.Sprintf(tpl, "k3s-in-docker-basic.sh")}}, + // "non-executable": {{Config: fmt.Sprintf(tpl, "k3s-in-docker-non-executable.sh")}}, + // "fails-with-proper-exit-code": { + // { + // Config: fmt.Sprintf(tpl, "k3s-in-docker-fails-with-proper-exit-code.sh"), + // ExpectError: regexp.MustCompile(`.*213.*`), + // }, + // }, + // // ensures set -eux is always plumbed through + // "fails-with-bad-command": { + // { + // Config: fmt.Sprintf(tpl, "k3s-in-docker-fails-with-bad-command.sh"), + // ExpectError: regexp.MustCompile(`.*No such file or directory.*`), + // }, + // }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: map[string]func() (tfprotov6.ProviderServer, error){ + "imagetest": providerserver.NewProtocol6WithError(&ImageTestProvider{ + repo: repo, + }), + }, + Steps: tc, + }) + }) + } +}
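+ +// To iterate on a single case locally, run e.g.: +// +// TF_ACC=1 go test ./internal/provider -run 'TestAccTestsResource/basic' +// +// Setting IMAGETEST_SKIP_TEARDOWN=1 (or IMAGETEST_SKIP_TEARDOWN_ON_FAILURE=1) skips +// driver teardown so the sandbox can be inspected after a run; see maybeTeardown in +// tests_resource.go.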