From e9c2d693a11b1cb326e5357bd5a78120f43d1183 Mon Sep 17 00:00:00 2001 From: Josh Wolf Date: Fri, 17 Jan 2025 15:56:45 -0500 Subject: [PATCH] add tests_resource --- docs/resources/tests.md | 83 ++++ examples/tests_resource/helm/main.tf | 33 ++ examples/tests_resource/main.tf | 52 +++ examples/tests_resource/tests/foo.sh | 7 + internal/bundler/layer.go | 36 +- internal/drivers/docker_in_docker/doc.go | 12 + internal/drivers/docker_in_docker/driver.go | 198 ++++++++ internal/drivers/docker_in_docker/opts.go | 71 +++ internal/drivers/drivers.go | 17 + internal/drivers/k3s_in_docker/doc.go | 10 + internal/drivers/k3s_in_docker/driver.go | 493 ++++++++++++++++++++ internal/drivers/k3s_in_docker/opts.go | 96 ++++ internal/provider/drivers.go | 96 ++++ internal/provider/provider.go | 3 + internal/provider/tests_resource.go | 338 ++++++++++++++ main.go | 24 +- 16 files changed, 1554 insertions(+), 15 deletions(-) create mode 100644 docs/resources/tests.md create mode 100644 examples/tests_resource/helm/main.tf create mode 100644 examples/tests_resource/main.tf create mode 100755 examples/tests_resource/tests/foo.sh create mode 100644 internal/drivers/docker_in_docker/doc.go create mode 100644 internal/drivers/docker_in_docker/driver.go create mode 100644 internal/drivers/docker_in_docker/opts.go create mode 100644 internal/drivers/drivers.go create mode 100644 internal/drivers/k3s_in_docker/doc.go create mode 100644 internal/drivers/k3s_in_docker/driver.go create mode 100644 internal/drivers/k3s_in_docker/opts.go create mode 100644 internal/provider/drivers.go create mode 100644 internal/provider/tests_resource.go diff --git a/docs/resources/tests.md b/docs/resources/tests.md new file mode 100644 index 0000000..9138a01 --- /dev/null +++ b/docs/resources/tests.md @@ -0,0 +1,83 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "imagetest_tests Resource - terraform-provider-imagetest" +subcategory: "" +description: |- + +--- + +# imagetest_tests (Resource) + + + + + + +## Schema + +### Required + +- `driver` (String) The driver to use for the test suite. Only one driver can be used at a time. +- `images` (Map of String) Images to use for the test suite. + +### Optional + +- `drivers` (Attributes) The resource specific driver configuration. This is merged with the provider scoped drivers configuration. (see [below for nested schema](#nestedatt--drivers)) +- `name` (String) The name of the test. If one is not provided, a random name will be generated. +- `tests` (Attributes List) An ordered list of test suites to run (see [below for nested schema](#nestedatt--tests)) + +### Read-Only + +- `id` (String) The unique identifier for the test. If a name is provided, this will be the name appended with a random suffix. 
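+
+A minimal configuration, for orientation (the digests below are elided
+placeholders, not working references):
+
+```terraform
+resource "imagetest_tests" "example" {
+  driver = "k3s_in_docker"
+
+  images = {
+    app = "cgr.dev/chainguard/busybox:latest@sha256:..." # must be digest-pinned; bare tags are rejected
+  }
+
+  tests = [{
+    name    = "smoke"
+    image   = "cgr.dev/chainguard/kubectl:latest@sha256:..."
+    content = [{ source = "./tests" }]
+  }]
+}
+```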
+ + +### Nested Schema for `drivers` + +Optional: + +- `docker_in_docker` (Attributes) The docker_in_docker driver (see [below for nested schema](#nestedatt--drivers--docker_in_docker)) +- `k3s_in_docker` (Attributes) The k3s_in_docker driver (see [below for nested schema](#nestedatt--drivers--k3s_in_docker)) + + +### Nested Schema for `drivers.docker_in_docker` + +Optional: + +- `image_ref` (String) The image reference to use for the docker-in-docker driver + + + +### Nested Schema for `drivers.k3s_in_docker` + +Optional: + +- `cni` (Boolean) Enable the CNI plugin +- `metrics_server` (Boolean) Enable the metrics server +- `network_policy` (Boolean) Enable the network policy +- `traefik` (Boolean) Enable the traefik ingress controller + + + + +### Nested Schema for `tests` + +Required: + +- `image` (String) The image reference to use as the base image for the test. +- `name` (String) The name of the test + +Optional: + +- `content` (Attributes List) The content to use for the test (see [below for nested schema](#nestedatt--tests--content)) +- `envs` (Map of String) Environment variables to set on the test container. These will overwrite the environment variables set in the image's config on conflicts. + + +### Nested Schema for `tests.content` + +Required: + +- `source` (String) The source path to use for the test + +Optional: + +- `target` (String) The target path to use for the test diff --git a/examples/tests_resource/helm/main.tf b/examples/tests_resource/helm/main.tf new file mode 100644 index 0000000..94952d4 --- /dev/null +++ b/examples/tests_resource/helm/main.tf @@ -0,0 +1,33 @@ +terraform { + required_providers { + apko = { source = "chainguard-dev/apko" } + } +} + +variable "target_repository" {} + +variable "content" {} + +data "apko_config" "sandbox" { + config_contents = jsonencode({ + contents = { + packages = ["busybox", "bash", "helm", "kubectl", "jq"] + } + cmd = "bash -eux -o pipefail -c 'source /imagetest/foo.sh'" + }) +} + +resource "apko_build" "sandbox" { + repo = var.target_repository + config = data.apko_config.sandbox.config +} + +output "test" { + value = [{ + name = "helm test" + image = apko_build.sandbox.image_ref + content = [{ + source = var.content + }] + }] +} diff --git a/examples/tests_resource/main.tf b/examples/tests_resource/main.tf new file mode 100644 index 0000000..4c1466d --- /dev/null +++ b/examples/tests_resource/main.tf @@ -0,0 +1,52 @@ +terraform { + required_providers { + imagetest = { + source = "registry.terraform.io/chainguard-dev/imagetest" + } + apko = { source = "chainguard-dev/apko" } + } + backend "inmem" {} +} + +locals { repo = "gcr.io/wolf-chainguard/images" } + +provider "imagetest" { + repo = local.repo +} + +provider "apko" { + extra_repositories = concat([ + "https://packages.wolfi.dev/os", + "https://packages.cgr.dev/extras", + ]) + build_repositories = ["https://apk.cgr.dev/chainguard-private"] + extra_keyring = concat([ + "https://packages.wolfi.dev/os/wolfi-signing.rsa.pub", + "https://packages.cgr.dev/extras/chainguard-extras.rsa.pub", + ]) + + extra_packages = ["chainguard-baselayout"] + + default_archs = ["aarch64"] +} + +module "helm_test" { + source = "./helm" + target_repository = local.repo + content = "${path.module}/tests" +} + +resource "imagetest_tests" "foo" { + name = "foo" + driver = "k3s_in_docker" + + images = { + foo = "cgr.dev/chainguard/busybox:latest@sha256:b7fc3eef4303188eb295aaf8e02d888ced307d2a45090d6f673b95a41bfc033d" + } + + tests = concat([], module.helm_test.test) +} + +output "tests" { + value = 
imagetest_tests.foo
}
diff --git a/examples/tests_resource/tests/foo.sh b/examples/tests_resource/tests/foo.sh
new file mode 100755
index 0000000..47cf94c
--- /dev/null
+++ b/examples/tests_resource/tests/foo.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+echo "Hello World"
+
+kubectl get po -A
+
+echo "$IMAGES" | jq '.'
diff --git a/internal/bundler/layer.go b/internal/bundler/layer.go
index d57b880..c757c5b 100644
--- a/internal/bundler/layer.go
+++ b/internal/bundler/layer.go
@@ -4,8 +4,10 @@ import (
 	"archive/tar"
 	"io"
 	"io/fs"
-	"path/filepath"
+	"os"
+	"path"
 
+	"chainguard.dev/apko/pkg/tarfs"
 	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/mutate"
 	"github.com/google/go-containerregistry/pkg/v1/tarball"
@@ -29,6 +31,32 @@ func NewFSLayer(source fs.FS, target string) Layerer {
 	}
 }
 
+// NewFSLayerFromPath is a helper that creates an FS layer from a local path; on error it returns a nil Layerer.
+func NewFSLayerFromPath(source string, target string) Layerer {
+	pi, err := os.Stat(source)
+	if err != nil {
+		return nil
+	}
+
+	if pi.IsDir() {
+		return NewFSLayer(os.DirFS(source), target)
+	}
+
+	// There are better ways to make an FS from a single file, but we already
+	// import tarfs through apko, so just be a little lazy here
+	data, err := os.ReadFile(source)
+	if err != nil {
+		return nil
+	}
+
+	tfs := tarfs.New()
+	if err := tfs.WriteFile(pi.Name(), data, pi.Mode()); err != nil {
+		return nil
+	}
+
+	return NewFSLayer(tfs, target)
+}
+
 func (l *fsl) Layer() (v1.Layer, error) {
 	return tarball.LayerFromOpener(func() (io.ReadCloser, error) {
 		pr, pw := io.Pipe()
@@ -38,7 +66,7 @@
 			defer tw.Close()
 			defer pw.Close()
 
-			if err := fs.WalkDir(l.source, ".", func(path string, d fs.DirEntry, err error) error {
+			if err := fs.WalkDir(l.source, ".", func(p string, d fs.DirEntry, err error) error {
 				if err != nil {
 					return err
 				}
@@ -53,14 +81,14 @@
 					return err
 				}
 
-				hdr.Name = filepath.Join(l.target, path)
+				hdr.Name = path.Join(l.target, p)
 
 				if err := tw.WriteHeader(hdr); err != nil {
 					return err
 				}
 
 				if !d.IsDir() {
-					f, err := l.source.Open(path)
+					f, err := l.source.Open(p)
 					if err != nil {
 						return err
 					}
diff --git a/internal/drivers/docker_in_docker/doc.go b/internal/drivers/docker_in_docker/doc.go
new file mode 100644
index 0000000..ad94f70
--- /dev/null
+++ b/internal/drivers/docker_in_docker/doc.go
@@ -0,0 +1,12 @@
+// dockerindocker is a driver that runs each test container in its _own_ dind
+// sandbox. Each test container is created as a new image, with the base layer
+// containing the dind image, and subsequent layers containing the test
+// container. Mapped out, the layers look like:
+//
+//	0: cgr.dev/chainguard-private/docker-dind:latest
+//	1: imagetest created layer, with the appropriate test content and apk dependencies
+//
+// Things are done this way to ensure the tests that run _feel_ like they are
+// simply in an environment with docker installed, while also ensuring they are
+// portable to other drivers, such as docker-in-a-vm.
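+//
+// At run time the sandbox sources every *.sh found at the top level of
+// /imagetest in sorted order (see Run in driver.go), so test content layers
+// only need to drop shell scripts into that directory.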
+package dockerindocker diff --git a/internal/drivers/docker_in_docker/driver.go b/internal/drivers/docker_in_docker/driver.go new file mode 100644 index 0000000..cfa79fd --- /dev/null +++ b/internal/drivers/docker_in_docker/driver.go @@ -0,0 +1,198 @@ +package dockerindocker + +import ( + "context" + "encoding/json" + "fmt" + "os" + "runtime" + "time" + + "github.com/chainguard-dev/terraform-provider-imagetest/internal/docker" + "github.com/chainguard-dev/terraform-provider-imagetest/internal/drivers" + "github.com/chainguard-dev/terraform-provider-imagetest/internal/harness" + "github.com/google/go-containerregistry/pkg/name" + ggcrv1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/remote" + v1 "github.com/moby/docker-image-spec/specs-go/v1" +) + +type driver struct { + ImageRef name.Reference // The image to use for docker-in-docker + + name string + stack *harness.Stack + cli *docker.Client + resp *docker.Response + config *dockerConfig + ropts []remote.Option +} + +func NewDriver(n string, opts ...DriverOpts) (drivers.Tester, error) { + d := &driver{ + ImageRef: name.MustParseReference("cgr.dev/chainguard-private/docker-dind:latest"), + name: n, + stack: harness.NewStack(), + ropts: []remote.Option{ + remote.WithPlatform(ggcrv1.Platform{ + OS: "linux", + Architecture: runtime.GOARCH, + }), + }, + config: &dockerConfig{}, + } + + for _, opt := range opts { + if err := opt(d); err != nil { + return nil, err + } + } + + return d, nil +} + +// Setup implements drivers.TestDriver. +func (d *driver) Setup(ctx context.Context) error { + cli, err := docker.New() + if err != nil { + return err + } + d.cli = cli + + return nil +} + +// Teardown implements drivers.TestDriver. +func (d *driver) Teardown(ctx context.Context) error { + return d.stack.Teardown(ctx) +} + +// Run implements drivers.TestDriver. +func (d *driver) Run(ctx context.Context, ref name.Reference) error { + fmt.Println("building dind image") + dind, err := remote.Image(d.ImageRef, d.ropts...) + if err != nil { + return err + } + + timg, err := remote.Image(ref, d.ropts...) + if err != nil { + return err + } + + layers, err := timg.Layers() + if err != nil { + return fmt.Errorf("failed to get layers: %w", err) + } + + for _, l := range layers { + dind, err = mutate.AppendLayers(dind, l) + if err != nil { + return err + } + } + + dindcf, err := dind.ConfigFile() + if err != nil { + return err + } + + timgcf, err := timg.ConfigFile() + if err != nil { + return err + } + + // Merge the two environment vars, dind takes precedence + dindcf.Config.Env = append(dindcf.Config.Env, timgcf.Config.Env...) 
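+	// (Note: the append above leaves duplicate keys in the env list with the
+	// test image's entries last; which copy wins then depends on the runtime's
+	// env handling, so an explicit key-wise merge would be safer here.)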
+ + dind, err = mutate.ConfigFile(dind, dindcf) + if err != nil { + return err + } + + ddig, err := dind.Digest() + if err != nil { + return err + } + + r := ref.Context().Digest(ddig.String()) + + if err := remote.Write(r, dind, d.ropts...); err != nil { + return err + } + + nw, err := d.cli.CreateNetwork(ctx, &docker.NetworkRequest{}) + if err != nil { + return err + } + + if err := d.stack.Add(func(ctx context.Context) error { + return d.cli.RemoveNetwork(ctx, nw) + }); err != nil { + return err + } + + content := []*docker.Content{} + cfg, err := d.config.Content() + if err != nil { + return err + } + content = append(content, cfg) + + resp, err := d.cli.Start(ctx, &docker.Request{ + Name: d.name, + Ref: r, + Privileged: true, + User: "0:0", + Networks: []docker.NetworkAttachment{{ + Name: nw.Name, + ID: nw.ID, + }}, + HealthCheck: &v1.HealthcheckConfig{ + Test: []string{"CMD", "/bin/sh", "-c", "docker info"}, + Interval: 2 * time.Second, + Timeout: 5 * time.Second, + Retries: 10, + StartPeriod: 1 * time.Second, + }, + ExtraHosts: []string{"host.docker.internal:host-gateway"}, + Contents: content, + }) + if err != nil { + return err + } + d.resp = resp + + if err := d.stack.Add(func(ctx context.Context) error { + return d.cli.Remove(ctx, resp) + }); err != nil { + return err + } + + return d.resp.Run(ctx, harness.Command{ + // TODO: This is dumb, replace this when we have our own entrypoint runner + Args: "bash -eux -o pipefail -c 'for script in $(find /imagetest -maxdepth 1 -name \"*.sh\" | sort); do echo \"Executing $script\"; chmod +x \"$script\"; source \"$script\"; done'", + Stdout: os.Stdout, + Stderr: os.Stderr, + }) +} + +type dockerConfig struct { + Auths map[string]dockerAuthEntry `json:"auths,omitempty"` +} + +type dockerAuthEntry struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` +} + +func (c dockerConfig) Content() (*docker.Content, error) { + data, err := json.Marshal(c) + if err != nil { + return nil, err + } + + return docker.NewContentFromString(string(data), "/root/.docker/config.json"), nil +} diff --git a/internal/drivers/docker_in_docker/opts.go b/internal/drivers/docker_in_docker/opts.go new file mode 100644 index 0000000..6200bca --- /dev/null +++ b/internal/drivers/docker_in_docker/opts.go @@ -0,0 +1,71 @@ +package dockerindocker + +import ( + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +type DriverOpts func(*driver) error + +func WithImageRef(rawRef string) DriverOpts { + return func(d *driver) error { + ref, err := name.ParseReference(rawRef) + if err != nil { + return err + } + d.ImageRef = ref + return nil + } +} + +func WithRemoteOptions(opts ...remote.Option) DriverOpts { + return func(d *driver) error { + if d.ropts == nil { + d.ropts = make([]remote.Option, 0) + } + d.ropts = append(d.ropts, opts...) + return nil + } +} + +// WithRegistryAuth invokes the docker-credential-helper to exchange static +// creds that are mounted in the container. This current implementation will +// fail if the tests take longer than the tokens ttl. 
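+// The resolved credential is written once into /root/.docker/config.json in
+// the sandbox and is never refreshed.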
+// TODO: Replace this with Jon's cred proxy: https://gist.github.com/jonjohnsonjr/6d20148edca0f187cfed050cee669685
+func WithRegistryAuth(registry string) DriverOpts {
+	return func(d *driver) error {
+		if d.config == nil {
+			d.config = &dockerConfig{
+				Auths: make(map[string]dockerAuthEntry),
+			}
+		}
+
+		if d.config.Auths == nil {
+			d.config.Auths = make(map[string]dockerAuthEntry)
+		}
+
+		r, err := name.NewRegistry(registry)
+		if err != nil {
+			return err
+		}
+
+		a, err := authn.DefaultKeychain.Resolve(r)
+		if err != nil {
+			return err
+		}
+
+		acfg, err := a.Authorization()
+		if err != nil {
+			return err
+		}
+
+		d.config.Auths[registry] = dockerAuthEntry{
+			Username: acfg.Username,
+			Password: acfg.Password,
+			Auth:     acfg.Auth,
+		}
+
+		return nil
+	}
+}
diff --git a/internal/drivers/drivers.go b/internal/drivers/drivers.go
new file mode 100644
index 0000000..d766788
--- /dev/null
+++ b/internal/drivers/drivers.go
@@ -0,0 +1,17 @@
+package drivers
+
+import (
+	"context"
+
+	"github.com/google/go-containerregistry/pkg/name"
+)
+
+type Tester interface {
+	// Setup creates the driver's resources; it must be called before Run()
+	// is available
+	Setup(context.Context) error
+	// Teardown destroys the driver's resources
+	Teardown(context.Context) error
+	// Run runs the given test image to completion
+	Run(context.Context, name.Reference) error
+}
diff --git a/internal/drivers/k3s_in_docker/doc.go b/internal/drivers/k3s_in_docker/doc.go
new file mode 100644
index 0000000..5591a88
--- /dev/null
+++ b/internal/drivers/k3s_in_docker/doc.go
@@ -0,0 +1,10 @@
+// k3sindocker is a driver that runs each test in a pod within a k3s cluster
+// run in docker. Each test pod runs to completion according to the
+// entrypoint/cmd combination of the test image, and has effectively
+// cluster-admin on the cluster it runs on.
+//
+// Tests authored against this driver operate inside the network boundary of
+// a pod in the cluster, where the default KUBECONFIG is pre-wired with
+// cluster-admin scope; in-cluster network endpoints can therefore be
+// addressed directly (*.svc.cluster.local).
+package k3sindocker
diff --git a/internal/drivers/k3s_in_docker/driver.go b/internal/drivers/k3s_in_docker/driver.go
new file mode 100644
index 0000000..13324af
--- /dev/null
+++ b/internal/drivers/k3s_in_docker/driver.go
@@ -0,0 +1,493 @@
+package k3sindocker
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strconv"
+	"text/template"
+	"time"
+
+	"github.com/chainguard-dev/clog"
+	"github.com/chainguard-dev/terraform-provider-imagetest/internal/docker"
+	"github.com/chainguard-dev/terraform-provider-imagetest/internal/drivers"
+	"github.com/chainguard-dev/terraform-provider-imagetest/internal/harness"
+	"github.com/chainguard-dev/terraform-provider-imagetest/internal/log"
+	"github.com/docker/docker/api/types/mount"
+	"github.com/docker/go-connections/nat"
+	"github.com/google/go-containerregistry/pkg/name"
+	v1 "github.com/moby/docker-image-spec/specs-go/v1"
+	authv1 "k8s.io/api/authorization/v1"
+	corev1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+// driver is a k8s driver that spins up a k3s cluster in docker alongside a
+// network-attached sandbox.
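+//
+// Setup boots the k3s server container and rewrites the generated kubeconfig
+// to point at the host-mapped port for 6443; Run then schedules each test
+// image as a pod bound to a cluster-admin service account (see preflight).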
+type driver struct { + ImageRef name.Reference // The image reference to use for the k3s cluster + CNI bool // Toggles whether the default k3s CNI is enabled + Traefik bool // Toggles whether the default k3s traefik ingress controller is enabled + MetricsServer bool // Toggles whether the default k3s metrics server is enabled + NetworkPolicy bool // Toggles whether the default k3s network policy controller is enabled + Snapshotter string // The containerd snapshotter to use + Registries map[string]K3sRegistryConfig + Namespace string // The namespace to use for the test pods + + kubeconfigWritePath string // When set, the generated kubeconfig will be written to this path on the host + + name string + stack *harness.Stack + kcli kubernetes.Interface +} + +type K3sRegistryConfig struct { + Auth *K3sRegistryAuthConfig +} + +type K3sRegistryAuthConfig struct { + Username string + Password string + Auth string +} + +func NewDriver(n string, opts ...DriverOpts) (drivers.Tester, error) { + k := &driver{ + ImageRef: name.MustParseReference("cgr.dev/chainguard/k3s:latest"), + CNI: true, + Traefik: false, + MetricsServer: false, + NetworkPolicy: false, + Namespace: "imagetest", + + name: n, + stack: harness.NewStack(), + } + + for _, opt := range opts { + if err := opt(k); err != nil { + return nil, err + } + } + + return k, nil +} + +func (k *driver) Setup(ctx context.Context) error { + cli, err := docker.New() + if err != nil { + return fmt.Errorf("creating docker client: %w", err) + } + + contents := []*docker.Content{} + + ktpl := fmt.Sprintf(` +tls-san: "%[1]s" +disable: +{{- if not .Traefik }} + - traefik +{{- end }} +{{- if not .MetricsServer }} + - metrics-server +{{- end }} +{{- if not .NetworkPolicy }} + - network-policy +{{- end }} +{{- if not .CNI }} +flannel-backend: none +{{- end }} +snapshotter: "{{ .Snapshotter }}" +`, k.name) + + var tplo bytes.Buffer + t := template.Must(template.New("k3s-config").Parse(ktpl)) + if err := t.Execute(&tplo, k); err != nil { + return fmt.Errorf("executing template: %w", err) + } + + rtpl := ` +configs: + {{- range $k, $v := .Registries }} + "{{ $k }}": + auth: + username: "{{ $v.Auth.Username }}" + password: "{{ $v.Auth.Password }}" + auth: "{{ $v.Auth.Auth }}" + {{- end }} +` + + var rto bytes.Buffer + t = template.Must(template.New("k3s-registries").Parse(rtpl)) + if err := t.Execute(&rto, k); err != nil { + return fmt.Errorf("executing template: %w", err) + } + + contents = append(contents, + docker.NewContentFromString(tplo.String(), "/etc/rancher/k3s/config.yaml"), + docker.NewContentFromString(rto.String(), "/etc/rancher/k3s/registries.yaml"), + ) + + nw, err := cli.CreateNetwork(ctx, &docker.NetworkRequest{}) + if err != nil { + return fmt.Errorf("creating docker network: %w", err) + } + + if err := k.stack.Add(func(ctx context.Context) error { + return cli.RemoveNetwork(ctx, nw) + }); err != nil { + return fmt.Errorf("adding network teardown to stack: %w", err) + } + + clog.InfoContext(ctx, "starting k3s container in docker", + "image_ref", k.ImageRef.String(), + "network_id", nw.ID, + ) + + resp, err := cli.Start(ctx, &docker.Request{ + Name: k.name, + Ref: k.ImageRef, + Cmd: []string{"server"}, + Privileged: true, // This doesn't work without privilege, so don't make it configurable + Networks: []docker.NetworkAttachment{{ + Name: nw.Name, + ID: nw.ID, + }}, + Labels: map[string]string{ + "dev.chainguard.imagetest/kubeconfig-path": k.kubeconfigWritePath, + }, + Mounts: []mount.Mount{{ + Type: mount.TypeTmpfs, + Target: "/run", + }, { + 
Type: mount.TypeTmpfs, + Target: "/tmp", + }}, + HealthCheck: &v1.HealthcheckConfig{ + Test: []string{"CMD", "/bin/sh", "-c", "kubectl get --raw='/healthz'"}, + Interval: 2 * time.Second, + Timeout: 5 * time.Second, + Retries: 10, + StartInterval: 1 * time.Second, + }, + PortBindings: nat.PortMap{ + nat.Port(strconv.Itoa(6443)): []nat.PortBinding{{ + HostIP: "127.0.0.1", + HostPort: "", // Lets the docker daemon pick a random port + }}, + }, + ExtraHosts: []string{"host.docker.internal:host-gateway"}, + Contents: contents, + }) + if err != nil { + return fmt.Errorf("starting k3s container: %w", err) + } + + if err := k.stack.Add(func(ctx context.Context) error { + return cli.Remove(ctx, resp) + }); err != nil { + return err + } + + kcfgraw, err := resp.ReadFile(ctx, "/etc/rancher/k3s/k3s.yaml") + if err != nil { + return fmt.Errorf("getting kubeconfig: %w", err) + } + + config, err := clientcmd.RESTConfigFromKubeConfig(kcfgraw) + if err != nil { + return fmt.Errorf("creating kubernetes config: %w", err) + } + + config.Host = fmt.Sprintf("https://127.0.0.1:%s", resp.NetworkSettings.Ports["6443/tcp"][0].HostPort) + + kcli, err := kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("creating kubernetes client: %w", err) + } + k.kcli = kcli + + if k.kubeconfigWritePath != "" { + kcfg, err := clientcmd.Load(kcfgraw) + if err != nil { + return fmt.Errorf("loading kubeconfig: %w", err) + } + + for _, cluster := range kcfg.Clusters { + cluster.Server = config.Host + } + + if err := os.MkdirAll(filepath.Dir(k.kubeconfigWritePath), 0755); err != nil { + return fmt.Errorf("failed to create kubeconfig directory: %w", err) + } + + clog.InfoContext(ctx, "writing kubeconfig to file", "path", k.kubeconfigWritePath) + if err := clientcmd.WriteToFile(*kcfg, k.kubeconfigWritePath); err != nil { + return fmt.Errorf("writing kubeconfig: %w", err) + } + } + + return k.preflight(ctx) +} + +func (k *driver) Teardown(ctx context.Context) error { + return k.stack.Teardown(ctx) +} + +func (k *driver) Run(ctx context.Context, ref name.Reference) error { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "imagetest-", + Namespace: k.Namespace, + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: "imagetest", + SecurityContext: &corev1.PodSecurityContext{}, + RestartPolicy: corev1.RestartPolicyNever, + Volumes: []corev1.Volume{ + { + Name: "kube-api-access", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + ServiceAccountToken: &corev1.ServiceAccountTokenProjection{ + Path: "token", + ExpirationSeconds: &[]int64{3600}[0], + }, + }, + { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "kube-root-ca.crt", + }, + Items: []corev1.KeyToPath{ + { + Key: "ca.crt", + Path: "ca.crt", + }, + }, + }, + }, + { + DownwardAPI: &corev1.DownwardAPIProjection{ + Items: []corev1.DownwardAPIVolumeFile{ + { + Path: "namespace", + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Containers: []corev1.Container{ + // The primary test workspace + { + Name: "sandbox", + Image: ref.String(), + Env: []corev1.EnvVar{ + { + Name: "IMAGETEST", + Value: "true", + }, + }, + WorkingDir: "/imagetest", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "kube-api-access", + MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", + ReadOnly: true, + 
}, + }, + }, + // TODO: Helper sidecar for logging + // TODO: Helper sidecar for uploading test artifacts + }, + }, + } + + clog.InfoContext(ctx, "creating k3s_in_docker test sandbox pod", "pod_name", pod.Name, "pod_namespace", pod.Namespace) + pobj, err := k.kcli.CoreV1().Pods(k.Namespace).Create(ctx, pod, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create pod: %w", err) + } + + // watch the pod status + pw, err := k.kcli.CoreV1().Pods(pobj.Namespace).Watch(ctx, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.name=%s", pobj.Name), + }) + if err != nil { + return fmt.Errorf("failed to watch pod: %w", err) + } + defer pw.Stop() + + running := false + for !running { + select { + case <-ctx.Done(): + return ctx.Err() + case event, ok := <-pw.ResultChan(): + if !ok { + return fmt.Errorf("channel closed") + } + + pod, ok := event.Object.(*corev1.Pod) + if !ok { + return fmt.Errorf("unexpected watch event type: %T", event.Object) + } + + if event.Type == watch.Deleted { + return fmt.Errorf("pod was deleted before becoming ready") + } + + if pod.Status.Phase == corev1.PodFailed || pod.Status.Phase == corev1.PodUnknown { + return fmt.Errorf("pod failed to start") + } + + for _, status := range pod.Status.ContainerStatuses { + if status.Name == "sandbox" { + if status.State.Waiting == nil { + running = true + clog.InfoContext(ctx, "test sandbox pod scheduled", "pod_name", pobj.Name, "pod_namespace", pobj.Namespace, "status", pod.Status.Phase) + break + } + } + } + + clog.InfoContext(ctx, "waiting for test sandbox pod to schedule", "pod_name", pobj.Name, "pod_namespace", pobj.Namespace, "status", pod.Status.Phase) + } + } + + lreq := k.kcli.CoreV1().Pods(k.Namespace).GetLogs(pobj.Name, &corev1.PodLogOptions{Follow: true, Container: "sandbox"}) + logs, err := lreq.Stream(ctx) + if err != nil { + return fmt.Errorf("failed to stream logs: %w", err) + } + defer logs.Close() + + logsDoneCh := make(chan error) + + go func() { + defer close(logsDoneCh) + r := bufio.NewReader(logs) + for { + line, err := r.ReadBytes('\n') + if err != nil { + if err == io.EOF { + return + } + logsDoneCh <- fmt.Errorf("streaming logs: %w", err) + } + log.Info(ctx, string(line), "pod", pobj.Name) + } + }() + + for { + select { + case <-ctx.Done(): + return fmt.Errorf("context cancelled while waiting for pod completion: %w", ctx.Err()) + case event, ok := <-pw.ResultChan(): + if !ok { + return fmt.Errorf("pod watch channel closed unexpectedly") + } + pod, ok := event.Object.(*corev1.Pod) + if !ok { + continue + } + + if pod.Status.Phase == corev1.PodSucceeded { + clog.InfoContext(ctx, "pod successfully completed", "pod", pobj.Name) + return nil + } + + if pod.Status.Phase == corev1.PodFailed { + return fmt.Errorf("pod %s/%s exited with failure", pobj.Name, pobj.Namespace) + } + + clog.InfoContext(ctx, "waiting for pod to complete", "pod", pobj.Name, "status", pod.Status.Phase) + } + } +} + +// preflight creates the necessary k8s resources to run the tests in pods. 
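+// It verifies API access with a SelfSubjectAccessReview, then creates the
+// test namespace, an "imagetest" service account, and a cluster-admin
+// ClusterRoleBinding for it. (Note: the access review asks about pods in the
+// "apps" group; pods live in the core group, so the intended check is likely
+// Group: "".)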
+func (k *driver) preflight(ctx context.Context) error { + // Check that we can actually do things with the client + resp, err := k.kcli.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, &authv1.SelfSubjectAccessReview{ + Spec: authv1.SelfSubjectAccessReviewSpec{ + ResourceAttributes: &authv1.ResourceAttributes{ + Namespace: k.Namespace, + Verb: "create", + Group: "apps", + Resource: "pods", + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create authorization review: %w", err) + } + + if !resp.Status.Allowed { + return fmt.Errorf("user does not have permission to create pods") + } + + // Create the namespace + ns, err := k.kcli.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: k.Namespace, + }, + }, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create namespace: %w", err) + } + + // Create the relevant rbac + sa, err := k.kcli.CoreV1().ServiceAccounts(ns.Name).Create(ctx, &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "imagetest", + Namespace: ns.Name, + }, + }, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create service account: %w", err) + } + + // Create the role binding + _, err = k.kcli.RbacV1().ClusterRoleBindings().Create(ctx, &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "imagetest", + Namespace: ns.Name, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: sa.Name, + Namespace: sa.Namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: "cluster-admin", + }, + }, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create role binding: %w", err) + } + + return nil +} diff --git a/internal/drivers/k3s_in_docker/opts.go b/internal/drivers/k3s_in_docker/opts.go new file mode 100644 index 0000000..b870b36 --- /dev/null +++ b/internal/drivers/k3s_in_docker/opts.go @@ -0,0 +1,96 @@ +package k3sindocker + +import ( + "fmt" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" +) + +type DriverOpts func(*driver) error + +func WithImageRef(rawRef string) DriverOpts { + return func(k *driver) error { + ref, err := name.ParseReference(rawRef) + if err != nil { + return err + } + k.ImageRef = ref + return nil + } +} + +func WithCNI(enabled bool) DriverOpts { + return func(k *driver) error { + k.CNI = enabled + return nil + } +} + +func WithTraefik(enabled bool) DriverOpts { + return func(k *driver) error { + k.Traefik = enabled + return nil + } +} + +func WithMetricsServer(enabled bool) DriverOpts { + return func(k *driver) error { + k.MetricsServer = enabled + return nil + } +} + +func WithNetworkPolicy(enabled bool) DriverOpts { + return func(k *driver) error { + k.NetworkPolicy = enabled + return nil + } +} + +func WithSnapshotter(snapshotter string) DriverOpts { + return func(k *driver) error { + k.Snapshotter = snapshotter + return nil + } +} + +func WithRegistry(registry string) DriverOpts { + return func(k *driver) error { + if k.Registries == nil { + k.Registries = make(map[string]K3sRegistryConfig) + } + + r, err := name.NewRegistry(registry) + if err != nil { + return fmt.Errorf("invalid registry name: %w", err) + } + + a, err := authn.DefaultKeychain.Resolve(r) + if err != nil { + return fmt.Errorf("resolving keychain for registry %s: %w", r.String(), err) + } + + acfg, err := a.Authorization() + if err != nil { + return fmt.Errorf("getting 
authorization for registry %s: %w", r.String(), err) + } + + k.Registries[registry] = K3sRegistryConfig{ + Auth: &K3sRegistryAuthConfig{ + Username: acfg.Username, + Password: acfg.Password, + Auth: acfg.Auth, + }, + } + + return nil + } +} + +func WithWriteKubeconfig(path string) DriverOpts { + return func(k *driver) error { + k.kubeconfigWritePath = path + return nil + } +} diff --git a/internal/provider/drivers.go b/internal/provider/drivers.go new file mode 100644 index 0000000..cca818f --- /dev/null +++ b/internal/provider/drivers.go @@ -0,0 +1,96 @@ +package provider + +import ( + "context" + "fmt" + "os" + + "github.com/chainguard-dev/terraform-provider-imagetest/internal/drivers" + dockerindocker "github.com/chainguard-dev/terraform-provider-imagetest/internal/drivers/docker_in_docker" + k3sindocker "github.com/chainguard-dev/terraform-provider-imagetest/internal/drivers/k3s_in_docker" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DriverResourceModel string + +const ( + DriverK3sInDocker DriverResourceModel = "k3s_in_docker" + DriverDockerInDocker DriverResourceModel = "docker_in_docker" +) + +type TestsDriversResourceModel struct { + K3sInDocker *K3sInDockerDriverResourceModel `tfsdk:"k3s_in_docker"` + DockerInDocker *DockerInDockerDriverResourceModel `tfsdk:"docker_in_docker"` +} + +type K3sInDockerDriverResourceModel struct { + Cni types.Bool `tfsdk:"cni"` + NetworkPolicy types.Bool `tfsdk:"network_policy"` + Traefik types.Bool `tfsdk:"traefik"` + MetricsServer types.Bool `tfsdk:"metrics_server"` +} + +type DockerInDockerDriverResourceModel struct { + ImageRef types.String `tfsdk:"image_ref"` +} + +func (t TestsResource) LoadDriver(ctx context.Context, drivers *TestsDriversResourceModel, driver DriverResourceModel, id string) (drivers.Tester, error) { + if drivers == nil { + drivers = &TestsDriversResourceModel{} + } + + switch driver { + case DriverK3sInDocker: + cfg := drivers.K3sInDocker + if cfg == nil { + cfg = &K3sInDockerDriverResourceModel{} + } + + opts := []k3sindocker.DriverOpts{ + k3sindocker.WithRegistry(t.repo.RegistryStr()), + } + + tf, err := os.CreateTemp("", "imagetest-k3s-in-docker") + if err != nil { + return nil, err + } + opts = append(opts, k3sindocker.WithWriteKubeconfig(tf.Name())) + + if cfg.Cni.ValueBool() { + opts = append(opts, k3sindocker.WithCNI(true)) + } + + if cfg.NetworkPolicy.ValueBool() { + opts = append(opts, k3sindocker.WithNetworkPolicy(true)) + } + + if cfg.Traefik.ValueBool() { + opts = append(opts, k3sindocker.WithTraefik(true)) + } + + if cfg.MetricsServer.ValueBool() { + opts = append(opts, k3sindocker.WithMetricsServer(true)) + } + + return k3sindocker.NewDriver(id, opts...) + + case DriverDockerInDocker: + cfg := drivers.DockerInDocker + if cfg == nil { + cfg = &DockerInDockerDriverResourceModel{} + } + + opts := []dockerindocker.DriverOpts{ + dockerindocker.WithRemoteOptions(t.ropts...), + dockerindocker.WithRegistryAuth(t.repo.RegistryStr()), + } + + if cfg.ImageRef.ValueString() != "" { + opts = append(opts, dockerindocker.WithImageRef(cfg.ImageRef.ValueString())) + } + + return dockerindocker.NewDriver(id, opts...) 
+ default: + return nil, fmt.Errorf("no matching driver: %s", driver) + } +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 96f0783..649414c 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -390,6 +390,9 @@ func (p *ImageTestProvider) Resources(_ context.Context) []func() resource.Resou NewHarnessPterraformResource, // Tests NewTestDockerRunResource, + + // Tests Resources + NewTestsResource, } } diff --git a/internal/provider/tests_resource.go b/internal/provider/tests_resource.go new file mode 100644 index 0000000..463c952 --- /dev/null +++ b/internal/provider/tests_resource.go @@ -0,0 +1,338 @@ +package provider + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + "os" + + "github.com/chainguard-dev/clog" + "github.com/chainguard-dev/terraform-provider-imagetest/internal/bundler" + "github.com/chainguard-dev/terraform-provider-imagetest/internal/drivers" + "github.com/chainguard-dev/terraform-provider-imagetest/internal/provider/framework" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +const ( + TestsResourceDefaultTimeout = "20m" +) + +var _ resource.ResourceWithConfigure = &TestsResource{} + +func NewTestsResource() resource.Resource { + return &TestsResource{WithTypeName: "tests"} +} + +type TestsResource struct { + framework.WithTypeName + framework.WithNoOpDelete + framework.WithNoOpRead + + repo name.Repository + ropts []remote.Option +} + +type TestsResourceModel struct { + Id types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Driver DriverResourceModel `tfsdk:"driver"` + Drivers *TestsDriversResourceModel `tfsdk:"drivers"` + Images TestsImageResource `tfsdk:"images"` + Tests []TestResourceModel `tfsdk:"tests"` +} + +type TestsImageResource map[string]string + +func (t TestsImageResource) Resolve() (map[string]TestsImagesParsed, error) { + pimgs := make(map[string]TestsImagesParsed) + for k, v := range t { + ref, err := name.ParseReference(v) + if err != nil { + return nil, fmt.Errorf("failed to parse reference: %w", err) + } + + if _, ok := ref.(name.Tag); ok { + return nil, fmt.Errorf("tag references are not supported") + } + + pimgs[k] = TestsImagesParsed{ + Registry: ref.Context().RegistryStr(), + Repo: ref.Context().RepositoryStr(), + RegistryRepo: ref.Context().RegistryStr() + "/" + ref.Context().RepositoryStr(), + Digest: ref.Identifier(), + PseudoTag: fmt.Sprintf("unused@%s", ref.Identifier()), + Ref: ref.String(), + } + } + return pimgs, nil +} + +type TestResourceModel struct { + Name types.String `tfsdk:"name"` + Image types.String `tfsdk:"image"` + Content []TestContentResourceModel `tfsdk:"content"` + Envs map[string]string `tfsdk:"envs"` +} + +type TestContentResourceModel struct { + Source types.String `tfsdk:"source"` + Target types.String `tfsdk:"target"` +} + +func (t *TestsResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + MarkdownDescription: ``, + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "The unique identifier for the test. 
If a name is provided, this will be the name appended with a random suffix.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "The name of the test. If one is not provided, a random name will be generated.", + Optional: true, + Computed: true, + Default: stringdefault.StaticString("imagetest"), + }, + "driver": schema.StringAttribute{ + Description: "The driver to use for the test suite. Only one driver can be used at a time.", + Required: true, + }, + "drivers": schema.SingleNestedAttribute{ + Description: "The resource specific driver configuration. This is merged with the provider scoped drivers configuration.", + Optional: true, + Attributes: map[string]schema.Attribute{ + "k3s_in_docker": schema.SingleNestedAttribute{ + Description: "The k3s_in_docker driver", + Optional: true, + Attributes: map[string]schema.Attribute{ + "cni": schema.BoolAttribute{ + Description: "Enable the CNI plugin", + Optional: true, + }, + "network_policy": schema.BoolAttribute{ + Description: "Enable the network policy", + Optional: true, + }, + "traefik": schema.BoolAttribute{ + Description: "Enable the traefik ingress controller", + Optional: true, + }, + "metrics_server": schema.BoolAttribute{ + Description: "Enable the metrics server", + Optional: true, + }, + }, + }, + "docker_in_docker": schema.SingleNestedAttribute{ + Description: "The docker_in_docker driver", + Optional: true, + Attributes: map[string]schema.Attribute{ + "image_ref": schema.StringAttribute{ + Description: "The image reference to use for the docker-in-docker driver", + Optional: true, + }, + }, + }, + }, + }, + "images": schema.MapAttribute{ + ElementType: types.StringType, + Required: true, + Description: "Images to use for the test suite.", + }, + "tests": schema.ListNestedAttribute{ + Description: "An ordered list of test suites to run", + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Description: "The name of the test", + Required: true, + }, + "image": schema.StringAttribute{ + Description: "The image reference to use as the base image for the test.", + Required: true, + }, + "content": schema.ListNestedAttribute{ + Description: "The content to use for the test", + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "source": schema.StringAttribute{ + Description: "The source path to use for the test", + Required: true, + }, + "target": schema.StringAttribute{ + Description: "The target path to use for the test", + Optional: true, + }, + }, + }, + }, + "envs": schema.MapAttribute{ + Description: "Environment variables to set on the test container. These will overwrite the environment variables set in the image's config on conflicts.", + Optional: true, + ElementType: types.StringType, + }, + }, + }, + }, + }, + } +} + +func (t *TestsResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + store, ok := req.ProviderData.(*ProviderStore) + if !ok { + resp.Diagnostics.AddError("invalid provider data", "...") + return + } + + t.repo = store.repo + t.ropts = store.ropts +} + +func (t *TestsResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var data TestsResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(t.do(ctx, &data)...) 
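+	// Note: state is set below even when do() reports error diagnostics, so
+	// terraform records the partial (tainted) state instead of losing track
+	// of what was created.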
+ resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (t *TestsResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var data TestsResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(t.do(ctx, &data)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (t *TestsResource) do(ctx context.Context, data *TestsResourceModel) (ds diag.Diagnostics) { + ctx = clog.WithLogger(ctx, clog.New(slog.Default().Handler())) + + id := fmt.Sprintf("%s-%s", data.Name.ValueString(), uuid.New().String()[:4]) + data.Id = types.StringValue(id) + + l := clog.FromContext(ctx).With( + "test_id", id, + "driver_name", data.Driver, + ) + + imgsResolved, err := data.Images.Resolve() + if err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to resolve images", err.Error())} + } + + imgsResolvedData, err := json.Marshal(imgsResolved) + if err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to resolve images", err.Error())} + } + l.InfoContext(ctx, "resolved images", "images", string(imgsResolvedData)) + + dr, err := t.LoadDriver(ctx, data.Drivers, data.Driver, data.Id.ValueString()) + if err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to load driver", err.Error())} + } + + defer func() { + if teardownErr := t.maybeTeardown(ctx, dr, ds.HasError()); teardownErr != nil { + ds = append(ds, teardownErr) + } + }() + + l.InfoContext(ctx, "setting up driver") + if err := dr.Setup(ctx); err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to setup driver", err.Error())} + } + + for _, test := range data.Tests { + l := l.With("test_name", test.Name.ValueString()) + l.InfoContext(ctx, "starting test", "driver", data.Driver) + + // Build the test image + baseRepo, err := name.ParseReference(test.Image.ValueString()) + if err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to parse base image reference", err.Error())} + } + + targetRepo, err := name.NewRepository(fmt.Sprintf("%s/%s", t.repo.String(), "imagetest")) + if err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to create target repository", err.Error())} + } + + layers := make([]bundler.Layerer, 0, len(test.Content)) + for _, c := range test.Content { + target := c.Target.ValueString() + if target == "" { + target = "/imagetest" + } + + layers = append(layers, bundler.NewFSLayerFromPath(c.Source.ValueString(), target)) + } + + l.InfoContext(ctx, "creating and publishing test image", "base_ref", baseRepo.String(), "target_ref", targetRepo.String()) + + // Ensure "IMAGES" is appended last to the test env, to prevent anything else from overriding it + test.Envs["IMAGES"] = string(imgsResolvedData) + + tref, err := bundler.Append(ctx, baseRepo, targetRepo, bundler.AppendOpts{ + Layers: layers, + Envs: test.Envs, + RemoteOptions: t.ropts, + }) + if err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to build test image", err.Error())} + } + + l.InfoContext(ctx, "running test image", "test_ref", tref.String()) + if err := dr.Run(ctx, tref); err != nil { + return []diag.Diagnostic{diag.NewErrorDiagnostic("failed to run test image", err.Error())} + } + } + + return +} + +func (t *TestsResource) maybeTeardown(ctx context.Context, d drivers.Tester, failed bool) diag.Diagnostic { + if v := os.Getenv("IMAGETEST_SKIP_TEARDOWN"); v != "" { + return 
diag.NewWarningDiagnostic("skipping teardown", "IMAGETEST_SKIP_TEARDOWN is set, skipping teardown") + } + + if v := os.Getenv("IMAGETEST_SKIP_TEARDOWN_ON_FAILURE"); v != "" && failed { + return diag.NewWarningDiagnostic("skipping teardown", "IMAGETEST_SKIP_TEARDOWN_ON_FAILURE is set and test failed, skipping teardown") + } + + if err := d.Teardown(ctx); err != nil { + return diag.NewErrorDiagnostic("failed to teardown test driver", err.Error()) + } + + return nil +} + +type TestsImagesParsed struct { + Registry string `json:"registry"` + Repo string `json:"repo"` + RegistryRepo string `json:"registry_repo"` + Digest string `json:"digest"` + PseudoTag string `json:"pseudo_tag"` + Ref string `json:"ref"` +} diff --git a/main.go b/main.go index 755070e..25a26f5 100644 --- a/main.go +++ b/main.go @@ -8,11 +8,9 @@ import ( "os/signal" "syscall" - "github.com/chainguard-dev/clog" log2 "github.com/chainguard-dev/terraform-provider-imagetest/internal/log" "github.com/chainguard-dev/terraform-provider-imagetest/internal/provider" "github.com/hashicorp/terraform-plugin-framework/providerserver" - slogmulti "github.com/samber/slog-multi" ) // Run "go generate" to format example terraform files and generate the docs for the registry/website @@ -46,7 +44,7 @@ func main() { ctx, stop := signal.NotifyContext(ctx, syscall.SIGTERM, syscall.SIGINT) defer stop() - ctx = setupLog(ctx) + // ctx = setupLog(ctx) err := providerserver.Serve(ctx, provider.New(version), opts) if err != nil { @@ -54,12 +52,16 @@ func main() { } } -// setupLog sets up the default logging configuration. -func setupLog(ctx context.Context) context.Context { - logger := clog.New(slogmulti.Fanout( - &log2.TFHandler{}, - )) - ctx = clog.WithLogger(ctx, logger) - slog.SetDefault(&logger.Logger) - return ctx +func init() { + slog.SetDefault(slog.New(&log2.TFHandler{})) } + +// // setupLog sets up the default logging configuration. +// func setupLog(ctx context.Context) context.Context { +// logger := clog.New(slogmulti.Fanout( +// &log2.TFHandler{}, +// )) +// ctx = clog.WithLogger(ctx, logger) +// slog.SetDefault(&logger.Logger) +// return ctx +// }
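
For completeness, a sketch exercising the resource-level `drivers` block added
above (illustrative values; unset toggles fall back to the driver defaults):

```terraform
resource "imagetest_tests" "dind" {
  driver = "docker_in_docker"

  drivers = {
    docker_in_docker = {
      image_ref = "cgr.dev/chainguard-private/docker-dind:latest"
    }
  }

  images = {
    app = "cgr.dev/chainguard/busybox:latest@sha256:..." # digest-pinned; bare tags are rejected
  }
}
```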