diff --git a/cli/cmd/nodepool.go b/cli/cmd/nodepool.go index 08f1c975..062e7238 100644 --- a/cli/cmd/nodepool.go +++ b/cli/cmd/nodepool.go @@ -11,18 +11,38 @@ import ( "bytes" "context" "errors" + "fmt" "os" + "strings" + "time" apiv1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + cliapi "k8s.io/client-go/tools/clientcmd/api" cliutils "github.com/softwarefactory-project/sf-operator/cli/cmd/utils" "github.com/softwarefactory-project/sf-operator/controllers" "github.com/spf13/cobra" + "k8s.io/client-go/tools/clientcmd" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/yaml" +) + +var npGetAllowedArgs = []string{"providers-secrets", "builder-ssh-key"} +var npConfigureAllowedArgs = []string{"providers-secrets"} +var npCreateAllowedArgs = []string{"openshiftpods-namespace"} + +// openshiftpods namespace default values +var ( + nodepoolServiceAccount = "nodepool-sa" + nodepoolRole = "nodepool-role" + nodepoolRoleBinding = "nodepool-rb" + nodepoolToken = "nodepool-token" + nodepoolKubeContext = "openshiftpods" ) func npGet(kmd *cobra.Command, args []string) { - cliCtx := cliutils.GetCLIctxOrDie(kmd, args, []string{"providers-secrets", "builder-ssh-key"}) + cliCtx := cliutils.GetCLIctxOrDie(kmd, args, npGetAllowedArgs) target := args[0] ns := cliCtx.Namespace kubeContext := cliCtx.KubeContext @@ -44,7 +64,7 @@ func npGet(kmd *cobra.Command, args []string) { } func npConfigure(kmd *cobra.Command, args []string) { - cliCtx := cliutils.GetCLIctxOrDie(kmd, args, []string{"providers-secrets"}) + cliCtx := cliutils.GetCLIctxOrDie(kmd, args, npConfigureAllowedArgs) ns := cliCtx.Namespace kubeContext := cliCtx.KubeContext cloudsFile, _ := kmd.Flags().GetString("clouds") @@ -70,17 +90,73 @@ func npConfigure(kmd *cobra.Command, args []string) { ctrl.Log.Error(err, "Error opening %s", kubeFile) os.Exit(1) } - ensureNodepoolProvidersSecrets(ns, kubeContext, cloudsContent, kubeContent) + env := cliutils.ENV{ + Cli: cliutils.CreateKubernetesClientOrDie(kubeContext), + Ctx: 
context.TODO(), + Ns: ns, + } + ensureNodepoolProvidersSecrets(&env, cloudsContent, kubeContent) } -func ensureNodepoolProvidersSecrets(ns string, kubeContext string, cloudconfig []byte, kubeconfig []byte) { - env := cliutils.ENV{ +func npCreate(kmd *cobra.Command, args []string) { + cliCtx := cliutils.GetCLIctxOrDie(kmd, args, npCreateAllowedArgs) + ns := cliCtx.Namespace + kubeContext := cliCtx.KubeContext + sfEnv := cliutils.ENV{ Cli: cliutils.CreateKubernetesClientOrDie(kubeContext), Ctx: context.TODO(), Ns: ns, } + if args[0] == "openshiftpods-namespace" { + nodepoolContext, _ := kmd.Flags().GetString("nodepool-context") + nodepoolNamespace, _ := kmd.Flags().GetString("nodepool-namespace") + showConfigTemplate, _ := kmd.Flags().GetBool("show-config-template") + skipProvidersSecrets, _ := kmd.Flags().GetBool("skip-providers-secrets") + + if nodepoolContext == kubeContext { + ctrl.Log.Info("Warning: Nodepool will use the same cluster context as SF") + if nodepoolNamespace == ns { + ctrl.Log.Info("Warning: Nodepool will manage resources in the same namespace as the Software Factory deployment") + } + } + CreateNamespaceForNodepool(&sfEnv, nodepoolContext, nodepoolNamespace, skipProvidersSecrets) + if showConfigTemplate { + configTemplate := mkNodepoolOpenshiftPodsConfigTemplate(nodepoolNamespace) + fmt.Println("Nodepool configuration template:") + fmt.Println(configTemplate) + } + } +} + +func CreateNamespaceForNodepool(sfEnv *cliutils.ENV, nodepoolContext, nodepoolNamespace string, skipProvidersSecrets bool) { + nodepoolEnv := cliutils.ENV{ + Cli: cliutils.CreateKubernetesClientOrDie(nodepoolContext), + Ctx: context.TODO(), + Ns: nodepoolNamespace, + } + cliutils.EnsureNamespaceOrDie(&nodepoolEnv, nodepoolNamespace) + cliutils.EnsureServiceAccountOrDie(&nodepoolEnv, nodepoolServiceAccount) + ensureNodepoolRole(&nodepoolEnv) + token := ensureNodepoolServiceAccountSecret(&nodepoolEnv) + nodepoolKubeConfig := createNodepoolKubeConfigOrDie(nodepoolContext, 
nodepoolNamespace, token) + kconfig, err := clientcmd.Write(nodepoolKubeConfig) + + if err != nil { + ctrl.Log.Error(err, "Could not serialize nodepool's kubeconfig") + } + if skipProvidersSecrets { + fmt.Println("Provider kubeconfig:") + fmt.Println(string(kconfig)) + } else { + ensureNodepoolProvidersSecrets(sfEnv, []byte{}, kconfig) + } + +} + +func ensureNodepoolProvidersSecrets(env *cliutils.ENV, cloudconfig []byte, kubeconfig []byte) { + var secret apiv1.Secret - if x, _ := cliutils.GetM(&env, controllers.NodepoolProvidersSecretsName, &secret); !x { + if !cliutils.GetMOrDie(env, controllers.NodepoolProvidersSecretsName, &secret) { // Initialize the secret data secret.Name = controllers.NodepoolProvidersSecretsName secret.Data = make(map[string][]byte) @@ -90,7 +166,7 @@ func ensureNodepoolProvidersSecrets(ns string, kubeContext string, cloudconfig [ if kubeconfig != nil { secret.Data["kube.config"] = kubeconfig } - cliutils.CreateROrDie(&env, &secret) + cliutils.CreateROrDie(env, &secret) } else { // Handle secret update if secret.Data == nil { @@ -124,7 +200,7 @@ func ensureNodepoolProvidersSecrets(ns string, kubeContext string, cloudconfig [ } } if needUpdate { - cliutils.UpdateROrDie(&env, &secret) + cliutils.UpdateROrDie(env, &secret) } else { ctrl.Log.Info("Secret \"" + controllers.NodepoolProvidersSecretsName + "\" already up to date, doing nothing") } @@ -187,12 +263,159 @@ func getBuilderSSHKey(ns string, kubeContext string, pubKey string) { } } +func ensureNodepoolRole(env *cliutils.ENV) { + var role rbacv1.Role + var roleBinding rbacv1.RoleBinding + + if !cliutils.GetMOrDie(env, nodepoolRole, &role) { + role.Name = nodepoolRole + role.Rules = []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"pods", "pods/exec", "services", "persistentvolumeclaims", "configmaps", "secrets"}, + Verbs: []string{"create", "delete", "get", "list", "patch", "update", "watch"}, + }, + { + APIGroups: []string{"apps"}, + Resources: 
[]string{"deployments", "statefulsets"}, + Verbs: []string{"create", "delete", "get", "list", "patch", "update", "watch"}, + }, + } + cliutils.CreateROrDie(env, &role) + } + + if !cliutils.GetMOrDie(env, nodepoolRoleBinding, &roleBinding) { + roleBinding.Name = nodepoolRoleBinding + roleBinding.Subjects = []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: nodepoolServiceAccount, + }, + } + roleBinding.RoleRef.Kind = "Role" + roleBinding.RoleRef.Name = nodepoolRole + roleBinding.RoleRef.APIGroup = "rbac.authorization.k8s.io" + cliutils.CreateROrDie(env, &roleBinding) + } +} + +func ensureNodepoolServiceAccountSecret(env *cliutils.ENV) string { + var secret apiv1.Secret + if !cliutils.GetMOrDie(env, nodepoolToken, &secret) { + secret.Name = nodepoolToken + secret.ObjectMeta.Annotations = map[string]string{ + "kubernetes.io/service-account.name": nodepoolServiceAccount, + } + secret.Type = "kubernetes.io/service-account-token" + cliutils.CreateROrDie(env, &secret) + } + var token []byte + for retry := 1; retry < 20; retry++ { + token = secret.Data["token"] + if token != nil { + break + } + time.Sleep(time.Second) + cliutils.GetMOrDie(env, nodepoolToken, &secret) + } + if token == nil { + ctrl.Log.Error(errors.New("query timeout"), "Error getting nodepool service account token") + os.Exit(1) + } + return string(token) +} + +func createNodepoolKubeConfigOrDie(contextName string, ns string, token string) cliapi.Config { + currentConfig := controllers.GetConfigContextOrDie(contextName) + if strings.HasPrefix(currentConfig.Host, "https://localhost") || strings.HasPrefix(currentConfig.Host, "https://127.") { + ctrl.Log.Error( + errors.New("invalid config host address"), + "The server address of the context used by nodepool cannot be \"localhost\" and must be resolvable from nodepool's pod.", + ) + os.Exit(1) + } + return cliapi.Config{ + Kind: "Config", + APIVersion: "v1", + Clusters: map[string]*cliapi.Cluster{ + "OpenshiftPodsCluster": { + Server: currentConfig.Host 
+ currentConfig.APIPath, + CertificateAuthorityData: currentConfig.TLSClientConfig.CAData, + }, + }, + Contexts: map[string]*cliapi.Context{ + nodepoolKubeContext: { + Cluster: "OpenshiftPodsCluster", + Namespace: ns, + AuthInfo: nodepoolServiceAccount, + }, + }, + CurrentContext: nodepoolKubeContext, + AuthInfos: map[string]*cliapi.AuthInfo{ + nodepoolServiceAccount: { + Token: token, + }, + }, + } +} + +func mkNodepoolOpenshiftPodsConfigTemplate(nodepoolNamespace string) string { + + type Label struct { + Name string `json:"name"` + Image string `json:"image"` + } + type Pool struct { + Name string `json:"name"` + Labels []Label `json:"labels"` + } + type Provider struct { + Name string `json:"name"` + Driver string `json:"driver"` + Context string `json:"context"` + Pools []Pool `json:"pools"` + } + type ProvidersConfig struct { + Providers []Provider `json:"providers"` + } + templateConfig := ProvidersConfig{ + Providers: []Provider{ + { + Name: "openshiftpods", + Driver: "openshiftpods", + Context: nodepoolKubeContext, + Pools: []Pool{ + { + Name: nodepoolNamespace, + Labels: []Label{ + { + Name: "fedora-latest", + Image: "quay.io/fedora/fedora:latest", + }, + }, + }, + }, + }, + }, + } + templateYaml, err := yaml.Marshal(templateConfig) + if err != nil { + ctrl.Log.Error(err, "Could not serialize sample provider configuration") + os.Exit(1) + } + return string(templateYaml) +} + func MkNodepoolCmd() *cobra.Command { var ( - cloudsOutput string - kubeconfigOutput string - builderPubKey string + cloudsOutput string + kubeconfigOutput string + builderPubKey string + nodepoolContext string + nodepoolNamespace string + showConfigTemplate bool + skipProvidersSecrets bool nodepoolCmd = &cobra.Command{ Use: "nodepool", @@ -205,7 +428,7 @@ func MkNodepoolCmd() *cobra.Command { getCmd.Run = npGet getCmd.Use = "get {providers-secrets, builder-ssh-key}" getCmd.Long = "Get a Nodepool resource. The resource can be the providers secrets or the builder's public SSH key." 
- getCmd.ValidArgs = []string{"providers-secrets", "builder-ssh-key"} + getCmd.ValidArgs = npGetAllowedArgs getCmd.Flags().StringVar(&cloudsOutput, "clouds", "", "(use with providers-secrets) File where to dump the clouds secrets") getCmd.Flags().StringVar(&kubeconfigOutput, "kube", "", "(use with providers-secrets) File where to dump the kube secrets") getCmd.Flags().StringVar(&builderPubKey, "pubkey", "", "(use with builder-ssh-key) File where to dump nodepool-builder's SSH public key") @@ -213,10 +436,19 @@ func MkNodepoolCmd() *cobra.Command { configureCmd.Run = npConfigure configureCmd.Use = "configure {providers-secrets}" configureCmd.Long = "Configure OpenStack and/or K8s-based providers' secrets from local files." - configureCmd.ValidArgs = []string{"providers-secrets"} + configureCmd.ValidArgs = npConfigureAllowedArgs configureCmd.Flags().StringVar(&cloudsOutput, "clouds", "", "(use with providers-secrets) File to read the clouds secrets from") configureCmd.Flags().StringVar(&kubeconfigOutput, "kube", "", "(use with providers-secrets) File to read the kube secrets from") + createCmd.Run = npCreate + createCmd.Use = "create {openshiftpods-namespace}" + createCmd.Long = "Create a nodepool resource. The resource can be: a namespace that can be used with the \"openshiftpods\" provider." 
+ createCmd.ValidArgs = npCreateAllowedArgs + createCmd.Flags().StringVar(&nodepoolContext, "nodepool-context", "", "(openshiftpods-namespace) the kube context nodepool will use to configure the namespace") + createCmd.Flags().StringVar(&nodepoolNamespace, "nodepool-namespace", "nodepool", "(openshiftpods-namespace) the name of the namespace to create") + createCmd.Flags().BoolVar(&showConfigTemplate, "show-config-template", false, "(openshiftpods-namespace) display a YAML snippet that can be used to configure an \"openshiftpods\" provider with nodepool") + createCmd.Flags().BoolVar(&skipProvidersSecrets, "skip-providers-secrets", false, "(openshiftpods-namespace) do not update providers secrets, and instead display the nodepool kube config on stdout") + nodepoolCmd.AddCommand(createCmd) nodepoolCmd.AddCommand(configureCmd) nodepoolCmd.AddCommand(getCmd) diff --git a/cli/cmd/utils/utils.go b/cli/cmd/utils/utils.go index 5f52d513..65fe0c3c 100644 --- a/cli/cmd/utils/utils.go +++ b/cli/cmd/utils/utils.go @@ -26,6 +26,7 @@ import ( "reflect" "strings" + apiv1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" apiroutev1 "github.com/openshift/api/route/v1" @@ -311,3 +312,21 @@ func RunCmdWithEnvOrDie(environ []string, cmd string, args ...string) string { func RunCmdOrDie(cmd string, args ...string) string { return RunCmdWithEnvOrDie([]string{}, cmd, args...) 
} + +func EnsureNamespaceOrDie(env *ENV, name string) { + var ns apiv1.Namespace + if err := env.Cli.Get(env.Ctx, client.ObjectKey{Name: name}, &ns); apierrors.IsNotFound(err) { + ns.Name = name + CreateROrDie(env, &ns) + } else if err != nil { + ctrl.Log.Error(err, "Error checking namespace "+name) + os.Exit(1) + } +} +func EnsureServiceAccountOrDie(env *ENV, name string) { + var sa apiv1.ServiceAccount + if !GetMOrDie(env, name, &sa) { + sa.Name = name + CreateROrDie(env, &sa) + } +} diff --git a/cli/sfconfig/cmd/dev/run.go b/cli/sfconfig/cmd/dev/run.go index e8c18b2e..788d6597 100644 --- a/cli/sfconfig/cmd/dev/run.go +++ b/cli/sfconfig/cmd/dev/run.go @@ -21,7 +21,6 @@ import ( cliutils "github.com/softwarefactory-project/sf-operator/cli/cmd/utils" bootstraptenantconfigrepo "github.com/softwarefactory-project/sf-operator/cli/sfconfig/cmd/bootstrap-tenant-config-repo" - "github.com/softwarefactory-project/sf-operator/cli/sfconfig/cmd/nodepool" "github.com/softwarefactory-project/sf-operator/cli/sfconfig/cmd/sfprometheus" "github.com/softwarefactory-project/sf-operator/cli/sfconfig/cmd/utils" "github.com/softwarefactory-project/sf-operator/cli/sfconfig/config" @@ -80,7 +79,6 @@ func Run(cmd *cobra.Command) { sfprometheus.EnsurePrometheus(&env, sfconfig.FQDN, false) } EnsureDemoConfig(&env, &sfconfig, !dontUpdateDemoTenantDefinition) - nodepool.CreateNamespaceForNodepool(&env, "", "nodepool", "") EnsureCRD() } diff --git a/doc/reference/cli/main.md b/doc/reference/cli/main.md index 42f9fb07..6cf0a7f0 100644 --- a/doc/reference/cli/main.md +++ b/doc/reference/cli/main.md @@ -18,6 +18,7 @@ deployments, beyond what can be defined in a custom resource manifest. 1. [Init](#init) 1. [Nodepool](#nodepool) 1. [configure providers-secrets](#configure-providers-secrets) + 1. [create openshiftpods-namespace](#create-openshiftpods-namespace) 1. [get builder-ssh-key](#get-builder-ssh-key) 1. 
[Operator](#apply) @@ -275,6 +276,22 @@ Flags: | --kube | string | The file from which to read nodepool's kube.config | yes | - | | --clouds | string | The file from which to read nodepool's clouds.yaml | yes | - | +#### create openshiftpods-namespace + +Create and set up a dedicated namespace on a cluster, so that nodepool can spawn pods with the [openshiftpods](https://zuul-ci.org/docs/nodepool/latest/openshift-pods.html) driver. + +```sh +go run ./main.go [GLOBAL FLAGS] nodepool create openshiftpods-namespace [FLAGS] +``` +Flags: + +| Argument | Type | Description | Optional | Default | +|----------|------|-------|----|----| +| --nodepool-context | string | The kube context to use to set up the namespace | yes | default context set with `kubectl` | +| --nodepool-namespace | string | The namespace to set up | yes | nodepool | +| --show-config-template | boolean | Display a nodepool configuration snippet that can be used to enable an openshiftpods provider using the created namespace | yes | false | +| --skip-providers-secrets | boolean | Do not update or create nodepool's providers secrets after setting up the namespace | yes | false | + #### get builder-ssh-key The Nodepool builder component should be used with at least one `image-builder` companion machine. 
diff --git a/roles/health-check/config-update-nodepool-launcher/tasks/main.yaml b/roles/health-check/config-update-nodepool-launcher/tasks/main.yaml index ec38b9e4..9d9d9446 100644 --- a/roles/health-check/config-update-nodepool-launcher/tasks/main.yaml +++ b/roles/health-check/config-update-nodepool-launcher/tasks/main.yaml @@ -65,7 +65,7 @@ microshift_provider: - name: microshiftLocal driver: openshiftpods - context: microshift + context: openshiftpods pools: # NOTE: name is a name of the namespace # https://github.com/softwarefactory-project/sf-config/blob/master/ansible/roles/sf-repos/files/config/nodepool/openshift.yaml#L30 diff --git a/roles/setup-nodepool-ns/tasks/main.yaml b/roles/setup-nodepool-ns/tasks/main.yaml index 04bbeee5..80908b17 100644 --- a/roles/setup-nodepool-ns/tasks/main.yaml +++ b/roles/setup-nodepool-ns/tasks/main.yaml @@ -1,5 +1,6 @@ --- -- name: Setup nodepool namespace - command: "tools/sfconfig create-namespace-for-nodepool" +- name: Set up nodepool namespace + ansible.builtin.shell: > + go run ./main.go --namespace sf nodepool create openshiftpods-namespace args: - chdir: "{{ zuul.project.src_dir }}" + chdir: "{{ zuul.project.src_dir | default(src_dir) }}" \ No newline at end of file