diff --git a/.gitignore b/.gitignore index 792f31b7..b767e62e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,9 @@ +build/ +export/ +Makefile.local +version +packer/*/*-cloud-init.iso +packer/*/*-userdata context-windows/out/ context-windows/*.msi context-windows/rhsrvany.exe diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..522c9129 --- /dev/null +++ b/Makefile @@ -0,0 +1,52 @@ +# load variables and makefile config +include Makefile.config + +# load possible overrides or non-free definitions +-include Makefile.local + +# all, aliases +all: $(patsubst %, packer-%, $(DISTROS)) $(patsubst %, packer-%, $(SERVICES)) +distros: $(patsubst %, packer-%, $(DISTROS)) +services: $(patsubst %, packer-%, $(SERVICES)) + +# allow individual distribution targets (e.g., "make debian11") +$(DISTROS): %: packer-% ; +$(SERVICES): %: packer-% ; + +packer-%: context-linux ${DIR_EXPORT}/%.qcow2 + @${INFO} "Packer ${*} done" + +# run packer build for given distro or service +${DIR_EXPORT}/%.qcow2: + $(eval DISTRO_NAME := $(shell echo ${*} | sed 's/[0-9].*//')) + $(eval DISTRO_VER := $(shell echo ${*} | sed 's/[a-z]*//')) + packer/build.sh ${DISTRO_NAME} ${DISTRO_VER} ${@} + +# context packages +context-linux: $(patsubst %, context-linux/out/%, $(LINUX_CONTEXT_PACKAGES)) + @${INFO} "Generate context-linux done" + +context-linux/out/%: + cd context-linux; ./generate-all.sh + +clean: + -rm -rf ${DIR_EXPORT}/* + +help: + @echo 'Available distros:' + @echo "$(shell echo "${DISTROS}" | fmt -w 65 | tr '\n' '\1' )" \ + | tr '\1' '\n' | sed 's/^/ /' + @echo 'Available services:' + @echo ' $(SERVICES)' + @echo + @echo 'Usage examples:' + @echo ' make all -- build all distros and services' + @echo ' make distros -- build all distros' + @echo ' make services -- build all services' + @echo + @echo ' make -- build just one distro' + @echo ' make context-linux -- build context linux packages' + @echo ' make context-windows -- TODO' + +version: + @echo $(VERSION)-$(RELEASE) > version diff --git a/Makefile.config b/Makefile.config new file mode 100644 index 00000000..d2083374 --- /dev/null +++ b/Makefile.config @@ -0,0 +1,57 @@ +# context version definition +VERSION := 6.6.1 +RELEASE := 1 + +# log +VERBOSE := 1 +PACKER_LOG := 0 +PACKER_HEADLESS := true + +DISTROS := alma8 alma9 \ + alpine316 alpine317 \ + alt9 alt10 \ + amazon2 \ + centos7 centos8stream \ + debian10 debian11 debian12 \ + devuan3 devuan4\ + fedora37 fedora38 \ + freebsd12 freebsd13 \ + ol8 ol9 \ + opensuse15 \ + rocky8 rocky9 \ + ubuntu2004 ubuntu2004min ubuntu2204 ubuntu2204min + +SERVICES := service_OneKE service_wordpress + +.DEFAULT_GOAL := help + +# default directories +DIR_BUILD := build +DIR_EXPORT := export +$(shell mkdir -p ${DIR_BUILD} ${DIR_EXPORT}) + +# don't delete exported +.SECONDARY: $(patsubst %, $(DIR_EXPORT)/%.qcow2, $(DISTROS)) $(patsubst %, $(DIR_EXPORT)/%.qcow2, $(SERVICES)) + +.PHONY: context-linux help + +# this needs to match context-linux/generate-all.sh products +LINUX_CONTEXT_PACKAGES := one-context_${VERSION}-${RELEASE}.deb \ + one-context-${VERSION}-${RELEASE}.el6.noarch.rpm \ + one-context-${VERSION}-${RELEASE}.el7.noarch.rpm \ + one-context-${VERSION}-${RELEASE}.el8.noarch.rpm \ + one-context-${VERSION}-${RELEASE}.el9.noarch.rpm \ + one-context-${VERSION}-${RELEASE}.suse.noarch.rpm \ + one-context-${VERSION}_${RELEASE}.txz \ + one-context-${VERSION}-alt${RELEASE}.noarch.rpm \ + one-context-${VERSION}-r${RELEASE}.apk \ + one-context-linux-${VERSION}-${RELEASE}.iso + +LINUX_CONTEXT_PACKAGES_FULL := $(patsubst 
%, context-linux/out/%, $(LINUX_CONTEXT_PACKAGES)) + + +# logging func +INFO=sh -c 'if [ $(VERBOSE) = 1 ]; then echo [INFO] $$1; fi' INFO + +# export all variables +export diff --git a/README.md b/README.md new file mode 100644 index 00000000..543eef53 --- /dev/null +++ b/README.md @@ -0,0 +1,10 @@ +# one-apps +Toolchain to build OpenNebula appliances + +Requirements: +- make +- Packer +- Qemu Packer Plugin +- cloud-utils +- guestfs-tools +- qemu-img diff --git a/appliances/OneKE/appliance.sh b/appliances/OneKE/appliance.sh new file mode 100644 index 00000000..a0ecf6bf --- /dev/null +++ b/appliances/OneKE/appliance.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +service_bootstrap() { ruby -- "${BASH_SOURCE%.*}/appliance.rb" bootstrap; } + +service_cleanup() { ruby -- "${BASH_SOURCE%.*}/appliance.rb" cleanup; } + +service_configure() { ruby -- "${BASH_SOURCE%.*}/appliance.rb" configure; } + +service_install() { ruby -- "${BASH_SOURCE%.*}/appliance.rb" install; } + +return diff --git a/appliances/OneKE/appliance/.rubocop.yml b/appliances/OneKE/appliance/.rubocop.yml new file mode 100644 index 00000000..0ca2b581 --- /dev/null +++ b/appliances/OneKE/appliance/.rubocop.yml @@ -0,0 +1,30 @@ +AllCops: + Exclude: + - '*_spec.rb' + +Lint/MissingCopEnableDirective: + Enabled: false + +Layout/FirstArrayElementIndentation: + Enabled: false + +Layout/FirstHashElementIndentation: + Enabled: false + +Layout/HashAlignment: + Enabled: false + +Layout/HeredocIndentation: + Enabled: false + +Layout/IndentationWidth: + Enabled: false + +Layout/MultilineMethodCallIndentation: + Enabled: false + +Metrics/BlockLength: + Enabled: false + +Metrics/MethodLength: + Enabled: false diff --git a/appliances/OneKE/appliance/appliance.rb b/appliances/OneKE/appliance/appliance.rb new file mode 100644 index 00000000..0511ae3a --- /dev/null +++ b/appliances/OneKE/appliance/appliance.rb @@ -0,0 +1,81 @@ +# frozen_string_literal: true + +require_relative 'config.rb' +require_relative 'helpers.rb' + +require_relative 'vnf.rb' +require_relative 'kubernetes.rb' + +require_relative 'multus.rb' +require_relative 'calico.rb' +require_relative 'canal.rb' +require_relative 'cilium.rb' + +require_relative 'metallb.rb' +require_relative 'traefik.rb' +require_relative 'longhorn.rb' +require_relative 'cleaner.rb' + +if caller.empty? + case ARGV[0].to_sym + when :install + install_packages PACKAGES + + with_policy_rc_d_disabled do + install_kubernetes + end + + install_metallb + install_traefik + install_longhorn + install_cleaner + + # NOTE: Longhorn images are pulled separately. + pull_addon_images if ONE_SERVICE_AIRGAPPED + + msg :info, 'Installation completed successfully' + + when :configure + prepare_dedicated_storage unless ONEAPP_STORAGE_DEVICE.nil? 
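+        # NOTE: In the airgapped case the include_images calls below only
+        # symlink pre-pulled *.tar.zst archives into the RKE2 agent image
+        # directory (see include_images in helpers.rb); this must happen
+        # before the rke2-server/rke2-agent services are started, which is
+        # why it runs ahead of configure_kubernetes.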
+ + configure_vnf + + if ONE_SERVICE_AIRGAPPED + include_images 'rke2-images-core' + include_images 'rke2-images-multus' if ONEAPP_K8S_MULTUS_ENABLED + include_images 'rke2-images-cilium' if ONEAPP_K8S_CNI_PLUGIN == 'cilium' + + include_images 'one-longhorn' if ONEAPP_K8S_LONGHORN_ENABLED + include_images 'one-metallb' if ONEAPP_K8S_METALLB_ENABLED + include_images 'one-traefik' if ONEAPP_K8S_TRAEFIK_ENABLED + include_images 'one-cleaner' + end + + node = configure_kubernetes( + configure_cni: ->{ + configure_multus if ONEAPP_K8S_MULTUS_ENABLED + configure_calico if ONEAPP_K8S_CNI_PLUGIN == 'calico' + configure_canal if ONEAPP_K8S_CNI_PLUGIN == 'canal' + configure_cilium if ONEAPP_K8S_CNI_PLUGIN == 'cilium' + }, + configure_addons: ->{ + configure_metallb if ONEAPP_K8S_METALLB_ENABLED + + include_manifests 'one-longhorn' if ONEAPP_K8S_LONGHORN_ENABLED + include_manifests 'one-metallb' if ONEAPP_K8S_METALLB_ENABLED + include_manifests 'one-traefik' if ONEAPP_K8S_TRAEFIK_ENABLED + include_manifests 'one-cleaner' + } + ) + + if node[:join_worker] + vnf_ingress_setup_https_backend + vnf_ingress_setup_http_backend + end + + msg :info, 'Configuration completed successfully' + + when :bootstrap + puts 'bootstrap_success' + end +end diff --git a/appliances/OneKE/appliance/calico.rb b/appliances/OneKE/appliance/calico.rb new file mode 100644 index 00000000..fccc8a14 --- /dev/null +++ b/appliances/OneKE/appliance/calico.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +require 'base64' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def configure_calico(manifest_dir = K8S_MANIFEST_DIR) + msg :info, 'Configure Calico' + + if ONEAPP_K8S_CNI_CONFIG.nil? + msg :info, 'Create Calico CRD config from user-provided ranges' + + documents = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-calico + namespace: kube-system + spec: + valuesContent: |- + MANIFEST + else + msg :info, 'Use Calico user-provided config' + documents = YAML.load_stream Base64.decode64 ONEAPP_K8S_CNI_CONFIG + end + + msg :info, 'Generate Calico config manifest' + manifest = YAML.dump_stream *documents + file "#{manifest_dir}/rke2-calico-config.yaml", manifest, overwrite: true +end diff --git a/appliances/OneKE/appliance/canal.rb b/appliances/OneKE/appliance/canal.rb new file mode 100644 index 00000000..f0b3c397 --- /dev/null +++ b/appliances/OneKE/appliance/canal.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +require 'base64' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def configure_canal(manifest_dir = K8S_MANIFEST_DIR) + msg :info, 'Configure Canal' + + if ONEAPP_K8S_CNI_CONFIG.nil? 
+ msg :info, 'Create Canal CRD config from user-provided ranges' + + documents = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-canal + namespace: kube-system + spec: + valuesContent: |- + MANIFEST + else + msg :info, 'Use Canal user-provided config' + documents = YAML.load_stream Base64.decode64 ONEAPP_K8S_CNI_CONFIG + end + + msg :info, 'Generate Canal config manifest' + manifest = YAML.dump_stream *documents + file "#{manifest_dir}/rke2-canal-config.yaml", manifest, overwrite: true +end diff --git a/appliances/OneKE/appliance/cilium.rb b/appliances/OneKE/appliance/cilium.rb new file mode 100644 index 00000000..84fd6e27 --- /dev/null +++ b/appliances/OneKE/appliance/cilium.rb @@ -0,0 +1,71 @@ +# frozen_string_literal: true + +require 'base64' +require 'uri' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def configure_cilium(manifest_dir = K8S_MANIFEST_DIR, endpoint = K8S_CONTROL_PLANE_EP) + msg :info, 'Configure Cilium' + + ep = URI.parse "https://#{endpoint}" + + if ONEAPP_K8S_CNI_CONFIG.nil? + msg :info, 'Create Cilium CRD config from user-provided ranges' + + documents = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-cilium + namespace: kube-system + spec: + valuesContent: |- + kubeProxyReplacement: strict + k8sServiceHost: "#{ep.host}" + k8sServicePort: #{ep.port} + cni: + chainingMode: "none" + exclusive: false + bgpControlPlane: + enabled: true + --- + apiVersion: cilium.io/v2alpha1 + kind: CiliumLoadBalancerIPPool + metadata: + name: default + namespace: kube-system + spec: + cidrs: {} + MANIFEST + + unless ONEAPP_K8S_CILIUM_RANGES.empty? + ip_address_pool = documents.find do |doc| + doc['kind'] == 'CiliumLoadBalancerIPPool' && doc.dig('metadata', 'name') == 'default' + end + ip_address_pool['spec']['cidrs'] = extract_cilium_ranges.map do |item| + { 'cidr' => item.join('/') } + end + end + else + msg :info, 'Use Cilium user-provided config' + documents = YAML.load_stream Base64.decode64 ONEAPP_K8S_CNI_CONFIG + end + + msg :info, 'Generate Cilium config manifest' + manifest = YAML.dump_stream *documents + file "#{manifest_dir}/rke2-cilium-config.yaml", manifest, overwrite: true +end + +def extract_cilium_ranges(ranges = ONEAPP_K8S_CILIUM_RANGES) + ranges.compact + .map(&:strip) + .reject(&:empty?) + .map { |item| item.split('/').map(&:strip) } + .reject { |item| item.length > 2 } + .reject { |item| item.map(&:empty?).any? 
} + .reject { |item| !(ipv4?(item.first) && integer?(item.last)) } +end diff --git a/appliances/OneKE/appliance/cilium_spec.rb b/appliances/OneKE/appliance/cilium_spec.rb new file mode 100644 index 00000000..718f1141 --- /dev/null +++ b/appliances/OneKE/appliance/cilium_spec.rb @@ -0,0 +1,154 @@ +# frozen_string_literal: true + +require 'base64' +require 'rspec' +require 'tmpdir' +require 'yaml' + +require_relative 'cilium.rb' + +RSpec.describe 'extract_cilium_ranges' do + it 'should extract and return all ranges (positive)' do + input = [ + '10.11.12.0/24', + '10.11.0.0/16' + ] + output = [ + %w[10.11.12.0 24], + %w[10.11.0.0 16] + ] + expect(extract_cilium_ranges(input)).to eq output + end + + it 'should extract and return no ranges (negative)' do + input = [ + '', + '10.11.12.0', + '10.11.12.0/', + 'asd.11.12.0/24', + '10.11.12.0/asd' + ] + output = [] + expect(extract_cilium_ranges(input)).to eq output + end +end + +RSpec.describe 'configure_cilium' do + it 'should apply user-defined ranges (empty)' do + stub_const 'K8S_CONTROL_PLANE_EP', '192.168.150.86:6443' + stub_const 'ONEAPP_K8S_CNI_PLUGIN', 'cilium' + stub_const 'ONEAPP_K8S_CNI_CONFIG', nil + stub_const 'ONEAPP_K8S_CILIUM_RANGES', [] + output = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-cilium + namespace: kube-system + spec: + valuesContent: |- + kubeProxyReplacement: strict + k8sServiceHost: "192.168.150.86" + k8sServicePort: 6443 + cni: + chainingMode: "none" + exclusive: false + bgpControlPlane: + enabled: true + --- + apiVersion: cilium.io/v2alpha1 + kind: CiliumLoadBalancerIPPool + metadata: + name: default + namespace: kube-system + spec: + cidrs: {} + MANIFEST + Dir.mktmpdir do |temp_dir| + configure_cilium temp_dir + result = YAML.load_stream File.read "#{temp_dir}/rke2-cilium-config.yaml" + expect(result).to eq output + end + end + + it 'should apply user-defined ranges' do + stub_const 'K8S_CONTROL_PLANE_EP', '192.168.150.86:6443' + stub_const 'ONEAPP_K8S_CNI_PLUGIN', 'cilium' + stub_const 'ONEAPP_K8S_CILIUM_RANGES', ['192.168.150.128/25', '10.11.12.0/24'] + output = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-cilium + namespace: kube-system + spec: + valuesContent: |- + kubeProxyReplacement: strict + k8sServiceHost: "192.168.150.86" + k8sServicePort: 6443 + cni: + chainingMode: "none" + exclusive: false + bgpControlPlane: + enabled: true + --- + apiVersion: cilium.io/v2alpha1 + kind: CiliumLoadBalancerIPPool + metadata: + name: default + namespace: kube-system + spec: + cidrs: + - cidr: 192.168.150.128/25 + - cidr: 10.11.12.0/24 + MANIFEST + Dir.mktmpdir do |temp_dir| + configure_cilium temp_dir + result = YAML.load_stream File.read "#{temp_dir}/rke2-cilium-config.yaml" + expect(result).to eq output + end + end + + it 'should apply user-defined config manifest (and ignore user-defined ranges)' do + manifest = <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-cilium + namespace: kube-system + spec: + valuesContent: |- + kubeProxyReplacement: strict + k8sServiceHost: "192.168.150.86" + k8sServicePort: 6443 + cni: + chainingMode: "none" + exclusive: false + bgpControlPlane: + enabled: true + --- + apiVersion: cilium.io/v2alpha1 + kind: CiliumLoadBalancerIPPool + metadata: + name: default + namespace: kube-system + spec: + cidrs: + - cidr: 192.168.150.128/25 + - cidr: 10.11.12.0/24 + MANIFEST + stub_const 'ONEAPP_K8S_CNI_PLUGIN', 
'cilium' + stub_const 'ONEAPP_K8S_CNI_CONFIG', Base64.encode64(manifest) + stub_const 'ONEAPP_K8S_CILIUM_RANGES', ['1.2.3.4/5', '6.7.8.9/10'] + output = YAML.load_stream manifest + Dir.mktmpdir do |temp_dir| + configure_cilium temp_dir + result = YAML.load_stream File.read "#{temp_dir}/rke2-cilium-config.yaml" + expect(result).to eq output + end + end + +end diff --git a/appliances/OneKE/appliance/cleaner.rb b/appliances/OneKE/appliance/cleaner.rb new file mode 100644 index 00000000..91eb747b --- /dev/null +++ b/appliances/OneKE/appliance/cleaner.rb @@ -0,0 +1,125 @@ +# frozen_string_literal: true + +require_relative 'config.rb' +require_relative 'helpers.rb' +require_relative 'onegate.rb' + +def install_cleaner(addon_dir = ONE_ADDON_DIR) + msg :info, 'Install One-Cleaner' + fetch_cleaner addon_dir +end + +def fetch_cleaner(addon_dir = ONE_ADDON_DIR, cron = '*/2 * * * *', ttl = 180) + msg :info, 'Generate One-Cleaner manifest' + + file "#{addon_dir}/one-cleaner.yaml", <<~MANIFEST, overwrite: true + apiVersion: batch/v1 + kind: CronJob + metadata: + name: one-cleaner + namespace: kube-system + spec: + schedule: "#{cron}" + jobTemplate: + spec: + ttlSecondsAfterFinished: #{ttl} + template: + spec: + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: CriticalAddonsOnly + operator: Equal + value: "true" + effect: NoExecute + nodeSelector: + node-role.kubernetes.io/master: "true" + containers: + - name: one-cleaner + image: ruby:2.7-alpine3.16 + imagePullPolicy: IfNotPresent + command: + - /usr/local/bin/ruby + - /etc/one-appliance/service.d/appliance/cleaner.rb + volumeMounts: + - name: kubectl + mountPath: /var/lib/rancher/rke2/bin/kubectl + - name: kubeconfig + mountPath: /etc/rancher/rke2/rke2.yaml + - name: context + mountPath: /var/run/one-context/one_env + - name: onegate + mountPath: /usr/bin/onegate + - name: onegaterb + mountPath: /usr/bin/onegate.rb + - name: appliance + mountPath: /etc/one-appliance/service.d/appliance/ + volumes: + - name: kubectl + hostPath: + path: /var/lib/rancher/rke2/bin/kubectl + type: File + - name: kubeconfig + hostPath: + path: /etc/rancher/rke2/rke2.yaml + type: File + - name: context + hostPath: + path: /var/run/one-context/one_env + type: File + - name: onegate + hostPath: + path: /usr/bin/onegate + type: File + - name: onegaterb + hostPath: + path: /usr/bin/onegate.rb + type: File + - name: appliance + hostPath: + path: /etc/one-appliance/service.d/appliance/ + type: Directory + restartPolicy: Never + MANIFEST +end + +def detect_invalid_nodes + kubernetes_nodes = kubectl_get_nodes.dig 'items' + if kubernetes_nodes.nil? || kubernetes_nodes.empty? + msg :error, 'No Kubernetes nodes found' + exit 1 + end + + onegate_vms = all_vms_show + if onegate_vms.nil? || onegate_vms.empty? + msg :error, 'No Onegate VMs found' + exit 1 + end + + kubernetes_node_names = kubernetes_nodes + .map { |item| item.dig 'metadata', 'name' } + .reject(&:nil?) + .select { |item| item.start_with? 'oneke-ip-' } + + onegate_node_names = onegate_vms + .map { |item| item.dig 'VM', 'USER_TEMPLATE', 'ONEGATE_K8S_NODE_NAME' } + .reject(&:nil?) + .select { |item| item.start_with? 'oneke-ip-' } + + kubernetes_node_names - onegate_node_names +end + +if caller.empty? + # The ruby / alpine container does not have bash pre-installed, + # but busybox / ash seems to be somewhat compatible, at least usable.. + # It cannot be a simple symlink, because busybox is a multi-call binary.. 
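+    # Busybox dispatches on argv[0] and ships no "bash" applet, so a symlink
+    # named /bin/bash would not work; the wrapper below simply re-execs
+    # /bin/ash with the original arguments.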
+ file '/bin/bash', <<~SCRIPT, mode: 'u=rwx,go=rx', overwrite: false + #!/bin/ash + exec /bin/ash "$@" + SCRIPT + + detect_invalid_nodes.each do |name| + puts kubectl "delete node '#{name}'" + end +end diff --git a/appliances/OneKE/appliance/cleaner_spec.rb b/appliances/OneKE/appliance/cleaner_spec.rb new file mode 100644 index 00000000..bd61eb47 --- /dev/null +++ b/appliances/OneKE/appliance/cleaner_spec.rb @@ -0,0 +1,482 @@ +# frozen_string_literal: true + +require 'json' +require 'rspec' + +require_relative 'cleaner.rb' + +RSpec.describe 'detect_invalid_nodes' do + it 'should return list of invalid nodes (to be removed)' do + allow(self).to receive(:kubectl_get_nodes).and_return JSON.parse <<~'JSON' + { + "apiVersion": "v1", + "items": [ + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "flannel.alpha.coreos.com/backend-data": "{\"VtepMAC\":\"6e:c7:7a:19:fb:7f\"}", + "flannel.alpha.coreos.com/backend-type": "vxlan", + "flannel.alpha.coreos.com/kube-subnet-manager": "true", + "flannel.alpha.coreos.com/public-ip": "172.20.0.100", + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + "projectcalico.org/IPv4Address": "172.20.0.100/24", + "projectcalico.org/IPv4IPIPTunnelAddr": "10.244.0.1", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2022-03-15T09:06:29Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "oneke-ip-172-20-0-100", + "kubernetes.io/os": "linux", + "node-role.kubernetes.io/control-plane": "", + "node-role.kubernetes.io/master": "", + "node.kubernetes.io/exclude-from-external-load-balancers": "" + }, + "name": "oneke-ip-172-20-0-100", + "resourceVersion": "17537", + "uid": "e198b625-8c3b-40c5-b41b-acd994a73be3" + }, + "spec": { + "podCIDR": "10.244.0.0/24", + "podCIDRs": [ + "10.244.0.0/24" + ], + "taints": [ + { + "effect": "NoSchedule", + "key": "node-role.kubernetes.io/master" + } + ] + }, + "status": { + "addresses": [ + { + "address": "172.20.0.100", + "type": "InternalIP" + }, + { + "address": "oneke-ip-172-20-0-100", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "2", + "ephemeral-storage": "18566299208", + "hugepages-2Mi": "0", + "memory": "1939544Ki", + "pods": "110" + }, + "capacity": { + "cpu": "2", + "ephemeral-storage": "20145724Ki", + "hugepages-2Mi": "0", + "memory": "2041944Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2022-03-15T09:07:04Z", + "lastTransitionTime": "2022-03-15T09:07:04Z", + "message": "Flannel is running on this node", + "reason": "FlannelIsUp", + "status": "False", + "type": "NetworkUnavailable" + }, + { + "lastHeartbeatTime": "2022-03-15T11:09:59Z", + "lastTransitionTime": "2022-03-15T09:06:22Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:09:59Z", + "lastTransitionTime": "2022-03-15T09:06:22Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:09:59Z", + "lastTransitionTime": "2022-03-15T09:06:22Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:09:59Z", + 
"lastTransitionTime": "2022-03-15T09:07:02Z", + "message": "kubelet is posting ready status. AppArmor enabled", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [], + "nodeInfo": { + "architecture": "amd64", + "bootID": "612377df-f413-43ae-91d9-b9ab75d2661a", + "containerRuntimeVersion": "docker://20.10.13", + "kernelVersion": "5.4.0-1058-kvm", + "kubeProxyVersion": "v1.21.10", + "kubeletVersion": "v1.21.10", + "machineID": "2f2741fd3cb14ef4b6560ae805e1756c", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.4 LTS", + "systemUUID": "2f2741fd-3cb1-4ef4-b656-0ae805e1756c" + } + } + }, + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "csi.volume.kubernetes.io/nodeid": "{\"driver.longhorn.io\":\"oneke-ip-172-20-0-101\"}", + "flannel.alpha.coreos.com/backend-data": "{\"VtepMAC\":\"fa:f6:f4:57:8f:2e\"}", + "flannel.alpha.coreos.com/backend-type": "vxlan", + "flannel.alpha.coreos.com/kube-subnet-manager": "true", + "flannel.alpha.coreos.com/public-ip": "172.20.0.101", + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + "projectcalico.org/IPv4Address": "172.20.0.101/24", + "projectcalico.org/IPv4IPIPTunnelAddr": "10.244.1.1", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2022-03-15T09:08:14Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "oneke-ip-172-20-0-101", + "kubernetes.io/os": "linux" + }, + "name": "oneke-ip-172-20-0-101", + "resourceVersion": "17722", + "uid": "dc33eae6-73c2-4a91-90c7-990c2fa5cc11" + }, + "spec": { + "podCIDR": "10.244.1.0/24", + "podCIDRs": [ + "10.244.1.0/24" + ] + }, + "status": { + "addresses": [ + { + "address": "172.20.0.101", + "type": "InternalIP" + }, + { + "address": "oneke-ip-172-20-0-101", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "2", + "ephemeral-storage": "18566299208", + "hugepages-2Mi": "0", + "memory": "1939544Ki", + "pods": "110" + }, + "capacity": { + "cpu": "2", + "ephemeral-storage": "20145724Ki", + "hugepages-2Mi": "0", + "memory": "2041944Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2022-03-15T09:08:25Z", + "lastTransitionTime": "2022-03-15T09:08:25Z", + "message": "Flannel is running on this node", + "reason": "FlannelIsUp", + "status": "False", + "type": "NetworkUnavailable" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:22Z", + "lastTransitionTime": "2022-03-15T09:08:14Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:22Z", + "lastTransitionTime": "2022-03-15T09:08:14Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:22Z", + "lastTransitionTime": "2022-03-15T09:08:14Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:22Z", + "lastTransitionTime": "2022-03-15T09:08:25Z", + "message": "kubelet is posting ready status. 
AppArmor enabled", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [], + "nodeInfo": { + "architecture": "amd64", + "bootID": "b2b7b410-bc29-4a6d-b4a6-fdbf7328b6cb", + "containerRuntimeVersion": "docker://20.10.13", + "kernelVersion": "5.4.0-1058-kvm", + "kubeProxyVersion": "v1.21.10", + "kubeletVersion": "v1.21.10", + "machineID": "1f5851ae52914927a1cf4c86427e0a36", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.4 LTS", + "systemUUID": "1f5851ae-5291-4927-a1cf-4c86427e0a36" + } + } + }, + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "csi.volume.kubernetes.io/nodeid": "{\"driver.longhorn.io\":\"oneke-ip-172-20-0-102\"}", + "flannel.alpha.coreos.com/backend-data": "{\"VtepMAC\":\"1a:f1:ed:df:19:cd\"}", + "flannel.alpha.coreos.com/backend-type": "vxlan", + "flannel.alpha.coreos.com/kube-subnet-manager": "true", + "flannel.alpha.coreos.com/public-ip": "172.20.0.102", + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + "projectcalico.org/IPv4Address": "172.20.0.102/24", + "projectcalico.org/IPv4IPIPTunnelAddr": "10.244.2.1", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2022-03-15T09:08:28Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "oneke-ip-172-20-0-102", + "kubernetes.io/os": "linux", + "node.longhorn.io/create-default-disk": "true" + }, + "name": "oneke-ip-172-20-0-102", + "resourceVersion": "17746", + "uid": "cb5c7412-0ec8-47a6-9caa-5fd8bd720684" + }, + "spec": { + "podCIDR": "10.244.2.0/24", + "podCIDRs": [ + "10.244.2.0/24" + ], + "taints": [ + { + "effect": "NoSchedule", + "key": "node.longhorn.io/create-default-disk", + "value": "true" + } + ] + }, + "status": { + "addresses": [ + { + "address": "172.20.0.102", + "type": "InternalIP" + }, + { + "address": "oneke-ip-172-20-0-102", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "2", + "ephemeral-storage": "18566299208", + "hugepages-2Mi": "0", + "memory": "1939544Ki", + "pods": "110" + }, + "capacity": { + "cpu": "2", + "ephemeral-storage": "20145724Ki", + "hugepages-2Mi": "0", + "memory": "2041944Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2022-03-15T09:08:39Z", + "lastTransitionTime": "2022-03-15T09:08:39Z", + "message": "Flannel is running on this node", + "reason": "FlannelIsUp", + "status": "False", + "type": "NetworkUnavailable" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:32Z", + "lastTransitionTime": "2022-03-15T09:08:28Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:32Z", + "lastTransitionTime": "2022-03-15T09:08:28Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:32Z", + "lastTransitionTime": "2022-03-15T09:08:28Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:32Z", + "lastTransitionTime": "2022-03-15T09:08:38Z", + "message": "kubelet is posting ready status. 
AppArmor enabled", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [], + "nodeInfo": { + "architecture": "amd64", + "bootID": "0df98c4d-163e-4468-b299-7d8fdb34a172", + "containerRuntimeVersion": "docker://20.10.13", + "kernelVersion": "5.4.0-1058-kvm", + "kubeProxyVersion": "v1.21.10", + "kubeletVersion": "v1.21.10", + "machineID": "69820ee32d094fdbbb065b80643a06dc", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.4 LTS", + "systemUUID": "69820ee3-2d09-4fdb-bb06-5b80643a06dc" + } + } + } + ], + "kind": "List", + "metadata": { + "resourceVersion": "", + "selfLink": "" + } + } + JSON + allow(self).to receive(:all_vms_show).and_return JSON.parse <<~JSON + [ + { + "VM": { + "NAME": "master_0_(service_21)", + "ID": "49", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_ADDRESS,ONEAPP_K8S_TOKEN,ONEAPP_K8S_HASH,ONEAPP_K8S_NODENAME,ONEAPP_K8S_PORT,ONEAPP_K8S_TAINTED_MASTER,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_ADMIN_USERNAME,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_NODE_NAME": "oneke-ip-172-20-0-100", + "ONEGATE_K8S_HASH": "09a9ed140fec2fa1a2281a3125952d6f2951b67a67534647b0a606ae2d478f60", + "ONEGATE_K8S_MASTER": "172.20.0.100", + "ONEGATE_K8S_TOKEN": "sg7711.p19vy0eqxefc0lqz", + "READY": "YES", + "ROLE_NAME": "master", + "SERVICE_ID": "21", + "USER_INPUTS": { + "ONEAPP_K8S_ADDRESS": "O|text|Master node address", + "ONEAPP_K8S_HASH": "O|text|Secret hash (to join node into the cluster)", + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_NODENAME": "O|text|Master node name", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)", + "ONEAPP_K8S_TOKEN": "O|password|Secret token (to join node into the cluster)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.100", + "MAC": "02:00:ac:14:00:64", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + }, + { + "VM": { + "NAME": "storage_0_(service_21)", + "ID": "51", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_ADDRESS,ONEAPP_K8S_TOKEN,ONEAPP_K8S_HASH,ONEAPP_K8S_NODENAME,ONEAPP_K8S_PORT,ONEAPP_K8S_TAINTED_MASTER,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_ADMIN_USERNAME,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_NODE_NAME": "oneke-ip-172-20-0-102", + "READY": "YES", + "ROLE_NAME": "storage", + "SERVICE_ID": "21", + "USER_INPUTS": { + "ONEAPP_K8S_ADDRESS": "O|text|Master node address", + "ONEAPP_K8S_HASH": "O|text|Secret hash (to join node into the cluster)", + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_NODENAME": "O|text|Master node name", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)", + "ONEAPP_K8S_TOKEN": "O|password|Secret token (to join node into the cluster)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.102", + "MAC": "02:00:ac:14:00:66", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + ] + JSON + expect(detect_invalid_nodes).to eq ['oneke-ip-172-20-0-101'] + end +end diff --git a/appliances/OneKE/appliance/config.rb 
b/appliances/OneKE/appliance/config.rb new file mode 100644 index 00000000..74ccc499 --- /dev/null +++ b/appliances/OneKE/appliance/config.rb @@ -0,0 +1,69 @@ +# frozen_string_literal: true + +def env(name, default) + value = ENV.fetch name.to_s, '' + value = value.empty? ? default : value + value = %w[YES 1].include?(value.upcase) if default.instance_of?(String) && %w[YES NO].include?(default.upcase) + value +end + +ONE_SERVICE_VERSION = env :ONE_SERVICE_VERSION, '1.27' +ONE_SERVICE_AIRGAPPED = env :ONE_SERVICE_AIRGAPPED, 'YES' +ONE_SERVICE_SETUP_DIR = env :ONE_SERVICE_SETUP_DIR, '/opt/one-appliance' + +ONE_SERVICE_RKE2_RELEASE = env :ONE_SERVICE_RKE2_RELEASE, "#{ONE_SERVICE_VERSION}.2" +ONE_SERVICE_RKE2_VERSION = env :ONE_SERVICE_RKE2_VERSION, "v#{ONE_SERVICE_RKE2_RELEASE}+rke2r1" +ONE_SERVICE_HELM_VERSION = env :ONE_SERVICE_HELM_VERSION, '3.11.3' + +ONEAPP_K8S_MULTUS_ENABLED = env :ONEAPP_K8S_MULTUS_ENABLED, 'NO' +ONEAPP_K8S_MULTUS_CONFIG = env :ONEAPP_K8S_MULTUS_CONFIG, nil + +ONEAPP_K8S_CNI_PLUGIN = env :ONEAPP_K8S_CNI_PLUGIN, 'cilium' +ONEAPP_K8S_CNI_CONFIG = env :ONEAPP_K8S_CNI_CONFIG, nil +ONEAPP_K8S_CILIUM_RANGES = ENV.select { |key, _| key.start_with? 'ONEAPP_K8S_CILIUM_RANGE' }.values.freeze + +ONEAPP_K8S_LONGHORN_CHART_VERSION = env :ONEAPP_K8S_LONGHORN_CHART_VERSION, '1.4.1' +ONEAPP_K8S_LONGHORN_ENABLED = env :ONEAPP_K8S_LONGHORN_ENABLED, 'NO' + +ONEAPP_K8S_METALLB_CHART_VERSION = env :ONEAPP_K8S_METALLB_CHART_VERSION, '0.13.9' +ONEAPP_K8S_METALLB_ENABLED = env :ONEAPP_K8S_METALLB_ENABLED, 'NO' +ONEAPP_K8S_METALLB_CONFIG = env :ONEAPP_K8S_METALLB_CONFIG, nil +ONEAPP_K8S_METALLB_RANGES = ENV.select { |key, _| key.start_with? 'ONEAPP_K8S_METALLB_RANGE' }.values.freeze + +ONEAPP_K8S_TRAEFIK_CHART_VERSION = env :ONEAPP_K8S_TRAEFIK_CHART_VERSION, '23.0.0' +ONEAPP_K8S_TRAEFIK_ENABLED = env :ONEAPP_K8S_TRAEFIK_ENABLED, 'NO' + +ONEAPP_VROUTER_ETH0_VIP0 = env :ONEAPP_VROUTER_ETH0_VIP0, nil +ONEAPP_VROUTER_ETH1_VIP0 = env :ONEAPP_VROUTER_ETH1_VIP0, nil +ONEAPP_VNF_HAPROXY_LB2_PORT = env :ONEAPP_VNF_HAPROXY_LB2_PORT, '443' +ONEAPP_VNF_HAPROXY_LB3_PORT = env :ONEAPP_VNF_HAPROXY_LB3_PORT, '80' + +ONEAPP_K8S_EXTRA_SANS = env :ONEAPP_K8S_EXTRA_SANS, 'localhost,127.0.0.1' + +ONEAPP_STORAGE_DEVICE = env :ONEAPP_STORAGE_DEVICE, nil # for example '/dev/vdb' +ONEAPP_STORAGE_FILESYSTEM = env :ONEAPP_STORAGE_FILESYSTEM, 'xfs' +ONEAPP_STORAGE_MOUNTPOINT = env :ONEAPP_STORAGE_MOUNTPOINT, '/var/lib/longhorn' + +ONE_ADDON_DIR = env :ONE_ADDON_DIR, "#{ONE_SERVICE_SETUP_DIR}/addons" +ONE_AIRGAP_DIR = env :ONE_AIRGAP_DIR, "#{ONE_SERVICE_SETUP_DIR}/airgap" + +K8S_MANIFEST_DIR = env :K8S_MANIFEST_DIR, '/var/lib/rancher/rke2/server/manifests' +K8S_IMAGE_DIR = env :K8S_IMAGE_DIR, '/var/lib/rancher/rke2/agent/images' + +K8S_SUPERVISOR_EP = "#{ONEAPP_VROUTER_ETH0_VIP0}:9345" +K8S_CONTROL_PLANE_EP = "#{ONEAPP_VROUTER_ETH0_VIP0}:6443" + +RETRIES = 86 +SECONDS = 5 + +PACKAGES = %w[ + curl + gawk + gnupg + lsb-release + openssl + skopeo + zstd +].freeze + +KUBECONFIG = %w[/etc/rancher/rke2/rke2.yaml].freeze diff --git a/appliances/OneKE/appliance/helpers.rb b/appliances/OneKE/appliance/helpers.rb new file mode 100644 index 00000000..c263bcf7 --- /dev/null +++ b/appliances/OneKE/appliance/helpers.rb @@ -0,0 +1,242 @@ +# frozen_string_literal: true + +require 'base64' +require 'date' +require 'fileutils' +require 'json' +require 'ipaddr' +require 'logger' +require 'net/http' +require 'open3' +require 'socket' +require 'tempfile' +require 'uri' +require 'yaml' + +LOGGER_STDOUT = Logger.new(STDOUT) +LOGGER_STDERR = 
Logger.new(STDERR) + +LOGGERS = { + info: LOGGER_STDOUT.method(:info), + debug: LOGGER_STDERR.method(:debug), + warn: LOGGER_STDERR.method(:warn), + error: LOGGER_STDERR.method(:error) +}.freeze + +def msg(level, string) + LOGGERS[level].call string +end + +def slurp(path) + Base64.encode64(File.read(path)).lines.map(&:strip).join +end + +def file(path, content, mode: 'u=rw,go=r', overwrite: false) + return if !overwrite && File.exist?(path) + + FileUtils.mkdir_p File.dirname path + + File.write path, content + + FileUtils.chmod mode, path +end + +def bash(script, chomp: false, terminate: true) + command = 'exec /bin/bash --login -s' + + stdin_data = <<~SCRIPT + export DEBIAN_FRONTEND=noninteractive + set -o errexit -o nounset -o pipefail + set -x + #{script} + SCRIPT + + stdout, stderr, status = Open3.capture3 command, stdin_data: stdin_data + unless status.exitstatus.zero? + error_message = "#{status.exitstatus}: #{stderr}" + msg :error, error_message + + raise error_message unless terminate + + exit status.exitstatus + end + + chomp ? stdout.chomp : stdout +end + +def kubectl(arguments, namespace: nil, kubeconfig: KUBECONFIG) + kubeconfig = [kubeconfig].flatten.find { |path| !path.nil? && File.exist?(path) } + command = ['/var/lib/rancher/rke2/bin/kubectl'] + command << "--kubeconfig #{kubeconfig}" unless kubeconfig.nil? + command << "--namespace #{namespace}" unless namespace.nil? + command << arguments + bash command.flatten.join(' ') +end + +def kubectl_get_nodes + JSON.parse kubectl 'get nodes -o json' +end + +def kubectl_get_configmap(name, namespace: 'kube-system', kubeconfig: KUBECONFIG) + YAML.safe_load kubectl <<~COMMAND, namespace: namespace, kubeconfig: kubeconfig + get configmap/#{name} -o yaml + COMMAND +end + +def kubectl_apply_f(path, kubeconfig: KUBECONFIG) + kubectl "apply -f #{path}", kubeconfig: kubeconfig +end + +def kubectl_apply(manifest, kubeconfig: KUBECONFIG) + Tempfile.create do |temp_file| + temp_file.write manifest + temp_file.close + return kubectl_apply_f temp_file.path, kubeconfig: kubeconfig + end +end + +def pull_docker_images(images, dest_dir) + images.each do |image| + name, tag = image.split ':' + + path = "#{dest_dir}/#{name.gsub '/', '_'}.tar.zst" + + next if File.exist? path + + msg :info, "Pull #{name}:#{tag} -> #{path}" + + FileUtils.mkdir_p dest_dir + + bash <<~SCRIPT + skopeo copy 'docker://#{name}:#{tag}' 'docker-archive:/dev/fd/2:#{name}:#{tag}' 3>&1 1>&2 2>&3 \ + | zstd --ultra -o '#{path}' + SCRIPT + end +end + +def extract_images(manifest) + images = [] + + YAML.load_stream manifest do |document| + next if document.nil? + + if document.dig('kind') == 'HelmChart' + # NOTE: Aassuming all one-*.yaml manifests contain chartContent: and valuesContent: fields. 
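+            # Sketch of what follows: the embedded chart (chartContent) and its
+            # values (valuesContent) are written to a temporary directory,
+            # rendered with `helm template`, and the rendered output is fed back
+            # through extract_images; non-HelmChart documents instead contribute
+            # the images of their containers/initContainers (including the
+            # CronJob jobTemplate paths).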
+ chart_tgz = Base64.decode64 document.dig('spec', 'chartContent') + values_yml = document.dig('spec', 'valuesContent') + + Dir.mktmpdir do |temp_dir| + file "#{temp_dir}/chart.tgz", chart_tgz, overwrite: true + file "#{temp_dir}/values.yml", values_yml, overwrite: true + images += extract_images bash("helm template '#{temp_dir}/chart.tgz' -f '#{temp_dir}/values.yml'") + end + + next + end + + containers = [] + containers += document.dig('spec', 'template', 'spec', 'containers') || [] + containers += document.dig('spec', 'template', 'spec', 'initContainers') || [] + containers += document.dig('spec', 'jobTemplate', 'spec', 'template', 'spec', 'containers') || [] + containers += document.dig('spec', 'jobTemplate', 'spec', 'template', 'spec', 'initContainers') || [] + + images += containers.map { |container| container.dig 'image' } + end + + images.uniq +end + +def pull_addon_images(addon_dir = ONE_ADDON_DIR, airgap_dir = ONE_AIRGAP_DIR) + Dir["#{addon_dir}/one-*.yaml"].each do |path| + manifest = File.read path + pull_docker_images extract_images(manifest), "#{airgap_dir}/#{File.basename(path, '.yaml')}/" + end +end + +# NOTE: This must be executed *before* starting rke2-server/agent services, +# otherwise images will not be loaded into containerd. +def include_images(name, airgap_dir = ONE_AIRGAP_DIR, image_dir = K8S_IMAGE_DIR) + FileUtils.mkdir_p image_dir + Dir["#{airgap_dir}/#{name}/*.tar.zst"].each do |path| + msg :info, "Include airgapped image: #{File.basename(path)}" + symlink = "#{image_dir}/#{File.basename(path)}" + File.symlink path, symlink unless File.exist? symlink + end +end + +# NOTE: This must be executed *after* starting rke2-server/agent services. +def include_manifests(name, addon_dir = ONE_ADDON_DIR, manifest_dir = K8S_MANIFEST_DIR) + FileUtils.mkdir_p manifest_dir + Dir["#{addon_dir}/#{name}*.yaml"].each do |path| + msg :info, "Include addon: #{File.basename(path)}" + symlink = "#{manifest_dir}/#{File.basename(path)}" + File.symlink path, symlink unless File.exist? symlink + end +end + +def with_policy_rc_d_disabled + file '/usr/sbin/policy-rc.d', 'exit 101', mode: 'a+x', overwrite: true + yield +ensure + file '/usr/sbin/policy-rc.d', 'exit 0', mode: 'a+x', overwrite: true +end + +def install_packages(packages, hold: false) + msg :info, "Install APT packages: #{packages.join(',')}" + + puts bash <<~SCRIPT + apt-get install -y #{packages.join(' ')} + SCRIPT + + bash <<~SCRIPT if hold + apt-mark hold #{packages.join(' ')} + SCRIPT +end + +def ipv4?(string) + string.is_a?(String) && IPAddr.new(string) ? true : false +rescue IPAddr::InvalidAddressError + false +end + +def integer?(string) + Integer(string) ? true : false +rescue ArgumentError + false +end + +alias port? integer? + +def tcp_port_open?(ipv4, port, seconds = 5) + # > If a block is given, the block is called with the socket. + # > The value of the block is returned. + # > The socket is closed when this method returns. 
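+    # The empty block means only connectability within `seconds` is tested and
+    # the socket is closed right away; connection-level failures are mapped to
+    # false below. Illustrative use (as in wait_for_any_master, which probes
+    # candidate masters on the API port):
+    #
+    #     tcp_port_open? '192.168.150.86', 6443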
+ Socket.tcp(ipv4, port, connect_timeout: seconds) {} + true +rescue Errno::ECONNREFUSED, Errno::ECONNRESET, Errno::EHOSTUNREACH, Errno::ETIMEDOUT + false +end + +def http_status_200?(url, + cacert = '/var/lib/rancher/rke2/server/tls/server-ca.crt', + cert = '/var/lib/rancher/rke2/server/tls/client-admin.crt', + key = '/var/lib/rancher/rke2/server/tls/client-admin.key', + seconds = 5) + + url = URI.parse url + http = Net::HTTP.new url.host, url.port + + if url.scheme == 'https' + http.use_ssl = true + http.verify_mode = OpenSSL::SSL::VERIFY_PEER + http.ca_file = cacert + http.cert = OpenSSL::X509::Certificate.new File.read cert + http.key = OpenSSL::PKey::EC.new File.read key + end + + http.open_timeout = seconds + + http.get(url.path).code == '200' +rescue Errno::ECONNREFUSED, Errno::ECONNRESET, Errno::EHOSTUNREACH, Errno::ETIMEDOUT, Net::OpenTimeout + false +end diff --git a/appliances/OneKE/appliance/helpers_spec.rb b/appliances/OneKE/appliance/helpers_spec.rb new file mode 100644 index 00000000..51e1f22f --- /dev/null +++ b/appliances/OneKE/appliance/helpers_spec.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +require 'rspec' + +require_relative 'helpers.rb' + +RSpec.describe 'bash' do + it 'should raise' do + allow(self).to receive(:exit).and_return nil + expect { bash 'false', terminate: false }.to raise_error(RuntimeError) + end + it 'should not raise' do + allow(self).to receive(:exit).and_return nil + expect { bash 'false' }.not_to raise_error + end +end + +RSpec.describe 'ipv4?' do + it 'should evaluate to true' do + ipv4s = %w[ + 10.11.12.13 + 10.11.12.13/24 + 10.11.12.13/32 + 192.168.144.120 + ] + ipv4s.each do |item| + expect(ipv4?(item)).to be true + end + end + it 'should evaluate to false' do + ipv4s = %w[ + 10.11.12 + 10.11.12. 
+ 10.11.12.256 + asd.168.144.120 + 192.168.144.96-192.168.144.120 + ] + ipv4s.each do |item| + expect(ipv4?(item)).to be false + end + end +end diff --git a/appliances/OneKE/appliance/kubernetes.rb b/appliances/OneKE/appliance/kubernetes.rb new file mode 100644 index 00000000..93fc0e7d --- /dev/null +++ b/appliances/OneKE/appliance/kubernetes.rb @@ -0,0 +1,312 @@ +# frozen_string_literal: true + +require 'securerandom' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' +require_relative 'onegate.rb' +require_relative 'vnf.rb' + +def install_kubernetes(airgap_dir = ONE_AIRGAP_DIR) + rke2_release_url = "https://github.com/rancher/rke2/releases/download/#{ONE_SERVICE_RKE2_VERSION}" + + msg :info, "Install RKE2 runtime: #{ONE_SERVICE_RKE2_VERSION}" + bash <<~SCRIPT + curl -fsSL '#{rke2_release_url}/rke2.linux-amd64.tar.gz' | tar -xz -f- -C /usr/local/ + SCRIPT + + msg :info, "Download RKE2 airgapped image archives: #{ONE_SERVICE_RKE2_VERSION}" + bash <<~SCRIPT + curl -fsSL '#{rke2_release_url}/rke2-images-core.linux-amd64.tar.zst' \ + | install -o 0 -g 0 -m u=rw,go=r -D /dev/fd/0 '#{airgap_dir}/rke2-images-core/rke2-images-core.linux-amd64.tar.zst' + SCRIPT + bash <<~SCRIPT + curl -fsSL '#{rke2_release_url}/rke2-images-multus.linux-amd64.tar.zst' \ + | install -o 0 -g 0 -m u=rw,go=r -D /dev/fd/0 '#{airgap_dir}/rke2-images-multus/rke2-images-multus.linux-amd64.tar.zst' + SCRIPT + bash <<~SCRIPT + curl -fsSL '#{rke2_release_url}/rke2-images-cilium.linux-amd64.tar.zst' \ + | install -o 0 -g 0 -m u=rw,go=r -D /dev/fd/0 '#{airgap_dir}/rke2-images-cilium/rke2-images-cilium.linux-amd64.tar.zst' + SCRIPT + + msg :info, "Install Helm binary: #{ONE_SERVICE_HELM_VERSION}" + bash <<~SCRIPT + curl -fsSL 'https://get.helm.sh/helm-v#{ONE_SERVICE_HELM_VERSION}-linux-amd64.tar.gz' \ + | tar -xOz -f- linux-amd64/helm \ + | install -o 0 -g 0 -m u=rwx,go=rx -D /dev/fd/0 /usr/local/bin/helm + SCRIPT + + msg :info, 'Link kubectl binary' + File.symlink '/var/lib/rancher/rke2/bin/kubectl', '/usr/local/bin/kubectl' + + msg :info, 'Link crictl binary' + File.symlink '/var/lib/rancher/rke2/bin/crictl', '/usr/local/bin/crictl' + + msg :info, 'Set BASH profile defaults' + file '/etc/profile.d/98-oneke.sh', <<~PROFILE, mode: 'u=rw,go=r' + export KUBECONFIG=/etc/rancher/rke2/rke2.yaml + export CRI_CONFIG_FILE=/var/lib/rancher/rke2/agent/etc/crictl.yaml + PROFILE +end + +def configure_kubernetes(configure_cni: ->{}, configure_addons: ->{}) + node = detect_node + + if node[:init_master] + configure_cni.() + init_master + configure_addons.() + elsif node[:join_master] + configure_cni.() + join_master node[:token] + configure_addons.() + elsif node[:join_worker] + join_worker node[:token] + elsif node[:join_storage] + join_storage node[:token] + end + + node +end + +def wait_for_any_master(retries = RETRIES, seconds = SECONDS) + msg :info, 'Wait for any master to be available' + + retries.times.to_a.reverse.each do |retry_num| + msg :debug, "wait_for_any_master / #{retry_num}" + + master_vms_show.each do |master_vm| + ready = master_vm.dig 'VM', 'USER_TEMPLATE', 'READY' + next unless ready == 'YES' + + # Not using the CP/EP here, only a direct validation without going through VNF/LB. + # The first responding master wins. + + k8s_master = master_vm.dig 'VM', 'USER_TEMPLATE', 'ONEGATE_K8S_MASTER' + next if k8s_master.nil? + + return master_vm if tcp_port_open? k8s_master, 6443 + end + + if retry_num.zero? 
+ msg :error, 'No usable master found' + exit 1 + end + + sleep seconds + end +end + +def wait_for_control_plane(endpoint = K8S_CONTROL_PLANE_EP, retries = RETRIES, seconds = SECONDS) + msg :info, 'Wait for Control-Plane to be ready' + + retries.times.to_a.reverse.each do |retry_num| + msg :debug, "wait_for_control_plane / #{retry_num}" + + break if http_status_200? "https://#{endpoint}/readyz" + + if retry_num.zero? + msg :error, 'Control-Plane not ready' + exit 1 + end + + sleep seconds + end +end + +def wait_for_kubelets(retries = RETRIES, seconds = SECONDS) + msg :info, 'Wait for available Kubelets to be ready' + + retries.times.to_a.reverse.each do |retry_num| + msg :debug, "wait_for_kubelets / #{retry_num}" + + conditions = kubectl_get_nodes['items'].map do |node| + node.dig('status', 'conditions').find do |item| + item['reason'] == 'KubeletReady' && item['type'] == 'Ready' && item['status'] == 'True' + end + end + + break if conditions.all? + + if retry_num.zero? + msg :error, 'Kubelets not ready' + exit 1 + end + + sleep seconds + end +end + +def init_master + ipv4 = external_ipv4s.first + name = "oneke-ip-#{ipv4.gsub '.', '-'}" + + msg :info, "Set local hostname: #{name}" + bash "hostnamectl set-hostname #{name}" + + onegate_vm_update ["ONEGATE_K8S_NODE_NAME=#{name}"] + + msg :info, 'Set this master to be the first VNF backend' + vnf_supervisor_setup_backend + vnf_control_plane_setup_backend + + cni = [] + cni << 'multus' if ONEAPP_K8S_MULTUS_ENABLED + cni << ONEAPP_K8S_CNI_PLUGIN + + server_config = { + 'node-name' => name, + 'token' => SecureRandom.uuid, + 'tls-san' => ONEAPP_K8S_EXTRA_SANS.split(',').map(&:strip).append(ONEAPP_VROUTER_ETH0_VIP0), + 'node-taint' => ['CriticalAddonsOnly=true:NoExecute'], + 'disable' => ['rke2-ingress-nginx'], + 'cni' => cni, + 'disable-kube-proxy' => ONEAPP_K8S_CNI_PLUGIN == 'cilium' + } + + msg :info, 'Prepare initial rke2-server config' + file '/etc/rancher/rke2/config.yaml', YAML.dump(server_config), overwrite: false + + msg :info, "Initialize first master: #{name}" + bash 'systemctl enable rke2-server.service --now' + + server_config.merge!({ + 'server' => "https://#{K8S_SUPERVISOR_EP}", + 'token' => File.read('/var/lib/rancher/rke2/server/node-token', encoding: 'utf-8').strip + }) + + msg :info, 'Normalize rke2-server config' + file '/etc/rancher/rke2/config.yaml', YAML.dump(server_config), overwrite: true + + onegate_vm_update ["ONEGATE_K8S_MASTER=#{ipv4}", "ONEGATE_K8S_TOKEN=#{server_config['token']}"] + + wait_for_control_plane + wait_for_kubelets +end + +def join_master(token, retries = RETRIES, seconds = SECONDS) + ipv4 = external_ipv4s.first + name = "oneke-ip-#{ipv4.gsub '.', '-'}" + + msg :info, "Set local hostname: #{name}" + bash "hostnamectl set-hostname #{name}" + + onegate_vm_update ["ONEGATE_K8S_NODE_NAME=#{name}"] + + cni = [] + cni << 'multus' if ONEAPP_K8S_MULTUS_ENABLED + cni << ONEAPP_K8S_CNI_PLUGIN + + server_config = { + 'node-name' => name, + 'server' => "https://#{K8S_SUPERVISOR_EP}", + 'token' => token, + 'tls-san' => ONEAPP_K8S_EXTRA_SANS.split(',').map(&:strip).append(ONEAPP_VROUTER_ETH0_VIP0), + 'node-taint' => ['CriticalAddonsOnly=true:NoExecute'], + 'disable' => ['rke2-ingress-nginx'], + 'cni' => cni, + 'disable-kube-proxy' => ONEAPP_K8S_CNI_PLUGIN == 'cilium' + } + + msg :info, 'Prepare rke2-server config' + file '/etc/rancher/rke2/config.yaml', YAML.dump(server_config), overwrite: true + + # The rke2-server systemd service restarts automatically and eventually joins. 
+    # If it really cannot join, we want to reflect this in OneFlow.
+    retries.times.to_a.reverse.each do |retry_num|
+        if retry_num.zero?
+            msg :error, 'Unable to join Control-Plane'
+            exit 1
+        end
+        begin
+            msg :info, "Join master: #{name} / #{retry_num}"
+            bash 'systemctl enable rke2-server.service --now', terminate: false
+        rescue RuntimeError
+            sleep seconds
+            next
+        end
+        break
+    end
+
+    onegate_vm_update ["ONEGATE_K8S_MASTER=#{ipv4}", "ONEGATE_K8S_TOKEN=#{server_config['token']}"]
+
+    msg :info, 'Set this master to be a VNF backend'
+    vnf_supervisor_setup_backend
+    vnf_control_plane_setup_backend
+
+    wait_for_control_plane
+    wait_for_kubelets
+end
+
+def join_worker(token)
+    ipv4 = external_ipv4s.first
+    name = "oneke-ip-#{ipv4.gsub '.', '-'}"
+
+    msg :info, "Set local hostname: #{name}"
+    bash "hostnamectl set-hostname #{name}"
+
+    onegate_vm_update ["ONEGATE_K8S_NODE_NAME=#{name}"]
+
+    agent_config = {
+        'node-name' => name,
+        'server' => "https://#{K8S_SUPERVISOR_EP}",
+        'token' => token
+    }
+
+    msg :info, 'Prepare rke2-agent config'
+    file '/etc/rancher/rke2/config.yaml', YAML.dump(agent_config), overwrite: true
+
+    msg :info, "Join worker: #{name}"
+    bash 'systemctl enable rke2-agent.service --now'
+end
+
+def join_storage(token)
+    ipv4 = external_ipv4s.first
+    name = "oneke-ip-#{ipv4.gsub '.', '-'}"
+
+    msg :info, "Set local hostname: #{name}"
+    bash "hostnamectl set-hostname #{name}"
+
+    onegate_vm_update ["ONEGATE_K8S_NODE_NAME=#{name}"]
+
+    agent_config = {
+        'node-name' => name,
+        'server' => "https://#{K8S_SUPERVISOR_EP}",
+        'token' => token,
+        'node-taint' => ['node.longhorn.io/create-default-disk=true:NoSchedule'],
+        'node-label' => ['node.longhorn.io/create-default-disk=true']
+    }
+
+    msg :info, 'Prepare rke2-agent config'
+    file '/etc/rancher/rke2/config.yaml', YAML.dump(agent_config), overwrite: true
+
+    msg :info, "Join storage: #{name}"
+    bash 'systemctl enable rke2-agent.service --now'
+end
+
+def detect_node
+    current_vm = onegate_vm_show
+    current_vmid = current_vm.dig 'VM', 'ID'
+    current_role = current_vm.dig 'VM', 'USER_TEMPLATE', 'ROLE_NAME'
+
+    master_vm = master_vm_show
+    master_vmid = master_vm.dig 'VM', 'ID'
+
+    master_vm = wait_for_any_master if current_vmid != master_vmid
+
+    token = master_vm.dig 'VM', 'USER_TEMPLATE', 'ONEGATE_K8S_TOKEN'
+
+    ready_to_join = !token.nil?
+
+    results = {
+        init_master: current_role == 'master' && current_vmid == master_vmid && !ready_to_join,
+        join_master: current_role == 'master' && current_vmid != master_vmid && ready_to_join,
+        join_worker: current_role == 'worker' && current_vmid != master_vmid && ready_to_join,
+        join_storage: current_role == 'storage' && current_vmid != master_vmid && ready_to_join,
+        token: token
+    }
+
+    msg :debug, "detect_node / #{results}"
+    results
+end
diff --git a/appliances/OneKE/appliance/longhorn.rb b/appliances/OneKE/appliance/longhorn.rb
new file mode 100644
index 00000000..68d26dbf
--- /dev/null
+++ b/appliances/OneKE/appliance/longhorn.rb
@@ -0,0 +1,143 @@
+# frozen_string_literal: true
+
+require 'base64'
+require 'tmpdir'
+
+require_relative 'config.rb'
+require_relative 'helpers.rb'
+
+def install_longhorn(addon_dir = ONE_ADDON_DIR)
+    msg :info, 'Install Longhorn'
+    fetch_longhorn addon_dir
+    pull_longhorn_images if ONE_SERVICE_AIRGAPPED
+end
+
+def prepare_dedicated_storage
+    msg :info, 'Setup dedicated storage and populate /etc/fstab'
+
+    # Previously executed in a start script, moved here because the start script was causing race condition issues.
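+    # Outline of the shell steps below (the script is idempotent): it exits
+    # silently when no extra disk is attached or the mountpoint is already in
+    # use, creates the filesystem only when the device carries none, and then
+    # records the device UUID in /etc/fstab so the Longhorn data directory
+    # survives reboots.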
+ puts bash <<~SCRIPT + # Silently abort when there is no disk attached. + if ! lsblk -n -o name '#{ONEAPP_STORAGE_DEVICE}'; then exit 0; fi + + # Make sure mountpoint exists. + install -o 0 -g 0 -m u=rwx,go=rx -d '#{ONEAPP_STORAGE_MOUNTPOINT}' + + # Silently abort when mountpoint is taken. + if mountpoint '#{ONEAPP_STORAGE_MOUNTPOINT}'; then exit 0; fi + + # Create new filesystem if the device does not contain any. + if ! blkid -s TYPE -o value '#{ONEAPP_STORAGE_DEVICE}'; then + 'mkfs.#{ONEAPP_STORAGE_FILESYSTEM}' '#{ONEAPP_STORAGE_DEVICE}' + fi + + export STORAGE_UUID=$(blkid -s UUID -o value '#{ONEAPP_STORAGE_DEVICE}') + # Assert that the detected UUID is not empty. + if [[ -z "$STORAGE_UUID" ]]; then exit 1; fi + + # Update fstab if necessary. + gawk -i inplace -f- /etc/fstab <s" + valuesContent: | + defaultSettings: + createDefaultDiskLabeledNodes: true + taintToleration: "node.longhorn.io/create-default-disk=true:NoSchedule" + longhornManager: + tolerations: + - key: node.longhorn.io/create-default-disk + value: "true" + operator: Equal + effect: NoSchedule + longhornDriver: + tolerations: + - key: node.longhorn.io/create-default-disk + value: "true" + operator: Equal + effect: NoSchedule + nodeSelector: + node.longhorn.io/create-default-disk: "true" + longhornUI: + tolerations: + - key: node.longhorn.io/create-default-disk + value: "true" + operator: Equal + effect: NoSchedule + nodeSelector: + node.longhorn.io/create-default-disk: "true" + --- + # Please note, changing default storage class is discouraged: https://longhorn.io/docs/1.3.0/best-practices/#storageclass + kind: StorageClass + apiVersion: storage.k8s.io/v1 + metadata: + name: longhorn-retain + provisioner: driver.longhorn.io + allowVolumeExpansion: true + reclaimPolicy: Retain + volumeBindingMode: Immediate + parameters: + fsType: "ext4" + numberOfReplicas: "3" + staleReplicaTimeout: "2880" + fromBackup: "" + MANIFEST + + msg :info, "Generate Longhorn addon manifest: #{ONEAPP_K8S_LONGHORN_CHART_VERSION}" + Dir.mktmpdir do |temp_dir| + bash <<~SCRIPT + cd #{temp_dir}/ + helm pull longhorn/longhorn --version '#{ONEAPP_K8S_LONGHORN_CHART_VERSION}' + SCRIPT + + manifest %= { chart_b64: slurp("#{temp_dir}/longhorn-#{ONEAPP_K8S_LONGHORN_CHART_VERSION}.tgz") } + + file "#{addon_dir}/one-longhorn.yaml", manifest, overwrite: true + end +end + +def pull_longhorn_images(airgap_dir = ONE_AIRGAP_DIR) + # https://longhorn.io/docs/1.3.0/advanced-resources/deploy/airgap/ + + msg :info, "Pull Longhorn images: #{ONEAPP_K8S_LONGHORN_CHART_VERSION}" + + images = bash <<~SCRIPT, chomp: true + curl -fsSL 'https://raw.githubusercontent.com/longhorn/longhorn/v#{ONEAPP_K8S_LONGHORN_CHART_VERSION}/deploy/longhorn-images.txt' + SCRIPT + + images = images.lines + .map(&:strip) + .reject(&:empty?) + + pull_docker_images images, "#{airgap_dir}/one-longhorn/" +end diff --git a/appliances/OneKE/appliance/metallb.rb b/appliances/OneKE/appliance/metallb.rb new file mode 100644 index 00000000..d290d40b --- /dev/null +++ b/appliances/OneKE/appliance/metallb.rb @@ -0,0 +1,109 @@ +# frozen_string_literal: true + +require 'base64' +require 'tmpdir' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def install_metallb(addon_dir = ONE_ADDON_DIR) + msg :info, 'Install MetalLB' + fetch_metallb addon_dir +end + +def configure_metallb(addon_dir = ONE_ADDON_DIR) + msg :info, 'Configure MetalLB' + + if ONEAPP_K8S_METALLB_CONFIG.nil? 
+ msg :info, 'Create MetalLB CRD config from user-provided ranges' + + documents = YAML.load_stream <<~MANIFEST + --- + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: default + namespace: metallb-system + spec: + addresses: [] + --- + apiVersion: metallb.io/v1beta1 + kind: L2Advertisement + metadata: + name: default + namespace: metallb-system + spec: + ipAddressPools: [default] + MANIFEST + + unless ONEAPP_K8S_METALLB_RANGES.empty? + ip_address_pool = documents.find do |doc| + doc['kind'] == 'IPAddressPool' && doc.dig('metadata', 'name') == 'default' + end + ip_address_pool['spec']['addresses'] = extract_metallb_ranges.map { |item| item.join('-') } + end + else + msg :info, 'Use MetalLB user-provided config' + documents = YAML.load_stream Base64.decode64 ONEAPP_K8S_METALLB_CONFIG + end + + msg :info, 'Generate MetalLB config manifest' + manifest = YAML.dump_stream *documents + file "#{addon_dir}/one-metallb-config.yaml", manifest, overwrite: true +end + +def fetch_metallb(addon_dir = ONE_ADDON_DIR) + bash <<~SCRIPT + helm repo add metallb https://metallb.github.io/metallb + helm repo update + SCRIPT + + manifest = <<~MANIFEST + --- + apiVersion: v1 + kind: Namespace + metadata: + name: metallb-system + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: one-metallb + namespace: kube-system + spec: + bootstrap: false + targetNamespace: metallb-system + chartContent: "%s" + valuesContent: | + controller: + image: + pullPolicy: IfNotPresent + speaker: + image: + pullPolicy: IfNotPresent + MANIFEST + + msg :info, "Generate MetalLB addon manifest: #{ONEAPP_K8S_METALLB_CHART_VERSION}" + Dir.mktmpdir do |temp_dir| + bash <<~SCRIPT + cd #{temp_dir}/ + helm pull metallb/metallb --version '#{ONEAPP_K8S_METALLB_CHART_VERSION}' + SCRIPT + + manifest %= { chart_b64: slurp("#{temp_dir}/metallb-#{ONEAPP_K8S_METALLB_CHART_VERSION}.tgz") } + + file "#{addon_dir}/one-metallb.yaml", manifest, overwrite: true + end +end + +def extract_metallb_ranges(ranges = ONEAPP_K8S_METALLB_RANGES) + ranges.compact + .map(&:strip) + .reject(&:empty?) + .map { |item| item.split('-').map(&:strip) } + .reject { |item| item.length > 2 } + .map { |item| item.length == 1 ? [item.first, item.first] : item } + .reject { |item| item.map(&:empty?).any? 
} + .reject { |item| !(ipv4?(item.first) && ipv4?(item.last)) } +end diff --git a/appliances/OneKE/appliance/metallb_spec.rb b/appliances/OneKE/appliance/metallb_spec.rb new file mode 100644 index 00000000..89e4b353 --- /dev/null +++ b/appliances/OneKE/appliance/metallb_spec.rb @@ -0,0 +1,136 @@ +# frozen_string_literal: true + +require 'base64' +require 'rspec' +require 'tmpdir' +require 'yaml' + +require_relative 'metallb.rb' + +RSpec.describe 'extract_metallb_ranges' do + it 'should extract and return all ranges (positive)' do + input = [ + '10.11.12.13', + '10.11.12.13-', + '10.11.12.13-10.11.12.31', + ' 10.11.12.13-10.11.12.31', + '10.11.12.13-10.11.12.31 ', + '10.11.12.13 -10.11.12.31', + '10.11.12.13- 10.11.12.31' + ] + output = [ + %w[10.11.12.13 10.11.12.13], + %w[10.11.12.13 10.11.12.13], + %w[10.11.12.13 10.11.12.31], + %w[10.11.12.13 10.11.12.31], + %w[10.11.12.13 10.11.12.31], + %w[10.11.12.13 10.11.12.31], + %w[10.11.12.13 10.11.12.31] + ] + expect(extract_metallb_ranges(input)).to eq output + end + + it 'should extract and return no ranges (negative)' do + input = [ + '', + '-10.11.12.13', + 'asd.11.12.13-10.11.12.31', + '10.11.12.13-10.11.12.31-10.11.12.123' + ] + output = [] + expect(extract_metallb_ranges(input)).to eq output + end +end + +RSpec.describe 'configure_metallb' do + it 'should apply user-defined ranges (empty)' do + stub_const 'ONEAPP_K8S_METALLB_CONFIG', nil + stub_const 'ONEAPP_K8S_METALLB_RANGES', [] + output = YAML.load_stream <<~MANIFEST + --- + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: default + namespace: metallb-system + spec: + addresses: [] + --- + apiVersion: metallb.io/v1beta1 + kind: L2Advertisement + metadata: + name: default + namespace: metallb-system + spec: + ipAddressPools: + - default + MANIFEST + Dir.mktmpdir do |temp_dir| + configure_metallb temp_dir + result = YAML.load_stream File.read "#{temp_dir}/one-metallb-config.yaml" + expect(result).to eq output + end + end + + it 'should apply user-defined ranges' do + stub_const 'ONEAPP_K8S_METALLB_CONFIG', nil + stub_const 'ONEAPP_K8S_METALLB_RANGES', ['192.168.150.87-192.168.150.88'] + output = YAML.load_stream <<~MANIFEST + --- + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: default + namespace: metallb-system + spec: + addresses: + - 192.168.150.87-192.168.150.88 + --- + apiVersion: metallb.io/v1beta1 + kind: L2Advertisement + metadata: + name: default + namespace: metallb-system + spec: + ipAddressPools: + - default + MANIFEST + Dir.mktmpdir do |temp_dir| + configure_metallb temp_dir + result = YAML.load_stream File.read "#{temp_dir}/one-metallb-config.yaml" + expect(result).to eq output + end + end + + it 'should apply user-defined config manifest (and ignore user-defined ranges)' do + manifest = <<~MANIFEST + --- + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: default + namespace: metallb-system + spec: + addresses: + - 192.168.150.87-192.168.150.88 + --- + apiVersion: metallb.io/v1beta1 + kind: L2Advertisement + metadata: + name: default + namespace: metallb-system + spec: + ipAddressPools: + - default + MANIFEST + stub_const 'ONEAPP_K8S_METALLB_CONFIG', Base64.encode64(manifest) + stub_const 'ONEAPP_K8S_METALLB_RANGES', ['1.2.3.4-1.2.3.4'] + output = YAML.load_stream manifest + Dir.mktmpdir do |temp_dir| + configure_metallb temp_dir + result = YAML.load_stream File.read "#{temp_dir}/one-metallb-config.yaml" + expect(result).to eq output + end + end + +end diff --git 
a/appliances/OneKE/appliance/multus.rb b/appliances/OneKE/appliance/multus.rb new file mode 100644 index 00000000..ee040e1b --- /dev/null +++ b/appliances/OneKE/appliance/multus.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +require 'base64' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def configure_multus(manifest_dir = K8S_MANIFEST_DIR) + msg :info, 'Configure Multus' + + if ONEAPP_K8S_MULTUS_CONFIG.nil? + msg :info, 'Create Multus CRD config from user-provided ranges' + + documents = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-multus + namespace: kube-system + spec: + valuesContent: |- + rke2-whereabouts: + enabled: true + MANIFEST + else + msg :info, 'Use Multus user-provided config' + documents = YAML.load_stream Base64.decode64 ONEAPP_K8S_MULTUS_CONFIG + end + + msg :info, 'Generate Multus config manifest' + manifest = YAML.dump_stream *documents + file "#{manifest_dir}/rke2-multus-config.yaml", manifest, overwrite: true +end diff --git a/appliances/OneKE/appliance/onegate.rb b/appliances/OneKE/appliance/onegate.rb new file mode 100644 index 00000000..f1c7e511 --- /dev/null +++ b/appliances/OneKE/appliance/onegate.rb @@ -0,0 +1,133 @@ +# frozen_string_literal: true + +require 'json' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def onegate_service_show + JSON.parse bash 'onegate --json service show' +end + +def onegate_vm_show(vmid = '') + JSON.parse bash "onegate --json vm show #{vmid}" +end + +def onegate_vm_update(data, vmid = '') + bash "onegate vm update #{vmid} --data \"#{data.join('\n')}\"" +end + +def ip_addr_show(ifname = '') + JSON.parse bash "ip --json addr show #{ifname}" +end + +def all_vms_show + onegate_service = onegate_service_show + + roles = onegate_service.dig 'SERVICE', 'roles' + if roles.empty? + msg :error, 'No roles found in Onegate' + exit 1 + end + + vmids = roles.each_with_object [] do |role, acc| + nodes = role.dig 'nodes' + next if nodes.nil? + + nodes.each do |node| + acc << node.dig('vm_info', 'VM', 'ID') + end + end + + vmids.each_with_object [] do |vmid, acc| + acc << onegate_vm_show(vmid) + end +end + +def master_vms_show + onegate_service = onegate_service_show + + roles = onegate_service.dig 'SERVICE', 'roles' + if roles.empty? + msg :error, 'No roles found in Onegate' + exit 1 + end + + role = roles.find { |item| item['name'] == 'master' } + if role.nil? + msg :error, 'No master role found in Onegate' + exit 1 + end + + nodes = role.dig 'nodes' + if nodes.empty? + msg :error, 'No master nodes found in Onegate' + exit 1 + end + + vmids = nodes.map { |node| node.dig 'vm_info', 'VM', 'ID' } + + vmids.each_with_object [] do |vmid, acc| + acc << onegate_vm_show(vmid) + end +end + +def master_vm_show + onegate_service = onegate_service_show + + roles = onegate_service.dig 'SERVICE', 'roles' + if roles.empty? + msg :error, 'No roles found in Onegate' + exit 1 + end + + role = roles.find { |item| item['name'] == 'master' } + if role.nil? + msg :error, 'No master role found in Onegate' + exit 1 + end + + nodes = role.dig 'nodes' + if nodes.empty? + msg :error, 'No nodes found in Onegate' + exit 1 + end + + vmid = nodes.first.dig 'vm_info', 'VM', 'ID' + + onegate_vm_show vmid +end + +def external_ipv4s + onegate_vm = onegate_vm_show + + nics = onegate_vm.dig 'VM', 'TEMPLATE', 'NIC' + if nics.empty? + msg :error, 'No nics found in Onegate' + exit 1 + end + + ip_addr = ip_addr_show + if ip_addr.empty? 
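+    # How the matching below works (cf. the fixtures in onegate_spec.rb): a NIC
+    # reported by OneGate is treated as external only when some local interface
+    # from `ip --json addr` has the same MAC ('address') and carries that NIC's
+    # IPv4 in its addr_info, e.g. MAC 02:00:ac:14:00:64 with IP 172.20.0.100.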
+ msg :error, 'No local addresses found' + exit 1 + end + + ipv4s = nics.each_with_object [] do |nic, acc| + addr = ip_addr.find do |item| + next unless item['address'].downcase == nic['MAC'].downcase + + item['addr_info'].find do |info| + info['family'] == 'inet' && info['local'] == nic['IP'] + end + end + acc << nic['IP'] unless addr.nil? + end + + if ipv4s.empty? + msg :error, 'No IPv4 addresses found' + exit 1 + end + + ipv4s +end diff --git a/appliances/OneKE/appliance/onegate_spec.rb b/appliances/OneKE/appliance/onegate_spec.rb new file mode 100644 index 00000000..67aa0e7e --- /dev/null +++ b/appliances/OneKE/appliance/onegate_spec.rb @@ -0,0 +1,559 @@ +# frozen_string_literal: true + +require 'json' +require 'rspec' + +require_relative 'onegate.rb' + +RSpec.describe 'all_vms_show' do + before do + @svc = JSON.parse(<<~JSON) + { + "SERVICE": { + "name": "asd", + "id": "21", + "state": 2, + "roles": [ + { + "name": "master", + "cardinality": 1, + "state": 2, + "nodes": [ + { + "deploy_id": 49, + "running": null, + "vm_info": { + "VM": { + "ID": "49", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "master_0_(service_21)" + } + } + } + ] + }, + { + "name": "worker", + "cardinality": 1, + "state": 2, + "nodes": [ + { + "deploy_id": 50, + "running": null, + "vm_info": { + "VM": { + "ID": "50", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "worker_0_(service_21)" + } + } + } + ] + }, + { + "name": "storage", + "cardinality": 1, + "state": 2, + "nodes": [ + { + "deploy_id": 51, + "running": null, + "vm_info": { + "VM": { + "ID": "51", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "storage_0_(service_21)" + } + } + } + ] + } + ] + } + } + JSON + @vms = [] + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "master_0_(service_21)", + "ID": "49", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_ADDRESS,ONEAPP_K8S_TOKEN,ONEAPP_K8S_HASH,ONEAPP_K8S_NODENAME,ONEAPP_K8S_PORT,ONEAPP_K8S_TAINTED_MASTER,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_ADMIN_USERNAME,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_HASH": "09a9ed140fec2fa1a2281a3125952d6f2951b67a67534647b0a606ae2d478f60", + "ONEGATE_K8S_MASTER": "172.20.0.100", + "ONEGATE_K8S_TOKEN": "sg7711.p19vy0eqxefc0lqz", + "READY": "YES", + "ROLE_NAME": "master", + "SERVICE_ID": "21", + "USER_INPUTS": { + "ONEAPP_K8S_ADDRESS": "O|text|Master node address", + "ONEAPP_K8S_HASH": "O|text|Secret hash (to join node into the cluster)", + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_NODENAME": "O|text|Master node name", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)", + "ONEAPP_K8S_TOKEN": "O|password|Secret token (to join node into the cluster)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.100", + "MAC": "02:00:ac:14:00:64", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "worker_0_(service_21)", + "ID": "50", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": 
"ONEAPP_K8S_ADDRESS,ONEAPP_K8S_TOKEN,ONEAPP_K8S_HASH,ONEAPP_K8S_NODENAME,ONEAPP_K8S_PORT,ONEAPP_K8S_TAINTED_MASTER,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_ADMIN_USERNAME,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "READY": "YES", + "ROLE_NAME": "worker", + "SERVICE_ID": "21", + "USER_INPUTS": { + "ONEAPP_K8S_ADDRESS": "O|text|Master node address", + "ONEAPP_K8S_HASH": "O|text|Secret hash (to join node into the cluster)", + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_NODENAME": "O|text|Master node name", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)", + "ONEAPP_K8S_TOKEN": "O|password|Secret token (to join node into the cluster)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.101", + "MAC": "02:00:ac:14:00:65", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "storage_0_(service_21)", + "ID": "51", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_ADDRESS,ONEAPP_K8S_TOKEN,ONEAPP_K8S_HASH,ONEAPP_K8S_NODENAME,ONEAPP_K8S_PORT,ONEAPP_K8S_TAINTED_MASTER,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_ADMIN_USERNAME,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "READY": "YES", + "ROLE_NAME": "storage", + "SERVICE_ID": "21", + "USER_INPUTS": { + "ONEAPP_K8S_ADDRESS": "O|text|Master node address", + "ONEAPP_K8S_HASH": "O|text|Secret hash (to join node into the cluster)", + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_NODENAME": "O|text|Master node name", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)", + "ONEAPP_K8S_TOKEN": "O|password|Secret token (to join node into the cluster)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.102", + "MAC": "02:00:ac:14:00:66", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + end + it 'should return all vms belonging to svc' do + allow(self).to receive(:onegate_service_show).and_return(@svc) + allow(self).to receive(:onegate_vm_show).and_return(*@vms) + expect(all_vms_show.map { |item| item['VM']['TEMPLATE']['NIC'][0]['IP'] }).to eq ['172.20.0.100', '172.20.0.101', '172.20.0.102'] + end +end + +RSpec.describe 'master_vms_show' do + before do + @svc = JSON.parse(<<~JSON) + { + "SERVICE": { + "name": "asd", + "id": "4", + "state": 10, + "roles": [ + { + "name": "vnf", + "cardinality": 1, + "state": 2, + "nodes": [ + { + "deploy_id": 12, + "running": null, + "vm_info": { + "VM": { + "ID": "12", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "vnf_0_(service_4)" + } + } + } + ] + }, + { + "name": "master", + "cardinality": 3, + "state": 10, + "nodes": [ + { + "deploy_id": 13, + "running": null, + "vm_info": { + "VM": { + "ID": "13", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "master_0_(service_4)" + } + } + }, + { + "deploy_id": 14, + "running": null, + "vm_info": { + "VM": { + "ID": "14", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "master_1_(service_4)" + } + } + }, + { + "deploy_id": 15, + "running": null, + "vm_info": { + "VM": { + "ID": "15", + "UID": 
"0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "master_2_(service_4)" + } + } + } + ] + }, + { + "name": "worker", + "cardinality": 0, + "state": 2, + "nodes": [] + }, + { + "name": "storage", + "cardinality": 0, + "state": 2, + "nodes": [] + } + ] + } + } + JSON + @vms = [] + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "master_0_(service_4)", + "ID": "13", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_PORT,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_HASH": "c74201821cb4878b6896d3284f825be738cb11dbc2c5153e88c84da0b3d3ab04", + "ONEGATE_K8S_KEY": "146ecb3e9d8bce9f584f55b234bd2700d2a7747177fb8fd60f42a161a48e7c07", + "ONEGATE_K8S_MASTER": "10.2.11.201", + "ONEGATE_K8S_NODE_NAME": "oneke-ip-10-2-11-201", + "ONEGATE_K8S_TOKEN": "ifv2c4.h8d88lzjlyl5mkod", + "ONEGATE_LB0_IP": "10.2.11.86", + "ONEGATE_LB0_PORT": "6443", + "ONEGATE_LB0_PROTOCOL": "TCP", + "ONEGATE_LB0_SERVER_HOST": "10.2.11.201", + "ONEGATE_LB0_SERVER_PORT": "6443", + "READY": "YES", + "ROLE_NAME": "master", + "SERVICE_ID": "4", + "USER_INPUTS": { + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "10.2.11.201", + "MAC": "02:00:0a:02:0b:c9", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "master_1_(service_4)", + "ID": "14", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_PORT,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_NODE_NAME": "oneke-ip-10-2-11-202", + "ONEGATE_LB0_IP": "10.2.11.86", + "ONEGATE_LB0_PORT": "6443", + "ONEGATE_LB0_PROTOCOL": "TCP", + "ONEGATE_LB0_SERVER_HOST": "10.2.11.202", + "ONEGATE_LB0_SERVER_PORT": "6443", + "READY": "YES", + "ROLE_NAME": "master", + "SERVICE_ID": "4", + "USER_INPUTS": { + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "10.2.11.202", + "MAC": "02:00:0a:02:0b:ca", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "master_2_(service_4)", + "ID": "15", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_PORT,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_NODE_NAME": "oneke-ip-10-2-11-203", + "ONEGATE_LB0_IP": "10.2.11.86", + "ONEGATE_LB0_PORT": "6443", + "ONEGATE_LB0_PROTOCOL": "TCP", + "ONEGATE_LB0_SERVER_HOST": "10.2.11.203", + "ONEGATE_LB0_SERVER_PORT": "6443", + "READY": "YES", + "ROLE_NAME": "master", + "SERVICE_ID": "4", + "USER_INPUTS": { + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)" + } + }, + "TEMPLATE": { + "NIC": [ + { + 
"IP": "10.2.11.203", + "MAC": "02:00:0a:02:0b:cb", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + end + it 'should return all vms belonging to the master role' do + allow(self).to receive(:onegate_service_show).and_return(@svc) + allow(self).to receive(:onegate_vm_show).and_return(*@vms) + expect(master_vms_show.map { |item| item['VM']['TEMPLATE']['NIC'][0]['IP'] }).to eq ['10.2.11.201', '10.2.11.202', '10.2.11.203'] + end +end + +RSpec.describe 'external_ipv4s' do + it 'should return list of ipv4 addresses' do + allow(self).to receive(:onegate_vm_show).and_return JSON.parse <<~JSON + { + "VM": { + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.100", + "MAC": "02:00:ac:14:00:64", + "NAME": "_NIC0", + "NETWORK": "service" + } + ] + } + } + } + JSON + allow(self).to receive(:ip_addr_show).and_return JSON.parse <<~JSON + [ + { + "ifindex": 1, + "ifname": "lo", + "flags": [ + "LOOPBACK", + "UP", + "LOWER_UP" + ], + "mtu": 65536, + "qdisc": "noqueue", + "operstate": "UNKNOWN", + "group": "default", + "txqlen": 1000, + "link_type": "loopback", + "address": "00:00:00:00:00:00", + "broadcast": "00:00:00:00:00:00", + "addr_info": [ + { + "family": "inet", + "local": "127.0.0.1", + "prefixlen": 8, + "scope": "host", + "label": "lo", + "valid_life_time": 4294967295, + "preferred_life_time": 4294967295 + }, + { + "family": "inet6", + "local": "::1", + "prefixlen": 128, + "scope": "host", + "valid_life_time": 4294967295, + "preferred_life_time": 4294967295 + } + ] + }, + { + "ifindex": 2, + "ifname": "eth0", + "flags": [ + "BROADCAST", + "MULTICAST", + "UP", + "LOWER_UP" + ], + "mtu": 1500, + "qdisc": "pfifo_fast", + "operstate": "UP", + "group": "default", + "txqlen": 1000, + "link_type": "ether", + "address": "02:00:ac:14:00:64", + "broadcast": "ff:ff:ff:ff:ff:ff", + "addr_info": [ + { + "family": "inet", + "local": "172.20.0.100", + "prefixlen": 24, + "broadcast": "172.20.0.255", + "scope": "global", + "label": "eth0", + "valid_life_time": 4294967295, + "preferred_life_time": 4294967295 + }, + { + "family": "inet6", + "local": "fe80::acff:fe14:64", + "prefixlen": 64, + "scope": "link", + "valid_life_time": 4294967295, + "preferred_life_time": 4294967295 + } + ] + }, + { + "ifindex": 3, + "ifname": "docker0", + "flags": [ + "NO-CARRIER", + "BROADCAST", + "MULTICAST", + "UP" + ], + "mtu": 1500, + "qdisc": "noqueue", + "operstate": "DOWN", + "group": "default", + "link_type": "ether", + "address": "02:42:04:21:6f:5d", + "broadcast": "ff:ff:ff:ff:ff:ff", + "addr_info": [ + { + "family": "inet", + "local": "172.17.0.1", + "prefixlen": 16, + "broadcast": "172.17.255.255", + "scope": "global", + "label": "docker0", + "valid_life_time": 4294967295, + "preferred_life_time": 4294967295 + } + ] + } + ] + JSON + expect(external_ipv4s).to eq ['172.20.0.100'] + end +end diff --git a/appliances/OneKE/appliance/traefik.rb b/appliances/OneKE/appliance/traefik.rb new file mode 100644 index 00000000..7b52c373 --- /dev/null +++ b/appliances/OneKE/appliance/traefik.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +require 'base64' +require 'tmpdir' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def install_traefik(addon_dir = ONE_ADDON_DIR) + msg :info, 'Install Traefik' + fetch_traefik addon_dir +end + +def fetch_traefik(addon_dir = ONE_ADDON_DIR) + bash <<~SCRIPT + helm repo add traefik https://helm.traefik.io/traefik + helm repo update + SCRIPT + + manifest = <<~MANIFEST + --- + apiVersion: v1 + kind: Namespace + metadata: + name: traefik-system + --- 
+ apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: one-traefik + namespace: kube-system + spec: + bootstrap: false + targetNamespace: traefik-system + chartContent: "%s" + valuesContent: | + deployment: + replicas: 2 + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app.kubernetes.io/name: traefik + service: + type: NodePort + ports: + web: + nodePort: 32080 + websecure: + nodePort: 32443 + MANIFEST + + msg :info, "Generate Traefik addon manifest: #{ONEAPP_K8S_TRAEFIK_CHART_VERSION}" + Dir.mktmpdir do |temp_dir| + bash <<~SCRIPT + cd #{temp_dir}/ + helm pull traefik/traefik --version '#{ONEAPP_K8S_TRAEFIK_CHART_VERSION}' + SCRIPT + + manifest %= { chart_b64: slurp("#{temp_dir}/traefik-#{ONEAPP_K8S_TRAEFIK_CHART_VERSION}.tgz") } + + file "#{addon_dir}/one-traefik.yaml", manifest, overwrite: true + end +end diff --git a/appliances/OneKE/appliance/vnf.rb b/appliances/OneKE/appliance/vnf.rb new file mode 100644 index 00000000..37af622a --- /dev/null +++ b/appliances/OneKE/appliance/vnf.rb @@ -0,0 +1,144 @@ +# frozen_string_literal: true + +require_relative 'config.rb' +require_relative 'helpers.rb' +require_relative 'onegate.rb' + +def configure_vnf(gw_ipv4 = ONEAPP_VROUTER_ETH1_VIP0) + gw_ok = !gw_ipv4.nil? && ipv4?(gw_ipv4) + + if gw_ok + msg :debug, 'Configure default gateway (temporarily)' + bash "ip route replace default via #{gw_ipv4} dev eth0" + end + + msg :info, 'Install the vnf-restore service' + + file '/etc/systemd/system/vnf-restore.service', <<~SERVICE + [Unit] + After=network.target + + [Service] + Type=oneshot + ExecStart=/bin/sh -ec '#{gw_ok ? "ip route replace default via #{gw_ipv4} dev eth0" : ':'}' + + [Install] + WantedBy=multi-user.target + SERVICE + + # Make sure vnf-restore is triggered everytime one-context-reconfigure.service runs + file '/etc/systemd/system/one-context-reconfigure.service.d/vnf-restore.conf', <<~SERVICE + [Service] + ExecStartPost=/usr/bin/systemctl restart vnf-restore.service + SERVICE + + msg :info, 'Enable and start the vnf-restore service' + bash <<~SCRIPT + systemctl daemon-reload + systemctl enable vnf-restore.service --now + SCRIPT +end + +def vnf_supervisor_setup_backend(index = 0, + lb_ipv4 = ONEAPP_VROUTER_ETH0_VIP0, + lb_port = 9345) + + lb_ok = !lb_ipv4.nil? && ipv4?(lb_ipv4) && port?(lb_port) + + unless lb_ok + msg :error, "Invalid IPv4/port for VNF/HAPROXY/#{index}, aborting.." + exit 1 + end + + ipv4 = external_ipv4s + .reject { |item| item == lb_ipv4 } + .first + + msg :info, "Register VNF/HAPROXY/#{index} backend in OneGate" + + onegate_vm_update [ + "ONEGATE_HAPROXY_LB#{index}_IP=#{lb_ipv4}", + "ONEGATE_HAPROXY_LB#{index}_PORT=#{lb_port}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_HOST=#{ipv4}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_PORT=#{lb_port}" + ] +end + +def vnf_control_plane_setup_backend(index = 1, + lb_ipv4 = ONEAPP_VROUTER_ETH0_VIP0, + lb_port = 6443) + + lb_ok = !lb_ipv4.nil? && ipv4?(lb_ipv4) && port?(lb_port) + + unless lb_ok + msg :error, "Invalid IPv4/port for VNF/HAPROXY/#{index}, aborting.." 
+ exit 1 + end + + ipv4 = external_ipv4s + .reject { |item| item == lb_ipv4 } + .first + + msg :info, "Register VNF/HAPROXY/#{index} backend in OneGate" + + onegate_vm_update [ + "ONEGATE_HAPROXY_LB#{index}_IP=#{lb_ipv4}", + "ONEGATE_HAPROXY_LB#{index}_PORT=#{lb_port}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_HOST=#{ipv4}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_PORT=#{lb_port}" + ] +end + +def vnf_ingress_setup_https_backend(index = 2, + lb_ipv4 = ONEAPP_VROUTER_ETH0_VIP0, + lb_port = ONEAPP_VNF_HAPROXY_LB2_PORT) + + lb_ok = !lb_ipv4.nil? && ipv4?(lb_ipv4) && port?(lb_port) + + unless lb_ok + msg :error, "Invalid IPv4/port for VNF/HAPROXY/#{index}, aborting.." + exit 1 + end + + ipv4 = external_ipv4s + .reject { |item| item == lb_ipv4 } + .first + + msg :info, "Register VNF/HAPROXY/#{index} backend in OneGate" + + server_port = lb_port.to_i + 32_000 + + onegate_vm_update [ + "ONEGATE_HAPROXY_LB#{index}_IP=#{lb_ipv4}", + "ONEGATE_HAPROXY_LB#{index}_PORT=#{lb_port}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_HOST=#{ipv4}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_PORT=#{server_port}" + ] +end + +def vnf_ingress_setup_http_backend(index = 3, + lb_ipv4 = ONEAPP_VROUTER_ETH0_VIP0, + lb_port = ONEAPP_VNF_HAPROXY_LB3_PORT) + + lb_ok = !lb_ipv4.nil? && ipv4?(lb_ipv4) && port?(lb_port) + + unless lb_ok + msg :error, "Invalid IPv4/port for VNF/HAPROXY/#{index}, aborting.." + exit 1 + end + + ipv4 = external_ipv4s + .reject { |item| item == lb_ipv4 } + .first + + msg :info, "Register VNF/HAPROXY/#{index} backend in OneGate" + + server_port = lb_port.to_i + 32_000 + + onegate_vm_update [ + "ONEGATE_HAPROXY_LB#{index}_IP=#{lb_ipv4}", + "ONEGATE_HAPROXY_LB#{index}_PORT=#{lb_port}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_HOST=#{ipv4}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_PORT=#{server_port}" + ] +end diff --git a/appliances/lib/common.sh b/appliances/lib/common.sh new file mode 100644 index 00000000..253aeb72 --- /dev/null +++ b/appliances/lib/common.sh @@ -0,0 +1,503 @@ +#!/usr/bin/env bash + +# ---------------------------------------------------------------------------- # +# Copyright 2018-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +# ---------------------------------------------------------------------------- # + + +# shellcheck disable=SC2086 +true + + +# args: +msg() +{ + msg_type="$1" + shift + + case "$msg_type" in + info) + printf "[%s] => " "$(date)" + echo 'INFO:' "$@" + ;; + debug) + printf "[%s] => " "$(date)" >&2 + echo 'DEBUG:' "$@" >&2 + ;; + warning) + printf "[%s] => " "$(date)" >&2 + echo 'WARNING [!]:' "$@" >&2 + ;; + error) + printf "[%s] => " "$(date)" >&2 + echo 'ERROR [!!]:' "$@" >&2 + return 1 + ;; + *) + printf "[%s] => " "$(date)" >&2 + echo 'UNKNOWN [?!]:' "$@" >&2 + return 2 + ;; + esac + return 0 +} + +# arg: +gen_password() +{ + pw_length="${1:-16}" + new_pw='' + + while true ; do + if command -v pwgen >/dev/null ; then + new_pw=$(pwgen -s "${pw_length}" 1) + break + elif command -v openssl >/dev/null ; then + new_pw="${new_pw}$(openssl rand -base64 ${pw_length} | tr -dc '[:alnum:]')" + else + new_pw="${new_pw}$(head /dev/urandom | tr -dc '[:alnum:]')" + fi + # shellcheck disable=SC2000 + [ "$(echo $new_pw | wc -c)" -ge "$pw_length" ] && break + done + + echo "$new_pw" | cut -c1-${pw_length} +} + +# arg: +is_ipv4_address() +{ + echo "$1" | grep '^[0-9.]*$' | awk ' + BEGIN { + FS = "."; + octet = 0; + } + { + for(i = 1; i <= NF; i++) + if (($i >= 0) && ($i <= 255)) + octet++; + } + END { + if (octet == 4) + exit 0; + else + exit 1; + }' +} + +get_local_ip() +{ + extif=$(ip r | awk '{if ($1 == "default") print $5;}') + local_ip=$(ip a show dev "$extif" | \ + awk '{if ($1 == "inet") print $2;}' | sed -e '/^127\./d' -e 's#/.*##') + + echo "${local_ip:-127.0.0.1}" +} + +# arg: +is_my_ip() +( + _ip="$1" + + _local_ips=$(ip a | \ + sed -n 's#^[[:space:]]*inet[[:space:]]\+\([^/[:space:]]\+\)[/[:space:]].*#\1#p') + + for _local_ip in ${_local_ips} ; do + if [ "$_ip" = "$_local_ip" ] ; then + return 0 + fi + done + + return 1 +) + +# returns an netmask in the old notation, eg.: 255.255.255.255 +# arg: +# +# NOTE: shamelessly copied from here: +# https://forums.gentoo.org/viewtopic-t-888736-start-0.html +cidr_to_mask () +( + # Number of args to shift, 255..255, first non-255 byte, zeroes + set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0 + [ $1 -gt 1 ] && shift $1 || shift + echo ${1-0}.${2-0}.${3-0}.${4-0} +) + +# Gets the network part of an IP +# arg: +get_network_ip() +( + awk -v ip="$1" -v mask="$2" 'END { + split(ip, ip_b, "."); split(mask, mask_b, "."); + for (i=1; i<=4; ++i) x = x "." and(ip_b[i], mask_b[i]); + sub(/^./, "", x); print x; }' +# +# NOTE: this originally never worked properly: +# https://gitlab.com/openconnect/vpnc-scripts/-/merge_requests/5 +# +# The fix is to first find the network address. +get_gw_ip() +( + _ip=$(echo "$1" | awk 'BEGIN{FS="/"}{print $1;}') + _mask=$(echo "$1" | awk 'BEGIN{FS="/"}{print $2;}') + + if echo "$_mask" | grep -q '^[0-9][0-9]*$' && [ "$_mask" -le 32 ] ; then + # ip had cidr prefix - we will find network ip + _mask=$(cidr_to_mask "$_mask") + _ip=$(get_network_ip "$_ip" "$_mask") + elif [ -n "$_mask" ] ; then + # netmask is garbage + return 1 + fi + + ip r g "$_ip" 2>/dev/null | awk ' + { + for(i = 1; i <= NF; i++) + { + if ($i == "src") + { + print $(i + 1); + exit 0; + } + } + } + ' +) + +# it will create a new hostname from an ip address, but only if the current one +# is just localhost and in that case it will also prints it on the stdout +# arg: [] +generate_hostname() +( + if [ "$(hostname -s)" = localhost ] ; then + if [ -n "$1" ] ; then + _new_hostname="$(echo $1 | tr -d '[:space:]' | tr '.' 
'-')" + else + _new_hostname="one-$(get_local_ip | tr '.' '-')" + fi + hostname "$_new_hostname" + hostname > /etc/hostname + hostname -s + fi +) + +# show default help based on the ONE_SERVICE_PARAMS +# service_help in appliance.sh may override this function +default_service_help() +{ + echo "USAGE: " + + for _command in 'help' 'install' 'configure' 'bootstrap'; do + echo " $(basename "$0") ${_command}" + + case "${_command}" in + help) echo ' Prints this help' ;; + install) echo ' Installs service' ;; + configure) echo ' Configures service via contextualization or defaults' ;; + bootstrap) echo ' Bootstraps service via contextualization' ;; + esac + + local _index=0 + while [ -n "${ONE_SERVICE_PARAMS[${_index}]}" ]; do + local _name="${ONE_SERVICE_PARAMS[${_index}]}" + local _type="${ONE_SERVICE_PARAMS[$((_index + 1))]}" + local _desc="${ONE_SERVICE_PARAMS[$((_index + 2))]}" + local _input="${ONE_SERVICE_PARAMS[$((_index + 3))]}" + _index=$((_index + 4)) + + if [ "${_command}" = "${_type}" ]; then + if [ -z "${_input}" ]; then + echo -n ' ' + else + echo -n ' * ' + fi + + printf "%-25s - %s\n" "${_name}" "${_desc}" + fi + done + + echo + done + + echo 'Note: (*) variables are provided to the user via USER_INPUTS' +} + +#TODO: more or less duplicate to common.sh/service_help() +params2md() +{ + local _command=$1 + + local _index=0 + local _count=0 + while [ -n "${ONE_SERVICE_PARAMS[${_index}]}" ]; do + local _name="${ONE_SERVICE_PARAMS[${_index}]}" + local _type="${ONE_SERVICE_PARAMS[$((_index + 1))]}" + local _desc="${ONE_SERVICE_PARAMS[$((_index + 2))]}" + local _input="${ONE_SERVICE_PARAMS[$((_index + 3))]}" + _index=$((_index + 4)) + + if [ "${_command}" = "${_type}" ] && [ -n "${_input}" ]; then + # shellcheck disable=SC2016 + printf '* `%s` - %s\n' "${_name}" "${_desc}" + _count=$((_count + 1)) + fi + done + + if [ "${_count}" -eq 0 ]; then + echo '* none' + fi +} + +create_one_service_metadata() +{ + # shellcheck disable=SC2001 + cat >"${ONE_SERVICE_METADATA}" < [] +# use in pipe with yum -y --showduplicates list +# yum version follows these rules: +# starting at the first colon (:) and up to the first hyphen (-) +# example: +# 3:18.09.1-3.el7 -> 18.09.1 +yum_pkg_filter() +{ + _pkg="$1" + _version="$2" + + awk -v pkg="$_pkg" '{if ($1 ~ "^" pkg) print $2;}' | \ + sed -e 's/^[^:]*://' -e 's/-.*//' | \ + if [ -n "$_version" ] ; then + # only the correct versions + awk -v version="$_version" ' + { + if ($1 ~ "^" version) + print $1; + }' + else + cat + fi +} + +# arg: +is_in_list() +{ + _word="$1" + shift + + # shellcheck disable=SC2048 + for i in $* ; do + if [ "$_word" = "$i" ] ; then + return 0 + fi + done + + return 1 +} + +# arg: +is_true() +{ + _value=$(eval echo "\$${1}" | tr '[:upper:]' '[:lower:]') + case "$_value" in + 1|true|yes|y) + return 0 + ;; + esac + + return 1 +} + +# arg: [context file] +save_context_base64() +{ + _context_file="${1:-$ONE_SERVICE_CONTEXTFILE}" + + msg info "Store current context in the file: ${_context_file}" + _context_vars=$(set | sed -n 's/^\(ONEAPP_[^=[:space:]]\+\)=.*/\1/p') + + if ! [ -f "$_context_file" ] ; then + echo '{}' > "$_context_file" + fi + + _old_context=$(cat "$_context_file") + + { + echo "$_old_context" + + for _context_var in ${_context_vars} ; do + _value=$(eval "printf \"\$${_context_var}\"") + echo '{}' | jq -S --arg val "$_value" ". 
+ {\"${_context_var}\": \$val | @base64}" + done + } | jq -sS add > "$_context_file" +} + +# arg: [context file] +save_context() +{ + _context_file="${1:-$ONE_SERVICE_CONTEXTFILE}" + + msg info "Store current context in the file: ${_context_file}" + + "${ONE_SERVICE_SETUP_DIR}/bin/context-helper" \ + update "${_context_file}" +} + +# arg: [context file] +load_context() +{ + _context_file="${1:-$ONE_SERVICE_CONTEXTFILE}" + + if ! [ -f "${_context_file}" ] ; then + msg info "Create empty context file: ${_context_file}" + echo '{}' > "${_context_file}" + return 0 + fi + + msg info "Load last context from the file: ${_context_file}" + + _vars=$("${ONE_SERVICE_SETUP_DIR}/bin/context-helper" \ + -t names load "${_context_file}") + + for i in $_vars ; do + _value=$(get_value_from_context_file "${i}" "${_context_file}") + eval "${i}=\$(echo \"\$_value\")" + # shellcheck disable=SC2163 + export "${i}" + done +} + +# arg: [context file] +get_changed_context_vars() +{ + _context_file="${1:-$ONE_SERVICE_CONTEXTFILE}" + + if ! [ -f "${_context_file}" ] ; then + return 0 + fi + + "${ONE_SERVICE_SETUP_DIR}/bin/context-helper" \ + -t names compare "${_context_file}" +} + +# arg: [] +get_value_from_context_file() +{ + _var="$1" + _context_file="${2:-$ONE_SERVICE_CONTEXTFILE}" + + [ -z "${_var}" ] && return 1 + + jq -cr ".${_var}" < "${_context_file}" +} + +# arg: +is_context_variable_updated() +{ + _varname="$1" + + for v in $(get_changed_context_vars "${ONE_SERVICE_CONTEXTFILE}") ; do + if [ "$v" = "${_varname}" ] ; then + # variable has been updated + return 0 + fi + done + + return 1 +} + +# arg: +check_pidfile() +{ + _pidfile="$1" + + if [ -f "${_pidfile}" ] ; then + _pid=$(grep '^[0-9]\+$' "${_pidfile}") + else + _pid= + fi + + if [ -n "${_pid}" ] ; then + kill -0 ${_pid} + return $? + fi + + return 1 +} + +# arg: +wait_for_pidfile() +{ + _pidfile="$1" + _timeout=60 # we wait at most one minute... + + while [ "$_timeout" -gt 0 ]; do + # we wait for the pidfile to emerge... + if [ -f "$_pidfile" ] ; then + _pid=$(cat "$_pidfile") + # we retry until the pid in pidfile is a number... + if echo "$_pid" | grep -q '^[0-9]\+$' ; then + # the pid must be stable for 3 seconds... + _check_time=3 + while [ "$_check_time" -gt 0 ] ; do + sleep 1s + if kill -0 "$_pid" ; then + _check_time=$(( _check_time - 1 )) + else + break + fi + done + if [ "$_check_time" -eq 0 ] ; then + # we succeeded - we have valid pid... + break + fi + fi + fi + + sleep 1s + _timeout=$(( _timeout - 1 )) + done +} + +wait_for_file() +( + _timeout=60 # we wait at most one minute... + + while [ "$_timeout" -gt 0 ] ; do + if [ -e "$1" ] ; then + return 0 + fi + + sleep 1s + _timeout=$(( _timeout - 1 )) + done + + return 1 +) + diff --git a/appliances/lib/context-helper.py b/appliances/lib/context-helper.py new file mode 100755 index 00000000..d923616d --- /dev/null +++ b/appliances/lib/context-helper.py @@ -0,0 +1,295 @@ +#!/usr/bin/env python3 + +# --------------------------------------------------------------------------- # +# Copyright 2018-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. 
You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +# --------------------------------------------------------------------------- # + + +import sys +import os +import argparse +import re +import json + + +JSON_INDENT = 4 + + +class SaveFileError(Exception): + """When there is an issue with writing to the context file.""" + pass + + +class OpenFileError(Exception): + """When there is an issue with opening the context file.""" + pass + + +def get_current_context(env_prefix): + """ Returns all env. variables where names start with 'env_prefix'. """ + + context = {} + regex = re.compile("^" + env_prefix) + for env_var in os.environ: + if regex.search(env_var): + context[env_var] = os.environ[env_var] + + return context + + +def get_file_context(env_prefix, context_file): + """ + Returns all env. variables from 'context_file' where names start with + 'env_prefix'. + . + """ + + # load context file + with open(context_file, "r") as f: + file_context = json.load(f) + + # mark all not matching prefix + regex = re.compile("^" + env_prefix) + to_delete = [] + for env_var in file_context: + if not regex.search(env_var): + to_delete.append(env_var) + + # delete all non-matching env. vars + for env_var in to_delete: + del file_context[env_var] + + return file_context + + +def save_context(env_prefix, context_file, json_indent=JSON_INDENT): + """ + Saves current context (env. variables with matching 'env_prefix') into the + 'context_file'. + + It will overwrite the existing file if it exists! + + Returns context. + """ + + context = get_current_context(env_prefix) + with open(context_file, "w") as f: + f.write(json.dumps(context, indent=json_indent)) + f.write("\n") + + return context + + +def load_context(env_prefix, context_file): + """ + It loads context from the 'context_file'. It will load only those + variables matching 'env_prefix' and which are not yet in the current + context. + + It will NOT overwrite any variable in the current context! + + Returns result context as described above. + + NOTE: + Because it is impossible to modify environment of the caller - the result + from this function should dumped to the stdout as a json, which must be + sourced later by the caller (eg: shell script). + """ + + # load context file + file_context = get_file_context(env_prefix, context_file) + + # filter only those not in context already + context = get_current_context(env_prefix) + result = {} + for file_env in file_context: + if context.get(file_env) is None: + result[file_env] = file_context[file_env] + + return result + + +def update_context(env_prefix, context_file, json_indent=JSON_INDENT): + """ + Similar to save but it will only update the file - it will overwrite + existing variables in the 'context_file' with those from the current + context but it will leave the rest intact. + + Returns full content of the file as context. 
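+    For example (illustrative values): if the file holds {"ONEAPP_A": "1",
+    "ONEAPP_B": "2"} and the current environment carries ONEAPP_B=3 and
+    ONEAPP_C=4, the file is rewritten as {"ONEAPP_A": "1", "ONEAPP_B": "3",
+    "ONEAPP_C": "4"} and that merged dict is returned.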
+ """ + + # load context file + file_context = get_file_context(env_prefix, context_file) + + # load current context + context = get_current_context(env_prefix) + + # update file context with current context + for env_var in context: + file_context[env_var] = context[env_var] + + # write updated content back + with open(context_file, "w") as f: + f.write(json.dumps(file_context, indent=json_indent)) + f.write("\n") + + return file_context + + +def compare_context(env_prefix, context_file): + """ + It will return keypairs of context variables which differs from the + 'context_file' and the current context. + """ + + # load context file + file_context = get_file_context(env_prefix, context_file) + + # load current context + context = get_current_context(env_prefix) + + # find all changed + result = {} + for env_var in context: + if file_context.get(env_var) != context.get(env_var): + result[env_var] = context[env_var] + + # when variable was not changed but deleted + # TO NOTE: currently not usable because VNF is setting defaults in context.json + # + #for env_var in file_context: + # if context.get(env_var) is None: + # result[env_var] = "" + + return result + + +def error_msg(msg): + length = 80 + line = "" + for word in msg.split(' '): + if (len(line + ' ' + word)) < length: + line = line.strip() + ' ' + word + else: + print(line, file=sys.stderr) + line = word + if (line != ""): + print(line, file=sys.stderr) + + +def print_result(context, output_type, json_indent=JSON_INDENT): + """ + Prints context according to output type (the whole json, or just variable + names - each on separate line - for simple usage). + """ + + if output_type == 'json': + print(json.dumps(context, indent=json_indent)) + elif output_type == 'names': + for i in context: + print(i) + elif output_type == 'shell': + for i in context: + print("%s='%s'" % (i, context[i])) + + +def main(): + parser = argparse.ArgumentParser(description="ONE context helper") + parser.add_argument("-f", "--force", + dest="context_overwrite", + required=False, + action='store_const', + const=True, + default=False, + help="Forces overwrite of the file if needed") + parser.add_argument("-e", "--env-prefix", + required=False, + metavar="", + default="ONEAPP_", + help="Prefix of the context variables " + "(default: 'ONEAPP_')") + parser.add_argument("-t", "--output-type", + required=False, + metavar="json|names|shell", + choices=["json", "names", "shell"], + default="json", + help="Output type (affects only load and compare) " + "(default: 'json')") + parser.add_argument("context_action", + metavar="save|load|update|compare", + choices=["save", "load", "update", "compare"], + help=("Save/update context into the file," + " or load from it," + " or compare it with the current context.")) + parser.add_argument("context_file", + metavar="", + help="Filepath of the context file") + + args = parser.parse_args() + + if args.context_action == "save": + try: + if (os.path.isfile(args.context_file) + and (not args.context_overwrite)): + # file exists and no --force used... 
+ raise SaveFileError + except SaveFileError: + error_msg("ERROR: Trying to save context but the file: '" + + args.context_file + "' already exists!") + error_msg("Hint 1: Try '--force' if you wish to overwrite it") + error_msg("Hint 2: Or maybe you want to use 'update'...") + return 1 + context = save_context(args.env_prefix, args.context_file) + + elif args.context_action == "load": + try: + if not os.path.isfile(args.context_file): + raise OpenFileError + except OpenFileError: + error_msg("ERROR: Trying to open the context file: '" + + args.context_file + "' but it doesn't exist!") + return 1 + context = load_context(args.env_prefix, args.context_file) + + # dump context values which should be sourced by caller + print_result(context, args.output_type) + + elif args.context_action == "update": + if os.path.isfile(args.context_file): + # update existing + context = update_context(args.env_prefix, args.context_file) + else: + # no file yet, so simply save context instead + context = save_context(args.env_prefix, args.context_file) + + elif args.context_action == "compare": + try: + if not os.path.isfile(args.context_file): + raise OpenFileError + except OpenFileError: + error_msg("ERROR: Trying to open the context file: '" + + args.context_file + "' but it doesn't exist!") + return 1 + context = compare_context(args.env_prefix, args.context_file) + + # dump context values which should be sourced by caller + print_result(context, args.output_type) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) + diff --git a/appliances/lib/functions.sh b/appliances/lib/functions.sh new file mode 100644 index 00000000..c382eb50 --- /dev/null +++ b/appliances/lib/functions.sh @@ -0,0 +1,407 @@ +# ---------------------------------------------------------------------------- # +# Copyright 2018-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +# ---------------------------------------------------------------------------- # + +# args: "$@" +_parse_arguments() +{ + _ACTION=nil + state=nil + while [ -n "$1" ] ; do + case "$state" in + nil) + case "$1" in + -h|--help|help) + _ACTION=help + state=done + ;; + install) + _ACTION=install + state=install + ;; + configure|bootstrap) + _ACTION="$1" + state=configure + ;; + *) + _ACTION=badargs + msg unknown "BAD USAGE: unknown argument: $1" + break + ;; + esac + ;; + configure) + case "$1" in + reconfigure) + ONE_SERVICE_RECONFIGURE=true + state=done + ;; + *) + _ACTION=badargs + msg unknown "BAD USAGE: unknown argument: $1" + break + ;; + esac + ;; + install) + ONE_SERVICE_VERSION="$1" + state=done + ;; + done) + _ACTION=badargs + msg unknown "BAD USAGE: extraneous argument(s)" + break + ;; + esac + shift + done +} + +# args: "$0" "${@}" +_lock_or_fail() +{ + this_script="$1" + if [ "${_SERVICE_LOCK}" != "$this_script" ] ; then + exec env _SERVICE_LOCK="$this_script" flock -xn $this_script "$@" + fi +} + +_on_exit() +{ + # this is the exit handler - I want to clean up as much as I can + set +e + + # first do whatever the service appliance needs to clean after itself + service_cleanup + + # delete temporary working file(s) + if [ -n "$_SERVICE_LOG_PIPE" ] ; then + rm -f "$_SERVICE_LOG_PIPE" + fi + + # exiting while the stage was interrupted - change status to failure + _status=$(_get_current_service_result) + case "$_status" in + started) + _set_service_status failure + ;; + esac + + # all done - delete pid file and exit + rm -f "$ONE_SERVICE_PIDFILE" +} + +_trap_exit() +{ + trap '_on_exit 2>/dev/null' INT QUIT TERM EXIT +} + +_is_running() +{ + pid=$(_get_pid) + + if echo "$pid" | grep -q '^[0-9]\+$' ; then + kill -0 $pid + return $? + fi + + return 1 +} + +_get_pid() +{ + if [ -f "$ONE_SERVICE_PIDFILE" ] ; then + cat "$ONE_SERVICE_PIDFILE" + fi +} + +_write_pid() +{ + echo $$ > "$ONE_SERVICE_PIDFILE" +} + +_get_service_status() +{ + if [ -f "$ONE_SERVICE_STATUS" ] ; then + cat "$ONE_SERVICE_STATUS" + fi +} + +_get_current_service_step() +{ + _get_service_status | sed -n 's/^\(install\|configure\|bootstrap\)_.*/\1/p' +} + +_get_current_service_result() +{ + _result=$(_get_service_status | sed -n 's/^\(install\|configure\|bootstrap\)_\(.*\)/\2/p') + case "$_result" in + started|success|failure) + echo "$_result" + ;; + esac +} + +# arg: install|configure|bootstrap [| +_check_service_status() +{ + _reconfigure="$2" + + case "$1" in + install) + case "$(_get_service_status)" in + '') + # nothing was done so far + return 0 + ;; + install_success) + msg warning "Installation was already done - skip" + return 1 + ;; + install_started) + msg error "Installation was probably interrupted - abort" + _set_service_status failure + exit 1 + ;; + install_failure) + msg error "Last installation attempt failed - abort" + exit 1 + ;; + *) + msg error "Install step cannot be run - go check: ${ONE_SERVICE_STATUS}" + exit 1 + ;; + esac + ;; + configure) + case "$(_get_service_status)" in + '') + # nothing was done so far - missing install + msg error "Cannot proceed with configuration - missing installation step" + exit 1 + ;; + install_success) + # installation was successfull - can continue + return 0 + ;; + configure_success) + if is_true _reconfigure ; then + msg info "Starting reconfiguration of the service" + return 0 + else + msg warning "Configuration was already done - skip" + return 1 + fi + ;; + configure_started) + if is_true _reconfigure ; then + msg info "Starting reconfiguration of the 
service" + return 0 + else + msg error "Configuration was probably interrupted - abort" + _set_service_status failure + exit 1 + fi + ;; + configure_failure) + if is_true _reconfigure ; then + msg info "Starting reconfiguration of the service" + return 0 + else + msg error "Last configuration attempt failed - abort" + exit 1 + fi + ;; + bootstrap*) + if is_true _reconfigure ; then + msg info "Starting reconfiguration of the service" + return 0 + else + msg error "Configure step cannot be run - go check: ${ONE_SERVICE_STATUS}" + exit 1 + fi + ;; + *) + msg error "Configure step cannot be run - go check: ${ONE_SERVICE_STATUS}" + exit 1 + ;; + esac + ;; + bootstrap) + case "$(_get_service_status)" in + '') + # nothing was done so far - missing install + msg error "Cannot proceed with bootstrapping - missing installation step" + exit 1 + ;; + configure_success) + # configuration was successfull - can continue + return 0 + ;; + bootstrap_success) + if is_true _reconfigure ; then + msg info "Redo bootstrap of the service" + return 0 + else + msg warning "Bootstrap was already done - skip" + return 1 + fi + ;; + bootstrap_started) + if is_true _reconfigure ; then + msg info "Redo bootstrap of the service" + return 0 + else + msg error "Bootstrap was probably interrupted - abort" + _set_service_status failure + exit 1 + fi + ;; + bootstrap_failure) + if is_true _reconfigure ; then + msg info "Redo bootstrap of the service" + return 0 + else + msg error "Last bootstrap attempt failed - abort" + exit 1 + fi + ;; + *) + msg error "Bootstrap step cannot be run - go check: ${ONE_SERVICE_STATUS}" + exit 1 + ;; + esac + ;; + esac + + msg error "THIS SHOULD NOT HAPPEN!" + msg unknown "Possibly a bug, wrong usage, action etc." + exit 1 +} + +# arg: install|configure|bootstrap|success|failure +_set_service_status() +{ + _status="$1" + case "$_status" in + install|configure|bootstrap) + echo ${_status}_started > "$ONE_SERVICE_STATUS" + _set_motd "$_status" started + ;; + success|failure) + _step=$(_get_current_service_step) + echo ${_step}_${_status} > "$ONE_SERVICE_STATUS" + _set_motd "$_step" "$_status" + ;; + *) + msg unknown "THIS SHOULD NOT HAPPEN!" + msg unknown "Possibly a bug, wrong usage, action etc." 
+ exit 1 + ;; + esac +} + +_print_logo() +{ + cat > ${ONE_SERVICE_MOTD} <> ${ONE_SERVICE_MOTD} <> ${ONE_SERVICE_MOTD} <> ${ONE_SERVICE_MOTD} <> ${ONE_SERVICE_MOTD} < +_start_log() +{ + _logfile="$1" + _SERVICE_LOG_PIPE="$ONE_SERVICE_LOGDIR"/one_service_log.pipe + + # create named pipe + mknod "$_SERVICE_LOG_PIPE" p + + # connect tee to the pipe and let it write to the log and screen + tee <"$_SERVICE_LOG_PIPE" -a "$_logfile" & + + # save stdout to fd 3 and force shell to write to the pipe + exec 3>&1 >"$_SERVICE_LOG_PIPE" +} + +_end_log() +{ + # restore stdout for the shell and close fd 3 + exec >&3 3>&- +} + diff --git a/appliances/scripts/context_service_net-90.sh b/appliances/scripts/context_service_net-90.sh new file mode 100644 index 00000000..ca494b20 --- /dev/null +++ b/appliances/scripts/context_service_net-90.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Runs OpenNebula service appliances configuration & bootstrap script + +#TODO: just single run based on "status" +_oneapp_service='/etc/one-appliance/service' + +# one-context 6.2.0+ shifts the command argument +if [ $# -eq 2 ]; then + _reconfigure="$2" +else + _reconfigure="$1" +fi + +if [ -x "${_oneapp_service}" ]; then + "${_oneapp_service}" configure "$_reconfigure" && \ + "${_oneapp_service}" bootstrap +fi diff --git a/appliances/scripts/context_service_net-99.sh b/appliances/scripts/context_service_net-99.sh new file mode 100644 index 00000000..7633ce15 --- /dev/null +++ b/appliances/scripts/context_service_net-99.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env} + +if [ "$REPORT_READY" != "YES" ]; then + exit 0 +fi + +# $TOKENTXT is available only through the env. file +if [ -f "${ENV_FILE}" ]; then + . "${ENV_FILE}" +fi + +# Reports only if ONE service appliance bootstrapped successfully +if [ -x '/etc/one-appliance/service' ]; then + _status=$(cat '/etc/one-appliance/status' 2>/dev/null) + if [ "${_status}" != 'bootstrap_success' ]; then + exit 0 + fi +fi + +### + +if which onegate >/dev/null 2>&1; then + onegate vm update --data "READY=YES" + + if [ "$?" = "0" ]; then + exit 0 + fi +fi + +if which curl >/dev/null 2>&1; then + curl -X "PUT" "${ONEGATE_ENDPOINT}/vm" \ + --header "X-ONEGATE-TOKEN: $TOKENTXT" \ + --header "X-ONEGATE-VMID: $VMID" \ + -d "READY=YES" + + if [ "$?" = "0" ]; then + exit 0 + fi +fi + +if which wget >/dev/null 2>&1; then + wget --method=PUT "${ONEGATE_ENDPOINT}/vm" \ + --body-data="READY=YES" \ + --header "X-ONEGATE-TOKEN: $TOKENTXT" \ + --header "X-ONEGATE-VMID: $VMID" + + if [ "$?" = "0" ]; then + exit 0 + fi +fi diff --git a/appliances/service b/appliances/service new file mode 100755 index 00000000..d02530af --- /dev/null +++ b/appliances/service @@ -0,0 +1,133 @@ +#!/usr/bin/env bash + +# ---------------------------------------------------------------------------- # +# Copyright 2018-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +# ---------------------------------------------------------------------------- # + +# USAGE: +# service [-h|--help|help] +# Print help and usage +# +# service install [] +# Download files and install packages for the desired version of a service +# +# service configure +# Configure the service via contextualization or with defaults +# +# service bootstrap +# Use user's predefined values for the final setup and start the service + +ONE_SERVICE_DIR=/etc/one-appliance +ONE_SERVICE_LOGDIR=/var/log/one-appliance +ONE_SERVICE_STATUS="${ONE_SERVICE_DIR}/status" +ONE_SERVICE_TEMPLATE="${ONE_SERVICE_DIR}/template" +ONE_SERVICE_METADATA="${ONE_SERVICE_DIR}/metadata" +ONE_SERVICE_REPORT="${ONE_SERVICE_DIR}/config" +ONE_SERVICE_FUNCTIONS="${ONE_SERVICE_DIR}/service.d/functions.sh" +ONE_SERVICE_COMMON="${ONE_SERVICE_DIR}/service.d/common.sh" +ONE_SERVICE_APPLIANCE="${ONE_SERVICE_DIR}/service.d/appliance.sh" +ONE_SERVICE_SETUP_DIR="/opt/one-appliance" +ONE_SERVICE_MOTD='/etc/motd' +ONE_SERVICE_PIDFILE="/var/run/one-appliance-service.pid" +ONE_SERVICE_CONTEXTFILE="${ONE_SERVICE_DIR}/context.json" +ONE_SERVICE_RECONFIGURE=false # the first time is always a full configuration +ONE_SERVICE_VERSION= # can be set by argument or to default +ONE_SERVICE_RECONFIGURABLE= # can be set by the appliance script + +# security precautions +set -e +umask 0077 + +# -> TODO: read all from ONE_SERVICE_DIR + +# source common functions +. "$ONE_SERVICE_COMMON" + +# source this script's functions +. "$ONE_SERVICE_FUNCTIONS" + +# source service appliance implementation (following functions): +# service_help +# service_install +# service_configure +# service_bootstrap +# service_cleanup +. "$ONE_SERVICE_APPLIANCE" + +# parse arguments and set _ACTION +_parse_arguments "$@" + +# execute requested action or fail +case "$_ACTION" in + nil|help) + # check if the appliance defined a help function + if type service_help >/dev/null 2>&1 ; then + # use custom appliance help + service_help + else + # use default + default_service_help + fi + ;; + badargs) + exit 1 + ;; + # all stages do basically this: + # 1. check status file if _ACTION can be run at all + # 2. set service status file + # 3. set motd (message of the day) + # 4. execute stage (install, configure or bootstrap) + # 5. set service status file again + # 6. set motd to normal or to signal failure + install|configure|bootstrap) + # check the status (am I running already) + if _is_running ; then + msg warning "Service script is running already - PID: $(_get_pid)" + exit 0 + fi + + # secure lock or fail (only one running instance of this script is allowed) + _lock_or_fail "$0" "$@" + + # set a trap for an exit (cleanup etc.) + _trap_exit + + # write a pidfile + _write_pid + + # analyze the current stage and either proceed or abort + if ! 
_check_service_status $_ACTION "$ONE_SERVICE_RECONFIGURABLE" ; then + exit 0 + fi + + # mark the start of a stage (install, configure or bootstrap) + _set_service_status $_ACTION + + # here we make sure that log directory exists + mkdir -p "$ONE_SERVICE_LOGDIR" + chmod 0700 "$ONE_SERVICE_LOGDIR" + + # execute action + _start_log "${ONE_SERVICE_LOGDIR}/ONE_${_ACTION}.log" + service_${_ACTION} 2>&1 + _end_log + + # if we reached this point then the current stage was successfull + _set_service_status success + ;; +esac + +exit 0 + diff --git a/appliances/wordpress.sh b/appliances/wordpress.sh new file mode 100644 index 00000000..0e74cc16 --- /dev/null +++ b/appliances/wordpress.sh @@ -0,0 +1,627 @@ +# ---------------------------------------------------------------------------- # +# Copyright 2018-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +# ---------------------------------------------------------------------------- # + +### Important notes ################################################## +# +# The contextualization variable 'ONEAPP_SITE_HOSTNAME' IS (!) mandatory and +# must be correct (resolveable, reachable) otherwise the web will be broken. +# It defaults to first non-loopback address it finds - if no address is found +# then the 'localhost' is used - and then wordpress will function correctly +# only from within the instance. +# +# 'ONEAPP_SITE_HOSTNAME' can be changed in the wordpress settings but it should +# be set to something sensible from the beginning so you can be able to login +# to the wordpress and change the settings... 
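+#
+# As an illustration only (parameter names come from ONE_SERVICE_PARAMS below,
+# the values here are placeholders), these variables are normally supplied via
+# the CONTEXT section of the VM template, e.g.:
+#
+#   CONTEXT = [
+#     ONEAPP_SITE_HOSTNAME = "wp.example.com",
+#     ONEAPP_DB_NAME       = "wordpress",
+#     ONEAPP_DB_USER       = "wordpress",
+#     ONEAPP_DB_PASSWORD   = "changeme" ]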
+# +### Important notes ################################################## + + +# List of contextualization parameters +ONE_SERVICE_PARAMS=( + 'ONEAPP_PASSWORD_LENGTH' 'configure' 'Database password length' '' + 'ONEAPP_DB_NAME' 'configure' 'Database name' '' + 'ONEAPP_DB_USER' 'configure' 'Database service user' '' + 'ONEAPP_DB_PASSWORD' 'configure' 'Database service password' '' + 'ONEAPP_DB_ROOT_PASSWORD' 'configure' 'Database password for root' '' + 'ONEAPP_SITE_HOSTNAME' 'configure' 'Fully qualified domain name or IP' '' + 'ONEAPP_SSL_CERT' 'configure' 'SSL certificate' 'O|text64' + 'ONEAPP_SSL_PRIVKEY' 'configure' 'SSL private key' 'O|text64' + 'ONEAPP_SSL_CHAIN' 'configure' 'SSL CA chain' 'O|text64' + 'ONEAPP_SITE_TITLE' 'bootstrap' '** Site Title (set all or none)' 'O|text' + 'ONEAPP_ADMIN_USERNAME' 'bootstrap' '** Site Administrator Login (set all or none)' 'O|text' + 'ONEAPP_ADMIN_PASSWORD' 'bootstrap' '** Site Administrator Password (set all or none)' 'O|password' + 'ONEAPP_ADMIN_EMAIL' 'bootstrap' '** Site Administrator E-mail (set all or none)' 'O|text' +) + + +### Appliance metadata ############################################### + +# Appliance metadata +ONE_SERVICE_NAME='Service WordPress - KVM' +ONE_SERVICE_VERSION='6.3.2' #latest +ONE_SERVICE_BUILD=$(date +%s) +ONE_SERVICE_SHORT_DESCRIPTION='Appliance with preinstalled WordPress for KVM hosts' +ONE_SERVICE_DESCRIPTION=$(cat < "$_cert" + echo "$ONEAPP_SSL_PRIVKEY" | base64 -d > "$_certkey" + if [ -n "$ONEAPP_SSL_CHAIN" ] ; then + echo "$ONEAPP_SSL_CHAIN" | base64 -d > "$_cacert" + chmod +r "$_cacert" + fi + chmod +r "$_cert" + + # ssl check + msg info "Checking if private key match certificate:" + _ssl_check_cert=$(openssl x509 -noout -modulus -in "$_cert") + _ssl_check_key=$(openssl rsa -noout -modulus -in "$_certkey") + if [ "$_ssl_check_cert" = "$_ssl_check_key" ] ; then + msg info "OK" + else + msg error "Private SSL key does not belong to the certificate" + return 1 + fi + + # fixing ssl.conf + msg info "Fixing ssl.conf" + sed -i 's|[[:space:]#]*SSLCertificateFile.*|'"$_cert_line"'|' /etc/httpd/conf.d/ssl.conf + sed -i 's|[[:space:]#]*SSLCertificateKeyFile.*|'"$_certkey_line"'|' /etc/httpd/conf.d/ssl.conf + if [ -n "$ONEAPP_SSL_CHAIN" ] ; then + sed -i 's|[[:space:]#]*SSLCACertificateFile.*|'"$_cacert_line"'|' /etc/httpd/conf.d/ssl.conf + else + sed -i 's|[[:space:]#]*\(SSLCACertificateFile.*\)|#\1|' /etc/httpd/conf.d/ssl.conf + fi + + msg info "Configuring https vhost" + cat > /etc/httpd/conf.d/wordpress-ssl.conf < + ServerAdmin root@localhost + DocumentRoot /var/www/html + ServerName ${ONEAPP_SITE_HOSTNAME} + SSLEngine On + ${_cert_line} + ${_certkey_line} + ${ONEAPP_SSL_CHAIN:+${_cacert_line}} + + ErrorLog /var/log/httpd/wordpress-ssl-error.log + CustomLog /var/log/httpd/wordpress-ssl-access.log combined + + + Options FollowSymLinks + AllowOverride None + Require all granted + + +EOF +} + +configure_apache() +{ + msg info "Apache setup" + + msg info "Configuring http vhost" + cat > /etc/httpd/conf.d/wordpress.conf < + ServerAdmin root@localhost + DocumentRoot /var/www/html + ErrorLog /var/log/httpd/wordpress-error.log + CustomLog /var/log/httpd/wordpress-access.log combined + + + Options FollowSymLinks + AllowOverride None + Require all granted + + +EOF + + # SSL setup if everything needed is provided + if [ -n "$ONEAPP_SSL_CERT" ] && [ -n "$ONEAPP_SSL_PRIVKEY" ] ; then + msg info "SSL setup" + msg info "DISCLAIMER: site address and certs must match for this to work..." 
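+        # (Illustrative only; cert.pem / key.pem are placeholder paths.)
+        # The key/certificate consistency check done in configure_apache_ssl()
+        # can be reproduced by hand the same way:
+        #   openssl x509 -noout -modulus -in cert.pem
+        #   openssl rsa  -noout -modulus -in key.pem
+        # Both commands must print an identical modulus for a matching pair.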
+ + if [ -f /etc/httpd/conf.modules.d/00-ssl.conf ] ; then + sed -i 's/.*\(LoadModule.*\)/\1/' /etc/httpd/conf.modules.d/00-ssl.conf + else + printf 'LoadModule ssl_module modules/mod_ssl.so' > /etc/httpd/conf.modules.d/00-ssl.conf + fi + + if ! [ -f /etc/httpd/conf.d/ssl.conf ] ; then + if [ -f /etc/httpd/conf.d/ssl.conf-disabled ] ; then + mv /etc/httpd/conf.d/ssl.conf-disabled /etc/httpd/conf.d/ssl.conf + else + msg error "Missing ssl.conf" + return 1 + fi + fi + + configure_apache_ssl + else + msg info "No SSL setup (no cert files provided)" + + rm -f /etc/httpd/conf.d/wordpress-ssl.conf + + if [ -f /etc/httpd/conf.modules.d/00-ssl.conf ] ; then + sed -i 's/.*\(LoadModule.*\)/#\1/' /etc/httpd/conf.modules.d/00-ssl.conf + fi + + if [ -f /etc/httpd/conf.d/ssl.conf ] ; then + mv /etc/httpd/conf.d/ssl.conf /etc/httpd/conf.d/ssl.conf-disabled + fi + + fi + + # fail if config is wrong + msg info "Apache configtest..." + apachectl configtest + + return $? +} + +configure_wordpress() +{ + msg info "WordPress setup" + + rm -rf /var/www/html + cp -a "$ONE_SERVICE_SETUP_DIR"/wordpress /var/www/html + mkdir /var/www/html/wp-content/uploads + + mv /var/www/html/wp-config-sample.php /var/www/html/wp-config.php + + sed -i \ + -e "s#^[[:space:]]*define([[:space:]]*'DB_NAME'.*#define('DB_NAME', '${ONEAPP_DB_NAME}');#" \ + -e "s#^[[:space:]]*define([[:space:]]*'DB_USER'.*#define('DB_USER', '${ONEAPP_DB_USER}');#" \ + -e "s#^[[:space:]]*define([[:space:]]*'DB_PASSWORD'.*#define('DB_PASSWORD', '${ONEAPP_DB_PASSWORD}');#" \ + /var/www/html/wp-config.php + + chown -R apache:apache /var/www/html/ + find /var/www/html -type d -exec chmod 750 '{}' \; + find /var/www/html -type f -exec chmod 640 '{}' \; + + return 0 +} + +report_config() +{ + msg info "Credentials and config values are saved in: ${ONE_SERVICE_REPORT}" + + cat > "$ONE_SERVICE_REPORT" < "$MARIADB_CREDENTIALS" < "$MARIADB_CONFIG" <} LICENSE=${LICENSE:-Apache 2.0} diff --git a/packer/alma/10-upgrade-distro.sh b/packer/alma/10-upgrade-distro.sh new file mode 100644 index 00000000..6a40e288 --- /dev/null +++ b/packer/alma/10-upgrade-distro.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# (Auto)Removes unneeded packages and upgrades +# the distro. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +dnf install -y epel-release + +dnf update -y --skip-broken + +# Ensure packages needed for post-processing scripts do exist. +dnf install -y curl gawk grep jq sed + +sync diff --git a/packer/alma/11-update-grub.sh b/packer/alma/11-update-grub.sh new file mode 100644 index 00000000..684719ab --- /dev/null +++ b/packer/alma/11-update-grub.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash + +# Sets kernel command line (net.ifnames=0 is particularily important), +# then updates initramfs/initrd and grub2. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +rm -rf /etc/default/grub.d/ + +# Drop unwanted. + +gawk -i inplace -f- /etc/default/grub <<'EOF' +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "crashkernel=no") } +{ print } +EOF + +# Ensure required. 
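+
+# (Sketch of the append-if-missing idiom used below, runnable on a scratch
+#  copy; /tmp/grub.test is only an example path:)
+#   cp /etc/default/grub /tmp/grub.test
+#   gawk -i inplace '/^GRUB_CMDLINE_LINUX=/ && !/net.ifnames=0/ { gsub(/"$/, " net.ifnames=0\"") } { print }' /tmp/grub.test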
+ +gawk -i inplace -f- /etc/default/grub <<'EOF' +/^GRUB_CMDLINE_LINUX=/ { found = 1 } +/^GRUB_CMDLINE_LINUX=/ && !/net.ifnames=0/ { gsub(/"$/, " net.ifnames=0\"") } +/^GRUB_CMDLINE_LINUX=/ && !/biosdevname=0/ { gsub(/"$/, " biosdevname=0\"") } +{ print } +END { if (!found) print "GRUB_CMDLINE_LINUX=\" net.ifnames=0 biosdevname=0\"" >> FILENAME } +EOF + +gawk -i inplace -f- /etc/default/grub <<'EOF' +BEGIN { update = "GRUB_TIMEOUT=0" } +/^GRUB_TIMEOUT=/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +# Cleanup. + +gawk -i inplace -f- /etc/default/grub <<'EOF' +{ gsub(/(" *| *")/, "\""); gsub(/ */, " ") } +{ print } +EOF + +dnf install -y dracut-config-generic dracut-network + +INITRAMFS_IMG=$(find /boot/ -maxdepth 1 -name 'initramfs-*.img' ! -name '*rescue*' ! -name '*kdump*' | sort -V | tail -1) +INITRAMFS_VER=$(sed -e 's/^.*initramfs-//' -e 's/\.img$//' <<< "$INITRAMFS_IMG") +dracut --force "$INITRAMFS_IMG" "$INITRAMFS_VER" + +grub2-mkconfig -o /boot/grub2/grub.cfg + +sync + +reboot diff --git a/packer/alma/80-install-context.sh.8 b/packer/alma/80-install-context.sh.8 new file mode 100644 index 00000000..9fcf01bc --- /dev/null +++ b/packer/alma/80-install-context.sh.8 @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_SUFFIX:=.el8.noarch.rpm}" + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +dnf install -y /context/one-context*$CTX_SUFFIX haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +if ! rpm -q --queryformat '%{VERSION}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + dnf install -y --setopt=install_weak_deps=False NetworkManager systemd-networkd +fi +# <<< Apply only on one-context >= 6.1 <<< + +sync diff --git a/packer/alma/80-install-context.sh.9 b/packer/alma/80-install-context.sh.9 new file mode 100644 index 00000000..0203743b --- /dev/null +++ b/packer/alma/80-install-context.sh.9 @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_SUFFIX:=.el9.noarch.rpm}" + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +dnf install -y /context/one-context*$CTX_SUFFIX haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +if ! rpm -q --queryformat '%{VERSION}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + dnf install -y --setopt=install_weak_deps=False NetworkManager systemd-networkd +fi +# <<< Apply only on one-context >= 6.1 <<< + +sync diff --git a/packer/alma/81-configure-ssh.sh b/packer/alma/81-configure-ssh.sh new file mode 100644 index 00000000..ca55c22b --- /dev/null +++ b/packer/alma/81-configure-ssh.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Configures critical settings for OpenSSH server. 
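+#
+# For reference, the edits below should leave /etc/ssh/sshd_config with:
+#   PasswordAuthentication no
+#   PermitRootLogin without-password
+#   UseDNS no
+# and can be sanity-checked afterwards with:
+#   sshd -t && grep -E '^(PasswordAuthentication|PermitRootLogin|UseDNS)' /etc/ssh/sshd_config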
+ +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PasswordAuthentication no" } +/^[#\s]*PasswordAuthentication\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PermitRootLogin without-password" } +/^[#\s]*PermitRootLogin\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "UseDNS no" } +/^[#\s]*UseDNS\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +sync diff --git a/packer/alma/98-collect-garbage.sh b/packer/alma/98-collect-garbage.sh new file mode 100644 index 00000000..ee5c456c --- /dev/null +++ b/packer/alma/98-collect-garbage.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +# Cleans DNF caches, removes temporary files / logs, +# removes leftover / temporary unneeded packages. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +kdump="kdump.service" +systemctl list-units --full -all | grep -Fq "$kdump" && systemctl disable "$kdump" + +# Remove old kernels. +dnf remove -y $(dnf repoquery --installonly --latest-limit=-1 -q) + +dnf remove -y fwupd linux-firmware + +dnf clean -y all + +rm -rf /boot/*-rescue-* +rm -rf /context/ + +sync diff --git a/packer/alma/alma.pkr.hcl b/packer/alma/alma.pkr.hcl new file mode 100644 index 00000000..1e3b5f46 --- /dev/null +++ b/packer/alma/alma.pkr.hcl @@ -0,0 +1,76 @@ +# Build cloud init iso +source "file" "user_data" { + source = "${var.input_dir}/cloud-init.yml" + target = "${var.input_dir}/${var.appliance_name}-userdata" +} + +build { + sources = ["sources.file.user_data"] + + provisioner "shell-local" { + inline = [ + "cloud-localds ${var.input_dir}/${var.appliance_name}-cloud-init.iso ${var.input_dir}/${var.appliance_name}-userdata", + ] + } +} + +# Build VM image +source "qemu" "alma" { + cpus = 2 + memory = 2048 + accelerator = "kvm" + + iso_url = lookup(lookup(var.alma, var.version, {}), "iso_url", "") + iso_checksum = lookup(lookup(var.alma, var.version, {}), "iso_checksum", "") + + headless = var.headless + + disk_image = true + disk_cache = "unsafe" + disk_interface = "virtio" + net_device = "virtio-net" + format = "qcow2" + + output_directory = var.output_dir + + qemuargs = [ ["-serial", "stdio"], + ["-cpu", "host"], + ["-cdrom", "${var.input_dir}/${var.appliance_name}-cloud-init.iso"] + ] + + ssh_username = "root" + ssh_password = "opennebula" + ssh_wait_timeout = "900s" + vm_name = "${var.appliance_name}" +} + +build { + sources = ["source.qemu.alma"] + + provisioner "shell" { inline = ["mkdir /context"] } + + provisioner "file" { + source = "context-linux/out/" + destination = "/context" + } + + provisioner "shell" { + execute_command = "sudo -iu root {{.Vars}} bash {{.Path}}" + + # execute *.sh + *.sh. 
from input_dir + scripts = sort(concat( + [for s in fileset(".", "*.sh") : "${var.input_dir}/${s}"], + [for s in fileset(".", "*.sh.${var.version}") : "${var.input_dir}/${s}"] + )) + expect_disconnect = true + } + + post-processor "shell-local" { + execute_command = ["bash", "-c", "{{.Vars}} {{.Script}}"] + environment_vars = [ + "OUTPUT_DIR=${var.output_dir}", + "APPLIANCE_NAME=${var.appliance_name}", + ] + scripts = [ "packer/postprocess.sh" ] + } +} diff --git a/packer/alma/cloud-init.yml b/packer/alma/cloud-init.yml new file mode 100644 index 00000000..a2e97b34 --- /dev/null +++ b/packer/alma/cloud-init.yml @@ -0,0 +1,22 @@ +#cloud-config +growpart: + mode: auto + devices: [/] + +users: + - name: root + lock_passwd: false + hashed_passwd: $6$rounds=4096$2RFfXKGPKTcdF.CH$dzLlW9Pg1jbeojxRxEraHwEMAPAbpChBdrMFV1SOa6etSF2CYAe.hC1dRDM1icTOk7M4yhVS1BtwJjah9essD0 + +disable_root: false +ssh_pwauth: true + +runcmd: + - | + gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' + BEGIN { update = "PermitRootLogin yes" } + /^#*PermitRootLogin/ { $0 = update; found = 1 } + { print } + END { if (!found) print update >>FILENAME } + EOF + - systemctl reload sshd diff --git a/packer/alma/plugins.pkr.hcl b/packer/alma/plugins.pkr.hcl new file mode 120000 index 00000000..44663b58 --- /dev/null +++ b/packer/alma/plugins.pkr.hcl @@ -0,0 +1 @@ +../plugins.pkr.hcl \ No newline at end of file diff --git a/packer/alma/variables.pkr.hcl b/packer/alma/variables.pkr.hcl new file mode 100644 index 00000000..f668bd3b --- /dev/null +++ b/packer/alma/variables.pkr.hcl @@ -0,0 +1,38 @@ +variable "appliance_name" { + type = string + default = "alma" +} + +variable "version" { + type = string + default = "8" +} + +variable "input_dir" { + type = string +} + +variable "output_dir" { + type = string +} + +variable "headless" { + type = bool + default = false +} + +variable "alma" { + type = map(map(string)) + + default = { + "8" = { + iso_url = "https://repo.almalinux.org/almalinux/8/cloud/x86_64/images/AlmaLinux-8-GenericCloud-latest.x86_64.qcow2" + iso_checksum = "c0ad09255d91288dac590d99c95197d83a2846f1bcbec3f4222fb04265a2a4d7" + } + + "9" = { + iso_url = "https://repo.almalinux.org/almalinux/9/cloud/x86_64/images/AlmaLinux-9-GenericCloud-latest.x86_64.qcow2" + iso_checksum = "207d885ca8140e3106098e946cfc04088b0e21f50d24815051520d452eae0a50" + } + } +} diff --git a/packer/alpine/10-upgrade-distro.sh b/packer/alpine/10-upgrade-distro.sh new file mode 100644 index 00000000..66c003df --- /dev/null +++ b/packer/alpine/10-upgrade-distro.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env sh + +# (Auto)Removes unneeded packages and upgrades +# the distro. + +exec 1>&2 +set -ex + +# Ensure packages needed for post-processing scripts do exist. +apk --no-cache add bash curl gawk grep jq sed + +gawk -i inplace -f- /etc/apk/repositories <<'EOF' +/community$/ && !/edge/ { gsub(/^#\s*/, "") } +{ print } +EOF + +apk update +apk upgrade + +sync diff --git a/packer/alpine/11-update-boot.sh b/packer/alpine/11-update-boot.sh new file mode 100644 index 00000000..4fc16c3a --- /dev/null +++ b/packer/alpine/11-update-boot.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# Updates various settings that require reboot. 
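+# Concretely: serial gettys (ttyS*) are commented out in /etc/inittab and the
+# bootloader menu delay is pinned by a "TIMEOUT 3" entry in
+# /boot/extlinux.conf; a quick post-reboot check is:
+#   grep '^TIMEOUT' /boot/extlinux.conf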
+ +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +gawk -i inplace -f- /etc/inittab <<'EOF' +/^ttyS/ { $0 = "#" $0 } +{ print } +EOF + +gawk -i inplace -f- /boot/extlinux.conf <<'EOF' +BEGIN { update = "TIMEOUT 3" } +/^TIMEOUT\s/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +sync diff --git a/packer/alpine/80-install-context.sh b/packer/alpine/80-install-context.sh new file mode 100644 index 00000000..21d600c1 --- /dev/null +++ b/packer/alpine/80-install-context.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +: "${CTX_SUFFIX:=.apk}" + +set -o errexit -o nounset -o pipefail +set -x + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +apk --no-cache add tzdata haveged open-vm-tools-plugins-all +apk --no-cache add --allow-untrusted /context/one-context*$CTX_SUFFIX + +rc-update add qemu-guest-agent default +rc-update add open-vm-tools default +rc-update add haveged boot + +sync diff --git a/packer/alpine/81-configure-ssh.sh b/packer/alpine/81-configure-ssh.sh new file mode 100644 index 00000000..ca55c22b --- /dev/null +++ b/packer/alpine/81-configure-ssh.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Configures critical settings for OpenSSH server. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PasswordAuthentication no" } +/^[#\s]*PasswordAuthentication\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PermitRootLogin without-password" } +/^[#\s]*PermitRootLogin\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "UseDNS no" } +/^[#\s]*UseDNS\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +sync diff --git a/packer/alpine/98-collect-garbage.sh b/packer/alpine/98-collect-garbage.sh new file mode 100644 index 00000000..90a37560 --- /dev/null +++ b/packer/alpine/98-collect-garbage.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +# Cleans APK caches, removes temporary files / logs, +# removes leftover / temporary unneeded packages. 
+ +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +rm -f /etc/motd + +rm -rf /var/cache/apk/* +rm -rf /context/ + +sync diff --git a/packer/alpine/alpine.init b/packer/alpine/alpine.init new file mode 100644 index 00000000..83c3fc41 --- /dev/null +++ b/packer/alpine/alpine.init @@ -0,0 +1,57 @@ +#!/bin/ash + +# set root password +passwd root <<'EOF' +opennebula +opennebula +EOF + +cat > answers.txt < /dev/vda + +sed -i '/cdrom\|usbdisk/d' /mnt/etc/fstab + +# enable haveged to prevent slow boot due missing to entropy +chroot /mnt apk --no-cache add haveged +chroot /mnt rc-update add haveged default + +# enable root+password login temporarily (will be disabled in post-processing) +echo 'PermitRootLogin yes' >> /mnt/etc/ssh/sshd_config + +reboot diff --git a/packer/alpine/alpine.pkr.hcl b/packer/alpine/alpine.pkr.hcl new file mode 100644 index 00000000..a96620d5 --- /dev/null +++ b/packer/alpine/alpine.pkr.hcl @@ -0,0 +1,66 @@ +source "qemu" "alpine" { + cpus = 2 + memory = 2048 + accelerator = "kvm" + + iso_url = lookup(lookup(var.alpine, var.version, {}), "iso_url", "") + iso_checksum = lookup(lookup(var.alpine, var.version, {}), "iso_checksum", "") + + headless = var.headless + + http_directory = "${var.input_dir}" + boot_command = [ + "root", + "ifconfig eth0 up && udhcpc -i eth0", + "wget -qO alpine.init http://{{ .HTTPIP }}:{{ .HTTPPort }}/alpine.init", + "/bin/ash alpine.init" + ] + boot_wait = "20s" + + disk_cache = "unsafe" + disk_interface = "virtio" + net_device = "virtio-net" + disk_size = 256 + format = "qcow2" + + output_directory = "${var.output_dir}" + + qemuargs = [ ["-serial", "stdio"], + ["-cpu", "host"] + ] + ssh_username = "root" + ssh_password = "opennebula" + ssh_wait_timeout = "900s" + vm_name = "${var.appliance_name}" +} + +build { + sources = ["source.qemu.alpine"] + + provisioner "shell" { inline = ["mkdir /context"] } + + provisioner "file" { + source = "context-linux/out/" + destination = "/context" + } + + provisioner "shell" { + execute_command = "{{.Vars}} ash {{.Path}}" + + # execute *.sh + *.sh. 
from input_dir + scripts = sort(concat( + [for s in fileset(".", "*.sh") : "${var.input_dir}/${s}"], + [for s in fileset(".", "*.sh.${var.version}") : "${var.input_dir}/${s}"] + )) + expect_disconnect = true + } + + post-processor "shell-local" { + execute_command = ["bash", "-c", "{{.Vars}} {{.Script}}"] + environment_vars = [ + "OUTPUT_DIR=${var.output_dir}", + "APPLIANCE_NAME=${var.appliance_name}", + ] + scripts = [ "packer/postprocess.sh" ] + } +} diff --git a/packer/alpine/plugins.pkr.hcl b/packer/alpine/plugins.pkr.hcl new file mode 120000 index 00000000..44663b58 --- /dev/null +++ b/packer/alpine/plugins.pkr.hcl @@ -0,0 +1 @@ +../plugins.pkr.hcl \ No newline at end of file diff --git a/packer/alpine/variables.pkr.hcl b/packer/alpine/variables.pkr.hcl new file mode 100644 index 00000000..280e0530 --- /dev/null +++ b/packer/alpine/variables.pkr.hcl @@ -0,0 +1,38 @@ +variable "appliance_name" { + type = string + default = "alpine" +} + +variable "version" { + type = string + default = "316" +} + +variable "input_dir" { + type = string +} + +variable "output_dir" { + type = string +} + +variable "headless" { + type = bool + default = false +} + +variable "alpine" { + type = map(map(string)) + + default = { + "316" = { + iso_url = "https://dl-cdn.alpinelinux.org/alpine/v3.16/releases/x86_64/alpine-virt-3.16.7-x86_64.iso" + iso_checksum = "6b447e9b2e2ca561c01b03a7b21b6839c718ed85323d2d100ff2e10ea5191470" + } + + "317" = { + iso_url = "https://dl-cdn.alpinelinux.org/alpine/v3.17/releases/x86_64/alpine-virt-3.17.5-x86_64.iso" + iso_checksum = "d3aec585da8327095edb37b4b7b5eed4623a993196edf12e74641ee5f16291f6" + } + } +} diff --git a/packer/alt/10-upgrade-distro.sh b/packer/alt/10-upgrade-distro.sh new file mode 100644 index 00000000..9d333a5c --- /dev/null +++ b/packer/alt/10-upgrade-distro.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# (Auto)Removes unneeded packages and upgrades +# the distro. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +export DEBIAN_FRONTEND=noninteractive + +apt-get update -y + +apt-get install -y --fix-broken + +# Ensure packages needed for post-processing scripts do exist. +apt-get install -y curl gawk grep jq sed + +sync diff --git a/packer/alt/11-update-grub.sh b/packer/alt/11-update-grub.sh new file mode 100644 index 00000000..19b70f28 --- /dev/null +++ b/packer/alt/11-update-grub.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +# Sets kernel command line (net.ifnames=0 is particularily important), +# then updates initramfs/initrd and grub2. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +rm -rf /etc/default/grub.d/ + +# Drop unwanted. + +gawk -i inplace -f- /etc/sysconfig/grub2 <<'EOF' +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "crashkernel=no") } +{ print } +EOF + +# Ensure required. + +gawk -i inplace -f- /etc/sysconfig/grub2 <<'EOF' +/^GRUB_CMDLINE_LINUX=/ { found = 1 } +/^GRUB_CMDLINE_LINUX=/ && !/net.ifnames=0/ { gsub(/"$/, " net.ifnames=0\"") } +/^GRUB_CMDLINE_LINUX=/ && !/biosdevname=0/ { gsub(/"$/, " biosdevname=0\"") } +{ print } +END { if (!found) print "GRUB_CMDLINE_LINUX=\" net.ifnames=0 biosdevname=0\"" >> FILENAME } +EOF + +gawk -i inplace -f- /etc/sysconfig/grub2 <<'EOF' +BEGIN { update = "GRUB_TIMEOUT=0" } +/^GRUB_TIMEOUT=/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +# Cleanup. 
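+
+# (Intended effect of the normalisation below, shown on a placeholder value:
+#   GRUB_CMDLINE_LINUX="  net.ifnames=0   biosdevname=0 "
+#  becomes
+#   GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0"
+#  i.e. stray whitespace around and inside the quotes is collapsed.)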
+ +gawk -i inplace -f- /etc/sysconfig/grub2 <<'EOF' +{ gsub(/(" *| *")/, "\""); gsub(/ */, " ") } +{ print } +EOF + +INITRAMFS_IMG=$(find /boot/ -maxdepth 1 -name 'vmlinuz-*' | sort -V | tail -1) +INITRAMFS_VER=$(sed -e 's/^.*vmlinuz-//' <<< "$INITRAMFS_IMG") +make-initrd -k "$INITRAMFS_VER" + +update-grub + +sync diff --git a/packer/alt/80-install-context.sh b/packer/alt/80-install-context.sh new file mode 100644 index 00000000..606eb025 --- /dev/null +++ b/packer/alt/80-install-context.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_INFIX:=alt}" + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +export DEBIAN_FRONTEND=noninteractive + +if ! stat /context/one-context*$CTX_INFIX*; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(test(\"$CTX_INFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +apt-get remove --purge -y cloud-init +apt-get install -y /context/one-context*$CTX_INFIX* haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +# - Fix Alt favour of NM by removing etcnet plugin and pointing to correct sysctl.conf +# - Disable Alt custom resolv.conf generation +if ! rpm -q --queryformat '%{VERSION}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + apt-get install -y NetworkManager + systemctl enable NetworkManager.service + sed -i -e 's/^\(\s*plugins\s*\)=.*/\1=keyfile/' /etc/NetworkManager/NetworkManager.conf + sed -i -e 's#^\(\s*NM_SYSCTL_CONF\)=.*#\1=/etc/sysctl.conf#' /etc/sysconfig/NetworkManager + systemctl mask altlinux-libresolv + systemctl mask altlinux-libresolv.path + systemctl mask altlinux-openresolv + systemctl mask altlinux-openresolv.path + systemctl mask altlinux-simpleresolv + systemctl mask altlinux-simpleresolv.path +fi +# <<< Apply only on one-context >= 6.1 <<< + +sync diff --git a/packer/alt/81-configure-ssh.sh b/packer/alt/81-configure-ssh.sh new file mode 100644 index 00000000..96d2f13b --- /dev/null +++ b/packer/alt/81-configure-ssh.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Configures critical settings for OpenSSH server. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +gawk -i inplace -f- /etc/openssh/sshd_config <<'EOF' +BEGIN { update = "PasswordAuthentication no" } +/^[#\s]*PasswordAuthentication\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/openssh/sshd_config <<'EOF' +BEGIN { update = "PermitRootLogin without-password" } +/^[#\s]*PermitRootLogin\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/openssh/sshd_config <<'EOF' +BEGIN { update = "UseDNS no" } +/^[#\s]*UseDNS\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +sync diff --git a/packer/alt/98-collect-garbage.sh b/packer/alt/98-collect-garbage.sh new file mode 100644 index 00000000..cd522fdc --- /dev/null +++ b/packer/alt/98-collect-garbage.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# Cleans APT caches, removes temporary files / logs, +# removes leftover / temporary unneeded packages. 
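+#
+# (Note: ALT is RPM-based but managed with APT-RPM, which is why the cleanup
+#  below is apt-get driven; "apt-get autoremove -y && apt-get clean -y" plays
+#  the role that "dnf autoremove" / "dnf clean all" has on the other RPM
+#  distros in this repo.)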
+ +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +export DEBIAN_FRONTEND=noninteractive + +apt-get remove --purge -y cloud-init fwupd + +apt-get autoremove -y + +apt-get clean -y && rm -rf /var/lib/apt/lists/* + +install -d /var/lib/apt/lists/partial/ + +rm -rf /context/ + +sync diff --git a/packer/alt/alt.pkr.hcl b/packer/alt/alt.pkr.hcl new file mode 100644 index 00000000..a716d9f6 --- /dev/null +++ b/packer/alt/alt.pkr.hcl @@ -0,0 +1,76 @@ +# Build cloud init iso +source "file" "user_data" { + source = "${var.input_dir}/cloud-init.yml" + target = "${var.input_dir}/${var.appliance_name}-userdata" +} + +build { + sources = ["sources.file.user_data"] + + provisioner "shell-local" { + inline = [ + "cloud-localds ${var.input_dir}/${var.appliance_name}-cloud-init.iso ${var.input_dir}/${var.appliance_name}-userdata", + ] + } +} + +# Build VM image +source "qemu" "alt" { + cpus = 2 + memory = 2048 + accelerator = "kvm" + + iso_url = lookup(lookup(var.alt, var.version, {}), "iso_url", "") + iso_checksum = lookup(lookup(var.alt, var.version, {}), "iso_checksum", "") + + headless = var.headless + + disk_image = true + disk_cache = "unsafe" + disk_interface = "virtio" + net_device = "virtio-net" + format = "qcow2" + + output_directory = var.output_dir + + qemuargs = [ ["-serial", "stdio"], + ["-cpu", "host"], + ["-cdrom", "${var.input_dir}/${var.appliance_name}-cloud-init.iso"] + ] + + ssh_username = "root" + ssh_password = "opennebula" + ssh_wait_timeout = "900s" + vm_name = "${var.appliance_name}" +} + +build { + sources = ["source.qemu.alt"] + + provisioner "shell" { inline = ["mkdir /context"] } + + provisioner "file" { + source = "context-linux/out/" + destination = "/context" + } + + provisioner "shell" { + execute_command = "{{.Vars}} bash {{.Path}}" + + # execute *.sh + *.sh. 
from input_dir + scripts = sort(concat( + [for s in fileset(".", "*.sh") : "${var.input_dir}/${s}"], + [for s in fileset(".", "*.sh.${var.version}") : "${var.input_dir}/${s}"] + )) + expect_disconnect = true + } + + post-processor "shell-local" { + execute_command = ["bash", "-c", "{{.Vars}} {{.Script}}"] + environment_vars = [ + "OUTPUT_DIR=${var.output_dir}", + "APPLIANCE_NAME=${var.appliance_name}", + ] + scripts = [ "packer/postprocess.sh" ] + } +} diff --git a/packer/alt/cloud-init.yml b/packer/alt/cloud-init.yml new file mode 100644 index 00000000..b0dd9625 --- /dev/null +++ b/packer/alt/cloud-init.yml @@ -0,0 +1,22 @@ +#cloud-config +growpart: + mode: auto + devices: [/] + +users: + - name: root + lock_passwd: false + hashed_passwd: $6$rounds=4096$2RFfXKGPKTcdF.CH$dzLlW9Pg1jbeojxRxEraHwEMAPAbpChBdrMFV1SOa6etSF2CYAe.hC1dRDM1icTOk7M4yhVS1BtwJjah9essD0 + +disable_root: false +ssh_pwauth: true + +runcmd: + - | + gawk -i inplace -f- /etc/openssh/sshd_config <<'EOF' + BEGIN { update = "PermitRootLogin yes" } + /^#*PermitRootLogin/ { $0 = update; found = 1 } + { print } + END { if (!found) print update >>FILENAME } + EOF + - systemctl reload sshd diff --git a/packer/alt/plugins.pkr.hcl b/packer/alt/plugins.pkr.hcl new file mode 120000 index 00000000..44663b58 --- /dev/null +++ b/packer/alt/plugins.pkr.hcl @@ -0,0 +1 @@ +../plugins.pkr.hcl \ No newline at end of file diff --git a/packer/alt/variables.pkr.hcl b/packer/alt/variables.pkr.hcl new file mode 100644 index 00000000..e4ed7c54 --- /dev/null +++ b/packer/alt/variables.pkr.hcl @@ -0,0 +1,38 @@ +variable "appliance_name" { + type = string + default = "alt" +} + +variable "version" { + type = string + default = "9" +} + +variable "input_dir" { + type = string +} + +variable "output_dir" { + type = string +} + +variable "headless" { + type = bool + default = false +} + +variable "alt" { + type = map(map(string)) + + default = { + "9" = { + iso_url = "https://mirror.yandex.ru/altlinux/p9/images/cloud/x86_64/alt-p9-cloud-x86_64.qcow2" + iso_checksum = "f3837a01518003f4ecaeca4148c3a1c5904a4657f72d9b55d6e8bd0903ca270f" + } + + "10" = { + iso_url = "https://mirror.yandex.ru/altlinux/p10/images/cloud/x86_64/alt-p10-cloud-x86_64.qcow2" + iso_checksum = "c20730ca87b8cb026ced7dd254abce05cd0deb33f60f4dab6c17968f8bc968d5" + } + } +} diff --git a/packer/amazon/10-upgrade-distro.sh b/packer/amazon/10-upgrade-distro.sh new file mode 100644 index 00000000..14dd2197 --- /dev/null +++ b/packer/amazon/10-upgrade-distro.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# (Auto)Removes unneeded packages and upgrades +# the distro. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +# NOTE: in this old version of OL, dnf is not available. + +yum update -y --skip-broken + +yum upgrade -y util-linux + +# Ensure packages needed for post-processing scripts do exist. +yum install -y curl gawk grep jq sed + +sync diff --git a/packer/amazon/11-update-grub.sh b/packer/amazon/11-update-grub.sh new file mode 100644 index 00000000..de8372fe --- /dev/null +++ b/packer/amazon/11-update-grub.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash + +# Sets kernel command line (net.ifnames=0 is particularily important), +# then updates initramfs/initrd and grub2. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +rm -rf /etc/default/grub.d/ + +# NOTE: in this old version of OL, gawk does not understand +# the "-i inplace" option. + +# Drop unwanted. 
+ +gawk -f- /etc/default/grub >/etc/default/grub.new <<'EOF' +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "crashkernel=no") } +{ print } +EOF +mv /etc/default/grub{.new,} + +# Ensure required. + +gawk -f- /etc/default/grub >/etc/default/grub.new <<'EOF' +/^GRUB_CMDLINE_LINUX=/ { found = 1 } +/^GRUB_CMDLINE_LINUX=/ && !/net.ifnames=0/ { gsub(/"$/, " net.ifnames=0\"") } +/^GRUB_CMDLINE_LINUX=/ && !/biosdevname=0/ { gsub(/"$/, " biosdevname=0\"") } +{ print } +END { if (!found) print "GRUB_CMDLINE_LINUX=\" net.ifnames=0 biosdevname=0\"" >> FILENAME } +EOF +mv /etc/default/grub{.new,} + +gawk -f- /etc/default/grub >/etc/default/grub.new <<'EOF' +BEGIN { update = "GRUB_TIMEOUT=0" } +/^GRUB_TIMEOUT=/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF +mv /etc/default/grub{.new,} + +# Cleanup. + +gawk -f- /etc/default/grub >/etc/default/grub.new <<'EOF' +{ gsub(/(" *| *")/, "\""); gsub(/ */, " ") } +{ print } +EOF +mv /etc/default/grub{.new,} + +yum install -y dracut-config-generic dracut-network + +INITRAMFS_IMG=$(find /boot/ -maxdepth 1 -name 'initramfs-*.img' ! -name '*rescue*' ! -name '*kdump*' | sort -V | tail -1) +INITRAMFS_VER=$(sed -e 's/^.*initramfs-//' -e 's/\.img$//' <<< "$INITRAMFS_IMG") +dracut --force "$INITRAMFS_IMG" "$INITRAMFS_VER" + +grub2-mkconfig -o /boot/grub2/grub.cfg + +sync diff --git a/packer/amazon/80-install-context.sh b/packer/amazon/80-install-context.sh new file mode 100644 index 00000000..c9789ec7 --- /dev/null +++ b/packer/amazon/80-install-context.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_SUFFIX:=.el7.noarch.rpm}" + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +yum install -y /context/one-context*$CTX_SUFFIX open-vm-tools + +sync diff --git a/packer/amazon/81-configure-ssh.sh b/packer/amazon/81-configure-ssh.sh new file mode 100644 index 00000000..686a18d1 --- /dev/null +++ b/packer/amazon/81-configure-ssh.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +# Configures critical settings for OpenSSH server. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +# NOTE: in this old version of OL, gawk does not understand +# the "-i inplace" option. 
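+#
+# (Comparison only: with a newer gawk each edit below would be the one-step
+#    gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' ... EOF
+#  form used for the other distros; here it is emulated by writing to
+#  sshd_config.new and renaming it over the original with mv.)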
+ +gawk -f- /etc/ssh/sshd_config >/etc/ssh/sshd_config.new <<'EOF' +BEGIN { update = "PasswordAuthentication no" } +/^[#\s]*PasswordAuthentication\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF +mv /etc/ssh/sshd_config{.new,} + +gawk -f- /etc/ssh/sshd_config >/etc/ssh/sshd_config.new <<'EOF' +BEGIN { update = "PermitRootLogin without-password" } +/^[#\s]*PermitRootLogin\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF +mv /etc/ssh/sshd_config{.new,} + +gawk -f- /etc/ssh/sshd_config >/etc/ssh/sshd_config.new <<'EOF' +BEGIN { update = "UseDNS no" } +/^[#\s]*UseDNS\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF +mv /etc/ssh/sshd_config{.new,} + +sync diff --git a/packer/amazon/98-collect-garbage.sh b/packer/amazon/98-collect-garbage.sh new file mode 100644 index 00000000..80efba61 --- /dev/null +++ b/packer/amazon/98-collect-garbage.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# Cleans YUM caches, removes temporary files / logs, +# removes leftover / temporary unneeded packages. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +systemctl mask gssproxy.service + +package-cleanup --oldkernels --count=1 -y + +yum remove -y NetworkManager +yum remove -y fwupd linux-firmware + +yum clean -y all + +rm -rf /context/ + +sync diff --git a/packer/amazon/amazon.pkr.hcl b/packer/amazon/amazon.pkr.hcl new file mode 100644 index 00000000..b97adeda --- /dev/null +++ b/packer/amazon/amazon.pkr.hcl @@ -0,0 +1,76 @@ +# Build cloud init iso +source "file" "user_data" { + source = "${var.input_dir}/cloud-init.yml" + target = "${var.input_dir}/${var.appliance_name}-userdata" +} + +build { + sources = ["sources.file.user_data"] + + provisioner "shell-local" { + inline = [ + "cloud-localds ${var.input_dir}/${var.appliance_name}-cloud-init.iso ${var.input_dir}/${var.appliance_name}-userdata", + ] + } +} + +# Build VM image +source "qemu" "amazon" { + cpus = 2 + memory = 2048 + accelerator = "kvm" + + iso_url = lookup(lookup(var.amazon, var.version, {}), "iso_url", "") + iso_checksum = lookup(lookup(var.amazon, var.version, {}), "iso_checksum", "") + + headless = var.headless + + disk_image = true + disk_cache = "unsafe" + disk_interface = "virtio" + net_device = "virtio-net" + format = "qcow2" + + output_directory = var.output_dir + + qemuargs = [ ["-serial", "stdio"], + ["-cpu", "host"], + ["-cdrom", "${var.input_dir}/${var.appliance_name}-cloud-init.iso"] + ] + + ssh_username = "root" + ssh_password = "opennebula" + ssh_wait_timeout = "900s" + vm_name = "${var.appliance_name}" +} + +build { + sources = ["source.qemu.amazon"] + + provisioner "shell" { inline = ["mkdir /context"] } + + provisioner "file" { + source = "context-linux/out/" + destination = "/context" + } + + provisioner "shell" { + execute_command = "sudo -iu root {{.Vars}} bash {{.Path}}" + + # execute *.sh + *.sh. 
from input_dir + scripts = sort(concat( + [for s in fileset(".", "*.sh") : "${var.input_dir}/${s}"], + [for s in fileset(".", "*.sh.${var.version}") : "${var.input_dir}/${s}"] + )) + expect_disconnect = true + } + + post-processor "shell-local" { + execute_command = ["bash", "-c", "{{.Vars}} {{.Script}}"] + environment_vars = [ + "OUTPUT_DIR=${var.output_dir}", + "APPLIANCE_NAME=${var.appliance_name}", + ] + scripts = [ "packer/postprocess.sh" ] + } +} diff --git a/packer/amazon/cloud-init.yml b/packer/amazon/cloud-init.yml new file mode 100644 index 00000000..a2e97b34 --- /dev/null +++ b/packer/amazon/cloud-init.yml @@ -0,0 +1,22 @@ +#cloud-config +growpart: + mode: auto + devices: [/] + +users: + - name: root + lock_passwd: false + hashed_passwd: $6$rounds=4096$2RFfXKGPKTcdF.CH$dzLlW9Pg1jbeojxRxEraHwEMAPAbpChBdrMFV1SOa6etSF2CYAe.hC1dRDM1icTOk7M4yhVS1BtwJjah9essD0 + +disable_root: false +ssh_pwauth: true + +runcmd: + - | + gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' + BEGIN { update = "PermitRootLogin yes" } + /^#*PermitRootLogin/ { $0 = update; found = 1 } + { print } + END { if (!found) print update >>FILENAME } + EOF + - systemctl reload sshd diff --git a/packer/amazon/plugins.pkr.hcl b/packer/amazon/plugins.pkr.hcl new file mode 120000 index 00000000..44663b58 --- /dev/null +++ b/packer/amazon/plugins.pkr.hcl @@ -0,0 +1 @@ +../plugins.pkr.hcl \ No newline at end of file diff --git a/packer/amazon/variables.pkr.hcl b/packer/amazon/variables.pkr.hcl new file mode 100644 index 00000000..3c8ca0ed --- /dev/null +++ b/packer/amazon/variables.pkr.hcl @@ -0,0 +1,34 @@ +variable "appliance_name" { + type = string + default = "amazon" +} + +variable "version" { + type = string + default = "2" +} + +variable "input_dir" { + type = string +} + +variable "output_dir" { + type = string +} + +variable "headless" { + type = bool + default = false +} + +variable "amazon" { + type = map(map(string)) + + default = { + "2" = { + # navigate via https://cdn.amazonlinux.com/os-images/latest/kvm/ + iso_url = "https://cdn.amazonlinux.com/os-images/2.0.20231020.1/kvm/amzn2-kvm-2.0.20231020.1-x86_64.xfs.gpt.qcow2" + iso_checksum = "01d411368e724b6bc5fa448c4a97cc7641fcf0da6e8bba00543310681fa2cd2a" + } + } +} diff --git a/packer/build.sh b/packer/build.sh new file mode 100755 index 00000000..6b31a357 --- /dev/null +++ b/packer/build.sh @@ -0,0 +1,27 @@ +#!/bin/bash +DISTRO_NAME=$1 # e.g. debian +DISTRO_VER=$2 # e.g. 11 +DISTRO=${DISTRO_NAME}${DISTRO_VER} # e.g. debian11 +DST=$3 # e.g. export/debian11-6.6.1-1.qcow2 +INPUT_DIR="$(dirname "$0")/$DISTRO_NAME" # e.g. packer/debian +OUTPUT_DIR="$DIR_BUILD/$DISTRO" # e.g. build/debian11 (working dir) +mkdir -p "$OUTPUT_DIR" + +packer init "$INPUT_DIR" + +packer build -force \ + -var "appliance_name=${DISTRO}" \ + -var "version=${DISTRO_VER}" \ + -var "input_dir=${INPUT_DIR}" \ + -var "output_dir=${OUTPUT_DIR}" \ + -var "headless=${PACKER_HEADLESS}" \ + "$INPUT_DIR" # loads all *.pkr.hcl from dir + +# delete potential temporary cloud-init files +rm -f "$INPUT_DIR"/"$DISTRO_NAME"-{cloud-init.iso,userdata} ||: + +# convert working image to the destination +qemu-img convert -c -O qcow2 "$OUTPUT_DIR/$DISTRO" "$DST" + +# delete workig directory +rm -rf "$OUTPUT_DIR" diff --git a/packer/centos/10-upgrade-distro.sh.7 b/packer/centos/10-upgrade-distro.sh.7 new file mode 100644 index 00000000..354325ce --- /dev/null +++ b/packer/centos/10-upgrade-distro.sh.7 @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# (Auto)Removes unneeded packages and upgrades +# the distro. 
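+#
+# (Note: the ".7" suffix on this filename is how version-specific steps are
+#  selected; the packer template runs both "*.sh" and "*.sh.${var.version}"
+#  from the input dir, so this variant only applies to the centos7 build,
+#  while centos8stream uses 10-upgrade-distro.sh.8stream instead.)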
+ +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +# NOTE: in this "ancient" version of OL, dnf is not available. + +yum install -y epel-release yum-utils + +yum update -y --skip-broken + +# Ensure packages needed for post-processing scripts do exist. +yum install -y curl gawk grep jq sed + +sync diff --git a/packer/centos/10-upgrade-distro.sh.8stream b/packer/centos/10-upgrade-distro.sh.8stream new file mode 100644 index 00000000..6a40e288 --- /dev/null +++ b/packer/centos/10-upgrade-distro.sh.8stream @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# (Auto)Removes unneeded packages and upgrades +# the distro. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +dnf install -y epel-release + +dnf update -y --skip-broken + +# Ensure packages needed for post-processing scripts do exist. +dnf install -y curl gawk grep jq sed + +sync diff --git a/packer/centos/11-update-grub.sh.7 b/packer/centos/11-update-grub.sh.7 new file mode 100644 index 00000000..a3ac2151 --- /dev/null +++ b/packer/centos/11-update-grub.sh.7 @@ -0,0 +1,62 @@ +#!/usr/bin/env bash + +# Sets kernel command line (net.ifnames=0 is particularily important), +# then updates initramfs/initrd and grub2. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +rm -rf /etc/default/grub.d/ + +# NOTE: in this old version of CL, gawk does not understand +# the "-i inplace" option. + +# Drop unwanted. + +gawk -f- /etc/default/grub >/etc/default/grub.new <<'EOF' +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "crashkernel=no") } +{ print } +EOF +mv /etc/default/grub{.new,} + +# Ensure required. + +gawk -f- /etc/default/grub >/etc/default/grub.new <<'EOF' +/^GRUB_CMDLINE_LINUX=/ { found = 1 } +/^GRUB_CMDLINE_LINUX=/ && !/net.ifnames=0/ { gsub(/"$/, " net.ifnames=0\"") } +/^GRUB_CMDLINE_LINUX=/ && !/biosdevname=0/ { gsub(/"$/, " biosdevname=0\"") } +{ print } +END { if (!found) print "GRUB_CMDLINE_LINUX=\" net.ifnames=0 biosdevname=0\"" >> FILENAME } +EOF +mv /etc/default/grub{.new,} + +gawk -f- /etc/default/grub >/etc/default/grub.new <<'EOF' +BEGIN { update = "GRUB_TIMEOUT=0" } +/^GRUB_TIMEOUT=/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF +mv /etc/default/grub{.new,} + +# Cleanup. + +gawk -f- /etc/default/grub >/etc/default/grub.new <<'EOF' +{ gsub(/(" *| *")/, "\""); gsub(/ */, " ") } +{ print } +EOF +mv /etc/default/grub{.new,} + +yum install -y dracut-config-generic dracut-network + +INITRAMFS_IMG=$(find /boot/ -maxdepth 1 -name 'initramfs-*.img' ! -name '*rescue*' ! -name '*kdump*' | sort -V | tail -1) +INITRAMFS_VER=$(sed -e 's/^.*initramfs-//' -e 's/\.img$//' <<< "$INITRAMFS_IMG") +dracut --force "$INITRAMFS_IMG" "$INITRAMFS_VER" + +grub2-mkconfig -o /boot/grub2/grub.cfg + +sync diff --git a/packer/centos/11-update-grub.sh.8stream b/packer/centos/11-update-grub.sh.8stream new file mode 100644 index 00000000..b979e882 --- /dev/null +++ b/packer/centos/11-update-grub.sh.8stream @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +# Sets kernel command line (net.ifnames=0 is particularily important), +# then updates initramfs/initrd and grub2. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +rm -rf /etc/default/grub.d/ + +# Drop unwanted. 
+ +gawk -i inplace -f- /etc/default/grub <<'EOF' +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "crashkernel=no") } +{ print } +EOF + +# Ensure required. + +gawk -i inplace -f- /etc/default/grub <<'EOF' +/^GRUB_CMDLINE_LINUX=/ { found = 1 } +/^GRUB_CMDLINE_LINUX=/ && !/net.ifnames=0/ { gsub(/"$/, " net.ifnames=0\"") } +/^GRUB_CMDLINE_LINUX=/ && !/biosdevname=0/ { gsub(/"$/, " biosdevname=0\"") } +{ print } +END { if (!found) print "GRUB_CMDLINE_LINUX=\" net.ifnames=0 biosdevname=0\"" >> FILENAME } +EOF + +gawk -i inplace -f- /etc/default/grub <<'EOF' +BEGIN { update = "GRUB_TIMEOUT=0" } +/^GRUB_TIMEOUT=/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +# Cleanup. + +gawk -i inplace -f- /etc/default/grub <<'EOF' +{ gsub(/(" *| *")/, "\""); gsub(/ */, " ") } +{ print } +EOF + +dnf install -y dracut-config-generic dracut-network + +INITRAMFS_IMG=$(find /boot/ -maxdepth 1 -name 'initramfs-*.img' ! -name '*rescue*' ! -name '*kdump*' | sort -V | tail -1) +INITRAMFS_VER=$(sed -e 's/^.*initramfs-//' -e 's/\.img$//' <<< "$INITRAMFS_IMG") +dracut --force "$INITRAMFS_IMG" "$INITRAMFS_VER" + +grub2-mkconfig -o /boot/grub2/grub.cfg + +sync diff --git a/packer/centos/80-install-context.sh.7 b/packer/centos/80-install-context.sh.7 new file mode 100644 index 00000000..834877d0 --- /dev/null +++ b/packer/centos/80-install-context.sh.7 @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_SUFFIX:=.el7.noarch.rpm}" + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +yum install -y /context/one-context*$CTX_SUFFIX haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +if ! rpm -q --queryformat '%{VERSION}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + yum install -y --setopt=install_weak_deps=False NetworkManager systemd-networkd +fi +# <<< Apply only on one-context >= 6.1 <<< + +sync diff --git a/packer/centos/80-install-context.sh.8stream b/packer/centos/80-install-context.sh.8stream new file mode 100644 index 00000000..9fcf01bc --- /dev/null +++ b/packer/centos/80-install-context.sh.8stream @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_SUFFIX:=.el8.noarch.rpm}" + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +dnf install -y /context/one-context*$CTX_SUFFIX haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +if ! 
rpm -q --queryformat '%{VERSION}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + dnf install -y --setopt=install_weak_deps=False NetworkManager systemd-networkd +fi +# <<< Apply only on one-context >= 6.1 <<< + +sync diff --git a/packer/centos/81-configure-ssh.sh.7 b/packer/centos/81-configure-ssh.sh.7 new file mode 100644 index 00000000..7c8825eb --- /dev/null +++ b/packer/centos/81-configure-ssh.sh.7 @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +# Configures critical settings for OpenSSH server. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +# NOTE: in this old version of CL, gawk does not understand +# the "-i inplace" option. + +gawk -f- /etc/ssh/sshd_config >/etc/ssh/sshd_config.new <<'EOF' +BEGIN { update = "PasswordAuthentication no" } +/^[#\s]*PasswordAuthentication\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF +mv /etc/ssh/sshd_config{.new,} + +gawk -f- /etc/ssh/sshd_config >/etc/ssh/sshd_config.new <<'EOF' +BEGIN { update = "PermitRootLogin without-password" } +/^[#\s]*PermitRootLogin\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF +mv /etc/ssh/sshd_config{.new,} + +gawk -f- /etc/ssh/sshd_config >/etc/ssh/sshd_config.new <<'EOF' +BEGIN { update = "UseDNS no" } +/^[#\s]*UseDNS\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF +mv /etc/ssh/sshd_config{.new,} + +sync diff --git a/packer/centos/81-configure-ssh.sh.8stream b/packer/centos/81-configure-ssh.sh.8stream new file mode 100644 index 00000000..ca55c22b --- /dev/null +++ b/packer/centos/81-configure-ssh.sh.8stream @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Configures critical settings for OpenSSH server. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PasswordAuthentication no" } +/^[#\s]*PasswordAuthentication\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PermitRootLogin without-password" } +/^[#\s]*PermitRootLogin\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "UseDNS no" } +/^[#\s]*UseDNS\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +sync diff --git a/packer/centos/98-collect-garbage.sh.7 b/packer/centos/98-collect-garbage.sh.7 new file mode 100644 index 00000000..07ebb9fe --- /dev/null +++ b/packer/centos/98-collect-garbage.sh.7 @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# Cleans DNF caches, removes temporary files / logs, +# removes leftover / temporary unneeded packages. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +systemctl disable kdump.service + +package-cleanup --oldkernels --count=1 -y + +yum remove -y fwupd linux-firmware + +yum clean -y all + +rm -rf /context/ + +sync diff --git a/packer/centos/98-collect-garbage.sh.8stream b/packer/centos/98-collect-garbage.sh.8stream new file mode 100644 index 00000000..ea5f3442 --- /dev/null +++ b/packer/centos/98-collect-garbage.sh.8stream @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# Cleans DNF caches, removes temporary files / logs, +# removes leftover / temporary unneeded packages. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +systemctl disable kdump.service + +# Remove old kernels. 
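+# ("dnf repoquery --installonly --latest-limit=-1 -q" lists every installed
+#  install-only package, i.e. kernels, except the newest one, so the removal
+#  below keeps only the most recent kernel.)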
+dnf remove -y $(dnf repoquery --installonly --latest-limit=-1 -q) + +dnf remove -y fwupd linux-firmware + +dnf clean -y all + +rm -rf /boot/*-rescue-* +rm -rf /context/ + +sync diff --git a/packer/centos/centos.pkr.hcl b/packer/centos/centos.pkr.hcl new file mode 100644 index 00000000..e1fb7557 --- /dev/null +++ b/packer/centos/centos.pkr.hcl @@ -0,0 +1,76 @@ +# Build cloud init iso +source "file" "user_data" { + source = "${var.input_dir}/cloud-init.yml" + target = "${var.input_dir}/${var.appliance_name}-userdata" +} + +build { + sources = ["sources.file.user_data"] + + provisioner "shell-local" { + inline = [ + "cloud-localds ${var.input_dir}/${var.appliance_name}-cloud-init.iso ${var.input_dir}/${var.appliance_name}-userdata", + ] + } +} + +# Build VM image +source "qemu" "centos" { + cpus = 2 + memory = 2048 + accelerator = "kvm" + + iso_url = lookup(lookup(var.centos, var.version, {}), "iso_url", "") + iso_checksum = lookup(lookup(var.centos, var.version, {}), "iso_checksum", "") + + headless = var.headless + + disk_image = true + disk_cache = "unsafe" + disk_interface = "virtio" + net_device = "virtio-net" + format = "qcow2" + + output_directory = var.output_dir + + qemuargs = [ ["-serial", "stdio"], + ["-cpu", "host"], + ["-cdrom", "${var.input_dir}/${var.appliance_name}-cloud-init.iso"] + ] + + ssh_username = "root" + ssh_password = "opennebula" + ssh_wait_timeout = "900s" + vm_name = "${var.appliance_name}" +} + +build { + sources = ["source.qemu.centos"] + + provisioner "shell" { inline = ["mkdir /context"] } + + provisioner "file" { + source = "context-linux/out/" + destination = "/context" + } + + provisioner "shell" { + execute_command = "sudo -iu root {{.Vars}} bash {{.Path}}" + + # execute *.sh + *.sh. from input_dir + scripts = sort(concat( + [for s in fileset(".", "*.sh") : "${var.input_dir}/${s}"], + [for s in fileset(".", "*.sh.${var.version}") : "${var.input_dir}/${s}"] + )) + expect_disconnect = true + } + + post-processor "shell-local" { + execute_command = ["bash", "-c", "{{.Vars}} {{.Script}}"] + environment_vars = [ + "OUTPUT_DIR=${var.output_dir}", + "APPLIANCE_NAME=${var.appliance_name}", + ] + scripts = [ "packer/postprocess.sh" ] + } +} diff --git a/packer/centos/cloud-init.yml b/packer/centos/cloud-init.yml new file mode 100644 index 00000000..a2e97b34 --- /dev/null +++ b/packer/centos/cloud-init.yml @@ -0,0 +1,22 @@ +#cloud-config +growpart: + mode: auto + devices: [/] + +users: + - name: root + lock_passwd: false + hashed_passwd: $6$rounds=4096$2RFfXKGPKTcdF.CH$dzLlW9Pg1jbeojxRxEraHwEMAPAbpChBdrMFV1SOa6etSF2CYAe.hC1dRDM1icTOk7M4yhVS1BtwJjah9essD0 + +disable_root: false +ssh_pwauth: true + +runcmd: + - | + gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' + BEGIN { update = "PermitRootLogin yes" } + /^#*PermitRootLogin/ { $0 = update; found = 1 } + { print } + END { if (!found) print update >>FILENAME } + EOF + - systemctl reload sshd diff --git a/packer/centos/plugins.pkr.hcl b/packer/centos/plugins.pkr.hcl new file mode 120000 index 00000000..44663b58 --- /dev/null +++ b/packer/centos/plugins.pkr.hcl @@ -0,0 +1 @@ +../plugins.pkr.hcl \ No newline at end of file diff --git a/packer/centos/variables.pkr.hcl b/packer/centos/variables.pkr.hcl new file mode 100644 index 00000000..6f4e23ed --- /dev/null +++ b/packer/centos/variables.pkr.hcl @@ -0,0 +1,38 @@ +variable "appliance_name" { + type = string + default = "centos" +} + +variable "version" { + type = string + default = "7" +} + +variable "input_dir" { + type = string +} + +variable "output_dir" { + type = 
string } + +variable "headless" { + type = bool + default = false +} + +variable "centos" { + type = map(map(string)) + + default = { + "7" = { + iso_url = "https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2c" + iso_checksum = "8540fcfb73b41d2322644b7c4301b52cb1753c6daf9539866214d725870db673" + } + + "8stream" = { + iso_url = "https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-latest.x86_64.qcow2" + iso_checksum = "95438a1d3c781871a4bfdedf1d234967f9a70a1119fedbc52c405cb0e7e8a9ad" + } + } +} diff --git a/packer/debian/10-upgrade-distro.sh b/packer/debian/10-upgrade-distro.sh new file mode 100644 index 00000000..9fa165a2 --- /dev/null +++ b/packer/debian/10-upgrade-distro.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# (Auto)Removes unneeded packages and upgrades +# the distro. + +policy_rc_d_disable() (echo "exit 101" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) +policy_rc_d_enable() (echo "exit 0" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +export DEBIAN_FRONTEND=noninteractive + +apt-get update -y + +policy_rc_d_disable + +apt-get install -y --fix-broken + +apt-get upgrade -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" + +# Ensure packages needed for post-processing scripts do exist. +apt-get install -y curl gawk grep jq + +policy_rc_d_enable + +sync diff --git a/packer/debian/11-update-grub.sh b/packer/debian/11-update-grub.sh new file mode 100644 index 00000000..f65d0902 --- /dev/null +++ b/packer/debian/11-update-grub.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +# Sets kernel command line (net.ifnames=0 is particularly important), +# then updates initramfs/initrd and grub2. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +rm -rf /etc/default/grub.d/ + +# Drop unwanted. + +# NOTE: console=ttyS*, earlyprintk=ttyS* may cause kernel panic during first boot. +# The exact problem is identical to https://github.com/dmacvicar/terraform-provider-libvirt/issues/948. +# A correct workaround is described here: https://bugs.launchpad.net/ubuntu/+source/cloud-initramfs-tools/+bug/1123220. + +gawk -i inplace -f- /etc/default/grub <<'EOF' +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<quiet\>/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<splash\>/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<console=[^ "]*/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<earlyprintk=[^ "]*/, "") } +/^GRUB_TERMINAL=/ { gsub(/\<serial\>/, "") } +{ print } +EOF + +# Ensure required. + +gawk -i inplace -f- /etc/default/grub <<'EOF' +/^GRUB_CMDLINE_LINUX=/ { found = 1 } +/^GRUB_CMDLINE_LINUX=/ && !/net.ifnames=0/ { gsub(/"$/, " net.ifnames=0\"") } +/^GRUB_CMDLINE_LINUX=/ && !/biosdevname=0/ { gsub(/"$/, " biosdevname=0\"") } +{ print } +END { if (!found) print "GRUB_CMDLINE_LINUX=\" net.ifnames=0 biosdevname=0\"" >> FILENAME } +EOF + +gawk -i inplace -f- /etc/default/grub <<'EOF' +BEGIN { update = "GRUB_TIMEOUT=0" } +/^GRUB_TIMEOUT=/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +# Cleanup. + +gawk -i inplace -f- /etc/default/grub <<'EOF' +{ gsub(/(" *| *")/, "\""); gsub(/ */, " ") } +{ print } +EOF + +update-initramfs -vu +update-grub2 + +sync diff --git a/packer/debian/80-install-context.sh.10 b/packer/debian/80-install-context.sh.10 new file mode 100644 index 00000000..67fd6e25 --- /dev/null +++ b/packer/debian/80-install-context.sh.10 @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. 
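+# CTX_SUFFIX selects the package flavour to install (.deb here) and can be overridden from the environment.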
+ +: "${CTX_SUFFIX:=.deb}" + +policy_rc_d_disable() (echo "exit 101" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) +policy_rc_d_enable() (echo "exit 0" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +export DEBIAN_FRONTEND=noninteractive + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + apt-get install -y qemu-guest-agent open-vm-tools ruby virt-what parted + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +policy_rc_d_disable + +dpkg -i /context/one-context*$CTX_SUFFIX || apt-get install -y -f +dpkg -i /context/one-context*$CTX_SUFFIX + +apt-get install -y haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +if ! dpkg-query -W --showformat '${Version}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + apt-get install -y --no-install-recommends --no-install-suggests netplan.io network-manager +fi +# <<< Apply only on one-context >= 6.1 <<< + +policy_rc_d_enable + +sync diff --git a/packer/debian/80-install-context.sh.11 b/packer/debian/80-install-context.sh.11 new file mode 100644 index 00000000..67fd6e25 --- /dev/null +++ b/packer/debian/80-install-context.sh.11 @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_SUFFIX:=.deb}" + +policy_rc_d_disable() (echo "exit 101" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) +policy_rc_d_enable() (echo "exit 0" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +export DEBIAN_FRONTEND=noninteractive + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + apt-get install -y qemu-guest-agent open-vm-tools ruby virt-what parted + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +policy_rc_d_disable + +dpkg -i /context/one-context*$CTX_SUFFIX || apt-get install -y -f +dpkg -i /context/one-context*$CTX_SUFFIX + +apt-get install -y haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +if ! dpkg-query -W --showformat '${Version}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + apt-get install -y --no-install-recommends --no-install-suggests netplan.io network-manager +fi +# <<< Apply only on one-context >= 6.1 <<< + +policy_rc_d_enable + +sync diff --git a/packer/debian/80-install-context.sh.12 b/packer/debian/80-install-context.sh.12 new file mode 100644 index 00000000..b73b9437 --- /dev/null +++ b/packer/debian/80-install-context.sh.12 @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_SUFFIX:=.deb}" + +policy_rc_d_disable() (echo "exit 101" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) +policy_rc_d_enable() (echo "exit 0" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +export DEBIAN_FRONTEND=noninteractive + +if ! 
stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +policy_rc_d_disable + +dpkg -i /context/one-context*$CTX_SUFFIX || apt-get install -y -f +dpkg -i /context/one-context*$CTX_SUFFIX + +apt-get install -y haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +if ! dpkg-query -W --showformat '${Version}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + apt-get install -y --no-install-recommends --no-install-suggests netplan.io network-manager + systemctl enable systemd-networkd +fi +# <<< Apply only on one-context >= 6.1 <<< + +policy_rc_d_enable + +sync diff --git a/packer/debian/81-configure-ssh.sh b/packer/debian/81-configure-ssh.sh new file mode 100644 index 00000000..ca55c22b --- /dev/null +++ b/packer/debian/81-configure-ssh.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Configures critical settings for OpenSSH server. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PasswordAuthentication no" } +/^[#\s]*PasswordAuthentication\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PermitRootLogin without-password" } +/^[#\s]*PermitRootLogin\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "UseDNS no" } +/^[#\s]*UseDNS\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +sync diff --git a/packer/debian/98-collect-garbage.sh b/packer/debian/98-collect-garbage.sh new file mode 100644 index 00000000..2cd3ffe9 --- /dev/null +++ b/packer/debian/98-collect-garbage.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +# Cleans APT caches, removes temporary files / logs, +# removes leftover / temporary unneeded packages. 
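+# cloud-init is only needed while Packer builds the image; at runtime the one-context package takes over.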
+ +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +export DEBIAN_FRONTEND=noninteractive + +apt-get purge -y cloud-init fwupd snapd + +apt-get autoremove -y + +apt-get clean -y && rm -rf /var/lib/apt/lists/* + +rm -f /etc/hostname +rm -f /etc/network/cloud-ifupdown-helper +rm -f /etc/network/cloud-interfaces-template +rm -f /etc/network/if-post-down.d/cloud_inet6 +rm -f /etc/network/if-pre-up.d/cloud_inet6 +rm -f /etc/udev/rules.d/75-cloud-ifupdown.rules + +rm -rf /context/ + +sync diff --git a/packer/debian/cloud-init.yml b/packer/debian/cloud-init.yml new file mode 100644 index 00000000..066fcc74 --- /dev/null +++ b/packer/debian/cloud-init.yml @@ -0,0 +1,16 @@ +#cloud-config +growpart: + mode: auto + devices: [/] + +users: + - name: root + lock_passwd: false + hashed_passwd: $6$rounds=4096$2RFfXKGPKTcdF.CH$dzLlW9Pg1jbeojxRxEraHwEMAPAbpChBdrMFV1SOa6etSF2CYAe.hC1dRDM1icTOk7M4yhVS1BtwJjah9essD0 + +disable_root: false +ssh_pwauth: true + +runcmd: + - perl -i -pe 's/# *PermitRootLogin .*/PermitRootLogin yes/' /etc/ssh/sshd_config + - systemctl reload sshd diff --git a/packer/debian/debian.pkr.hcl b/packer/debian/debian.pkr.hcl new file mode 100644 index 00000000..0279af56 --- /dev/null +++ b/packer/debian/debian.pkr.hcl @@ -0,0 +1,75 @@ +# Build cloud init iso +source "file" "user_data" { + source = "${var.input_dir}/cloud-init.yml" + target = "${var.input_dir}/${var.appliance_name}-userdata" +} + +build { + sources = ["sources.file.user_data"] + + provisioner "shell-local" { + inline = [ + "cloud-localds ${var.input_dir}/${var.appliance_name}-cloud-init.iso ${var.input_dir}/${var.appliance_name}-userdata", + ] + } +} + +# Build VM image +source "qemu" "debian" { + cpus = 2 + memory = 2048 + accelerator = "kvm" + + iso_url = lookup(lookup(var.debian, var.version, {}), "iso_url", "") + iso_checksum = lookup(lookup(var.debian, var.version, {}), "iso_checksum", "") + + headless = var.headless + + disk_image = true + disk_cache = "unsafe" + disk_interface = "virtio" + net_device = "virtio-net" + format = "qcow2" + + output_directory = var.output_dir + + qemuargs = [ ["-serial", "stdio"], + ["-cpu", "host"], + ["-cdrom", "${var.input_dir}/${var.appliance_name}-cloud-init.iso"] + ] + ssh_username = "root" + ssh_password = "opennebula" + ssh_wait_timeout = "600s" + vm_name = "${var.appliance_name}" +} + +build { + sources = ["source.qemu.debian"] + + provisioner "shell" { inline = ["mkdir /context"] } + + provisioner "file" { + source = "context-linux/out/" + destination = "/context" + } + + provisioner "shell" { + execute_command = "sudo -iu root {{.Vars}} bash {{.Path}}" + + # execute *.sh + *.sh. 
from input_dir + scripts = sort(concat( + [for s in fileset(".", "*.sh") : "${var.input_dir}/${s}"], + [for s in fileset(".", "*.sh.${var.version}") : "${var.input_dir}/${s}"] + )) + expect_disconnect = true + } + + post-processor "shell-local" { + execute_command = ["bash", "-c", "{{.Vars}} {{.Script}}"] + environment_vars = [ + "OUTPUT_DIR=${var.output_dir}", + "APPLIANCE_NAME=${var.appliance_name}", + ] + scripts = [ "packer/postprocess.sh" ] + } +} diff --git a/packer/debian/plugins.pkr.hcl b/packer/debian/plugins.pkr.hcl new file mode 120000 index 00000000..44663b58 --- /dev/null +++ b/packer/debian/plugins.pkr.hcl @@ -0,0 +1 @@ +../plugins.pkr.hcl \ No newline at end of file diff --git a/packer/debian/variables.pkr.hcl b/packer/debian/variables.pkr.hcl new file mode 100644 index 00000000..6138979c --- /dev/null +++ b/packer/debian/variables.pkr.hcl @@ -0,0 +1,41 @@ +variable "appliance_name" { + type = string + default = "debian" +} + +variable "version" { + type = string + default = "10" +} + +variable "input_dir" { + type = string +} + +variable "output_dir" { + type = string +} + +variable "headless" { + type = bool + default = false +} + +variable "debian" { + type = map(map(string)) + + default = { + "10" = { + iso_url = "https://cdimage.debian.org/cdimage/cloud/buster/latest/debian-10-generic-amd64.qcow2" + iso_checksum = "a6293eb7c80ca12cc0c458a540ba11c677e15480f460ad1a271aacda41881687c0486dd80cb5ead7382daa9a93ce6252c72bd5b93a8c44144fc44209a18ac682" + } + "11" = { + iso_url = "https://cdimage.debian.org/cdimage/cloud/bullseye/latest/debian-11-generic-amd64.qcow2" + iso_checksum = "78fe9e9a71fa2d63715a2e156939964b45cfaa5c91b634af1b5a06fa359dd612f027332f65319ec08d4aa204672df95a75812d7a6a016659112b931b4d94f6b6" + } + "12" = { + iso_url = "https://cdimage.debian.org/cdimage/cloud/bookworm/latest/debian-12-generic-amd64.qcow2" + iso_checksum = "b2ddc01e8d13dabbcfde6661541aae92219be2d442653950f0e44613ddebaeb80dc7a83e0202c5509c5e72f4bd1f4edee4c83f35191f2562b3f31e20e9e87ec2" + } + } +} diff --git a/packer/devuan/10-upgrade-distro.sh.3 b/packer/devuan/10-upgrade-distro.sh.3 new file mode 100644 index 00000000..3836ee24 --- /dev/null +++ b/packer/devuan/10-upgrade-distro.sh.3 @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# (Auto)Removes unneeded packages and upgrades +# the distro. + +policy_rc_d_disable() (echo "exit 101" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) +policy_rc_d_enable() (echo "exit 0" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +export DEBIAN_FRONTEND=noninteractive + +sed -i '/^deb cdrom/d' /etc/apt/sources.list +echo "deb http://deb.devuan.org/merged beowulf main" >> /etc/apt/sources.list +echo "deb http://deb.devuan.org/merged beowulf-updates main" >> /etc/apt/sources.list +echo "deb http://deb.devuan.org/merged beowulf-security main" >> /etc/apt/sources.list + +apt-get update -y + +policy_rc_d_disable + +debconf-set-selections <<< 'grub-pc grub-pc/install_devices multiselect /dev/sda' + +apt-get install -y --fix-broken + +apt-get upgrade -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" + +# Ensure packages needed for post-processing scripts do exist. 
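+# (curl and jq download the context package in a later step; gawk edits the grub and sshd configuration.)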
+apt-get install -y curl gawk grep jq + +policy_rc_d_enable + +ln -sf /usr/share/zoneinfo/UTC /etc/localtime + +sync diff --git a/packer/devuan/10-upgrade-distro.sh.4 b/packer/devuan/10-upgrade-distro.sh.4 new file mode 100644 index 00000000..74cc1720 --- /dev/null +++ b/packer/devuan/10-upgrade-distro.sh.4 @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# (Auto)Removes unneeded packages and upgrades +# the distro. + +policy_rc_d_disable() (echo "exit 101" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) +policy_rc_d_enable() (echo "exit 0" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +export DEBIAN_FRONTEND=noninteractive + +sed -i '/^deb cdrom/d' /etc/apt/sources.list +echo "deb http://deb.devuan.org/merged chimaera main" >> /etc/apt/sources.list +echo "deb http://deb.devuan.org/merged chimaera-updates main" >> /etc/apt/sources.list +echo "deb http://deb.devuan.org/merged chimaera-security main" >> /etc/apt/sources.list + +apt-get update -y + +policy_rc_d_disable + +debconf-set-selections <<< 'grub-pc grub-pc/install_devices multiselect /dev/sda' + +apt-get install -y --fix-broken + +apt-get upgrade -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" + +# Ensure packages needed for post-processing scripts do exist. +apt-get install -y curl gawk grep jq + +policy_rc_d_enable + +ln -sf /usr/share/zoneinfo/UTC /etc/localtime + +sync diff --git a/packer/devuan/11-update-grub.sh b/packer/devuan/11-update-grub.sh new file mode 100644 index 00000000..f65d0902 --- /dev/null +++ b/packer/devuan/11-update-grub.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +# Sets kernel command line (net.ifnames=0 is particularly important), +# then updates initramfs/initrd and grub2. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +rm -rf /etc/default/grub.d/ + +# Drop unwanted. + +# NOTE: console=ttyS*, earlyprintk=ttyS* may cause kernel panic during first boot. +# The exact problem is identical to https://github.com/dmacvicar/terraform-provider-libvirt/issues/948. +# A correct workaround is described here: https://bugs.launchpad.net/ubuntu/+source/cloud-initramfs-tools/+bug/1123220. + +gawk -i inplace -f- /etc/default/grub <<'EOF' +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<quiet\>/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<splash\>/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<console=[^ "]*/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<earlyprintk=[^ "]*/, "") } +/^GRUB_TERMINAL=/ { gsub(/\<serial\>/, "") } +{ print } +EOF + +# Ensure required. + +gawk -i inplace -f- /etc/default/grub <<'EOF' +/^GRUB_CMDLINE_LINUX=/ { found = 1 } +/^GRUB_CMDLINE_LINUX=/ && !/net.ifnames=0/ { gsub(/"$/, " net.ifnames=0\"") } +/^GRUB_CMDLINE_LINUX=/ && !/biosdevname=0/ { gsub(/"$/, " biosdevname=0\"") } +{ print } +END { if (!found) print "GRUB_CMDLINE_LINUX=\" net.ifnames=0 biosdevname=0\"" >> FILENAME } +EOF + +gawk -i inplace -f- /etc/default/grub <<'EOF' +BEGIN { update = "GRUB_TIMEOUT=0" } +/^GRUB_TIMEOUT=/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +# Cleanup. + +gawk -i inplace -f- /etc/default/grub <<'EOF' +{ gsub(/(" *| *")/, "\""); gsub(/ */, " ") } +{ print } +EOF + +update-initramfs -vu +update-grub2 + +sync diff --git a/packer/devuan/80-install-context.sh b/packer/devuan/80-install-context.sh new file mode 100644 index 00000000..4ae221e3 --- /dev/null +++ b/packer/devuan/80-install-context.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. 
+ +: "${CTX_SUFFIX:=.deb}" + +policy_rc_d_disable() (echo "exit 101" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) +policy_rc_d_enable() (echo "exit 0" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +export DEBIAN_FRONTEND=noninteractive + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +policy_rc_d_disable + +dpkg -i /context/one-context*$CTX_SUFFIX || apt-get install -y -f +dpkg -i /context/one-context*$CTX_SUFFIX + +apt-get install -y open-vm-tools + +policy_rc_d_enable + +sync diff --git a/packer/devuan/81-configure-ssh.sh b/packer/devuan/81-configure-ssh.sh new file mode 100644 index 00000000..ca55c22b --- /dev/null +++ b/packer/devuan/81-configure-ssh.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Configures critical settings for OpenSSH server. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PasswordAuthentication no" } +/^[#\s]*PasswordAuthentication\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PermitRootLogin without-password" } +/^[#\s]*PermitRootLogin\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "UseDNS no" } +/^[#\s]*UseDNS\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +sync diff --git a/packer/devuan/98-collect-garbage.sh b/packer/devuan/98-collect-garbage.sh new file mode 100644 index 00000000..7a4a409d --- /dev/null +++ b/packer/devuan/98-collect-garbage.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# Cleans APT caches, removes temporary files / logs, +# removes leftover / temporary unneeded packages. 
+ +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +export DEBIAN_FRONTEND=noninteractive + +apt-get purge -y cloud-init fwupd + +apt-get autoremove -y + +apt-get clean -y && rm -rf /var/lib/apt/lists/* + +rm -rf /context/ + +sync diff --git a/packer/devuan/devuan.pkr.hcl b/packer/devuan/devuan.pkr.hcl new file mode 100644 index 00000000..b4b97aca --- /dev/null +++ b/packer/devuan/devuan.pkr.hcl @@ -0,0 +1,61 @@ +source "qemu" "devuan" { + cpus = 2 + memory = 2048 + accelerator = "kvm" + + iso_url = lookup(lookup(var.devuan, var.version, {}), "iso_url", "") + iso_checksum = lookup(lookup(var.devuan, var.version, {}), "iso_checksum", "") + + headless = var.headless + + http_directory = "${var.input_dir}" + boot_command = [" auto=true url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/${var.appliance_name}.preseed hostname=localhost domain=localdomain interface=auto "] + boot_wait = "10s" + + disk_cache = "unsafe" + disk_interface = "virtio-scsi" + net_device = "virtio-net" + disk_size = 4096 + format = "qcow2" + + output_directory = "${var.output_dir}" + + qemuargs = [ ["-serial", "stdio"], + ["-cpu", "host"] + ] + ssh_username = "root" + ssh_password = "opennebula" + ssh_wait_timeout = "900s" + vm_name = "${var.appliance_name}" +} + +build { + sources = ["source.qemu.devuan"] + + provisioner "shell" { inline = ["mkdir /context"] } + + provisioner "file" { + source = "context-linux/out/" + destination = "/context" + } + + provisioner "shell" { + execute_command = "{{.Vars}} bash {{.Path}}" + + # execute *.sh + *.sh. from input_dir + scripts = sort(concat( + [for s in fileset(".", "*.sh") : "${var.input_dir}/${s}"], + [for s in fileset(".", "*.sh.${var.version}") : "${var.input_dir}/${s}"] + )) + expect_disconnect = true + } + + post-processor "shell-local" { + execute_command = ["bash", "-c", "{{.Vars}} {{.Script}}"] + environment_vars = [ + "OUTPUT_DIR=${var.output_dir}", + "APPLIANCE_NAME=${var.appliance_name}", + ] + scripts = [ "packer/postprocess.sh" ] + } +} diff --git a/packer/devuan/devuan3.preseed b/packer/devuan/devuan3.preseed new file mode 100644 index 00000000..716d250c --- /dev/null +++ b/packer/devuan/devuan3.preseed @@ -0,0 +1,54 @@ +popularity-contest popularity-contest/participate boolean false + +d-i debian-installer/language string en +d-i debian-installer/locale string en_US +d-i debian-installer/country string US + +d-i keyboard-configuration/xkb-keymap select us + +d-i netcfg/choose_interface select auto +d-i netcfg/wireless_wep string + +d-i clock-setup/utc boolean true +d-i time/zone string US/Eastern +d-i clock-setup/ntp boolean true + +d-i partman/confirm boolean true +d-i partman/confirm_nooverwrite boolean true +d-i partman/choose_partition select finish +d-i partman-basicfilesystems/no_swap boolean false +d-i partman-auto/method string regular +d-i partman-auto/disk string /dev/sda +d-i partman-auto/choose_recipe select root-only +d-i partman-auto/expert_recipe string \ +root-only :: 2048 50 -1 ext4 \ + $primary{ } $bootable{ } \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ / } \ + . 
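+# The "root-only" recipe above creates a single bootable ext4 root partition (min. 2048 MB) that grows to fill the disk.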
+ +d-i apt-setup/cdrom/set-first boolean false +d-i apt-setup/disable-cdrom-entries boolean true +d-i apt-setup/use_mirror boolean false + +d-i tasksel/first multiselect none +d-i pkgsel/run_tasksel boolean false +d-i pkgsel/include string openssh-server +d-i pkgsel/upgrade select safe-upgrade + +d-i passwd/make-user boolean false +d-i passwd/root-login boolean true +d-i passwd/root-password password opennebula +d-i passwd/root-password-again password opennebula + +d-i preseed/late_command string \ +in-target sed -i 's/^#PermitRootLogin .*/PermitRootLogin yes/' /etc/ssh/sshd_config + +d-i choose-init/select_init select sysvinit +d-i choose-init/selected_sysvinit bool true + +d-i grub-installer/only_debian boolean true +d-i grub-installer/bootdev string /dev/sda + +d-i finish-install/reboot_in_progress note diff --git a/packer/devuan/devuan4.preseed b/packer/devuan/devuan4.preseed new file mode 100644 index 00000000..a8a6251d --- /dev/null +++ b/packer/devuan/devuan4.preseed @@ -0,0 +1,53 @@ +popularity-contest popularity-contest/participate boolean false + +d-i debian-installer/language string en +d-i debian-installer/locale string en_US +d-i debian-installer/country string US + +d-i keyboard-configuration/xkb-keymap select us + +d-i netcfg/choose_interface select auto +d-i netcfg/wireless_wep string + +d-i clock-setup/utc boolean true +d-i time/zone string US/Eastern +d-i clock-setup/ntp boolean true + +d-i partman/confirm boolean true +d-i partman/confirm_nooverwrite boolean true +d-i partman/choose_partition select finish +d-i partman-basicfilesystems/no_swap boolean false +d-i partman-auto/method string regular +d-i partman-auto/disk string /dev/sda +d-i partman-auto/choose_recipe select root-only +d-i partman-auto/expert_recipe string \ +root-only :: 2048 50 -1 ext4 \ + $primary{ } $bootable{ } \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ / } \ + . 
+ +d-i apt-setup/cdrom/set-first boolean false +d-i apt-setup/disable-cdrom-entries boolean true +d-i apt-setup/use_mirror boolean false + +d-i pkgsel/run_tasksel boolean false +d-i pkgsel/include string openssh-server +d-i pkgsel/upgrade select safe-upgrade + +d-i passwd/make-user boolean false +d-i passwd/root-login boolean true +d-i passwd/root-password password opennebula +d-i passwd/root-password-again password opennebula + +d-i preseed/late_command string \ +in-target sed -i 's/^#PermitRootLogin .*/PermitRootLogin yes/' /etc/ssh/sshd_config + +d-i choose-init/select_init select sysvinit +d-i choose-init/selected_sysvinit bool true + +d-i grub-installer/only_debian boolean true +d-i grub-installer/bootdev string /dev/sda + +d-i finish-install/reboot_in_progress note diff --git a/packer/devuan/plugins.pkr.hcl b/packer/devuan/plugins.pkr.hcl new file mode 120000 index 00000000..44663b58 --- /dev/null +++ b/packer/devuan/plugins.pkr.hcl @@ -0,0 +1 @@ +../plugins.pkr.hcl \ No newline at end of file diff --git a/packer/devuan/variables.pkr.hcl b/packer/devuan/variables.pkr.hcl new file mode 100644 index 00000000..d31d6bf3 --- /dev/null +++ b/packer/devuan/variables.pkr.hcl @@ -0,0 +1,38 @@ +variable "appliance_name" { + type = string + default = "devuan" +} + +variable "version" { + type = string + default = "3" +} + +variable "input_dir" { + type = string +} + +variable "output_dir" { + type = string +} + +variable "headless" { + type = bool + default = false +} + +variable "devuan" { + type = map(map(string)) + + default = { + "3" = { + iso_url = "https://files.devuan.org/devuan_beowulf/installer-iso/devuan_beowulf_3.1.1_amd64_server.iso" + iso_checksum = "e6e3fc1bdbf626a871d8c27608129c4788623121c8ea059f60607a93c30892de" + } + + "4" = { + iso_url = "https://files.devuan.org/devuan_chimaera/installer-iso/devuan_chimaera_4.0.0_amd64_server.iso" + iso_checksum = "b2c0d159e9d7219422ef9e40673c3126aee118b57df79484384e7995abd2ba0f" + } + } +} diff --git a/packer/fedora/10-upgrade-distro.sh b/packer/fedora/10-upgrade-distro.sh new file mode 100644 index 00000000..f73792ff --- /dev/null +++ b/packer/fedora/10-upgrade-distro.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +# (Auto)Removes unneeded packages and upgrades +# the distro. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +dnf update -y --skip-broken + +# Ensure packages needed for post-processing scripts do exist. +dnf install -y curl gawk grep jq sed + +sync diff --git a/packer/fedora/11-update-grub.sh b/packer/fedora/11-update-grub.sh new file mode 100644 index 00000000..b979e882 --- /dev/null +++ b/packer/fedora/11-update-grub.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +# Sets kernel command line (net.ifnames=0 is particularly important), +# then updates initramfs/initrd and grub2. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +rm -rf /etc/default/grub.d/ + +# Drop unwanted. + +gawk -i inplace -f- /etc/default/grub <<'EOF' +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<quiet\>/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<rhgb\>/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<console=[^ "]*/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<earlyprintk=[^ "]*/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<crashkernel=[^ "]*/, "crashkernel=no") } +{ print } +EOF + +# Ensure required. 
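+# Appends net.ifnames=0 and biosdevname=0 so interfaces keep the legacy ethX names used by contextualization.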
+ +gawk -i inplace -f- /etc/default/grub <<'EOF' +/^GRUB_CMDLINE_LINUX=/ { found = 1 } +/^GRUB_CMDLINE_LINUX=/ && !/net.ifnames=0/ { gsub(/"$/, " net.ifnames=0\"") } +/^GRUB_CMDLINE_LINUX=/ && !/biosdevname=0/ { gsub(/"$/, " biosdevname=0\"") } +{ print } +END { if (!found) print "GRUB_CMDLINE_LINUX=\" net.ifnames=0 biosdevname=0\"" >> FILENAME } +EOF + +gawk -i inplace -f- /etc/default/grub <<'EOF' +BEGIN { update = "GRUB_TIMEOUT=0" } +/^GRUB_TIMEOUT=/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +# Cleanup. + +gawk -i inplace -f- /etc/default/grub <<'EOF' +{ gsub(/(" *| *")/, "\""); gsub(/ */, " ") } +{ print } +EOF + +dnf install -y dracut-config-generic dracut-network + +INITRAMFS_IMG=$(find /boot/ -maxdepth 1 -name 'initramfs-*.img' ! -name '*rescue*' ! -name '*kdump*' | sort -V | tail -1) +INITRAMFS_VER=$(sed -e 's/^.*initramfs-//' -e 's/\.img$//' <<< "$INITRAMFS_IMG") +dracut --force "$INITRAMFS_IMG" "$INITRAMFS_VER" + +grub2-mkconfig -o /boot/grub2/grub.cfg + +sync diff --git a/packer/fedora/12-selinux-fixfiles.sh b/packer/fedora/12-selinux-fixfiles.sh new file mode 100644 index 00000000..cd46b328 --- /dev/null +++ b/packer/fedora/12-selinux-fixfiles.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +sed -i -e 's:^SELINUX=.*:SELINUX=permissive:' /etc/selinux/config +fixfiles -F onboot +reboot diff --git a/packer/fedora/13-selinux-enforcing.sh b/packer/fedora/13-selinux-enforcing.sh new file mode 100644 index 00000000..7a9f8f03 --- /dev/null +++ b/packer/fedora/13-selinux-enforcing.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +sed -i -e 's:^SELINUX=.*:SELINUX=enforcing:' /etc/selinux/config diff --git a/packer/fedora/80-install-context.sh b/packer/fedora/80-install-context.sh new file mode 100644 index 00000000..e6c1793e --- /dev/null +++ b/packer/fedora/80-install-context.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_SUFFIX:=.el8.noarch.rpm}" + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +dnf install -y /context/one-context*$CTX_SUFFIX haveged open-vm-tools + +systemctl enable haveged + +if ! rpm -q --queryformat '%{VERSION}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then +# >>> Apply only on one-context >= 6.1 >>> + dnf install -y --setopt=install_weak_deps=False NetworkManager systemd-networkd + + systemctl enable systemd-networkd + + # This is a workaround for systemd-networkd-wait-online timeout when networkd not used. + # Although this effectively breaks reaching network.target correctly, it is still better + # not to slowdown the boot by (120s) timeout as networkd is rather marginal in RHEL. + systemctl disable systemd-networkd-wait-online + +# <<< Apply only on one-context >= 6.1 <<< +else + systemctl enable network +fi + +sync diff --git a/packer/fedora/81-configure-ssh.sh b/packer/fedora/81-configure-ssh.sh new file mode 100644 index 00000000..057bf3fd --- /dev/null +++ b/packer/fedora/81-configure-ssh.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +# Configures critical settings for OpenSSH server. 
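+# Fedora also ships sshd defaults in /etc/ssh/sshd_config.d/50-redhat.conf, so the first edit below covers both files.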
+ +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +gawk -i inplace -f- /etc/ssh/sshd_config /etc/ssh/sshd_config.d/50-redhat.conf <<'EOF' +BEGIN { update = "PasswordAuthentication no" } +/^[#\s]*PasswordAuthentication\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PermitRootLogin without-password" } +/^[#\s]*PermitRootLogin\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "UseDNS no" } +/^[#\s]*UseDNS\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +rm -rf /etc/ssh/sshd_config.d/50-cloud-init.conf + +sync diff --git a/packer/fedora/98-collect-garbage.sh b/packer/fedora/98-collect-garbage.sh new file mode 100644 index 00000000..2daaf112 --- /dev/null +++ b/packer/fedora/98-collect-garbage.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# Cleans DNF caches, removes temporary files / logs, +# removes leftover / temporary unneeded packages. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +# Remove old kernels. +dnf remove -y $(dnf repoquery --installonly --latest-limit=-1 -q) + +dnf remove -y fwupd linux-firmware + +dnf clean -y all + +rm -rf /context/ + +sync diff --git a/packer/fedora/cloud-init.yml b/packer/fedora/cloud-init.yml new file mode 100644 index 00000000..b59cd2e1 --- /dev/null +++ b/packer/fedora/cloud-init.yml @@ -0,0 +1,23 @@ +#cloud-config +growpart: + mode: auto + devices: [/] + +disable_root: false + +users: + - name: root + lock_passwd: false + hashed_passwd: $6$rounds=4096$2RFfXKGPKTcdF.CH$dzLlW9Pg1jbeojxRxEraHwEMAPAbpChBdrMFV1SOa6etSF2CYAe.hC1dRDM1icTOk7M4yhVS1BtwJjah9essD0 + +ssh_pwauth: true + +runcmd: + - | + gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' + BEGIN { update = "PermitRootLogin yes" } + /^#*PermitRootLogin/ { $0 = update; found = 1 } + { print } + END { if (!found) print update >>FILENAME } + EOF + - systemctl reload sshd diff --git a/packer/fedora/fedora.pkr.hcl b/packer/fedora/fedora.pkr.hcl new file mode 100644 index 00000000..a5f68290 --- /dev/null +++ b/packer/fedora/fedora.pkr.hcl @@ -0,0 +1,75 @@ +# Build cloud init iso +source "file" "user_data" { + source = "${var.input_dir}/cloud-init.yml" + target = "${var.input_dir}/${var.appliance_name}-userdata" +} + +build { + sources = ["sources.file.user_data"] + + provisioner "shell-local" { + inline = [ + "cloud-localds ${var.input_dir}/${var.appliance_name}-cloud-init.iso ${var.input_dir}/${var.appliance_name}-userdata", + ] + } +} + +# Build VM image +source "qemu" "fedora" { + cpus = 2 + memory = 2048 + accelerator = "kvm" + + iso_url = lookup(lookup(var.fedora, var.version, {}), "iso_url", "") + iso_checksum = lookup(lookup(var.fedora, var.version, {}), "iso_checksum", "") + + headless = var.headless + + disk_image = true + disk_cache = "unsafe" + disk_interface = "virtio" + net_device = "virtio-net" + format = "qcow2" + + output_directory = var.output_dir + + qemuargs = [ ["-serial", "stdio"], + ["-cpu", "host"], + ["-cdrom", "${var.input_dir}/${var.appliance_name}-cloud-init.iso"] + ] + ssh_username = "root" + ssh_password = "opennebula" + ssh_wait_timeout = "600s" + vm_name = "${var.appliance_name}" +} + +build { + sources = ["source.qemu.fedora"] + + provisioner "shell" { inline = ["mkdir /context"] } + + provisioner "file" { + source = "context-linux/out/" + destination = "/context" + } + + 
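+ # The file provisioner above copies the packages built in context-linux/out/ into /context inside the VM.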
provisioner "shell" { + execute_command = "sudo -iu root {{.Vars}} bash {{.Path}}" + + # execute *.sh + *.sh. from input_dir + scripts = sort(concat( + [for s in fileset(".", "*.sh") : "${var.input_dir}/${s}"], + [for s in fileset(".", "*.sh.${var.version}") : "${var.input_dir}/${s}"] + )) + expect_disconnect = true + } + + post-processor "shell-local" { + execute_command = ["bash", "-c", "{{.Vars}} {{.Script}}"] + environment_vars = [ + "OUTPUT_DIR=${var.output_dir}", + "APPLIANCE_NAME=${var.appliance_name}", + ] + scripts = [ "packer/postprocess.sh" ] + } +} diff --git a/packer/fedora/plugins.pkr.hcl b/packer/fedora/plugins.pkr.hcl new file mode 120000 index 00000000..44663b58 --- /dev/null +++ b/packer/fedora/plugins.pkr.hcl @@ -0,0 +1 @@ +../plugins.pkr.hcl \ No newline at end of file diff --git a/packer/fedora/variables.pkr.hcl b/packer/fedora/variables.pkr.hcl new file mode 100644 index 00000000..78b04946 --- /dev/null +++ b/packer/fedora/variables.pkr.hcl @@ -0,0 +1,37 @@ +variable "appliance_name" { + type = string + default = "fedora" +} + +variable "version" { + type = string + default = "37" +} + +variable "input_dir" { + type = string +} + +variable "output_dir" { + type = string +} + +variable "headless" { + type = bool + default = false +} + +variable "fedora" { + type = map(map(string)) + + default = { + "37" = { + iso_url = "https://download.fedoraproject.org/pub/fedora/linux/releases/37/Cloud/x86_64/images/Fedora-Cloud-Base-37-1.7.x86_64.qcow2" + iso_checksum = "b5b9bec91eee65489a5745f6ee620573b23337cbb1eb4501ce200b157a01f3a0" + } + "38" = { + iso_url = "https://download.fedoraproject.org/pub/fedora/linux/releases/38/Cloud/x86_64/images/Fedora-Cloud-Base-38-1.6.x86_64.qcow2" + iso_checksum = "d334670401ff3d5b4129fcc662cf64f5a6e568228af59076cc449a4945318482" + } + } +} diff --git a/packer/freebsd/freebsd.pkr.hcl b/packer/freebsd/freebsd.pkr.hcl new file mode 100644 index 00000000..72ea51c3 --- /dev/null +++ b/packer/freebsd/freebsd.pkr.hcl @@ -0,0 +1,50 @@ +# Build VM image +source "qemu" "freebsd" { + cpus = 2 + memory = 2048 + accelerator = "kvm" + + iso_url = lookup(lookup(var.freebsd, var.version, {}), "iso_url", "") + iso_checksum = lookup(lookup(var.freebsd, var.version, {}), "iso_checksum", "") + + headless = var.headless + + boot_wait = "45s" + boot_command = lookup(var.boot_cmd, var.version, []) + + disk_cache = "unsafe" + disk_interface = "virtio" + net_device = "virtio-net" + format = "qcow2" + + output_directory = var.output_dir + + qemuargs = [ ["-serial", "stdio"], + ["-cpu", "host"] + ] + + ssh_username = "root" + ssh_password = "opennebula" + ssh_wait_timeout = "900s" + vm_name = "${var.appliance_name}" +} + +build { + sources = ["source.qemu.freebsd"] + + # be carefull with shell inline provisioners, FreeBSD csh is tricky + provisioner "shell" { + execute_command = "chmod +x {{ .Path }}; env {{ .Vars }} {{ .Path }}" + scripts = ["${var.input_dir}/mkdir"] + } + + provisioner "file" { + destination = "/tmp/context" + source = "context-linux/out/" + } + + provisioner "shell" { + execute_command = "chmod +x {{ .Path }}; env {{ .Vars }} {{ .Path }}" + scripts = ["${var.input_dir}/script.sh"] + } +} diff --git a/packer/freebsd/mkdir b/packer/freebsd/mkdir new file mode 100644 index 00000000..5ec04a17 --- /dev/null +++ b/packer/freebsd/mkdir @@ -0,0 +1,3 @@ +#!/bin/sh + +mkdir /tmp/context diff --git a/packer/freebsd/plugins.pkr.hcl b/packer/freebsd/plugins.pkr.hcl new file mode 120000 index 00000000..44663b58 --- /dev/null +++ b/packer/freebsd/plugins.pkr.hcl @@ 
-0,0 +1 @@ +../plugins.pkr.hcl \ No newline at end of file diff --git a/packer/freebsd/script.sh b/packer/freebsd/script.sh new file mode 100644 index 00000000..954105db --- /dev/null +++ b/packer/freebsd/script.sh @@ -0,0 +1,50 @@ +#!/bin/sh + +set -ex + +# upgrade system +env PAGER=cat freebsd-update fetch --not-running-from-cron || : +freebsd-update install --not-running-from-cron || : + +# contextualize +export ASSUME_ALWAYS_YES=yes +pkg install -y curl bash sudo base64 ruby open-vm-tools-nox11 gawk virt-what isc-dhcp44-client +pkg install -y /tmp/context/one-context-[0-9]*.txz +pkg clean -ay + +# reconfigure SSH server +sed -i '' -e '/^[[:space:]]*PasswordAuthentication[[:space:]]/d' /etc/ssh/sshd_config +sed -i '' -e '/^[[:space:]]*ChallengeResponseAuthentication[[:space:]]/d' /etc/ssh/sshd_config +sed -i '' -e '/^[[:space:]]*PermitRootLogin[[:space:]]/d' /etc/ssh/sshd_config +sed -i '' -e '/^[[:space:]]*UseDNS[[:space:]]/d' /etc/ssh/sshd_config + +echo 'PasswordAuthentication no' >>/etc/ssh/sshd_config +echo 'ChallengeResponseAuthentication no' >>/etc/ssh/sshd_config +echo 'PermitRootLogin without-password' >>/etc/ssh/sshd_config +echo 'UseDNS no' >>/etc/ssh/sshd_config + +sysrc -f /boot/loader.conf autoboot_delay=3 beastie_disable=YES +sysrc sendmail_enable="NONE" +sysrc syslogd_flags="-ss" + +# Reconfigure for custom DHCP wrapper script +if [ -x /usr/sbin/one-dual-dhclient ]; then + sysrc dhclient_program="/usr/sbin/one-dual-dhclient" +fi + +# VMware +sysrc vmware_guest_kmod_enable="YES" +sysrc vmware_guestd_enable="YES" + +pw user mod root -w no + +# cleanups +rm -rf /var/db/freebsd-update/* +rm -rf /var/db/pkg/repo-FreeBSD.sqlite +rm -rf /etc/ssh/ssh_host_* +rm -rf /tmp/context +[ -s /etc/machine-id ] && rm -f /etc/machine-id || true + +# zero free space +# dd if=/dev/zero of=/.zero bs=1m || : +# rm -rf /.zero diff --git a/packer/freebsd/variables.pkr.hcl b/packer/freebsd/variables.pkr.hcl new file mode 100644 index 00000000..ca568617 --- /dev/null +++ b/packer/freebsd/variables.pkr.hcl @@ -0,0 +1,142 @@ +variable "appliance_name" { + type = string + default = "freebsd" +} + +variable "version" { + type = string + default = "12" +} + +variable "input_dir" { + type = string +} + +variable "output_dir" { + type = string +} + +variable "headless" { + type = bool + default = false +} + +variable "freebsd" { + type = map(map(string)) + + default = { + "12" = { + iso_url = "https://download.freebsd.org/ftp/releases/amd64/amd64/ISO-IMAGES/12.4/FreeBSD-12.4-RELEASE-amd64-disc1.iso" + iso_checksum = "606435637b76991f96df68f561badf03266f3d5452e9f72ed9b130d96b188800" + } + + "13" = { + iso_url = "https://download.freebsd.org/ftp/releases/amd64/amd64/ISO-IMAGES/13.2/FreeBSD-13.2-RELEASE-amd64-disc1.iso" + iso_checksum = "b76ab084e339ee05f59be81354c8cb7dfadf9518e0548f88017d2759a910f17c" + } + } +} + +variable "boot_cmd" { + type = map(list(string)) + + default = { + "12" = [ + "I", # Welcome: Install + "", # Keymap Selection: Continue with default + + "localhost", # Set hostname + "", + + "", # Distribution Select, OK + + "", # Partitioning Auto (UFS) + "E", # Entire Disk + "G", # GPT + "D", # Delete swap partition + "M", # Modify second partition + "rootfs", # Set rootfs label on root partition + "F", # Finish + "C", # Commit + + "", + + "opennebula", # Root password + "opennebula", + + "", # Network Configuration vtnet0 + "Y", # IPv4 yes + "Y", # DHCP yes + "N", # IPv6 no + "", # Resolver configuration + + "0", # Time Zone Selector: UTC + Time&Date + "Y", # Confirm + "S", # Skip date + 
"S", # Skip time + + "", # System Configuration, OK + "", # System Hardening, OK + "N", # Add User Accounts, no + "E", # Final Configuration, Exit + + "Y", # Manual configuration, Yes + "sed -i '' -e 's/^#PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config", + "sed -i '' -e 's/^#PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config", + "sed -i '' -e 's/^.*\\([[:space:]]\\/[[:space:]]\\)/\\/dev\\/gpt\\/rootfs\\1/' /etc/fstab", + "syncexit", + "R" # Complete: Reboot + ] + + "13" = [ + "I", # Welcome: Install + "", # Keymap Selection: Continue with default + + "localhost", # Set hostname + "", + + "", # Distribution Select + + "", # Partitioning, Auto (UFS) + "E", # Entire Disk + "G", # GPT + "D", # Delete swap partition + "M", # Modify second partition + "rootfs", # Set rootfs label on root p. + "F", # Finish + "C", # Commit + "", # Wait for base install + + "opennebula", # Root password + "opennebula", + + "", # Network, vtnet0 + "Y", # IPv4 yes + "Y", # DHCP yes + "N", # IPv6 no + "", # Resolver configuration + + "0", # Time zone selector + "Y", # UTC + "S", # Skip date + "S", # Skip time + + "", # System Configuration, OK + "", # System Hardening, OK + + "N", # Add User Accounts, no + "E", # Final Configuration, exit + "Y", # Yes + + # Manual Configuration + "sed -i '' -e 's/^#PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config", + "sed -i '' -e 's/^#PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config", + "sed -i '' -e 's/^.*\\([[:space:]]\\/[[:space:]]\\)/\\/dev\\/gpt\\/rootfs\\1/' /etc/fstab", + "syncexit", + + "R" # Complete: Reboot + ] + } +} + + diff --git a/packer/ol/10-upgrade-distro.sh.8 b/packer/ol/10-upgrade-distro.sh.8 new file mode 100644 index 00000000..941cfe63 --- /dev/null +++ b/packer/ol/10-upgrade-distro.sh.8 @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# (Auto)Removes unneeded packages and upgrades +# the distro. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +dnf install -y oracle-epel-release-el8 + +dnf update -y --skip-broken + +# Ensure packages needed for post-processing scripts do exist. +dnf install -y curl gawk grep jq sed + +sync diff --git a/packer/ol/10-upgrade-distro.sh.9 b/packer/ol/10-upgrade-distro.sh.9 new file mode 100644 index 00000000..48614349 --- /dev/null +++ b/packer/ol/10-upgrade-distro.sh.9 @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# (Auto)Removes unneeded packages and upgrades +# the distro. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +dnf install -y oracle-epel-release-el9 + +dnf update -y --skip-broken + +# Ensure packages needed for post-processing scripts do exist. +dnf install -y curl gawk grep jq sed + +sync diff --git a/packer/ol/11-update-grub.sh b/packer/ol/11-update-grub.sh new file mode 100644 index 00000000..684719ab --- /dev/null +++ b/packer/ol/11-update-grub.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash + +# Sets kernel command line (net.ifnames=0 is particularily important), +# then updates initramfs/initrd and grub2. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +rm -rf /etc/default/grub.d/ + +# Drop unwanted. + +gawk -i inplace -f- /etc/default/grub <<'EOF' +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "") } +/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\/, "crashkernel=no") } +{ print } +EOF + +# Ensure required. 
+ +gawk -i inplace -f- /etc/default/grub <<'EOF' +/^GRUB_CMDLINE_LINUX=/ { found = 1 } +/^GRUB_CMDLINE_LINUX=/ && !/net.ifnames=0/ { gsub(/"$/, " net.ifnames=0\"") } +/^GRUB_CMDLINE_LINUX=/ && !/biosdevname=0/ { gsub(/"$/, " biosdevname=0\"") } +{ print } +END { if (!found) print "GRUB_CMDLINE_LINUX=\" net.ifnames=0 biosdevname=0\"" >> FILENAME } +EOF + +gawk -i inplace -f- /etc/default/grub <<'EOF' +BEGIN { update = "GRUB_TIMEOUT=0" } +/^GRUB_TIMEOUT=/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +# Cleanup. + +gawk -i inplace -f- /etc/default/grub <<'EOF' +{ gsub(/(" *| *")/, "\""); gsub(/ */, " ") } +{ print } +EOF + +dnf install -y dracut-config-generic dracut-network + +INITRAMFS_IMG=$(find /boot/ -maxdepth 1 -name 'initramfs-*.img' ! -name '*rescue*' ! -name '*kdump*' | sort -V | tail -1) +INITRAMFS_VER=$(sed -e 's/^.*initramfs-//' -e 's/\.img$//' <<< "$INITRAMFS_IMG") +dracut --force "$INITRAMFS_IMG" "$INITRAMFS_VER" + +grub2-mkconfig -o /boot/grub2/grub.cfg + +sync + +reboot diff --git a/packer/ol/80-install-context.sh.8 b/packer/ol/80-install-context.sh.8 new file mode 100644 index 00000000..9fcf01bc --- /dev/null +++ b/packer/ol/80-install-context.sh.8 @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_SUFFIX:=.el8.noarch.rpm}" + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +dnf install -y /context/one-context*$CTX_SUFFIX haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +if ! rpm -q --queryformat '%{VERSION}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + dnf install -y --setopt=install_weak_deps=False NetworkManager systemd-networkd +fi +# <<< Apply only on one-context >= 6.1 <<< + +sync diff --git a/packer/ol/80-install-context.sh.9 b/packer/ol/80-install-context.sh.9 new file mode 100644 index 00000000..0203743b --- /dev/null +++ b/packer/ol/80-install-context.sh.9 @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_SUFFIX:=.el9.noarch.rpm}" + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +dnf install -y /context/one-context*$CTX_SUFFIX haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +if ! rpm -q --queryformat '%{VERSION}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + dnf install -y --setopt=install_weak_deps=False NetworkManager systemd-networkd +fi +# <<< Apply only on one-context >= 6.1 <<< + +sync diff --git a/packer/ol/81-configure-ssh.sh b/packer/ol/81-configure-ssh.sh new file mode 100644 index 00000000..ca55c22b --- /dev/null +++ b/packer/ol/81-configure-ssh.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Configures critical settings for OpenSSH server. 
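+# "PermitRootLogin without-password" permits root logins with SSH keys only, never with a password.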
+ +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PasswordAuthentication no" } +/^[#\s]*PasswordAuthentication\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PermitRootLogin without-password" } +/^[#\s]*PermitRootLogin\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "UseDNS no" } +/^[#\s]*UseDNS\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +sync diff --git a/packer/ol/98-collect-garbage.sh b/packer/ol/98-collect-garbage.sh new file mode 100644 index 00000000..ea5f3442 --- /dev/null +++ b/packer/ol/98-collect-garbage.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# Cleans DNF caches, removes temporary files / logs, +# removes leftover / temporary unneeded packages. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +systemctl disable kdump.service + +# Remove old kernels. +dnf remove -y $(dnf repoquery --installonly --latest-limit=-1 -q) + +dnf remove -y fwupd linux-firmware + +dnf clean -y all + +rm -rf /boot/*-rescue-* +rm -rf /context/ + +sync diff --git a/packer/ol/cloud-init.yml b/packer/ol/cloud-init.yml new file mode 100644 index 00000000..a2e97b34 --- /dev/null +++ b/packer/ol/cloud-init.yml @@ -0,0 +1,22 @@ +#cloud-config +growpart: + mode: auto + devices: [/] + +users: + - name: root + lock_passwd: false + hashed_passwd: $6$rounds=4096$2RFfXKGPKTcdF.CH$dzLlW9Pg1jbeojxRxEraHwEMAPAbpChBdrMFV1SOa6etSF2CYAe.hC1dRDM1icTOk7M4yhVS1BtwJjah9essD0 + +disable_root: false +ssh_pwauth: true + +runcmd: + - | + gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' + BEGIN { update = "PermitRootLogin yes" } + /^#*PermitRootLogin/ { $0 = update; found = 1 } + { print } + END { if (!found) print update >>FILENAME } + EOF + - systemctl reload sshd diff --git a/packer/ol/ol.pkr.hcl b/packer/ol/ol.pkr.hcl new file mode 100644 index 00000000..a74bc8dd --- /dev/null +++ b/packer/ol/ol.pkr.hcl @@ -0,0 +1,76 @@ +# Build cloud init iso +source "file" "user_data" { + source = "${var.input_dir}/cloud-init.yml" + target = "${var.input_dir}/${var.appliance_name}-userdata" +} + +build { + sources = ["sources.file.user_data"] + + provisioner "shell-local" { + inline = [ + "cloud-localds ${var.input_dir}/${var.appliance_name}-cloud-init.iso ${var.input_dir}/${var.appliance_name}-userdata", + ] + } +} + +# Build VM image +source "qemu" "ol" { + cpus = 2 + memory = 2048 + accelerator = "kvm" + + iso_url = lookup(lookup(var.ol, var.version, {}), "iso_url", "") + iso_checksum = lookup(lookup(var.ol, var.version, {}), "iso_checksum", "") + + headless = var.headless + + disk_image = true + disk_cache = "unsafe" + disk_interface = "virtio" + net_device = "virtio-net" + format = "qcow2" + + output_directory = var.output_dir + + qemuargs = [ ["-serial", "stdio"], + ["-cpu", "host"], + ["-cdrom", "${var.input_dir}/${var.appliance_name}-cloud-init.iso"] + ] + + ssh_username = "root" + ssh_password = "opennebula" + ssh_wait_timeout = "900s" + vm_name = "${var.appliance_name}" +} + +build { + sources = ["source.qemu.ol"] + + provisioner "shell" { inline = ["mkdir /context"] } + + provisioner "file" { + source = "context-linux/out/" + destination = "/context" + } + + provisioner "shell" { + execute_command = "sudo -iu root {{.Vars}} bash {{.Path}}" + + # execute *.sh 
+ *.sh. from input_dir
+    scripts = sort(concat(
+      [for s in fileset(".", "*.sh") : "${var.input_dir}/${s}"],
+      [for s in fileset(".", "*.sh.${var.version}") : "${var.input_dir}/${s}"]
+    ))
+    expect_disconnect = true
+  }
+
+  post-processor "shell-local" {
+    execute_command = ["bash", "-c", "{{.Vars}} {{.Script}}"]
+    environment_vars = [
+      "OUTPUT_DIR=${var.output_dir}",
+      "APPLIANCE_NAME=${var.appliance_name}",
+    ]
+    scripts = [ "packer/postprocess.sh" ]
+  }
+}
diff --git a/packer/ol/plugins.pkr.hcl b/packer/ol/plugins.pkr.hcl new file mode 120000 index 00000000..44663b58 --- /dev/null +++ b/packer/ol/plugins.pkr.hcl @@ -0,0 +1 @@ +../plugins.pkr.hcl \ No newline at end of file
diff --git a/packer/ol/variables.pkr.hcl b/packer/ol/variables.pkr.hcl new file mode 100644 index 00000000..558c4f47 --- /dev/null +++ b/packer/ol/variables.pkr.hcl @@ -0,0 +1,38 @@
+variable "appliance_name" {
+  type    = string
+  default = "ol"
+}
+
+variable "version" {
+  type    = string
+  default = "8"
+}
+
+variable "input_dir" {
+  type = string
+}
+
+variable "output_dir" {
+  type = string
+}
+
+variable "headless" {
+  type    = bool
+  default = false
+}
+
+variable "ol" {
+  type = map(map(string))
+
+  default = {
+    "8" = {
+      iso_url      = "https://yum.oracle.com/templates/OracleLinux/OL8/u8/x86_64/OL8U8_x86_64-kvm-b198.qcow"
+      iso_checksum = "67b644451efe5c9c472820922085cb5112e305fedfb5edb1ab7020b518ba8c3b"
+    }
+
+    "9" = {
+      iso_url      = "https://yum.oracle.com/templates/OracleLinux/OL9/u2/x86_64/OL9U2_x86_64-kvm-b197.qcow"
+      iso_checksum = "840345cb866837ac7cc7c347cd9a8196c3a17e9c054c613eda8c2a912434c956"
+    }
+  }
+}
diff --git a/packer/opensuse/10-upgrade-distro.sh b/packer/opensuse/10-upgrade-distro.sh new file mode 100644 index 00000000..f2bfa1d4 --- /dev/null +++ b/packer/opensuse/10-upgrade-distro.sh @@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+# (Auto)Removes unneeded packages and upgrades
+# the distro.
+
+exec 1>&2
+set -o errexit -o nounset -o pipefail
+set -x
+
+xfs_growfs /
+
+echo 'hostonly="no"' >/etc/dracut.conf.d/02-generic-image.conf
+
+zypper --non-interactive --gpg-auto-import-keys update -y
+
+# Ensure packages needed for post-processing scripts do exist.
+zypper --non-interactive install -y curl gawk grep jq sed
+
+sync
diff --git a/packer/opensuse/11-update-grub.sh b/packer/opensuse/11-update-grub.sh new file mode 100644 index 00000000..98b58704 --- /dev/null +++ b/packer/opensuse/11-update-grub.sh @@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+
+# Sets kernel command line (net.ifnames=0 is particularly important),
+# then updates grub2.
+
+exec 1>&2
+set -o errexit -o nounset -o pipefail
+set -x
+
+rm -rf /etc/default/grub.d/
+
+# Drop unwanted.
+
+gawk -i inplace -f- /etc/default/grub <<'EOF'
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<quiet\>/, "") }
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<splash\>/, "") }
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<console=[^ ]*\>/, "") }
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<earlyprintk=[^ ]*\>/, "") }
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<crashkernel=[^ ]*\>/, "crashkernel=no") }
+{ print }
+EOF
+
+# Ensure required.
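+# (For illustration: on a hypothetical line such as
+#   GRUB_CMDLINE_LINUX="console=tty0"
+# the program below yields
+#   GRUB_CMDLINE_LINUX="console=tty0 net.ifnames=0 biosdevname=0"
+# and appends a whole GRUB_CMDLINE_LINUX line if none was present.)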
+ +gawk -i inplace -f- /etc/default/grub <<'EOF' +/^GRUB_CMDLINE_LINUX=/ { found = 1 } +/^GRUB_CMDLINE_LINUX=/ && !/net.ifnames=0/ { gsub(/"$/, " net.ifnames=0\"") } +/^GRUB_CMDLINE_LINUX=/ && !/biosdevname=0/ { gsub(/"$/, " biosdevname=0\"") } +{ print } +END { if (!found) print "GRUB_CMDLINE_LINUX=\" net.ifnames=0 biosdevname=0\"" >> FILENAME } +EOF + +gawk -i inplace -f- /etc/default/grub <<'EOF' +BEGIN { update = "GRUB_TIMEOUT=0" } +/^GRUB_TIMEOUT=/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +# Cleanup. + +gawk -i inplace -f- /etc/default/grub <<'EOF' +{ gsub(/(" *| *")/, "\""); gsub(/ */, " ") } +{ print } +EOF + +grub2-mkconfig -o /boot/grub2/grub.cfg + +sync diff --git a/packer/opensuse/80-install-context.sh b/packer/opensuse/80-install-context.sh new file mode 100644 index 00000000..bec73838 --- /dev/null +++ b/packer/opensuse/80-install-context.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_SUFFIX:=.suse.noarch.rpm}" + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +zypper --non-interactive --no-gpg-checks install -y /context/one-context*$CTX_SUFFIX haveged open-vm-tools + +systemctl enable haveged + +sync diff --git a/packer/opensuse/81-configure-ssh.sh b/packer/opensuse/81-configure-ssh.sh new file mode 100644 index 00000000..ca55c22b --- /dev/null +++ b/packer/opensuse/81-configure-ssh.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Configures critical settings for OpenSSH server. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PasswordAuthentication no" } +/^[#\s]*PasswordAuthentication\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PermitRootLogin without-password" } +/^[#\s]*PermitRootLogin\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "UseDNS no" } +/^[#\s]*UseDNS\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +sync diff --git a/packer/opensuse/98-collect-garbage.sh b/packer/opensuse/98-collect-garbage.sh new file mode 100644 index 00000000..894aae96 --- /dev/null +++ b/packer/opensuse/98-collect-garbage.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# Cleans ZYPPER caches, removes temporary files / logs, +# removes leftover / temporary unneeded packages. 
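+# Note: old kernels are removed below by temporarily setting
+# multiversion.kernels=latest in zypp.conf and running "zypper purge-kernels",
+# then restoring the original zypp.conf.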
+ +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +cp -f /etc/zypp/zypp.conf /etc/zypp/zypp.conf.bak +sed -i 's/^\(multiversion.kernels\s*=\).*$/\1latest/' /etc/zypp/zypp.conf +zypper -n purge-kernels +mv -f /etc/zypp/zypp.conf.bak /etc/zypp/zypp.conf + +zypper remove --clean-deps -y salt salt-minion ||: + +zypper clean --all + +rm -f /etc/hostname +rm -rf /context/ + +# Remove jeos-firstboot file +# https://github.com/openSUSE/jeos-firstboot +rm -f /var/lib/YaST2/reconfig_system + +sync diff --git a/packer/opensuse/cloud-init.yml b/packer/opensuse/cloud-init.yml new file mode 100644 index 00000000..a2e97b34 --- /dev/null +++ b/packer/opensuse/cloud-init.yml @@ -0,0 +1,22 @@ +#cloud-config +growpart: + mode: auto + devices: [/] + +users: + - name: root + lock_passwd: false + hashed_passwd: $6$rounds=4096$2RFfXKGPKTcdF.CH$dzLlW9Pg1jbeojxRxEraHwEMAPAbpChBdrMFV1SOa6etSF2CYAe.hC1dRDM1icTOk7M4yhVS1BtwJjah9essD0 + +disable_root: false +ssh_pwauth: true + +runcmd: + - | + gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' + BEGIN { update = "PermitRootLogin yes" } + /^#*PermitRootLogin/ { $0 = update; found = 1 } + { print } + END { if (!found) print update >>FILENAME } + EOF + - systemctl reload sshd diff --git a/packer/opensuse/opensuse.pkr.hcl b/packer/opensuse/opensuse.pkr.hcl new file mode 100644 index 00000000..33f78647 --- /dev/null +++ b/packer/opensuse/opensuse.pkr.hcl @@ -0,0 +1,78 @@ +# Build cloud init iso +source "file" "user_data" { + source = "${var.input_dir}/cloud-init.yml" + target = "${var.input_dir}/${var.appliance_name}-userdata" +} + +build { + sources = ["sources.file.user_data"] + + provisioner "shell-local" { + inline = [ + "cloud-localds ${var.input_dir}/${var.appliance_name}-cloud-init.iso ${var.input_dir}/${var.appliance_name}-userdata", + ] + } +} + +# Build VM image +source "qemu" "opensuse" { + cpus = 2 + memory = 2048 + accelerator = "kvm" + + iso_url = lookup(lookup(var.opensuse, var.version, {}), "iso_url", "") + iso_checksum = lookup(lookup(var.opensuse, var.version, {}), "iso_checksum", "") + + headless = var.headless + + disk_image = true + disk_cache = "unsafe" + disk_interface = "virtio" + net_device = "virtio-net" + format = "qcow2" + + output_directory = var.output_dir + + qemuargs = [ ["-serial", "stdio"], + ["-cpu", "host"], + ["-cdrom", "${var.input_dir}/${var.appliance_name}-cloud-init.iso"] + ] + + ssh_username = "root" + ssh_password = "opennebula" + ssh_wait_timeout = "900s" + vm_name = "${var.appliance_name}" +} + +build { + sources = ["source.qemu.opensuse"] + + /* provisioner "shell" { inline = ["sleep 1000"] } */ + + provisioner "shell" { inline = ["mkdir /context"] } + + provisioner "file" { + source = "context-linux/out/" + destination = "/context" + } + + provisioner "shell" { + execute_command = "sudo -iu root {{.Vars}} bash {{.Path}}" + + # execute *.sh + *.sh. 
from input_dir + scripts = sort(concat( + [for s in fileset(".", "*.sh") : "${var.input_dir}/${s}"], + [for s in fileset(".", "*.sh.${var.version}") : "${var.input_dir}/${s}"] + )) + expect_disconnect = true + } + + post-processor "shell-local" { + execute_command = ["bash", "-c", "{{.Vars}} {{.Script}}"] + environment_vars = [ + "OUTPUT_DIR=${var.output_dir}", + "APPLIANCE_NAME=${var.appliance_name}", + ] + scripts = [ "packer/postprocess.sh" ] + } +} diff --git a/packer/opensuse/plugins.pkr.hcl b/packer/opensuse/plugins.pkr.hcl new file mode 120000 index 00000000..44663b58 --- /dev/null +++ b/packer/opensuse/plugins.pkr.hcl @@ -0,0 +1 @@ +../plugins.pkr.hcl \ No newline at end of file diff --git a/packer/opensuse/variables.pkr.hcl b/packer/opensuse/variables.pkr.hcl new file mode 100644 index 00000000..2c902740 --- /dev/null +++ b/packer/opensuse/variables.pkr.hcl @@ -0,0 +1,33 @@ +variable "appliance_name" { + type = string + default = "opensuse" +} + +variable "version" { + type = string + default = "15" +} + +variable "input_dir" { + type = string +} + +variable "output_dir" { + type = string +} + +variable "headless" { + type = bool + default = false +} + +variable "opensuse" { + type = map(map(string)) + + default = { + "15" = { + iso_url = "https://download.opensuse.org/distribution/leap/15.5/appliances/openSUSE-Leap-15.5-Minimal-VM.x86_64-Cloud.qcow2" + iso_checksum = "ac40aa1069b244c4c17272994e8a5325863f9945d199eff1e2ed1ba525b52541" + } + } +} diff --git a/packer/plugins.pkr.hcl b/packer/plugins.pkr.hcl new file mode 100644 index 00000000..9083f811 --- /dev/null +++ b/packer/plugins.pkr.hcl @@ -0,0 +1,8 @@ +packer { + required_plugins { + qemu = { + source = "github.com/hashicorp/qemu" + version = "~> 1" + } + } +} diff --git a/packer/postprocess.sh b/packer/postprocess.sh new file mode 100755 index 00000000..236283b0 --- /dev/null +++ b/packer/postprocess.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +timeout 5m virt-sysprep \ + --add ${OUTPUT_DIR}/${APPLIANCE_NAME} \ + --selinux-relabel \ + --root-password disabled \ + --hostname localhost.localdomain \ + --run-command 'truncate -s0 -c /etc/machine-id' \ + --delete /etc/resolv.conf + +# virt-sparsify was haning sometimes +for I in 1 2 3; do + timeout 5m virt-sparsify \ + --in-place ${OUTPUT_DIR}/${APPLIANCE_NAME} && break +done diff --git a/packer/rhel/10-upgrade-distro.sh.8 b/packer/rhel/10-upgrade-distro.sh.8 new file mode 100644 index 00000000..dc2e14bc --- /dev/null +++ b/packer/rhel/10-upgrade-distro.sh.8 @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +# (Auto)Removes unneeded packages and upgrades +# the distro. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +# Make sure /etc/machine-id exists otherwise this can happen: +# https://bugzilla.redhat.com/show_bug.cgi?id=1737355 +systemd-machine-id-setup + +ln -sf ../usr/share/zoneinfo/UTC /etc/localtime + +subscription-manager register \ + --username "${RHEL_USER}" \ + --password "${RHEL_PASSWORD}" \ + --auto-attach \ + --force + +subscription-manager repos \ + --enable codeready-builder-for-rhel-8-x86_64-rpms + +dnf install -y "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm" + +dnf repolist enabled + +dnf update -y + +# Ensure packages needed for post-processing scripts do exist. 
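+# (curl and jq fetch the one-context package from the GitHub API, while
+# gawk/grep/sed edit grub and sshd_config in the later provisioning scripts)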
+dnf install -y curl gawk grep jq sed
+
+sync
diff --git a/packer/rhel/10-upgrade-distro.sh.9 b/packer/rhel/10-upgrade-distro.sh.9 new file mode 100644 index 00000000..fb1ee877 --- /dev/null +++ b/packer/rhel/10-upgrade-distro.sh.9 @@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+# (Auto)Removes unneeded packages and upgrades
+# the distro.
+
+exec 1>&2
+set -o errexit -o nounset -o pipefail
+set -x
+
+# Make sure /etc/machine-id exists otherwise this can happen:
+# https://bugzilla.redhat.com/show_bug.cgi?id=1737355
+systemd-machine-id-setup
+
+ln -sf ../usr/share/zoneinfo/UTC /etc/localtime
+
+subscription-manager register \
+    --username "${RHEL_USER}" \
+    --password "${RHEL_PASSWORD}" \
+    --auto-attach \
+    --force
+
+subscription-manager repos \
+    --enable codeready-builder-for-rhel-9-x86_64-rpms
+
+dnf install -y "https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm"
+
+dnf repolist enabled
+
+dnf update -y
+
+# Ensure packages needed for post-processing scripts do exist.
+dnf install -y curl gawk grep jq sed
+
+sync
diff --git a/packer/rhel/11-update-grub.sh b/packer/rhel/11-update-grub.sh new file mode 100644 index 00000000..684719ab --- /dev/null +++ b/packer/rhel/11-update-grub.sh @@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+
+# Sets kernel command line (net.ifnames=0 is particularly important),
+# then updates initramfs/initrd and grub2.
+
+exec 1>&2
+set -o errexit -o nounset -o pipefail
+set -x
+
+rm -rf /etc/default/grub.d/
+
+# Drop unwanted.
+
+gawk -i inplace -f- /etc/default/grub <<'EOF'
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<quiet\>/, "") }
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<rhgb\>/, "") }
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<console=[^ ]*\>/, "") }
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<earlyprintk=[^ ]*\>/, "") }
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<crashkernel=[^ ]*\>/, "crashkernel=no") }
+{ print }
+EOF
+
+# Ensure required.
+
+gawk -i inplace -f- /etc/default/grub <<'EOF'
+/^GRUB_CMDLINE_LINUX=/ { found = 1 }
+/^GRUB_CMDLINE_LINUX=/ && !/net.ifnames=0/ { gsub(/"$/, " net.ifnames=0\"") }
+/^GRUB_CMDLINE_LINUX=/ && !/biosdevname=0/ { gsub(/"$/, " biosdevname=0\"") }
+{ print }
+END { if (!found) print "GRUB_CMDLINE_LINUX=\" net.ifnames=0 biosdevname=0\"" >> FILENAME }
+EOF
+
+gawk -i inplace -f- /etc/default/grub <<'EOF'
+BEGIN { update = "GRUB_TIMEOUT=0" }
+/^GRUB_TIMEOUT=/ { $0 = update; found = 1 }
+{ print }
+END { if (!found) print update >> FILENAME }
+EOF
+
+# Cleanup.
+
+gawk -i inplace -f- /etc/default/grub <<'EOF'
+{ gsub(/(" *| *")/, "\""); gsub(/ */, " ") }
+{ print }
+EOF
+
+dnf install -y dracut-config-generic dracut-network
+
+INITRAMFS_IMG=$(find /boot/ -maxdepth 1 -name 'initramfs-*.img' ! -name '*rescue*' ! -name '*kdump*' | sort -V | tail -1)
+INITRAMFS_VER=$(sed -e 's/^.*initramfs-//' -e 's/\.img$//' <<< "$INITRAMFS_IMG")
+dracut --force "$INITRAMFS_IMG" "$INITRAMFS_VER"
+
+grub2-mkconfig -o /boot/grub2/grub.cfg
+
+sync
+
+reboot
diff --git a/packer/rhel/80-install-context.sh.8 b/packer/rhel/80-install-context.sh.8 new file mode 100644 index 00000000..9fcf01bc --- /dev/null +++ b/packer/rhel/80-install-context.sh.8 @@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# Downloads and installs the latest one-context package.
+
+: "${CTX_SUFFIX:=.el8.noarch.rpm}"
+
+exec 1>&2
+set -o errexit -o nounset -o pipefail
+set -x
+
+if !
stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +dnf install -y /context/one-context*$CTX_SUFFIX haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +if ! rpm -q --queryformat '%{VERSION}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + dnf install -y --setopt=install_weak_deps=False NetworkManager systemd-networkd +fi +# <<< Apply only on one-context >= 6.1 <<< + +sync diff --git a/packer/rhel/80-install-context.sh.9 b/packer/rhel/80-install-context.sh.9 new file mode 100644 index 00000000..0203743b --- /dev/null +++ b/packer/rhel/80-install-context.sh.9 @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_SUFFIX:=.el9.noarch.rpm}" + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +dnf install -y /context/one-context*$CTX_SUFFIX haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +if ! rpm -q --queryformat '%{VERSION}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + dnf install -y --setopt=install_weak_deps=False NetworkManager systemd-networkd +fi +# <<< Apply only on one-context >= 6.1 <<< + +sync diff --git a/packer/rhel/81-configure-ssh.sh b/packer/rhel/81-configure-ssh.sh new file mode 100644 index 00000000..ca55c22b --- /dev/null +++ b/packer/rhel/81-configure-ssh.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Configures critical settings for OpenSSH server. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PasswordAuthentication no" } +/^[#\s]*PasswordAuthentication\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PermitRootLogin without-password" } +/^[#\s]*PermitRootLogin\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "UseDNS no" } +/^[#\s]*UseDNS\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +sync diff --git a/packer/rhel/98-collect-garbage.sh b/packer/rhel/98-collect-garbage.sh new file mode 100644 index 00000000..7de58d3a --- /dev/null +++ b/packer/rhel/98-collect-garbage.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# Cleans DNF caches, removes temporary files / logs, +# removes leftover / temporary unneeded packages. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +systemctl disable kdump.service + +# Remove old kernels. 
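+# ("dnf repoquery --installonly --latest-limit=-1" lists all installed
+# install-only packages, i.e. kernels, except the most recent one, so only
+# the newest kernel is kept.)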
+dnf remove -y $(dnf repoquery --installonly --latest-limit=-1 -q) + +dnf remove -y fwupd linux-firmware + +subscription-manager remove --all +subscription-manager unregister +subscription-manager clean + +dnf clean -y all + +rm -rf /boot/*-rescue-* +rm -rf /context/ + +sync diff --git a/packer/rhel/99-unsubscribe.sh b/packer/rhel/99-unsubscribe.sh new file mode 100644 index 00000000..c1e8f9b9 --- /dev/null +++ b/packer/rhel/99-unsubscribe.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +subscription-manager remove --all +subscription-manager unregister +subscription-manager clean diff --git a/packer/rhel/cloud-init.yml b/packer/rhel/cloud-init.yml new file mode 100644 index 00000000..a2e97b34 --- /dev/null +++ b/packer/rhel/cloud-init.yml @@ -0,0 +1,22 @@ +#cloud-config +growpart: + mode: auto + devices: [/] + +users: + - name: root + lock_passwd: false + hashed_passwd: $6$rounds=4096$2RFfXKGPKTcdF.CH$dzLlW9Pg1jbeojxRxEraHwEMAPAbpChBdrMFV1SOa6etSF2CYAe.hC1dRDM1icTOk7M4yhVS1BtwJjah9essD0 + +disable_root: false +ssh_pwauth: true + +runcmd: + - | + gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' + BEGIN { update = "PermitRootLogin yes" } + /^#*PermitRootLogin/ { $0 = update; found = 1 } + { print } + END { if (!found) print update >>FILENAME } + EOF + - systemctl reload sshd diff --git a/packer/rhel/plugins.pkr.hcl b/packer/rhel/plugins.pkr.hcl new file mode 120000 index 00000000..44663b58 --- /dev/null +++ b/packer/rhel/plugins.pkr.hcl @@ -0,0 +1 @@ +../plugins.pkr.hcl \ No newline at end of file diff --git a/packer/rhel/rhel.pkr.hcl b/packer/rhel/rhel.pkr.hcl new file mode 100644 index 00000000..a617152b --- /dev/null +++ b/packer/rhel/rhel.pkr.hcl @@ -0,0 +1,82 @@ +# Build cloud init iso +source "file" "user_data" { + source = "${var.input_dir}/cloud-init.yml" + target = "${var.input_dir}/${var.appliance_name}-userdata" +} + +build { + sources = ["sources.file.user_data"] + + provisioner "shell-local" { + inline = [ + "cloud-localds ${var.input_dir}/${var.appliance_name}-cloud-init.iso ${var.input_dir}/${var.appliance_name}-userdata", + ] + } +} + +# Build VM image +source "qemu" "rhel" { + cpus = 2 + memory = 2048 + accelerator = "kvm" + + iso_url = lookup(lookup(var.rhel, var.version, {}), "iso_url", "") + iso_checksum = lookup(lookup(var.rhel, var.version, {}), "iso_checksum", "") + + headless = var.headless + + disk_image = true + disk_cache = "unsafe" + disk_interface = "virtio" + net_device = "virtio-net" + format = "qcow2" + + output_directory = var.output_dir + + qemuargs = [ ["-serial", "stdio"], + ["-cpu", "host"], + ["-cdrom", "${var.input_dir}/${var.appliance_name}-cloud-init.iso"] + ] + + ssh_username = "root" + ssh_password = "opennebula" + ssh_wait_timeout = "900s" + vm_name = "${var.appliance_name}" +} + +build { + sources = ["source.qemu.rhel"] + + provisioner "shell" { inline = ["mkdir /context"] } + + provisioner "file" { + source = "context-linux/out/" + destination = "/context" + } + + provisioner "shell" { + execute_command = "sudo -iu root {{.Vars}} bash {{.Path}}" + + # execute *.sh + *.sh. 
from input_dir
+    scripts = sort(concat(
+      [for s in fileset(".", "*.sh") : "${var.input_dir}/${s}"],
+      [for s in fileset(".", "*.sh.${var.version}") : "${var.input_dir}/${s}"]
+    ))
+
+    environment_vars = [
+      "RHEL_USER=${var.rhel_user}",
+      "RHEL_PASSWORD=${var.rhel_password}"
+    ]
+
+    expect_disconnect = true
+  }
+
+  post-processor "shell-local" {
+    execute_command = ["bash", "-c", "{{.Vars}} {{.Script}}"]
+    environment_vars = [
+      "OUTPUT_DIR=${var.output_dir}",
+      "APPLIANCE_NAME=${var.appliance_name}",
+    ]
+    scripts = [ "packer/postprocess.sh" ]
+  }
+}
diff --git a/packer/rhel/variables.pkr.hcl b/packer/rhel/variables.pkr.hcl new file mode 100644 index 00000000..e414a8db --- /dev/null +++ b/packer/rhel/variables.pkr.hcl @@ -0,0 +1,48 @@
+variable "appliance_name" {
+  type    = string
+  default = "rhel"
+}
+
+variable "version" {
+  type    = string
+  default = "8"
+}
+
+variable "input_dir" {
+  type = string
+}
+
+variable "output_dir" {
+  type = string
+}
+
+variable "headless" {
+  type    = bool
+  default = false
+}
+
+variable "rhel_user" {
+  type    = string
+  default = false
+}
+
+variable "rhel_password" {
+  type    = string
+  default = false
+}
+
+variable "rhel" {
+  type = map(map(string))
+
+  default = {
+    "8" = {
+      iso_url      = ""
+      iso_checksum = ""
+    }
+
+    "9" = {
+      iso_url      = ""
+      iso_checksum = ""
+    }
+  }
+}
diff --git a/packer/rocky/10-upgrade-distro.sh b/packer/rocky/10-upgrade-distro.sh new file mode 100644 index 00000000..6a40e288 --- /dev/null +++ b/packer/rocky/10-upgrade-distro.sh @@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+# (Auto)Removes unneeded packages and upgrades
+# the distro.
+
+exec 1>&2
+set -o errexit -o nounset -o pipefail
+set -x
+
+dnf install -y epel-release
+
+dnf update -y --skip-broken
+
+# Ensure packages needed for post-processing scripts do exist.
+dnf install -y curl gawk grep jq sed
+
+sync
diff --git a/packer/rocky/11-update-grub.sh b/packer/rocky/11-update-grub.sh new file mode 100644 index 00000000..b979e882 --- /dev/null +++ b/packer/rocky/11-update-grub.sh @@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+
+# Sets kernel command line (net.ifnames=0 is particularly important),
+# then updates initramfs/initrd and grub2.
+
+exec 1>&2
+set -o errexit -o nounset -o pipefail
+set -x
+
+rm -rf /etc/default/grub.d/
+
+# Drop unwanted.
+
+gawk -i inplace -f- /etc/default/grub <<'EOF'
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<quiet\>/, "") }
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<rhgb\>/, "") }
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<console=[^ ]*\>/, "") }
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<earlyprintk=[^ ]*\>/, "") }
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<crashkernel=[^ ]*\>/, "crashkernel=no") }
+{ print }
+EOF
+
+# Ensure required.
+
+gawk -i inplace -f- /etc/default/grub <<'EOF'
+/^GRUB_CMDLINE_LINUX=/ { found = 1 }
+/^GRUB_CMDLINE_LINUX=/ && !/net.ifnames=0/ { gsub(/"$/, " net.ifnames=0\"") }
+/^GRUB_CMDLINE_LINUX=/ && !/biosdevname=0/ { gsub(/"$/, " biosdevname=0\"") }
+{ print }
+END { if (!found) print "GRUB_CMDLINE_LINUX=\" net.ifnames=0 biosdevname=0\"" >> FILENAME }
+EOF
+
+gawk -i inplace -f- /etc/default/grub <<'EOF'
+BEGIN { update = "GRUB_TIMEOUT=0" }
+/^GRUB_TIMEOUT=/ { $0 = update; found = 1 }
+{ print }
+END { if (!found) print update >> FILENAME }
+EOF
+
+# Cleanup.
+
+gawk -i inplace -f- /etc/default/grub <<'EOF'
+{ gsub(/(" *| *")/, "\""); gsub(/ */, " ") }
+{ print }
+EOF
+
+dnf install -y dracut-config-generic dracut-network
+
+INITRAMFS_IMG=$(find /boot/ -maxdepth 1 -name 'initramfs-*.img' ! -name '*rescue*' !
-name '*kdump*' | sort -V | tail -1) +INITRAMFS_VER=$(sed -e 's/^.*initramfs-//' -e 's/\.img$//' <<< "$INITRAMFS_IMG") +dracut --force "$INITRAMFS_IMG" "$INITRAMFS_VER" + +grub2-mkconfig -o /boot/grub2/grub.cfg + +sync diff --git a/packer/rocky/80-install-context.sh b/packer/rocky/80-install-context.sh new file mode 100644 index 00000000..0203743b --- /dev/null +++ b/packer/rocky/80-install-context.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_SUFFIX:=.el9.noarch.rpm}" + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +dnf install -y /context/one-context*$CTX_SUFFIX haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +if ! rpm -q --queryformat '%{VERSION}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + dnf install -y --setopt=install_weak_deps=False NetworkManager systemd-networkd +fi +# <<< Apply only on one-context >= 6.1 <<< + +sync diff --git a/packer/rocky/80-install-context.sh.8 b/packer/rocky/80-install-context.sh.8 new file mode 100644 index 00000000..9fcf01bc --- /dev/null +++ b/packer/rocky/80-install-context.sh.8 @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_SUFFIX:=.el8.noarch.rpm}" + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +dnf install -y /context/one-context*$CTX_SUFFIX haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +if ! rpm -q --queryformat '%{VERSION}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + dnf install -y --setopt=install_weak_deps=False NetworkManager systemd-networkd +fi +# <<< Apply only on one-context >= 6.1 <<< + +sync diff --git a/packer/rocky/80-install-context.sh.9 b/packer/rocky/80-install-context.sh.9 new file mode 100644 index 00000000..0203743b --- /dev/null +++ b/packer/rocky/80-install-context.sh.9 @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Downloads and installs the latest one-context package. + +: "${CTX_SUFFIX:=.el9.noarch.rpm}" + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +if ! stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +dnf install -y /context/one-context*$CTX_SUFFIX haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +if ! 
rpm -q --queryformat '%{VERSION}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + dnf install -y --setopt=install_weak_deps=False NetworkManager systemd-networkd +fi +# <<< Apply only on one-context >= 6.1 <<< + +sync diff --git a/packer/rocky/81-configure-ssh.sh b/packer/rocky/81-configure-ssh.sh new file mode 100644 index 00000000..ca55c22b --- /dev/null +++ b/packer/rocky/81-configure-ssh.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Configures critical settings for OpenSSH server. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PasswordAuthentication no" } +/^[#\s]*PasswordAuthentication\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PermitRootLogin without-password" } +/^[#\s]*PermitRootLogin\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "UseDNS no" } +/^[#\s]*UseDNS\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +sync diff --git a/packer/rocky/98-collect-garbage.sh b/packer/rocky/98-collect-garbage.sh new file mode 100644 index 00000000..ee5c456c --- /dev/null +++ b/packer/rocky/98-collect-garbage.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +# Cleans DNF caches, removes temporary files / logs, +# removes leftover / temporary unneeded packages. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +kdump="kdump.service" +systemctl list-units --full -all | grep -Fq "$kdump" && systemctl disable "$kdump" + +# Remove old kernels. +dnf remove -y $(dnf repoquery --installonly --latest-limit=-1 -q) + +dnf remove -y fwupd linux-firmware + +dnf clean -y all + +rm -rf /boot/*-rescue-* +rm -rf /context/ + +sync diff --git a/packer/rocky/plugins.pkr.hcl b/packer/rocky/plugins.pkr.hcl new file mode 120000 index 00000000..44663b58 --- /dev/null +++ b/packer/rocky/plugins.pkr.hcl @@ -0,0 +1 @@ +../plugins.pkr.hcl \ No newline at end of file diff --git a/packer/rocky/rocky.pkr.hcl b/packer/rocky/rocky.pkr.hcl new file mode 100644 index 00000000..ca549175 --- /dev/null +++ b/packer/rocky/rocky.pkr.hcl @@ -0,0 +1,60 @@ +source "qemu" "rocky" { + cpus = 2 + memory = 2048 + accelerator = "kvm" + + iso_url = lookup(lookup(var.rocky, var.version, {}), "iso_url", "") + iso_checksum = lookup(lookup(var.rocky, var.version, {}), "iso_checksum", "") + + headless = var.headless + + http_directory = "${var.input_dir}" + boot_command = [" append rd.live.check=0 inst.text inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/${var.appliance_name}.ks"] + boot_wait = "20s" + + disk_cache = "unsafe" + disk_interface = "virtio" + net_device = "virtio-net" + format = "qcow2" + + output_directory = "${var.output_dir}" + + qemuargs = [ ["-serial", "stdio"], + ["-cpu", "host"] + ] + ssh_username = "root" + ssh_password = "opennebula" + ssh_wait_timeout = "900s" + vm_name = "${var.appliance_name}" +} + +build { + sources = ["source.qemu.rocky"] + + provisioner "shell" { inline = ["mkdir /context"] } + + provisioner "file" { + source = "context-linux/out/" + destination = "/context" + } + + provisioner "shell" { + execute_command = "sudo -iu root {{.Vars}} bash {{.Path}}" + + # execute *.sh + *.sh. 
from input_dir + scripts = sort(concat( + [for s in fileset(".", "*.sh") : "${var.input_dir}/${s}"], + [for s in fileset(".", "*.sh.${var.version}") : "${var.input_dir}/${s}"] + )) + expect_disconnect = true + } + + post-processor "shell-local" { + execute_command = ["bash", "-c", "{{.Vars}} {{.Script}}"] + environment_vars = [ + "OUTPUT_DIR=${var.output_dir}", + "APPLIANCE_NAME=${var.appliance_name}", + ] + scripts = [ "packer/postprocess.sh" ] + } +} diff --git a/packer/rocky/rocky8.ks b/packer/rocky/rocky8.ks new file mode 100644 index 00000000..d15382a1 --- /dev/null +++ b/packer/rocky/rocky8.ks @@ -0,0 +1,98 @@ +# more information is available at +# https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Installation_Guide/sect-kickstart-syntax.html +install +#cmdline + +# System authorization information +authconfig --enableshadow --passalgo=sha512 --enablefingerprint + +# Use net installation media +url --url="http://download.rockylinux.org/pub/rocky/8/BaseOS/x86_64/os/" +repo --name="BaseOS" --baseurl="http://download.rockylinux.org/pub/rocky/8/BaseOS/x86_64/os/" --cost=100 +repo --name="AppStream" --baseurl="http://download.rockylinux.org/pub/rocky/8/AppStream/x86_64/os/" --cost=100 +repo --name="extras" --baseurl="http://download.rockylinux.org/pub/rocky/8/extras/x86_64/os/" --cost=100 +#repo --name="updates" --baseurl="http://mirror.centos.org/centos/8/updates/x86_64/os/" --cost=100 + +# Run the Setup Agent on first boot +firstboot --disable + +# Keyboard layouts +keyboard --vckeymap=us --xlayouts='us' + +# System language +lang en_US.UTF-8 + +# Network information +network --bootproto=dhcp --device=enp0s3 --ipv6=auto --activate +# network --hostname=localhost.localdomain +firewall --disabled + +# Root password +rootpw --iscrypted $6$rounds=4096$2RFfXKGPKTcdF.CH$dzLlW9Pg1jbeojxRxEraHwEMAPAbpChBdrMFV1SOa6etSF2CYAe.hC1dRDM1icTOk7M4yhVS1BtwJjah9essD0 +#selinux --permissive +selinux + +# System services +services --disabled="kdump" +%addon com_redhat_kdump --disable +%end + +# System timezone +timezone UTC --isUtc + +# System bootloader configuration +bootloader --location=mbr --timeout=1 +zerombr + +clearpart --all --initlabel +part / --fstype xfs --size=1024 --grow + +# Reboot the machine after successful installation +reboot --eject +#poweroff + +%post --erroronfail +yum -C -y remove linux-firmware +%end + +%packages --ignoremissing --excludedocs +@core +#deltarpm +openssh-clients +NetworkManager +dracut-config-generic +kernel +-firewalld +-aic94xx-firmware +-alsa-firmware +-alsa-lib +-alsa-tools-firmware +-biosdevname +-iprutils +#-linux-firmware +-ivtv-firmware +-iwl100-firmware +-iwl1000-firmware +-iwl105-firmware +-iwl135-firmware +-iwl2000-firmware +-iwl2030-firmware +-iwl3160-firmware +-iwl3945-firmware +-iwl4965-firmware +-iwl5000-firmware +-iwl5150-firmware +-iwl6000-firmware +-iwl6000g2a-firmware +-iwl6000g2b-firmware +-iwl6050-firmware +-iwl7260-firmware +-iwl7265-firmware +-libertas-sd8686-firmware +-libertas-sd8787-firmware +-libertas-usb8388-firmware +-plymouth +-dracut-config-rescue +-kexec-tools +-microcode_ctl +%end diff --git a/packer/rocky/rocky9.ks b/packer/rocky/rocky9.ks new file mode 100644 index 00000000..886351cf --- /dev/null +++ b/packer/rocky/rocky9.ks @@ -0,0 +1,97 @@ +# more information is available at +# https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Installation_Guide/sect-kickstart-syntax.html +#cmdline + +# System authorization information +authconfig --enableshadow --passalgo=sha512 
--enablefingerprint + +# Use net installation media +url --url="https://download.rockylinux.org/pub/rocky/9/BaseOS/x86_64/os/" +repo --name="BaseOS" --baseurl="https://download.rockylinux.org/pub/rocky/9/BaseOS/x86_64/os/" --cost=100 +repo --name="AppStream" --baseurl="https://download.rockylinux.org/pub/rocky/9/AppStream/x86_64/os/" --cost=100 +repo --name="extras" --baseurl="https://download.rockylinux.org/pub/rocky/9/extras/x86_64/os/" --cost=100 + +# Run the Setup Agent on first boot +firstboot --disable + +# Keyboard layouts +keyboard --vckeymap=us --xlayouts='us' + +# System language +lang en_US.UTF-8 + +# Network information +network --bootproto=dhcp --device=enp0s3 --ipv6=auto --activate +# network --hostname=localhost.localdomain +firewall --disabled + +# Root password +rootpw --iscrypted $6$rounds=4096$2RFfXKGPKTcdF.CH$dzLlW9Pg1jbeojxRxEraHwEMAPAbpChBdrMFV1SOa6etSF2CYAe.hC1dRDM1icTOk7M4yhVS1BtwJjah9essD0 +#selinux --permissive +selinux + +# System services +services --disabled="kdump" +%addon com_redhat_kdump --disable +%end + +# System timezone +timezone UTC --isUtc + +# System bootloader configuration +bootloader --location=mbr --timeout=1 +zerombr + +clearpart --all --initlabel +part / --fstype ext4 --size=1024 --grow + +# Reboot the machine after successful installation +reboot --eject +#poweroff + +%post --erroronfail +yum -C -y remove linux-firmware +echo "PermitRootLogin yes" > /etc/ssh/sshd_config.d/01-permitrootlogin.conf +%end + +%packages --ignoremissing --excludedocs +@core +#deltarpm +openssh-clients +NetworkManager +dracut-config-generic +kernel +-firewalld +-aic94xx-firmware +-alsa-firmware +-alsa-lib +-alsa-tools-firmware +-biosdevname +-iprutils +#-linux-firmware +-ivtv-firmware +-iwl100-firmware +-iwl1000-firmware +-iwl105-firmware +-iwl135-firmware +-iwl2000-firmware +-iwl2030-firmware +-iwl3160-firmware +-iwl3945-firmware +-iwl4965-firmware +-iwl5000-firmware +-iwl5150-firmware +-iwl6000-firmware +-iwl6000g2a-firmware +-iwl6000g2b-firmware +-iwl6050-firmware +-iwl7260-firmware +-iwl7265-firmware +-libertas-sd8686-firmware +-libertas-sd8787-firmware +-libertas-usb8388-firmware +-plymouth +-dracut-config-rescue +-kexec-tools +-microcode_ctl +%end diff --git a/packer/rocky/variables.pkr.hcl b/packer/rocky/variables.pkr.hcl new file mode 100644 index 00000000..a688fa9f --- /dev/null +++ b/packer/rocky/variables.pkr.hcl @@ -0,0 +1,38 @@ +variable "appliance_name" { + type = string + default = "rocky" +} + +variable "version" { + type = string + default = "8" +} + +variable "input_dir" { + type = string +} + +variable "output_dir" { + type = string +} + +variable "headless" { + type = bool + default = false +} + +variable "rocky" { + type = map(map(string)) + + default = { + "8" = { + iso_url = "https://download.rockylinux.org/pub/rocky/8/isos/x86_64/Rocky-x86_64-boot.iso" + iso_checksum = "96c9d96c33ebacc8e909dcf8abf067b6bb30588c0c940a9c21bb9b83f3c99868" + } + + "9" = { + iso_url = "https://download.rockylinux.org/pub/rocky/9/isos/x86_64/Rocky-x86_64-boot.iso" + iso_checksum = "11e42da96a7b336de04e60d05e54a22999c4d7f3e92c19ebf31f9c71298f5b42" + } + } +} diff --git a/packer/ubuntu/10-upgrade-distro.sh b/packer/ubuntu/10-upgrade-distro.sh new file mode 100644 index 00000000..9fa165a2 --- /dev/null +++ b/packer/ubuntu/10-upgrade-distro.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# (Auto)Removes unneeded packages and upgrades +# the distro. 
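+# Note: a policy-rc.d exiting 101 tells invoke-rc.d not to start or restart
+# services while packages are installed inside the build VM; it is switched
+# back to "exit 0" once the upgrade is done.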
+
+policy_rc_d_disable() (echo "exit 101" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d)
+policy_rc_d_enable() (echo "exit 0" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d)
+
+exec 1>&2
+set -o errexit -o nounset -o pipefail
+set -x
+
+export DEBIAN_FRONTEND=noninteractive
+
+apt-get update -y
+
+policy_rc_d_disable
+
+apt-get install -y --fix-broken
+
+apt-get upgrade -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
+
+# Ensure packages needed for post-processing scripts do exist.
+apt-get install -y curl gawk grep jq
+
+policy_rc_d_enable
+
+sync
diff --git a/packer/ubuntu/11-update-grub.sh b/packer/ubuntu/11-update-grub.sh new file mode 100644 index 00000000..f65d0902 --- /dev/null +++ b/packer/ubuntu/11-update-grub.sh @@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
+# Sets kernel command line (net.ifnames=0 is particularly important),
+# then updates initramfs/initrd and grub2.
+
+exec 1>&2
+set -o errexit -o nounset -o pipefail
+set -x
+
+rm -rf /etc/default/grub.d/
+
+# Drop unwanted.
+
+# NOTE: console=ttyS*, earlyprintk=ttyS* may cause kernel panic during first boot.
+# The exact problem is identical to https://github.com/dmacvicar/terraform-provider-libvirt/issues/948.
+# A correct workaround is described here: https://bugs.launchpad.net/ubuntu/+source/cloud-initramfs-tools/+bug/1123220.
+
+gawk -i inplace -f- /etc/default/grub <<'EOF'
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<quiet\>/, "") }
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<splash\>/, "") }
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<console=[^ ]*\>/, "") }
+/^GRUB_CMDLINE_LINUX[^=]*=/ { gsub(/\<earlyprintk=[^ ]*\>/, "") }
+/^GRUB_TERMINAL=/ { gsub(/\<serial\>/, "") }
+{ print }
+EOF
+
+# Ensure required.
+
+gawk -i inplace -f- /etc/default/grub <<'EOF'
+/^GRUB_CMDLINE_LINUX=/ { found = 1 }
+/^GRUB_CMDLINE_LINUX=/ && !/net.ifnames=0/ { gsub(/"$/, " net.ifnames=0\"") }
+/^GRUB_CMDLINE_LINUX=/ && !/biosdevname=0/ { gsub(/"$/, " biosdevname=0\"") }
+{ print }
+END { if (!found) print "GRUB_CMDLINE_LINUX=\" net.ifnames=0 biosdevname=0\"" >> FILENAME }
+EOF
+
+gawk -i inplace -f- /etc/default/grub <<'EOF'
+BEGIN { update = "GRUB_TIMEOUT=0" }
+/^GRUB_TIMEOUT=/ { $0 = update; found = 1 }
+{ print }
+END { if (!found) print update >> FILENAME }
+EOF
+
+# Cleanup.
+
+gawk -i inplace -f- /etc/default/grub <<'EOF'
+{ gsub(/(" *| *")/, "\""); gsub(/ */, " ") }
+{ print }
+EOF
+
+update-initramfs -vu
+update-grub2
+
+sync
diff --git a/packer/ubuntu/80-install-context.sh b/packer/ubuntu/80-install-context.sh new file mode 100644 index 00000000..b0b36971 --- /dev/null +++ b/packer/ubuntu/80-install-context.sh @@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+
+# Downloads and installs the latest one-context package.
+
+: "${CTX_SUFFIX:=.deb}"
+
+policy_rc_d_disable() (echo "exit 101" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d)
+policy_rc_d_enable() (echo "exit 0" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d)
+
+exec 1>&2
+set -o errexit -o nounset -o pipefail
+set -x
+
+export DEBIAN_FRONTEND=noninteractive
+
+if !
stat /context/one-context*$CTX_SUFFIX; then ( + install -d /context/ && cd /context/ + curl -fsSL https://api.github.com/repos/OpenNebula/addon-context-linux/releases \ + | jq -r ".[0].assets[].browser_download_url | select(endswith(\"$CTX_SUFFIX\"))" \ + | xargs -r -n1 curl -fsSLO +) fi + +policy_rc_d_disable + +dpkg -i /context/one-context*$CTX_SUFFIX || apt-get install -y -f +dpkg -i /context/one-context*$CTX_SUFFIX + +apt-get install -y haveged open-vm-tools + +systemctl enable haveged + +# >>> Apply only on one-context >= 6.1 >>> +if ! dpkg-query -W --showformat '${Version}' one-context | grep -E '^([1-5]\.|6\.0\.)'; then + apt-get install -y --no-install-recommends --no-install-suggests netplan.io network-manager +fi +# <<< Apply only on one-context >= 6.1 <<< + +policy_rc_d_enable + +sync diff --git a/packer/ubuntu/81-configure-ssh.sh b/packer/ubuntu/81-configure-ssh.sh new file mode 100644 index 00000000..ca55c22b --- /dev/null +++ b/packer/ubuntu/81-configure-ssh.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Configures critical settings for OpenSSH server. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PasswordAuthentication no" } +/^[#\s]*PasswordAuthentication\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "PermitRootLogin without-password" } +/^[#\s]*PermitRootLogin\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' +BEGIN { update = "UseDNS no" } +/^[#\s]*UseDNS\s*/ { $0 = update; found = 1 } +{ print } +END { if (!found) print update >> FILENAME } +EOF + +sync diff --git a/packer/ubuntu/98-collect-garbage.sh b/packer/ubuntu/98-collect-garbage.sh new file mode 100644 index 00000000..3c0bab43 --- /dev/null +++ b/packer/ubuntu/98-collect-garbage.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# Cleans APT caches, removes temporary files / logs, +# removes leftover / temporary unneeded packages. 
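+# Note: cloud-init is only needed while Packer provisions the image (root
+# access via the seed ISO); the final appliance is contextualized by
+# one-context, so cloud-init is purged here together with snapd and fwupd.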
+ +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +export DEBIAN_FRONTEND=noninteractive + +apt-get purge -y cloud-init snapd fwupd + +apt-get autoremove -y --purge + +apt-get clean -y && rm -rf /var/lib/apt/lists/* + +rm -f /etc/sysctl.d/99-cloudimg-ipv6.conf + +rm -rf /context/ + +sync diff --git a/packer/ubuntu/cloud-init.yml b/packer/ubuntu/cloud-init.yml new file mode 100644 index 00000000..a2e97b34 --- /dev/null +++ b/packer/ubuntu/cloud-init.yml @@ -0,0 +1,22 @@ +#cloud-config +growpart: + mode: auto + devices: [/] + +users: + - name: root + lock_passwd: false + hashed_passwd: $6$rounds=4096$2RFfXKGPKTcdF.CH$dzLlW9Pg1jbeojxRxEraHwEMAPAbpChBdrMFV1SOa6etSF2CYAe.hC1dRDM1icTOk7M4yhVS1BtwJjah9essD0 + +disable_root: false +ssh_pwauth: true + +runcmd: + - | + gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' + BEGIN { update = "PermitRootLogin yes" } + /^#*PermitRootLogin/ { $0 = update; found = 1 } + { print } + END { if (!found) print update >>FILENAME } + EOF + - systemctl reload sshd diff --git a/packer/ubuntu/plugins.pkr.hcl b/packer/ubuntu/plugins.pkr.hcl new file mode 120000 index 00000000..44663b58 --- /dev/null +++ b/packer/ubuntu/plugins.pkr.hcl @@ -0,0 +1 @@ +../plugins.pkr.hcl \ No newline at end of file diff --git a/packer/ubuntu/ubuntu.pkr.hcl b/packer/ubuntu/ubuntu.pkr.hcl new file mode 100644 index 00000000..a7233204 --- /dev/null +++ b/packer/ubuntu/ubuntu.pkr.hcl @@ -0,0 +1,76 @@ +# Build cloud init iso +source "file" "user_data" { + source = "${var.input_dir}/cloud-init.yml" + target = "${var.input_dir}/${var.appliance_name}-userdata" +} + +build { + sources = ["sources.file.user_data"] + + provisioner "shell-local" { + inline = [ + "cloud-localds ${var.input_dir}/${var.appliance_name}-cloud-init.iso ${var.input_dir}/${var.appliance_name}-userdata", + ] + } +} + +# Build VM image +source "qemu" "ubuntu" { + cpus = 2 + memory = 2048 + accelerator = "kvm" + + iso_url = lookup(lookup(var.ubuntu, var.version, {}), "iso_url", "") + iso_checksum = lookup(lookup(var.ubuntu, var.version, {}), "iso_checksum", "") + + headless = var.headless + + disk_image = true + disk_cache = "unsafe" + disk_interface = "virtio" + net_device = "virtio-net" + format = "qcow2" + + output_directory = var.output_dir + + qemuargs = [ ["-serial", "stdio"], + ["-cpu", "host"], + ["-cdrom", "${var.input_dir}/${var.appliance_name}-cloud-init.iso"] + ] + + ssh_username = "root" + ssh_password = "opennebula" + ssh_wait_timeout = "900s" + vm_name = "${var.appliance_name}" +} + +build { + sources = ["source.qemu.ubuntu"] + + provisioner "shell" { inline = ["mkdir /context"] } + + provisioner "file" { + source = "context-linux/out/" + destination = "/context" + } + + provisioner "shell" { + execute_command = "sudo -iu root {{.Vars}} bash {{.Path}}" + + # execute *.sh + *.sh. 
from input_dir + scripts = sort(concat( + [for s in fileset(".", "*.sh") : "${var.input_dir}/${s}"], + [for s in fileset(".", "*.sh.${var.version}") : "${var.input_dir}/${s}"] + )) + expect_disconnect = true + } + + post-processor "shell-local" { + execute_command = ["bash", "-c", "{{.Vars}} {{.Script}}"] + environment_vars = [ + "OUTPUT_DIR=${var.output_dir}", + "APPLIANCE_NAME=${var.appliance_name}", + ] + scripts = [ "packer/postprocess.sh" ] + } +} diff --git a/packer/ubuntu/variables.pkr.hcl b/packer/ubuntu/variables.pkr.hcl new file mode 100644 index 00000000..71316712 --- /dev/null +++ b/packer/ubuntu/variables.pkr.hcl @@ -0,0 +1,48 @@ +variable "appliance_name" { + type = string + default = "ubuntu" +} + +variable "version" { + type = string + default = "2004" +} + +variable "input_dir" { + type = string +} + +variable "output_dir" { + type = string +} + +variable "headless" { + type = bool + default = false +} + +variable "ubuntu" { + type = map(map(string)) + + default = { + "2004" = { + iso_url = "https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img" + iso_checksum = "bfa805bde8f2d199b8e4a306a3a5823e18b1547833b90d60d8a689e7270e43ff" + } + + "2004min" = { + iso_url = "https://cloud-images.ubuntu.com/minimal/releases/focal/release/ubuntu-20.04-minimal-cloudimg-amd64.img" + iso_checksum = "a48ab165c635403c2481d372d9bc8996e7ec93750b3a475b048e861d1caba7aa" + } + + "2204" = { + iso_url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img" + iso_checksum = "6bb5247f87919b803c211afd1af74b3096be6e834dac29cfac711dad72eafea8" + } + + "2204min" = { + iso_url = "https://cloud-images.ubuntu.com/minimal/releases/jammy/release/ubuntu-22.04-minimal-cloudimg-amd64.img" + iso_checksum = "afb95ee9e75a46c0d987daae3db5d0d344770004bfa359b1775fcf22cd98ca27" + } + } +}
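For reference, building one of these templates by hand could look like the sketch below. This is only an illustration (the repository's own tooling normally drives Packer); the "ubuntu2204" appliance name and the export/ output path are example values, and Packer plus cloud-localds are assumed to be installed.

# install the pinned qemu plugin, then build the Ubuntu 22.04 variant
packer init packer/ubuntu
packer build \
    -var "appliance_name=ubuntu2204" \
    -var "version=2204" \
    -var "input_dir=packer/ubuntu" \
    -var "output_dir=export" \
    -var "headless=true" \
    packer/ubuntu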