diff --git a/.vscode/launch.json b/.vscode/launch.json
index 6ae3651e..b5975014 100644
--- a/.vscode/launch.json
+++ b/.vscode/launch.json
@@ -22,12 +22,13 @@
"mode": "auto",
"program": "${workspaceFolder}/cmd/porch/main.go",
"args": [
- "--secure-port=9443",
+ "--secure-port=4443",
"--v=7",
"--standalone-debug-mode",
- "--kubeconfig=${workspaceFolder}/deployments/local/kubeconfig",
+ "--kubeconfig=${env:KUBECONFIG}",
"--cache-directory=${workspaceFolder}/.cache",
- "--function-runner=192.168.8.202:9445"
+ "--function-runner=172.18.255.201:9445",
+ "--repo-sync-frequency=60s"
],
"cwd": "${workspaceFolder}"
},
@@ -44,29 +45,6 @@
"--",
"namespace=foo"
]
- },
- {
- "name": "Launch kpt",
- "type": "go",
- "request": "launch",
- "mode": "auto",
- "program": "${workspaceFolder}/../main.go",
- "args": [
- "alpha", "rpkg", "get"
- ],
- "cwd": "${workspaceFolder}/.."
- },
- {
- "name": "Launch kpt e2e test",
- "type": "go",
- "request": "launch",
- "mode": "test",
- "program": "${workspaceFolder}/../e2e",
- "args": [
- "-v",
- "-test.run",
- "TestPorch"
- ]
}
]
}
\ No newline at end of file
diff --git a/Makefile b/Makefile
index f8acabb8..7d7711ca 100644
--- a/Makefile
+++ b/Makefile
@@ -159,6 +159,10 @@ generate: generate-api
tidy:
@for f in $(MODULES); do (cd $$f; echo "Tidying $$f"; go mod tidy) || exit 1; done
+.PHONY: test-e2e
+test-e2e:
+ E2E=1 go test -v -race --count=1 -failfast ./test/e2e
+
.PHONY: configure-git
configure-git:
git config --global --add user.name test
@@ -266,8 +270,8 @@ push-and-deploy: push-images deploy
# `push-and-deploy` etc.)
.PHONY: deployment-config-no-sa
deployment-config-no-sa:
- rm -rf $(DEPLOYCONFIG_NO_SA_DIR) || true
mkdir -p $(DEPLOYCONFIG_NO_SA_DIR)
+ find $(DEPLOYCONFIG_NO_SA_DIR) ! -name 'resourcegroup.yaml' -type f -exec rm -f {} +
./scripts/create-deployment-blueprint.sh \
--destination "$(DEPLOYCONFIG_NO_SA_DIR)" \
--server-image "$(IMAGE_REPO)/$(PORCH_SERVER_IMAGE):$(IMAGE_TAG)" \
diff --git a/deployments/local/porch-api-endpoints.yaml b/deployments/local/porch-api-endpoints.yaml
new file mode 100644
index 00000000..98abbde6
--- /dev/null
+++ b/deployments/local/porch-api-endpoints.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Endpoints
+metadata:
+ annotations:
+ config.kubernetes.io/target-path: 3-porch-server-endpoints.yaml
+ name: api
+ namespace: porch-system
+subsets:
+- addresses:
+ - ip: 172.17.0.1
+ ports:
+ - appProtocol: https
+ name: api
+ port: 4443
+ protocol: TCP
+ - name: webhooks
+ port: 8443
+ protocol: TCP
\ No newline at end of file
diff --git a/docs/tutorials/porch-development-environment/README.md b/docs/tutorials/porch-development-environment/README.md
index 99e021b8..7a1c2d41 100644
--- a/docs/tutorials/porch-development-environment/README.md
+++ b/docs/tutorials/porch-development-environment/README.md
@@ -1,372 +1,79 @@
-# Setting up a development environment for Porch
+# Table of contents
-This tutorial gives short instructions on how to set up a development environment for Porch. It outlines the steps to get a [kind](https://kind.sigs.k8s.io/) cluster up
-and running to which a Porch instance running in Visual Studio Code can connect to and interact with.
+- [Table of contents](#table-of-contents)
+- [Setting up the development environment for Porch](#setting-up-the-development-environment-for-porch)
+  * [Setup the environment for everything automatically](#setup-the-environment-for-everything-automatically)
+ * [Configure VSCode to run the Porch (api)server](#configure-vscode-to-run-the-porch-apiserver)
+ * [Build the CLI](#build-the-cli)
+ * [Test that everything works as expected](#test-that-everything-works-as-expected)
+ + [Run the porch unit tests](#run-the-porch-unit-tests)
+ + [Run the end-to-end tests](#run-the-end-to-end-tests)
+- [Create Repositories using your local Porch server](#create-repositories-using-your-local-porch-server)
+- [Restart from scratch](#restart-from-scratch)
-> **_NOTE:_** The code itself can be run on a remote VM and we can use the [VSCode Remote SSH](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-ssh) plugin to connect to it as our Dev environment.
+# Setting up the development environment for Porch
-
+This tutorial gives short instructions on how to set up a development environment for Porch. It outlines the steps to get
+a [kind](https://kind.sigs.k8s.io/) cluster up and running to which a Porch instance running in Visual Studio Code can connect to and interact with.
+If you are not familiar with how porch works, it is highly recommended that you go through the [Starting with Porch tutorial](https://github.com/nephio-project/porch/tree/main/docs/tutorials/starting-with-porch) before going through this one.
-# Setup kind with MetalLB and Gitea
+> **_NOTE:_** As your Dev environment, you can run the code on a remote VM and use the [VSCode Remote SSH](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-ssh) plugin to connect to it.
-Follow steps 1-5 inclusive of the [Starting with Porch](https://github.com/nephio-project/porch/tree/main/docs/tutorials/starting-with-porch) tutorial. You now have the Kind cluster `management` running with Gitea installed on it. Gitea has the repository `management` defined.
+### Extra steps for MacOS users
+The setup script uses the `make deployment-config-no-sa` target to generate the deployment files for porch. The scripts called by this make target use recent `bash` additions and expect the gnu version of the `sed` command. MacOS comes with `bash` 3.x.x by default and with a version of `sed` that is incompatible with gnu `sed`.
-> **_NOTE:_** This [setup script](bin/setup.sh) automates steps 1-5 of the Starting with Porch tutorial. You may need to adapt this script to your local environment and also have [pre requisites](https://github.com/nephio-project/porch/tree/main/docs/tutorials/starting-with-porch#prerequisites) installed on the target machine.
+1. Install `bash` 4.x.x or better using homebrew, see [this post for details](https://apple.stackexchange.com/questions/193411/update-bash-to-version-4-0-on-osx)
+2. Install `gsed` using homebrew, see [this post for details](https://stackoverflow.com/questions/4247068/sed-command-with-i-option-failing-on-mac-but-works-on-linux)
+3. `sudo ln -s /opt/homebrew/bin/gsed /opt/homebrew/bin/sed` create a soft link for `sed` from `gsed`
+4. Ensure that `/opt/homebrew/bin` is earlier in your path than `/bin` and `/usr/bin`
-> **_NOTE:_** This [cleardown script script](bin/cleardown.sh) clears everything down by deleting the `management` Kind cluster. USE WITH CARE.
+> **_NOTE:_** The changes above **permanently** change the `bash` version and `sed` for **all** applications and may cause side effects. You may wish to revert the changes (particularly the `sed-->gsed` soft link) when you complete your work on Porch.
-Switch to use the kind-management context if necessary:
-```
-kubectl config use-context kind-management
-```
-
-
-You can reach the Gitea web interface on the address reported by the following command:
-```
-kubectl get svc -n gitea gitea
-```
-Sample output:
-```
-NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
-gitea LoadBalancer 10.197.10.118 172.18.255.200 22:31260/TCP,3000:31012/TCP 8m35s
-```
-
-
-
-# Install the Porch function runner
-
-The Porch server requires that the Porch function runner is executing. To install the Porch function runner on the Kind management cluster, execute the following commands:
-
-```
-kubectl apply -f https://raw.githubusercontent.com/nephio-project/catalog/main/nephio/core/porch/1-namespace.yaml
-kubectl apply -f https://raw.githubusercontent.com/nephio-project/catalog/main/nephio/core/porch/2-function-runner.yaml
-
-kubectl wait --namespace porch-system \
- --for=condition=ready pod \
- --selector=app=function-runner \
- --timeout=300s
-```
-
-The Porch function runner should now be executing:
-
-```
-kubectl get pod -n porch-system --selector=app=function-runner
-```
-Sample output:
-```
-NAME READY STATUS RESTARTS AGE
-function-runner-67d4c7c7b-7wm97 1/1 Running 0 16m
-function-runner-67d4c7c7b-czvvq 1/1 Running 0 16m
-```
-
-Expose the `function-runner` service so that the Porch server running in Visual Studio Code can reach it. Patch the service type from `ClusterIP` to `LoadBalancer`:
-
-```
-kubectl patch svc -n porch-system function-runner -p '{"spec": {"type": "LoadBalancer"}}'
-```
-Check that the `function-runner` service has been assigned an external IP address:
-```
-kubectl get svc -n porch-system function-runner
-```
-Sample output:
-```
-NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
-function-runner LoadBalancer 10.197.168.148 172.18.255.201 9445:31794/TCP 22m
-```
-
+## Setup the environment for everything automatically
-# Install the Porch CRDs
+This [setup script](bin/setup.sh) automatically builds a porch development environment.
+Please note that this is only one of many possible ways of building a working porch development environment so feel free to customize it to suit your own environment.
+The setup script will perform the following steps:
+1. Install a kind cluster.
+ The name of the cluster is read from the PORCH_TEST_CLUSTER environment variable, otherwise it defaults to `porch-test`.
+ The configuration of the cluster is taken from [here](bin/kind_porch_test_cluster.yaml).
-The Custom Resource Definitions can be applied to the cluster from the upstream porch kpt pkg as follows:
+1. Install the MetalLB load balancer into the cluster, in order to allow `LoadBalancer` typed Services to work properly.
-```
-kubectl apply -f https://raw.githubusercontent.com/nephio-project/catalog/main/nephio/core/porch/0-packagerevs.yaml
-kubectl apply -f https://raw.githubusercontent.com/nephio-project/catalog/main/nephio/core/porch/0-packagevariants.yaml
-kubectl apply -f https://raw.githubusercontent.com/nephio-project/catalog/main/nephio/core/porch/0-packagevariantsets.yaml
-kubectl apply -f https://raw.githubusercontent.com/nephio-project/catalog/main/nephio/core/porch/0-repositories.yaml
-```
+1. Install the Gitea git server into the cluster.
+ This can be used to test porch during development, but it is not used in automated end-to-end tests.
+ Gitea is exposed to the host via port 3000. The GUI is accessible via http://localhost:3000/nephio, or http://172.18.255.200:3000/nephio (username: nephio, password: secret).
-Check the entries:
-```
-kubectl get crd | grep porch
-```
-Sample output:
-```
-packagerevs.config.porch.kpt.dev 2024-03-11T15:07:12Z
-packagevariants.config.porch.kpt.dev 2024-03-11T15:07:12Z
-packagevariantsets.config.porch.kpt.dev 2024-03-11T15:07:13Z
-repositories.config.porch.kpt.dev 2024-03-11T15:07:14Z
-```
-These new `resources` are now also visible wihtin the kubernetes api-resources:
-```
-kubectl api-resources | grep -i porch
-```
-Sample output:
-```
-packagerevs config.porch.kpt.dev/v1alpha1 true PackageRev
-packagevariants config.porch.kpt.dev/v1alpha1 true PackageVariant
-packagevariantsets config.porch.kpt.dev/v1alpha2 true PackageVariantSet
-repositories config.porch.kpt.dev/v1alpha1 true Repository
+ > **_NOTE:_** If you are using WSL2 (Windows Subsystem for Linux), then Gitea is also accessible from the Windows host via the http://localhost:3000/nephio URL.
-```
+1. Generate the PKI resources (key pairs and certificates) required for end-to-end tests.
-
+1. Install porch CRDs into the cluster.
-# Deploy the porch APIService resources
+1. Build the porch containers and load them into the nodes of the kind cluster.
-The Porch api server requires that the following resources are defined in the K8S cluster where it is executed:
+1. Deploy all porch components in the kind cluster, except the porch-server (porch's aggregated API server).
+ The function-runner service will be exposed to the host via 172.18.255.201:9445.
-- A `porch-system` namespace
-- An APIService called `apiservice.apiregistration.k8s.io/v1alpha1.porch.kpt.dev`
-- A `service.api` service to route the API Service requests.
+1. Build the porch CLI binary.
+ The result will be generated as `.build/porchctl`.
-Slight differences in docker networking require a secific setup depending on the host OS.
+That's it! If you want to run the steps manually, please use the code of the script as a detailed description.
-## Mac OS example
+The setup script is idempotent in the sense that you can rerun it without cleaning up first. This also means that if the script is interrupted for any reason, and you run it again it should continue the process where it left off.
-Docker networking on Mac allows traffic to be routed via a default DNS name `host.docker.internal`, which is not available on Linux.
-A sample configuration is available at `deployments/local/localconfig.yaml`
+## Configure VSCode to run the Porch (api)server
-Apply the KRM:
-```
-kubectl apply -f https://raw.githubusercontent.com/nephio-project/porch/main/deployments/local/localconfig.yaml
-```
-Verify that the resources have been created
-```
-kubectl api-resources | grep -i porch
-functions config.porch.kpt.dev/v1alpha1 true Function
-packagerevs config.porch.kpt.dev/v1alpha1 true PackageRev
-packagevariants config.porch.kpt.dev/v1alpha1 true PackageVariant
-packagevariantsets config.porch.kpt.dev/v1alpha2 true PackageVariantSet
-repositories config.porch.kpt.dev/v1alpha1 true Repository
-```
-## Linux OS example
+Once the environment is set up you can start the porch API server locally on your machine. There are multiple ways to do that, the simplest way is to run it in a VSCode IDE:
-Linux docker networking between `kind` clusters and the host processes require the traffic to be routed through the default docker bridge. See [here](https://github.com/kubernetes-sigs/kind/issues/1200#issuecomment-1532192361) for more details.
+1. Edit your local `.vscode/launch.json` file as follows: Change the `--kubeconfig` argument of the `Launch Server` configuration to point to a KUBECONFIG file that is set to the kind cluster as the current context.
-Apply the following resources:
-```
-kubectl apply -f - <
-
-# Configure VSCode to run the Porch (api)server
-
-From the root of your checked out Porch repo.
-
-Edit your local `.vscode.launch.json` file as follows:
-1. Change the `--kubeconfig` value to point at your management cluster configuration file.
-2. Change the `--function-runner` IP address to the external IP of the function runner service running in the `management` cluster.
-3. You can alternatively specify `KUBECONFIG` in an `env` section of the configuration instead of using the `--kubeconfig` flag.
-
-```
- {
- "name": "Launch Server",
- "type": "go",
- "request": "launch",
- "mode": "auto",
- "program": "${workspaceFolder}/cmd/porch/main.go",
- "args": [
- "--secure-port=9443",
- "--v=7",
- "--standalone-debug-mode",
- "--kubeconfig=${userHome}/.kube/kind-management-config",
- "--cache-directory=${workspaceFolder}/.cache",
- "--function-runner=172.18.255.201:9445"
- ],
- "cwd": "${workspaceFolder}"
- },
-```
-
-You can now launch the Porch server locally in VSCode by selecting the "Launch Server" configuration on the VSCode "Run and Debug" window. for
+1. You can now launch the Porch server locally in VSCode by selecting the "Launch Server" configuration on the VSCode "Run and Debug" window. For
more information please refer to the [VSCode debugging documentation](https://code.visualstudio.com/docs/editor/debugging).
-
-Sample output
-
-```
-kubectl patch svc -n porch-system function-runner -p '{"spec": {"type": "LoadBalancer"}}'Starting: /home/ubuntu/go/bin/dlv dap --listen=127.0.0.1:40695 --log-dest=3 from /home/ubuntu/porch/cmd/porch
-DAP server listening at: 127.0.0.1:40695
-Type 'dlv help' for list of commands.
-I0311 15:53:26.802270 2101307 dynamic_serving_content.go:113] "Loaded a new cert/key pair" name="serving-cert::apiserver.local.config/certificates/apiserver.crt::apiserver.local.config/certificates/apiserver.key"
-W0311 15:53:26.963694 2101307 authentication.go:339] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work.
-W0311 15:53:26.963716 2101307 authentication.go:363] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work.
-W0311 15:53:26.963878 2101307 recommended.go:152] Neither kubeconfig is provided nor service-account is mounted, so APIPriorityAndFairness will be disabled
-I0311 15:53:26.963942 2101307 maxinflight.go:140] "Initialized nonMutatingChan" len=400
-I0311 15:53:26.963953 2101307 maxinflight.go:146] "Initialized mutatingChan" len=200
-I0311 15:53:26.963979 2101307 timing_ratio_histogram.go:202] "TimingRatioHistogramVec.NewForLabelValuesSafe hit the inefficient case" fqName="apiserver_flowcontrol_read_vs_write_current_requests" labelValues=[executing readOnly]
-I0311 15:53:26.963990 2101307 timing_ratio_histogram.go:202] "TimingRatioHistogramVec.NewForLabelValuesSafe hit the inefficient case" fqName="apiserver_flowcontrol_read_vs_write_current_requests" labelValues=[executing mutating]
-I0311 15:53:26.964000 2101307 maxinflight.go:117] "Set denominator for readonly requests" limit=400
-I0311 15:53:26.964006 2101307 maxinflight.go:121] "Set denominator for mutating requests" limit=200
-I0311 15:53:26.964030 2101307 config.go:762] Not requested to run hook priority-and-fairness-config-consumer
-I0311 15:53:26.966686 2101307 loader.go:373] Config loaded from file: /home/ubuntu/.kube/kind-management-config
-I0311 15:53:26.967652 2101307 round_trippers.go:463] GET https://127.0.0.1:31000/api?timeout=32s
-I0311 15:53:26.967666 2101307 round_trippers.go:469] Request Headers:
-I0311 15:53:26.967672 2101307 round_trippers.go:473] Accept: application/json;g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList,application/json
-I0311 15:53:26.967677 2101307 round_trippers.go:473] User-Agent: __debug_bin3534874763/v0.0.0 (linux/amd64) kubernetes/$Format
-I0311 15:53:26.975046 2101307 round_trippers.go:574] Response Status: 200 OK in 7 milliseconds
-I0311 15:53:26.975945 2101307 round_trippers.go:463] GET https://127.0.0.1:31000/apis?timeout=32s
-I0311 15:53:26.975958 2101307 round_trippers.go:469] Request Headers:
-I0311 15:53:26.975964 2101307 round_trippers.go:473] Accept: application/json;g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList,application/json
-I0311 15:53:26.975968 2101307 round_trippers.go:473] User-Agent: __debug_bin3534874763/v0.0.0 (linux/amd64) kubernetes/$Format
-I0311 15:53:26.976802 2101307 round_trippers.go:574] Response Status: 200 OK in 0 milliseconds
-I0311 15:53:26.979575 2101307 loader.go:373] Config loaded from file: /home/ubuntu/.kube/kind-management-config
-I0311 15:53:26.979853 2101307 grpcruntime.go:41] Dialing grpc function runner "172.18.255.201:9445"
-I0311 15:53:26.979897 2101307 clientconn.go:318] "[core] [Channel #1] Channel created\n"
-I0311 15:53:26.979924 2101307 logging.go:43] "[core] [Channel #1] original dial target is: \"172.18.255.201:9445\"\n"
-I0311 15:53:26.979941 2101307 logging.go:43] "[core] [Channel #1] dial target \"172.18.255.201:9445\" parse failed: parse \"172.18.255.201:9445\": first path segment in URL cannot contain colon\n"
-I0311 15:53:26.979951 2101307 logging.go:43] "[core] [Channel #1] fallback to scheme \"passthrough\"\n"
-I0311 15:53:26.979974 2101307 logging.go:43] "[core] [Channel #1] parsed dial target is: {URL:{Scheme:passthrough Opaque: User: Host: Path:/172.18.255.201:9445 RawPath: OmitHost:false ForceQuery:false RawQuery: Fragment: RawFragment:}}\n"
-I0311 15:53:26.979990 2101307 logging.go:43] "[core] [Channel #1] Channel authority set to \"172.18.255.201:9445\"\n"
-I0311 15:53:26.980145 2101307 logging.go:43] "[core] [Channel #1] Resolver state updated: {\n \"Addresses\": [\n {\n \"Addr\": \"172.18.255.201:9445\",\n \"ServerName\": \"\",\n \"Attributes\": null,\n \"BalancerAttributes\": null,\n \"Metadata\": null\n }\n ],\n \"Endpoints\": [\n {\n \"Addresses\": [\n {\n \"Addr\": \"172.18.255.201:9445\",\n \"ServerName\": \"\",\n \"Attributes\": null,\n \"BalancerAttributes\": null,\n \"Metadata\": null\n }\n ],\n \"Attributes\": null\n }\n ],\n \"ServiceConfig\": null,\n \"Attributes\": null\n} (resolver returned new addresses)\n"
-I0311 15:53:26.980192 2101307 logging.go:43] "[core] [Channel #1] Channel switches to new LB policy \"pick_first\"\n"
-I0311 15:53:26.980251 2101307 pickfirst.go:141] "[core] [pick-first-lb 0xc001813980] Received new config {\n \"shuffleAddressList\": false\n}, resolver state {\n \"Addresses\": [\n {\n \"Addr\": \"172.18.255.201:9445\",\n \"ServerName\": \"\",\n \"Attributes\": null,\n \"BalancerAttributes\": null,\n \"Metadata\": null\n }\n ],\n \"Endpoints\": [\n {\n \"Addresses\": [\n {\n \"Addr\": \"172.18.255.201:9445\",\n \"ServerName\": \"\",\n \"Attributes\": null,\n \"BalancerAttributes\": null,\n \"Metadata\": null\n }\n ],\n \"Attributes\": null\n }\n ],\n \"ServiceConfig\": null,\n \"Attributes\": null\n}\n"
-I0311 15:53:26.980284 2101307 clientconn.go:962] "[core] [Channel #1 SubChannel #2] Subchannel created\n"
-I0311 15:53:26.980300 2101307 logging.go:43] "[core] [Channel #1] Channel Connectivity change to CONNECTING\n"
-I0311 15:53:26.980365 2101307 logging.go:43] "[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to CONNECTING\n"
-I0311 15:53:26.980395 2101307 logging.go:43] "[core] [Channel #1 SubChannel #2] Subchannel picks a new address \"172.18.255.201:9445\" to connect\n"
-I0311 15:53:26.980492 2101307 pickfirst.go:184] "[core] [pick-first-lb 0xc001813980] Received SubConn state update: 0xc001813bc0, {ConnectivityState:CONNECTING ConnectionError:}\n"
-I0311 15:53:26.980803 2101307 logging.go:43] "[core] [Channel #1 SubChannel #2] Subchannel Connectivity change to READY\n"
-I0311 15:53:26.980831 2101307 pickfirst.go:184] "[core] [pick-first-lb 0xc001813980] Received SubConn state update: 0xc001813bc0, {ConnectivityState:READY ConnectionError:}\n"
-I0311 15:53:26.980845 2101307 logging.go:43] "[core] [Channel #1] Channel Connectivity change to READY\n"
-I0311 15:53:26.994444 2101307 apiserver.go:297] Cert storage dir not provided, skipping webhook setup
-I0311 15:53:26.994524 2101307 background.go:52] Background routine starting ...
-I0311 15:53:26.996985 2101307 healthz.go:176] Installing health checkers for (/healthz): "ping","log","poststarthook/max-in-flight-filter","poststarthook/storage-object-count-tracker-hook"
-I0311 15:53:26.997336 2101307 healthz.go:176] Installing health checkers for (/livez): "ping","log","poststarthook/max-in-flight-filter","poststarthook/storage-object-count-tracker-hook"
-I0311 15:53:26.997732 2101307 healthz.go:176] Installing health checkers for (/readyz): "ping","log","poststarthook/max-in-flight-filter","poststarthook/storage-object-count-tracker-hook","shutdown"
-I0311 15:53:26.998191 2101307 genericapiserver.go:484] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete
-I0311 15:53:26.998572 2101307 dynamic_serving_content.go:132] "Starting controller" name="serving-cert::apiserver.local.config/certificates/apiserver.crt::apiserver.local.config/certificates/apiserver.key"
-I0311 15:53:26.998666 2101307 tlsconfig.go:200] "Loaded serving cert" certName="serving-cert::apiserver.local.config/certificates/apiserver.crt::apiserver.local.config/certificates/apiserver.key" certDetail="\"localhost@1709900706\" [serving] validServingFor=[127.0.0.1,localhost,localhost] issuer=\"localhost-ca@1709900706\" (2024-03-08 11:25:05 +0000 UTC to 2025-03-08 11:25:05 +0000 UTC (now=2024-03-11 15:53:26.998640146 +0000 UTC))"
-I0311 15:53:26.998865 2101307 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopback" certDetail="\"apiserver-loopback-client@1710172406\" [serving] validServingFor=[apiserver-loopback-client] issuer=\"apiserver-loopback-client-ca@1710172406\" (2024-03-11 14:53:26 +0000 UTC to 2025-03-11 14:53:26 +0000 UTC (now=2024-03-11 15:53:26.998844612 +0000 UTC))"
-I0311 15:53:26.998890 2101307 secure_serving.go:210] Serving securely on [::]:9443
-I0311 15:53:26.998906 2101307 genericapiserver.go:589] [graceful-termination] waiting for shutdown to be initiated
-I0311 15:53:26.998920 2101307 tlsconfig.go:240] "Starting DynamicServingCertificateController"
-I0311 15:53:27.333650 2101307 handler.go:143] porch-apiserver: GET "/apis/porch.kpt.dev/v1alpha1/packagerevisions" satisfied by gorestful with webservice /apis/porch.kpt.dev/v1alpha1
-I0311 15:53:27.333969 2101307 round_trippers.go:463] GET https://127.0.0.1:31000/apis/config.porch.kpt.dev/v1alpha1/repositories
-I0311 15:53:27.333982 2101307 round_trippers.go:469] Request Headers:
-I0311 15:53:27.333990 2101307 round_trippers.go:473] Accept: application/json, */*
-I0311 15:53:27.333995 2101307 round_trippers.go:473] User-Agent: __debug_bin3534874763/v0.0.0 (linux/amd64) kubernetes/$Format
-I0311 15:53:27.335504 2101307 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
-I0311 15:53:27.336224 2101307 httplog.go:132] "HTTP" verb="LIST" URI="/apis/porch.kpt.dev/v1alpha1/packagerevisions?limit=500&resourceVersion=0" latency="2.73421ms" userAgent="kube-controller-manager/v1.29.2 (linux/amd64) kubernetes/4b8e819/metadata-informers" audit-ID="2bcc1626-35fb-495b-a4cf-fd99a83c7689" srcIP="172.18.0.2:24183" resp=200
-I0311 15:53:27.337502 2101307 handler.go:143] porch-apiserver: GET "/apis/porch.kpt.dev/v1alpha1/packagerevisions" satisfied by gorestful with webservice /apis/porch.kpt.dev/v1alpha1
-I0311 15:53:27.337598 2101307 get.go:257] "Starting watch" path="/apis/porch.kpt.dev/v1alpha1/packagerevisions" resourceVersion="" labels="" fields="" timeout="9m23s"
-I0311 15:53:27.337712 2101307 watchermanager.go:93] added watcher 0xc001a99080; there are now 1 active watchers and 1 slots
-I0311 15:53:27.337793 2101307 round_trippers.go:463] GET https://127.0.0.1:31000/apis/config.porch.kpt.dev/v1alpha1/repositories
-I0311 15:53:27.337803 2101307 round_trippers.go:469] Request Headers:
-I0311 15:53:27.337809 2101307 round_trippers.go:473] Accept: application/json, */*
-I0311 15:53:27.337813 2101307 round_trippers.go:473] User-Agent: __debug_bin3534874763/v0.0.0 (linux/amd64) kubernetes/$Format
-I0311 15:53:27.338998 2101307 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
-I0311 15:53:27.339083 2101307 watch.go:201] watch 0xc0025eb8f0: moving watch into streaming mode after sentAdd 0, sentBacklog 0, sentNewBacklog 0
-I0311 15:53:27.995010 2101307 background.go:76] Starting watch ...
-I0311 15:53:27.995248 2101307 round_trippers.go:463] GET https://127.0.0.1:31000/apis/config.porch.kpt.dev/v1alpha1/repositories?allowWatchBookmarks=true&watch=true
-I0311 15:53:27.995260 2101307 round_trippers.go:469] Request Headers:
-I0311 15:53:27.995268 2101307 round_trippers.go:473] Accept: application/json, */*
-I0311 15:53:27.995273 2101307 round_trippers.go:473] User-Agent: __debug_bin3534874763/v0.0.0 (linux/amd64) kubernetes/$Format
-I0311 15:53:27.996235 2101307 round_trippers.go:574] Response Status: 200 OK in 0 milliseconds
-I0311 15:53:27.996299 2101307 background.go:88] Watch successfully started.
-I0311 15:53:29.226891 2101307 handler.go:133] porch-apiserver: GET "/apis" satisfied by gorestful with webservice /apis
-I0311 15:53:29.227219 2101307 httplog.go:132] "HTTP" verb="GET" URI="/apis" latency="433.918µs" userAgent="" audit-ID="8a1a3928-6cb3-4cc1-87b2-1650858cbe05" srcIP="172.18.0.2:24183" resp=406
-I0311 15:53:29.227817 2101307 handler.go:143] porch-apiserver: GET "/apis/porch.kpt.dev/v1alpha1" satisfied by gorestful with webservice /apis/porch.kpt.dev/v1alpha1
-I0311 15:53:29.228088 2101307 httplog.go:132] "HTTP" verb="GET" URI="/apis/porch.kpt.dev/v1alpha1" latency="394.332µs" userAgent="" audit-ID="7d0e929b-2f88-40de-8332-6070b1511f1e" srcIP="172.18.0.2:24183" resp=200
-I0311 15:53:29.318292 2101307 handler.go:143] porch-apiserver: GET "/apis/porch.kpt.dev/v1alpha1" satisfied by gorestful with webservice /apis/porch.kpt.dev/v1alpha1
-I0311 15:53:29.318395 2101307 handler.go:143] porch-apiserver: GET "/apis/porch.kpt.dev/v1alpha1" satisfied by gorestful with webservice /apis/porch.kpt.dev/v1alpha1
-I0311 15:53:29.318402 2101307 handler.go:143] porch-apiserver: GET "/apis/porch.kpt.dev/v1alpha1" satisfied by gorestful with webservice /apis/porch.kpt.dev/v1alpha1
-I0311 15:53:29.318466 2101307 httplog.go:132] "HTTP" verb="GET" URI="/apis/porch.kpt.dev/v1alpha1" latency="265.744µs" userAgent="Go-http-client/2.0" audit-ID="75639942-377e-4154-8415-68a76ef6f3d4" srcIP="172.18.0.2:6653" resp=200
-I0311 15:53:29.318478 2101307 handler.go:143] porch-apiserver: GET "/apis/porch.kpt.dev/v1alpha1" satisfied by gorestful with webservice /apis/porch.kpt.dev/v1alpha1
-I0311 15:53:29.318527 2101307 httplog.go:132] "HTTP" verb="GET" URI="/apis/porch.kpt.dev/v1alpha1" latency="197.304µs" userAgent="Go-http-client/2.0" audit-ID="ae8435b4-c8db-4660-86c5-19cd8305459e" srcIP="172.18.0.2:6653" resp=200
-I0311 15:53:29.318562 2101307 httplog.go:132] "HTTP" verb="GET" URI="/apis/porch.kpt.dev/v1alpha1" latency="267.733µs" userAgent="Go-http-client/2.0" audit-ID="eda93660-81fa-4528-b30c-17769417f8b0" srcIP="172.18.0.2:6653" resp=200
-I0311 15:53:29.318589 2101307 httplog.go:132] "HTTP" verb="GET" URI="/apis/porch.kpt.dev/v1alpha1" latency="163.598µs" userAgent="Go-http-client/2.0" audit-ID="df104da4-f2a8-4ac9-a18f-016efc24cd5d" srcIP="172.18.0.2:6653" resp=200
-I0311 15:53:29.318593 2101307 handler.go:143] porch-apiserver: GET "/apis/porch.kpt.dev/v1alpha1" satisfied by gorestful with webservice /apis/porch.kpt.dev/v1alpha1
-I0311 15:53:29.318746 2101307 httplog.go:132] "HTTP" verb="GET" URI="/apis/porch.kpt.dev/v1alpha1" latency="220.87µs" userAgent="Go-http-client/2.0" audit-ID="1b821621-376c-494c-8490-4646d04a88e0" srcIP="172.18.0.2:6653" resp=200
-I0311 15:53:29.320659 2101307 secure_serving.go:296] http: TLS handshake error from 172.18.0.2:44304: EOF
-I0311 15:53:29.320719 2101307 secure_serving.go:296] http: TLS handshake error from 172.18.0.2:11578: EOF
-I0311 15:53:59.314770 2101307 handler.go:143] porch-apiserver: GET "/apis/porch.kpt.dev/v1alpha1" satisfied by gorestful with webservice /apis/porch.kpt.dev/v1alpha1
-I0311 15:53:59.314797 2101307 handler.go:143] porch-apiserver: GET "/apis/porch.kpt.dev/v1alpha1" satisfied by gorestful with webservice /apis/porch.kpt.dev/v1alpha1
-I0311 15:53:59.314774 2101307 handler.go:143] porch-apiserver: GET "/apis/porch.kpt.dev/v1alpha1" satisfied by gorestful with webservice /apis/porch.kpt.dev/v1alpha1
-I0311 15:53:59.314774 2101307 handler.go:143] porch-apiserver: GET "/apis/porch.kpt.dev/v1alpha1" satisfied by gorestful with webservice /apis/porch.kpt.dev/v1alpha1
-I0311 15:53:59.314996 2101307 httplog.go:132] "HTTP" verb="GET" URI="/apis/porch.kpt.dev/v1alpha1" latency="400.618µs" userAgent="Go-http-client/2.0" audit-ID="82ea0403-a6b2-4222-abb4-de91f0d26a0c" srcIP="172.18.0.2:6653" resp=200
-I0311 15:53:59.315027 2101307 httplog.go:132] "HTTP" verb="GET" URI="/apis/porch.kpt.dev/v1alpha1" latency="394.241µs" userAgent="Go-http-client/2.0" audit-ID="3e956684-e5aa-48ab-a2c3-fb896ef80017" srcIP="172.18.0.2:6653" resp=200
-I0311 15:53:59.314996 2101307 httplog.go:132] "HTTP" verb="GET" URI="/apis/porch.kpt.dev/v1alpha1" latency="368.308µs" userAgent="Go-http-client/2.0" audit-ID="6f327a77-a5bb-4eec-b632-8ef9300d7472" srcIP="172.18.0.2:6653" resp=200
-I0311 15:53:59.314777 2101307 handler.go:143] porch-apiserver: GET "/apis/porch.kpt.dev/v1alpha1" satisfied by gorestful with webservice /apis/porch.kpt.dev/v1alpha1
-I0311 15:53:59.315181 2101307 httplog.go:132] "HTTP" verb="GET" URI="/apis/porch.kpt.dev/v1alpha1" latency="587.412µs" userAgent="Go-http-client/2.0" audit-ID="ab5efec5-0f49-41fb-a058-b64d96933437" srcIP="172.18.0.2:6653" resp=200
-I0311 15:53:59.315200 2101307 httplog.go:132] "HTTP" verb="GET" URI="/apis/porch.kpt.dev/v1alpha1" latency="594.044µs" userAgent="Go-http-client/2.0" audit-ID="eecef25f-6825-4be7-94c9-86cc7e1966d4" srcIP="172.18.0.2:6653" resp=200
-I0311 15:54:00.364529 2101307 handler.go:153] porch-apiserver: GET "/openapi/v2" satisfied by nonGoRestful
-I0311 15:54:00.364560 2101307 pathrecorder.go:241] porch-apiserver: "/openapi/v2" satisfied by exact match
-I0311 15:54:00.380302 2101307 handler.go:153] porch-apiserver: GET "/openapi/v2" satisfied by nonGoRestful
-I0311 15:54:00.380326 2101307 pathrecorder.go:241] porch-apiserver: "/openapi/v2" satisfied by exact match
-I0311 15:54:00.380694 2101307 httplog.go:132] "HTTP" verb="GET" URI="/openapi/v2" latency="16.285147ms" userAgent="" audit-ID="35d93c8e-31a0-4b5e-8f5b-6bc76760b905" srcIP="172.18.0.2:24183" resp=304
-I0311 15:54:00.380740 2101307 httplog.go:132] "HTTP" verb="GET" URI="/openapi/v2" latency="540.523µs" userAgent="" audit-ID="b313e554-539d-4e3d-b4e0-223c2baf5a48" srcIP="172.18.0.2:24183" resp=304
-I0311 15:54:26.995316 2101307 background.go:115] Background task 2024-03-11 15:54:26.995287314 +0000 UTC m=+60.349999574
-I0311 15:54:26.995356 2101307 background.go:188] background-refreshing repositories
-I0311 15:54:26.995487 2101307 round_trippers.go:463] GET https://127.0.0.1:31000/apis/config.porch.kpt.dev/v1alpha1/repositories
-I0311 15:54:26.995496 2101307 round_trippers.go:469] Request Headers:
-I0311 15:54:26.995504 2101307 round_trippers.go:473] Accept: application/json, */*
-I0311 15:54:26.995509 2101307 round_trippers.go:473] User-Agent: __debug_bin3534874763/v0.0.0 (linux/amd64) kubernetes/$Format
-I0311 15:54:26.997313 2101307 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
-```
-
-
-
-Check that the apiservice is now Ready:
+1. Check that the apiservice is ready:
```
kubectl get apiservice v1alpha1.porch.kpt.dev
```
@@ -375,28 +82,25 @@ Sample output:
NAME SERVICE AVAILABLE AGE
v1alpha1.porch.kpt.dev porch-system/api True 18m
```
-
-
Check the porch api-resources:
-
-We should now also have the `porch.kpt.dev/v1alpha1` resources available
```
kubectl api-resources | grep porch
```
Sample output:
```
-...
-
-functions porch.kpt.dev/v1alpha1 true Function
-packagerevisionresources porch.kpt.dev/v1alpha1 true PackageRevisionResources
-packagerevisions porch.kpt.dev/v1alpha1 true PackageRevision
-packages porch.kpt.dev/v1alpha1 true Package
-
+packagerevs config.porch.kpt.dev/v1alpha1 true PackageRev
+packagevariants config.porch.kpt.dev/v1alpha1 true PackageVariant
+packagevariantsets config.porch.kpt.dev/v1alpha2 true PackageVariantSet
+repositories config.porch.kpt.dev/v1alpha1 true Repository
+functions porch.kpt.dev/v1alpha1 true Function
+packagerevisionresources porch.kpt.dev/v1alpha1 true PackageRevisionResources
+packagerevisions porch.kpt.dev/v1alpha1 true PackageRevision
+packages porch.kpt.dev/v1alpha1 true PorchPackage
```
Check to ensure that the apiserver is serving requests:
```
-curl https://localhost:9443/apis/porch.kpt.dev/v1alpha1 -k
+curl https://localhost:4443/apis/porch.kpt.dev/v1alpha1 -k
```
@@ -476,7 +180,32 @@ curl https://localhost:9443/apis/porch.kpt.dev/v1alpha1 -k
-
+## Add the CLI binary to your PATH
+
+Copy the `.build/porchctl` binary (that was built by the setup script) to somewhere in your $PATH.
+
+
+## Test that everything works as expected
+
+Make sure that the porch server is still running in VS Code and then run the following tests from the project root.
+
+### Run the porch unit tests
+
+```
+make test
+```
+
+### Run the end-to-end tests
+
+To test porch directly via its API:
+```
+E2E=1 go test -v ./test/e2e
+```
+
+To test porch via its CLI:
+```
+E2E=1 go test -v ./test/e2e/cli
+```
# Create Repositories using your local Porch server
@@ -509,4 +238,18 @@ external-blueprints git Package false True https://github.com/n
management git Package false True http://172.18.255.200:3000/nephio/management.git
```
-You now have a locally running Porch (api)server. Happy developing!
\ No newline at end of file
+You now have a locally running Porch (api)server. Happy developing!
+
+
+# Restart from scratch
+
+Sometimes the development cluster gets cluttered and you may experience weird behaviour from porch.
+In this case you might want to restart with a clean slate, by deleting the development cluster with the following command:
+```
+kind delete cluster --name porch-test
+```
+
+and running the [setup script](bin/setup.sh) again:
+```
+docs/tutorials/porch-development-environment/bin/setup.sh
+```
\ No newline at end of file
diff --git a/docs/tutorials/porch-development-environment/bin/cleardown.sh b/docs/tutorials/porch-development-environment/bin/cleardown.sh
deleted file mode 100755
index 14138915..00000000
--- a/docs/tutorials/porch-development-environment/bin/cleardown.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#! /bin/bash
-
-# Copyright 2024 The kpt and Nephio Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-kind delete cluster --name management
-kind delete cluster --name edge1
-
-rm ~/.kube/kind-management-config
-rm ~/.kube/kind-edge1-config
\ No newline at end of file
diff --git a/docs/tutorials/porch-development-environment/bin/kind_porch_test_cluster.yaml b/docs/tutorials/porch-development-environment/bin/kind_porch_test_cluster.yaml
new file mode 100644
index 00000000..ed96f6ee
--- /dev/null
+++ b/docs/tutorials/porch-development-environment/bin/kind_porch_test_cluster.yaml
@@ -0,0 +1,15 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+name: management
+networking:
+ apiServerAddress: 127.0.0.1
+ apiServerPort: 31000
+ podSubnet: 10.97.0.0/16
+ serviceSubnet: 10.197.0.0/16
+nodes:
+- role: control-plane
+ extraPortMappings:
+ - containerPort: 30000 # Gitea NodePort
+ hostPort: 3000
+ - containerPort: 30001 # function-runner NodePort
+ hostPort: 30001
diff --git a/docs/tutorials/porch-development-environment/bin/setup.sh b/docs/tutorials/porch-development-environment/bin/setup.sh
index 69afb5ac..7a4bd9b0 100755
--- a/docs/tutorials/porch-development-environment/bin/setup.sh
+++ b/docs/tutorials/porch-development-environment/bin/setup.sh
@@ -14,10 +14,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-porch_cluster_name=porch-test
+porch_cluster_name=${PORCH_TEST_CLUSTER:-porch-test}
git_repo_name="$porch_cluster_name"
-gitea_ip=172.18.255.200 # should be from the address range specified here: https://github.com/nephio-project/porch/blob/main/docs/tutorials/starting-with-porch/metallb-conf.yaml
+gitea_ip=172.18.255.200 # should be from the address range specified here: docs/tutorials/starting-with-porch/metallb-conf.yaml
+function_runner_ip=172.18.255.201
+
self_dir="$(dirname "$(readlink -f "$0")")"
+git_root="$(readlink -f "${self_dir}/../../../..")"
+cd "${git_root}"
function h1() {
echo
@@ -40,8 +44,9 @@ fi
##############################################
h1 "Install kind cluster: $porch_cluster_name"
if ! kind get clusters | grep -q "^$porch_cluster_name\$" ; then
- curl -s https://raw.githubusercontent.com/nephio-project/porch/main/docs/tutorials/starting-with-porch/kind_management_cluster.yaml | \
- kind create cluster --config=- --name "$porch_cluster_name" || true
+ kind create cluster \
+ --config="${git_root}/docs/tutorials/porch-development-environment/bin/kind_porch_test_cluster.yaml" \
+ --name "$porch_cluster_name" || true
mkdir -p ~/.kube
kind get kubeconfig --name="$porch_cluster_name" > ~/.kube/"kind-$porch_cluster_name"
@@ -58,49 +63,54 @@ echo "Waiting for controller to become ready..."
kubectl wait --namespace metallb-system deploy controller \
--for=condition=available \
--timeout=90s
-kubectl apply -f https://raw.githubusercontent.com/nephio-project/porch/main/docs/tutorials/starting-with-porch/metallb-conf.yaml
-
-############################################
-h1 Prepare tmp dir
-TMP_DIR=$(mktemp -d)
-echo "$TMP_DIR"
+kubectl apply -f "${git_root}/docs/tutorials/starting-with-porch/metallb-conf.yaml"
############################################
h1 Install Gitea
-mkdir "$TMP_DIR/kpt_packages"
-cd "$TMP_DIR/kpt_packages"
-kpt pkg get https://github.com/nephio-project/catalog/tree/main/distros/sandbox/gitea
+mkdir -p "${git_root}/.build"
+cd "${git_root}/.build"
+if [ -d gitea ]; then
+ kpt pkg update gitea
+else
+ kpt pkg get https://github.com/nephio-project/catalog/tree/main/distros/sandbox/gitea
+fi
+
kpt fn eval gitea \
--image gcr.io/kpt-fn/set-annotations:v0.1.4 \
--match-kind Service \
--match-name gitea \
--match-namespace gitea \
-- "metallb.universe.tf/loadBalancerIPs=${gitea_ip}"
-curl -o gitea/cluster-config.yaml https://raw.githubusercontent.com/nephio-project/porch/main/docs/tutorials/starting-with-porch/kind_management_cluster.yaml
-echo "metadata: { name: "porch-test" }" >> gitea/cluster-config.yaml
-kpt fn eval gitea \
- --image gcr.io/kpt-fn/set-annotations:v0.1.4 \
- --match-kind Cluster \
- --match-api-version kind.x-k8s.io/v1alpha4 \
- -- "config.kubernetes.io/local-config=true"
+
+cp -f "${git_root}/docs/tutorials/porch-development-environment/bin/kind_porch_test_cluster.yaml" gitea/cluster-config.yaml
+# turn kind's cluster-config into a valid KRM
+cat >> gitea/cluster-config.yaml </dev/null; then
+ echo "Add main branch to git repo:"
git switch -c main
touch README.md
git add README.md
@@ -110,11 +120,101 @@ if ! git rev-parse -q --verify refs/remotes/origin/main >/dev/null; then
else
echo "main branch already exists in git repo."
fi
+cd "${git_root}"
+rm -fr "$TMP_DIR"
+
+############################################
+h1 Generate certs and keys
+cd "${git_root}"
+deployments/local/makekeys.sh
+
+############################################
+h1 Load container images into kind cluster
+cd "${git_root}"
+export IMAGE_TAG=v2.0.0
+export KIND_CONTEXT_NAME="$porch_cluster_name"
+if ! docker exec -it "$porch_cluster_name-control-plane" crictl images | grep -q docker.io/nephio/test-git-server ; then
+ make build-images
+ kind load docker-image docker.io/nephio/porch-controllers:${IMAGE_TAG} -n ${KIND_CONTEXT_NAME}
+ kind load docker-image docker.io/nephio/porch-function-runner:${IMAGE_TAG} -n ${KIND_CONTEXT_NAME}
+ kind load docker-image docker.io/nephio/porch-wrapper-server:${IMAGE_TAG} -n ${KIND_CONTEXT_NAME}
+ kind load docker-image docker.io/nephio/test-git-server:${IMAGE_TAG} -n ${KIND_CONTEXT_NAME}
+else
+ echo "Images already loaded into kind cluster."
+fi
############################################
-h1 "Clean up"
-cd "$self_dir"
-rm -fr "$TMP_DIR"
+h1 Install all porch components, except porch-server
+cd "${git_root}"
+make deployment-config-no-sa
+cd .build/deploy-no-sa
+# expose function-runner to local processes
+kpt fn eval \
+ --image gcr.io/kpt-fn/starlark:v0.5.0 \
+ --match-kind Service \
+ --match-name function-runner \
+ --match-namespace porch-system \
+ -- "ip=${function_runner_ip}" 'source=
+ip = ctx.resource_list["functionConfig"]["data"]["ip"]
+for resource in ctx.resource_list["items"]:
+ resource["metadata"].setdefault("annotations", {})["metallb.universe.tf/loadBalancerIPs"] = ip
+ resource["spec"]["type"] = "LoadBalancer"
+ resource["spec"]["ports"][0]["nodePort"] = 30001'
+# "remove" porch-server from package
+kpt fn eval \
+ --image gcr.io/kpt-fn/starlark:v0.5.0 \
+ --match-kind Deployment \
+ --match-name porch-server \
+ --match-namespace porch-system \
+ -- 'source=ctx.resource_list["items"] = []'
+# make the api service point to the local porch-server
+if [ "$(uname)" = "Darwin" ]
+then
+ # MAC
+ kpt fn eval \
+ --image gcr.io/kpt-fn/starlark:v0.5.0 \
+ --match-kind Service \
+ --match-name api \
+ --match-namespace porch-system \
+ -- 'source=
+for resource in ctx.resource_list["items"]:
+ resource["spec"] = {
+ "type": "ExternalName",
+ "externalName": "host.docker.internal"
+ }
+'
+else
+ # Linux
+ docker_bridge_ip="$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}')"
+ kpt fn eval \
+ --image upsert-resource:v0.2.0 \
+ --fn-config "${git_root}/deployments/local/porch-api-endpoints.yaml"
+ kpt fn eval \
+ --image gcr.io/kpt-fn/search-replace:v0.2.0 \
+ --match-kind Endpoints \
+ --match-name api \
+ --match-namespace porch-system \
+ -- 'by-path=subsets[0].addresses[0].ip' "put-value=$docker_bridge_ip"
+ kpt fn eval \
+ --image gcr.io/kpt-fn/starlark:v0.5.0 \
+ --match-kind Service \
+ --match-name api \
+ --match-namespace porch-system \
+ -- 'source=
+for resource in ctx.resource_list["items"]:
+ resource["spec"].pop("selector")'
+fi
+kpt fn render
+kpt live init || true
+kpt live apply --inventory-policy=adopt
+
+############################################
+h1 "Build the porch CLI (.build/porchctl)"
+cd "${git_root}"
+make porchctl
+
+
+############################################
echo
echo Done.
diff --git a/porch.code-workspace b/porch.code-workspace
index f90fdd25..d50229ad 100644
--- a/porch.code-workspace
+++ b/porch.code-workspace
@@ -11,17 +11,9 @@
"name": "porch",
"path": "."
},
- {
- "name": "kpt",
- "path": ".."
- },
{
"name": "controllers",
"path": "controllers"
- },
- {
- "name": "rollouts",
- "path": "../rollouts"
}
]
}