Skip to content

Commit

Permalink
change KubeArmor default namespace to "kubearmor"
Browse files Browse the repository at this point in the history
Signed-off-by: Ankur Kothiwal <[email protected]>
  • Loading branch information
Ankurk99 committed Sep 22, 2023
1 parent e256849 commit 0003e65
Show file tree
Hide file tree
Showing 24 changed files with 77 additions and 77 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/ci-latest-release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -79,9 +79,9 @@ jobs:
--values ./KubeArmor/build/kubearmor-helm-test-values.yaml \
--set kubearmor.image.tag=${{ steps.vars.outputs.tag }} \
--set kubearmorInit.image.tag=${{ steps.vars.outputs.tag }} \
-n kube-system;
-n kubearmor --create-namespace;
kubectl wait --for=condition=ready --timeout=5m -n kube-system pod -l kubearmor-app
kubectl wait --for=condition=ready --timeout=5m -n kubearmor pod -l kubearmor-app
kubectl get pods -A
- name: Test KubeArmor using Ginkgo
Expand Down
6 changes: 3 additions & 3 deletions .github/workflows/ci-test-controllers.yml
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,8 @@ jobs:
--set kubearmorInit.imagePullPolicy=Always \
--set kubearmor.imagePullPolicy=Always \
--set kubearmor.image.tag=latest \
-n kube-system;
kubectl wait --for=condition=ready --timeout=5m -n kube-system pod -l kubearmor-app
-n kubearmor --create-namespace;
kubectl wait --for=condition=ready --timeout=5m -n kubearmor pod -l kubearmor-app
kubectl get pods -A
- name: Test KubeArmor using Ginkgo
Expand All @@ -48,7 +48,7 @@ jobs:
- name: Get karmor sysdump
if: ${{ failure() }}
run: |
kubectl describe pod -n kube-system -l kubearmor-app=kubearmor
kubectl describe pod -n kubearmor -l kubearmor-app=kubearmor
curl -sfL http://get.kubearmor.io/ | sudo sh -s -- -b /usr/local/bin
mkdir -p /tmp/kubearmor/ && cd /tmp/kubearmor && karmor sysdump
Expand Down
10 changes: 5 additions & 5 deletions .github/workflows/ci-test-ginkgo.yml
Original file line number Diff line number Diff line change
Expand Up @@ -75,12 +75,12 @@ jobs:
sudo podman pull docker-daemon:kubearmor/kubearmor-snitch:latest
fi
fi
helm upgrade --install kubearmor-operator ./deployments/helm/KubeArmorOperator -n kube-system
kubectl wait --for=condition=ready --timeout=5m -n kube-system pod -l kubearmor-app=kubearmor-operator
helm upgrade --install kubearmor-operator ./deployments/helm/KubeArmorOperator -n kubearmor --create-namespace
kubectl wait --for=condition=ready --timeout=5m -n kubearmor pod -l kubearmor-app=kubearmor-operator
kubectl get pods -A
kubectl apply -f pkg/KubeArmorOperator/config/samples/kubearmor-test.yaml
kubectl wait -n kube-system --timeout=5m --for=jsonpath='{.status.phase}'=Running kubearmorconfigs/kubearmorconfig-test
kubectl wait --timeout=5m --for=condition=ready pod -l kubearmor-app,kubearmor-app!=kubearmor-snitch -n kube-system
kubectl wait -n kubearmor --timeout=5m --for=jsonpath='{.status.phase}'=Running kubearmorconfigs/kubearmorconfig-test
kubectl wait --timeout=5m --for=condition=ready pod -l kubearmor-app,kubearmor-app!=kubearmor-snitch -n kubearmor
kubectl get pods -A
- name: Test KubeArmor using Ginkgo
Expand All @@ -93,7 +93,7 @@ jobs:
- name: Get karmor sysdump
if: ${{ failure() }}
run: |
kubectl describe pod -n kube-system -l kubearmor-app=kubearmor
kubectl describe pod -n kubearmor -l kubearmor-app=kubearmor
curl -sfL http://get.kubearmor.io/ | sudo sh -s -- -b /usr/local/bin
mkdir -p /tmp/kubearmor/ && cd /tmp/kubearmor && karmor sysdump
Expand Down
10 changes: 5 additions & 5 deletions .github/workflows/ci-test-ubi-image.yml
Original file line number Diff line number Diff line change
Expand Up @@ -63,12 +63,12 @@ jobs:
sudo podman pull docker-daemon:kubearmor/kubearmor-ubi:latest
sudo podman pull docker-daemon:kubearmor/kubearmor-operator:latest
sudo podman pull docker-daemon:kubearmor/kubearmor-snitch:latest
helm upgrade --install kubearmor-operator ./deployments/helm/KubeArmorOperator -n kube-system
kubectl wait --for=condition=ready --timeout=5m -n kube-system pod -l kubearmor-app=kubearmor-operator
helm upgrade --install kubearmor-operator ./deployments/helm/KubeArmorOperator -n kubearmor --create-namespace
kubectl wait --for=condition=ready --timeout=5m -n kubearmor pod -l kubearmor-app=kubearmor-operator
kubectl get pods -A
kubectl apply -f pkg/KubeArmorOperator/config/samples/kubearmor-ubi-test.yaml
kubectl wait -n kube-system --timeout=5m --for=jsonpath='{.status.phase}'=Running kubearmorconfigs/kubearmorconfig-test
kubectl wait --timeout=5m --for=condition=ready pod -l kubearmor-app,kubearmor-app!=kubearmor-snitch -n kube-system
kubectl wait -n kubearmor --timeout=5m --for=jsonpath='{.status.phase}'=Running kubearmorconfigs/kubearmorconfig-test
kubectl wait --timeout=5m --for=condition=ready pod -l kubearmor-app,kubearmor-app!=kubearmor-snitch -n kubearmor
kubectl get pods -A
- name: Test KubeArmor using Ginkgo
Expand All @@ -81,7 +81,7 @@ jobs:
- name: Get karmor sysdump
if: ${{ failure() }}
run: |
kubectl describe pod -n kube-system -l kubearmor-app=kubearmor
kubectl describe pod -n kubearmor -l kubearmor-app=kubearmor
curl -sfL http://get.kubearmor.io/ | sudo sh -s -- -b /usr/local/bin
mkdir -p /tmp/kubearmor/ && cd /tmp/kubearmor && karmor sysdump
Expand Down
4 changes: 2 additions & 2 deletions KubeArmor/core/kubeUpdate.go
Original file line number Diff line number Diff line change
Expand Up @@ -2326,8 +2326,8 @@ func (dm *KubeArmorDaemon) GetConfigMapNS() string {

if envNamespace == "" {
// kubearmor is running as system process,
// return "kube-system" for testing purpose in dev env
return "kube-system"
// return "kubearmor" for testing purpose in dev env
return "kubearmor"
}
return envNamespace
}
12 changes: 6 additions & 6 deletions deployments/controller/cert-manager.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4935,7 +4935,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cert-manager-cainjector:leaderelection
namespace: kube-system
namespace: kubearmor
labels:
app: cainjector
app.kubernetes.io/name: cainjector
Expand All @@ -4961,7 +4961,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cert-manager:leaderelection
namespace: kube-system
namespace: kubearmor
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
Expand Down Expand Up @@ -5007,7 +5007,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cert-manager-cainjector:leaderelection
namespace: kube-system
namespace: kubearmor
labels:
app: cainjector
app.kubernetes.io/name: cainjector
Expand All @@ -5030,7 +5030,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cert-manager:leaderelection
namespace: kube-system
namespace: kubearmor
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
Expand Down Expand Up @@ -5154,7 +5154,7 @@ spec:
imagePullPolicy: IfNotPresent
args:
- --v=2
- --leader-election-namespace=kube-system
- --leader-election-namespace=kubearmor
env:
- name: POD_NAMESPACE
valueFrom:
Expand Down Expand Up @@ -5208,7 +5208,7 @@ spec:
args:
- --v=2
- --cluster-resource-namespace=$(POD_NAMESPACE)
- --leader-election-namespace=kube-system
- --leader-election-namespace=kubearmor
ports:
- containerPort: 9402
name: http-metrics
Expand Down
2 changes: 1 addition & 1 deletion deployments/controller/install-kubearmor-controller.sh
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,4 @@ kubectl apply -f deployments/controller/cert-manager.yaml
kubectl wait pods --for=condition=ready -n cert-manager -l app.kubernetes.io/instance=cert-manager
cmctl check api --wait 300s
kubectl apply -f deployments/controller/kubearmor-controller-mutating-webhook-config.yaml
kubectl wait pods --for=condition=ready -n kube-system -l kubearmor-app=kubearmor-controller
kubectl wait pods --for=condition=ready -n kubearmor -l kubearmor-app=kubearmor-controller
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,11 @@ apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: kubearmor-controller-serving-cert
namespace: kube-system
namespace: kubearmor
spec:
dnsNames:
- kubearmor-controller-webhook-service.kube-system.svc
- kubearmor-controller-webhook-service.kube-system.svc.cluster.local
- kubearmor-controller-webhook-service.kubearmor.svc
- kubearmor-controller-webhook-service.kubearmor.svc.cluster.local
issuerRef:
kind: Issuer
name: kubearmor-controller-selfsigned-issuer
Expand All @@ -16,24 +16,24 @@ apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: kubearmor-controller-selfsigned-issuer
namespace: kube-system
namespace: kubearmor
spec:
selfSigned: {}
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
annotations:
cert-manager.io/inject-ca-from: kube-system/kubearmor-controller-serving-cert
cert-manager.io/inject-ca-from: kubearmor/kubearmor-controller-serving-cert
name: kubearmor-controller-mutating-webhook-configuration
namespace: kube-system
namespace: kubearmor
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: kubearmor-controller-webhook-service
namespace: kube-system
namespace: kubearmor
path: /mutate-pods
failurePolicy: Ignore
name: annotation.kubearmor.com
Expand Down
8 changes: 4 additions & 4 deletions deployments/helm/KubeArmor/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,13 @@ Install KubeArmor using Helm chart repo. Also see [values](#Values) for your res
```
helm repo add kubearmor https://kubearmor.github.io/charts
helm repo update kubearmor
helm upgrade --install kubearmor kubearmor/kubearmor -n kube-system
helm upgrade --install kubearmor kubearmor/kubearmor -n kubearmor --create-namespace
```

Install KubeArmor using Helm charts locally (for testing)
```
cd deployments/helm/KubeArmor
helm upgrade --install kubearmor . -n kube-system
helm upgrade --install kubearmor . -n kubearmor --create-namespace
```

## Values
Expand Down Expand Up @@ -92,7 +92,7 @@ Usage of ./kubearmor:

## Verify if all the resources are up and running
```
kubectl get all -n kube-system -l kubearmor-app
kubectl get all -n kubearmor -l kubearmor-app
NAME READY STATUS RESTARTS AGE
pod/kubearmor-controller-7b48cf777f-bn7d8 2/2 Running 0 24s
pod/kubearmor-relay-5656cc5bf7-jl56q 1/1 Running 0 24s
Expand All @@ -116,5 +116,5 @@ replicaset.apps/kubearmor-relay-5656cc5bf7 1 1 1 24
## Remove KubeArmor
Uninstall KubeArmor using helm
```
helm uninstall kubearmor -n kube-system
helm uninstall kubearmor -n kubearmor
```
8 changes: 4 additions & 4 deletions deployments/helm/KubeArmorOperator/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,13 @@ Install KubeArmorOperator using the official `kubearmor` Helm chart repo. Also se
```
helm repo add kubearmor https://kubearmor.github.io/charts
helm repo update kubearmor
helm upgrade --install kubearmor-operator kubearmor/kubearmor-operator -n kube-system
helm upgrade --install kubearmor-operator kubearmor/kubearmor-operator -n kubearmor --create-namespace
```

Install KubeArmorOperator using Helm charts locally (for testing)
```
cd deployments/helm/KubeArmorOperator
helm upgrade --install kubearmor-operator . -n kube-system
helm upgrade --install kubearmor-operator . -n kubearmor --create-namespace
```

## Values
Expand Down Expand Up @@ -76,7 +76,7 @@ If a valid configuration is received, the operator will deploy jobs to your node
Once done, the following resources related to KubeArmor will exist in your cluster:
```
$ kubectl get all -n kube-system -l kubearmor-app
$ kubectl get all -n kubearmor -l kubearmor-app
NAME READY STATUS RESTARTS AGE
pod/kubearmor-operator-66fbff5559-qb7dh 1/1 Running 0 11m
pod/kubearmor-relay-557dfcc57b-c8t55 1/1 Running 0 2m53s
Expand Down Expand Up @@ -108,5 +108,5 @@ job.batch/kubearmor-snitch-lglbd 1/1 3s 11m
## Uninstall The Operator
Uninstalling the Operator will also uninstall KubeArmor from all your nodes. To uninstall, just run:
```bash
helm uninstall kubearmor -n kube-system
helm uninstall kubearmor -n kubearmor
```
2 changes: 1 addition & 1 deletion deployments/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ import (

func main() {
envs := []string{"generic", "docker", "minikube", "microk8s", "k0s", "k3s", "GKE", "EKS", "BottleRocket", "AKS", "OKE"}
nsPtr := flag.String("namespace", "kube-system", "Namespace")
nsPtr := flag.String("namespace", "kubearmor", "Namespace")

flag.Parse()

Expand Down
10 changes: 5 additions & 5 deletions deployments/operator/operator.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: kubearmor-operator
namespace: kube-system
namespace: kubearmor
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
Expand Down Expand Up @@ -379,7 +379,7 @@ roleRef:
subjects:
- kind: ServiceAccount
name: kubearmor-operator
namespace: kube-system
namespace: kubearmor
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
Expand All @@ -392,7 +392,7 @@ roleRef:
subjects:
- kind: ServiceAccount
name: kubearmor-operator
namespace: kube-system
namespace: kubearmor
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
Expand All @@ -405,13 +405,13 @@ roleRef:
subjects:
- kind: ServiceAccount
name: kubearmor-operator
namespace: kube-system
namespace: kubearmor
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubearmor-operator
namespace: kube-system
namespace: kubearmor
spec:
selector:
matchLabels:
Expand Down
6 changes: 3 additions & 3 deletions getting-started/FAQ.md
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ If you don't find an existing dashboard particular to your needs, feel free to c
`karmor logs` internally uses Kubernetes' client's port-forward. Port forward is not meant for long running connection and it times out if left idle. Check out this [StackOverflow answer](https://stackoverflow.com/questions/47484312/kubectl-port-forwarding-timeout-issue) for more info.

If you want to stream logs reliably there are a couple of solutions you can try:
1. Modify the `kubearmor` service in `kube-system` namespace and change the service type to `NodePort`. Then run karmor with:
1. Modify the `kubearmor` service in `kubearmor` namespace and change the service type to `NodePort`. Then run karmor with:
```bash
karmor logs --gRPC=<address of the kubearmor node-port service>
```
Expand Down Expand Up @@ -223,12 +223,12 @@ For more such differences checkout [Enforce Feature Parity Wiki](https://github.
<details><summary><h4>How to enable `KubeArmorHostPolicy` for k8s cluster?</h4></summary>
By default the host policies and visibility is disabled for k8s hosts.

If you use following command, `kubectl logs -n kube-system <KUBEARMOR-POD> | grep "Started to protect"`<br>
If you use following command, `kubectl logs -n kubearmor <KUBEARMOR-POD> | grep "Started to protect"`<br>
you will see, `2023-08-21 12:58:34.641665 INFO Started to protect containers.`<br>
This indicates that only container/pod protection is enabled.<br>
If you have hostpolicy enabled you should see something like this, `2023-08-22 18:07:43.335232 INFO Started to protect a host and containers`<br>

One can enable the host policy by patching the daemonset (`kubectl edit daemonsets.apps -n kube-system kubearmor`):
One can enable the host policy by patching the daemonset (`kubectl edit daemonsets.apps -n kubearmor kubearmor`):
```diff
...
template:
Expand Down
2 changes: 1 addition & 1 deletion pkg/KubeArmorController/config/default/kustomization.yaml
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# Adds namespace to all resources.
namespace: kube-system
namespace: kubearmor

# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
Expand Down
2 changes: 1 addition & 1 deletion pkg/KubeArmorOperator/common/defaults.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ var (
BTFLabel string = "kubearmor.io/btf"
DeleteAction string = "DELETE"
AddAction string = "ADD"
Namespace string = "kube-system"
Namespace string = "kubearmor"
Privileged bool = false
HostPID bool = false
SnitchName string = "kubearmor-snitch"
Expand Down
2 changes: 1 addition & 1 deletion pkg/KubeArmorOperator/config/default/kustomization.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namespace: kube-system
namespace: kubearmor

resources:
- ../crd
Expand Down
6 changes: 3 additions & 3 deletions pkg/KubeArmorOperator/config/rbac/clusterrolebinding.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ roleRef:
subjects:
- kind: ServiceAccount
name: kubearmor-operator
namespace: kube-system
namespace: kubearmor
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
Expand All @@ -22,7 +22,7 @@ roleRef:
subjects:
- kind: ServiceAccount
name: kubearmor-operator
namespace: kube-system
namespace: kubearmor
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
Expand All @@ -35,4 +35,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: kubearmor-operator
namespace: kube-system
namespace: kubearmor
2 changes: 1 addition & 1 deletion pkg/KubeArmorOperator/config/samples/kubearmor-test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ metadata:
app.kubernetes.io/managed-by: kustomize
app.kubernetes.io/created-by: kubearmoroperator
name: kubearmorconfig-test
namespace: kube-system
namespace: kubearmor
spec:
defaultCapabilitiesPosture: block
defaultFilePosture: block
Expand Down
Loading

0 comments on commit 0003e65

Please sign in to comment.