Merge pull request #1681 from PrimalPimmy/syscall
fix(seccomp): default seccomp to false and ARM-related changes
rksharma95 authored Mar 15, 2024
2 parents 5408a4e + 9d8a946 commit 409cc15
Showing 12 changed files with 65 additions and 30 deletions.
27 changes: 25 additions & 2 deletions .github/workflows/ci-test-controllers.yml
@@ -80,8 +80,31 @@ jobs:
kubectl apply -f -
fi
kubectl wait -n kubearmor --timeout=5m --for=jsonpath='{.status.phase}'=Running kubearmorconfigs/kubearmorconfig-test
kubectl wait --timeout=7m --for=condition=ready pod -l kubearmor-app,kubearmor-app!=kubearmor-snitch -n kubearmor
kubectl get pods -A
timeout 7m bash -c -- '
  while true; do
    all_running=true
    echo "Checking pod status..."
    for pod_status in $(kubectl get pod -n kubearmor -l kubearmor-app,kubearmor-app!=kubearmor-snitch --output=jsonpath="{.items[*].status.phase}" 2>/dev/null); do
      if [ "$pod_status" != "Running" ]; then
        all_running=false
        echo "Waiting for pods to be Running..."
        break
      fi
    done
    if $all_running; then
      echo "All pods are Running."
      break
    fi
    if kubectl get pod -n kubearmor -l kubearmor-app,kubearmor-app!=kubearmor-snitch | grep CrashLoopBackOff; then
      echo "Error: Pod in CrashLoopBackOff state"
      exit 1
    fi
    sleep 1
  done
'
- name: Test KubeArmor using Ginkgo
run: |
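For context, the new wait step polls pod phases itself and fails fast on CrashLoopBackOff instead of relying on a single kubectl wait condition. Below is a rough client-go sketch of the same polling logic, for illustration only: the namespace, label selector, and 7-minute timeout come from the workflow, while the package and function names are assumptions, not part of this change.

package ciwait

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// waitForKubeArmorPods mirrors the workflow loop: keep polling until every
// matching pod reports Running, and give up early if any container is in
// CrashLoopBackOff or the 7-minute budget is exhausted.
func waitForKubeArmorPods(ctx context.Context, cs kubernetes.Interface) error {
	ctx, cancel := context.WithTimeout(ctx, 7*time.Minute)
	defer cancel()
	selector := "kubearmor-app,kubearmor-app!=kubearmor-snitch"
	for {
		pods, err := cs.CoreV1().Pods("kubearmor").List(ctx, metav1.ListOptions{LabelSelector: selector})
		if err != nil {
			return err
		}
		allRunning := true
		for _, p := range pods.Items {
			for _, st := range p.Status.ContainerStatuses {
				if st.State.Waiting != nil && st.State.Waiting.Reason == "CrashLoopBackOff" {
					return fmt.Errorf("pod %s is in CrashLoopBackOff", p.Name)
				}
			}
			if p.Status.Phase != corev1.PodRunning {
				allRunning = false
			}
		}
		if allRunning {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second):
		}
	}
}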
@@ -129,7 +129,6 @@ spec:
type: string
type: object
seccompEnabled:
default: true
type: boolean
type: object
status:
1 change: 0 additions & 1 deletion deployments/operator/operator.yaml
@@ -127,7 +127,6 @@ spec:
type: string
type: object
seccompEnabled:
default: true
type: boolean
type: object
status:
@@ -50,7 +50,6 @@ type KubeArmorConfigSpec struct {
// +kubebuilder:validation:optional
EnableStdOutMsgs bool `json:"enableStdOutMsgs,omitempty"`
// +kubebuilder:validation:Optional
// +kubebuilder:default=true
SeccompEnabled bool `json:"seccompEnabled,omitempty"`
}

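Dropping the +kubebuilder:default=true marker, together with omitempty, means a KubeArmorConfig that never mentions seccompEnabled now resolves to Go's zero value for bool. A minimal sketch of that behaviour, using a local stand-in struct rather than the real API package:

package main

import (
	"encoding/json"
	"fmt"
)

// configSpec is a local stand-in for the seccomp-related part of
// KubeArmorConfigSpec, for illustration only.
type configSpec struct {
	SeccompEnabled bool `json:"seccompEnabled,omitempty"`
}

func main() {
	var spec configSpec
	// A manifest that omits seccompEnabled leaves the field at its zero value,
	// so seccomp is now opt-in rather than on by default.
	_ = json.Unmarshal([]byte(`{}`), &spec)
	fmt.Println(spec.SeccompEnabled) // false
}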
2 changes: 1 addition & 1 deletion pkg/KubeArmorOperator/common/defaults.go
@@ -113,7 +113,7 @@ var ConfigMapData = map[string]string{
ConfigDefaultPostureLogs: "true",
}

var ConfigDefaultSeccompEnabled = "true"
var ConfigDefaultSeccompEnabled = "false"

var KubearmorRelayEnvMap = map[string]string{
EnableStdOutAlerts: "false",
@@ -129,7 +129,6 @@ spec:
type: string
type: object
seccompEnabled:
default: true
type: boolean
type: object
status:
2 changes: 1 addition & 1 deletion pkg/KubeArmorOperator/config/samples/kubearmor-test.yaml
@@ -14,7 +14,7 @@ spec:
defaultFilePosture: block
defaultNetworkPosture: block
defaultVisibility: process,file,network,capabilities
seccompEnabled: true
seccompEnabled: false
kubearmorImage:
image: kubearmor/kubearmor:latest
imagePullPolicy: Never
2 changes: 1 addition & 1 deletion pkg/KubeArmorOperator/config/samples/sample-config.yml
@@ -17,7 +17,7 @@ spec:
enableStdOutLogs: false
enableStdOutAlerts: false
enableStdOutMsgs: false
seccompEnabled: true
seccompEnabled: false
kubearmorImage:
image: kubearmor/kubearmor:stable
imagePullPolicy: Always
38 changes: 20 additions & 18 deletions pkg/KubeArmorOperator/internal/controller/cluster.go
@@ -449,26 +449,28 @@ func (clusterWatcher *ClusterWatcher) UpdateKubearmorSeccomp(cfg *opv1.KubeArmor
res = err
} else {
for _, ds := range dsList.Items {
if cfg.Spec.SeccompEnabled && ds.Spec.Template.Spec.Containers[0].SecurityContext.SeccompProfile == nil {
ds.Spec.Template.Spec.Containers[0].SecurityContext.SeccompProfile = &corev1.SeccompProfile{
Type: corev1.SeccompProfileTypeLocalhost,
LocalhostProfile: &common.SeccompProfile,
}
ds.Spec.Template.Spec.InitContainers[0].SecurityContext.SeccompProfile = &corev1.SeccompProfile{
Type: corev1.SeccompProfileTypeLocalhost,
LocalhostProfile: &common.SeccompInitProfile,
if ds.Spec.Template.Labels[common.SeccompLabel] == "yes" {
if cfg.Spec.SeccompEnabled && ds.Spec.Template.Spec.Containers[0].SecurityContext.SeccompProfile == nil {
ds.Spec.Template.Spec.Containers[0].SecurityContext.SeccompProfile = &corev1.SeccompProfile{
Type: corev1.SeccompProfileTypeLocalhost,
LocalhostProfile: &common.SeccompProfile,
}
ds.Spec.Template.Spec.InitContainers[0].SecurityContext.SeccompProfile = &corev1.SeccompProfile{
Type: corev1.SeccompProfileTypeLocalhost,
LocalhostProfile: &common.SeccompInitProfile,
}
} else if !cfg.Spec.SeccompEnabled && ds.Spec.Template.Spec.Containers[0].SecurityContext.SeccompProfile != nil {
ds.Spec.Template.Spec.Containers[0].SecurityContext.SeccompProfile = nil
ds.Spec.Template.Spec.InitContainers[0].SecurityContext.SeccompProfile = nil
}
} else if !cfg.Spec.SeccompEnabled && ds.Spec.Template.Spec.Containers[0].SecurityContext.SeccompProfile != nil {
ds.Spec.Template.Spec.Containers[0].SecurityContext.SeccompProfile = nil
ds.Spec.Template.Spec.InitContainers[0].SecurityContext.SeccompProfile = nil
}

_, err = clusterWatcher.Client.AppsV1().DaemonSets(common.Namespace).Update(context.Background(), &ds, v1.UpdateOptions{})
if err != nil {
clusterWatcher.Log.Warnf("Cannot update daemonset=%s error=%s", ds.Name, err.Error())
res = err
} else {
clusterWatcher.Log.Infof("Updated daemonset=%s", ds.Name)
_, err = clusterWatcher.Client.AppsV1().DaemonSets(common.Namespace).Update(context.Background(), &ds, v1.UpdateOptions{})
if err != nil {
clusterWatcher.Log.Warnf("Cannot update daemonset=%s error=%s", ds.Name, err.Error())
res = err
} else {
clusterWatcher.Log.Infof("Updated daemonset=%s", ds.Name)
}
}
}
}
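In short, the operator now only manages the Localhost seccomp profiles on DaemonSets whose pod template carries the seccomp label written during node detection, and leaves every other DaemonSet untouched. A condensed sketch of that decision follows; the label key is a placeholder, and the real constants live in the operator's common package.

package seccompsketch

import corev1 "k8s.io/api/core/v1"

// desiredSeccomp condenses the new branch in UpdateKubearmorSeccomp: it reports
// whether the DaemonSet should be touched at all and, if so, which profile to set.
func desiredSeccomp(seccompEnabled bool, templateLabels map[string]string, profileName string) (*corev1.SeccompProfile, bool) {
	// Nodes without KubeArmor's seccomp profiles never get the label set to "yes",
	// so their DaemonSets are left exactly as they are.
	if templateLabels["kubearmor.io/seccomp"] != "yes" { // placeholder key
		return nil, false
	}
	if !seccompEnabled {
		// seccompEnabled now defaults to false: clear any previously attached profile.
		return nil, true
	}
	return &corev1.SeccompProfile{
		Type:             corev1.SeccompProfileTypeLocalhost,
		LocalhostProfile: &profileName,
	}, true
}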
1 change: 1 addition & 0 deletions pkg/KubeArmorOperator/internal/controller/resources.go
@@ -52,6 +52,7 @@ func generateDaemonset(name, enforcer, runtime, socket, btfPresent, apparmorfs,
common.SocketLabel: socket,
common.OsLabel: "linux",
common.BTFLabel: btfPresent,
common.SeccompLabel: seccompPresent,
}
daemonset.Spec.Template.Spec.NodeSelector = common.CopyStrMap(labels)
labels["kubearmor-app"] = "kubearmor"
2 changes: 2 additions & 0 deletions pkg/KubeArmorOperator/seccomp/defaults.go
@@ -7,6 +7,8 @@ var (
Archx86_64 = "SCMP_ARCH_X86_64"
Archx86 = "SCMP_ARCH_X86"
Archx32 = "SCMP_ARCH_X32"
ArchARM64 = "SCMP_ARCH_AARCH64"
ArchARM = "SCMP_ARCH_ARM"

ActErrno = "SCMP_ACT_ERRNO"
ActAllow = "SCMP_ACT_ALLOW"
17 changes: 14 additions & 3 deletions pkg/KubeArmorOperator/seccomp/seccomp.go
@@ -18,7 +18,7 @@ func LoadSeccompInNode() {

seccompProfile := Seccomp{
DefaultAction: ActErrno,
Architectures: []string{Archx32, Archx86_64, Archx86},
Architectures: []string{Archx32, Archx86_64, Archx86, ArchARM, ArchARM64},
Syscalls: []*Syscalls{{
Names: []string{
"getsockopt",
@@ -126,7 +126,7 @@ func LoadSeccompInNode() {

seccompInitProfile := Seccomp{
DefaultAction: ActErrno,
Architectures: []string{Archx32, Archx86_64, Archx86},
Architectures: []string{Archx32, Archx86_64, Archx86, ArchARM, ArchARM64},
Syscalls: []*Syscalls{{
Names: []string{
"dup2",
@@ -138,9 +138,15 @@
"memfd_create",
"capset",
"read",
"dup3",
"getpgid",
"getrandom",
"close",
"fchown",
"mremap",
"unlinkat",
"readlink",
"sigaltstack",
"getegid",
"arch_prctl",
"lseek",
@@ -162,6 +168,7 @@
"capget",
"sysinfo",
"connect",
"pipe2",
"openat",
"access",
"set_robust_list",
@@ -216,8 +223,12 @@
}

func CheckIfSeccompProfilePresent() string {
if _, err := os.Stat(filepath.Clean(seccompPath)); err == nil {
_, err1 := os.Stat(filepath.Clean(seccompPath + "/kubearmor-init-seccomp.json"))
_, err2 := os.Stat(filepath.Clean(seccompPath + "/kubearmor-seccomp.json"))

if err1 == nil && err2 == nil {
return "yes"
}

return "no"
}
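With SCMP_ARCH_ARM and SCMP_ARCH_AARCH64 added to both profiles, the generated seccomp JSON now also covers ARM nodes. A minimal sketch of the profile shape LoadSeccompInNode writes out: the struct and field names below are simplified stand-ins following the OCI-style seccomp layout, and the syscall list is heavily truncated.

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-ins for the operator's Seccomp/Syscalls types.
type syscalls struct {
	Names  []string `json:"names"`
	Action string   `json:"action"`
}

type seccomp struct {
	DefaultAction string      `json:"defaultAction"`
	Architectures []string    `json:"architectures"`
	Syscalls      []*syscalls `json:"syscalls"`
}

func main() {
	profile := seccomp{
		DefaultAction: "SCMP_ACT_ERRNO",
		Architectures: []string{
			"SCMP_ARCH_X32", "SCMP_ARCH_X86_64", "SCMP_ARCH_X86",
			"SCMP_ARCH_ARM", "SCMP_ARCH_AARCH64", // the newly added ARM entries
		},
		Syscalls: []*syscalls{{
			Names:  []string{"openat", "read", "close"}, // truncated example list
			Action: "SCMP_ACT_ALLOW",
		}},
	}
	out, _ := json.MarshalIndent(profile, "", "  ")
	// Roughly the document that CheckIfSeccompProfilePresent later expects to find
	// as kubearmor-seccomp.json under the node's seccomp directory (path assumed).
	fmt.Println(string(out))
}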
