Solutions: System Hardening
Solution Format
Each solution provides:
- Step-by-step commands and YAML manifests
- Verification commands to confirm correctness
- Exam tips for faster execution
Solution 1: RuntimeDefault Seccomp Profile
Difficulty: Easy
Steps
# Step 1: Create the namespace
kubectl create namespace web-apps
# Step 2: Create the pod with RuntimeDefault seccomp
# Save as web-server.yaml
apiVersion: v1
kind: Pod
metadata:
name: web-server
namespace: web-apps
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: nginx
image: nginx:1.27
ports:
- containerPort: 80
# Step 3: Apply the pod
kubectl apply -f web-server.yaml
# Step 4: Verify
kubectl -n web-apps get pod web-server
# NAME READY STATUS RESTARTS AGE
# web-server 1/1 Running 0 10s
Verification
# Verify seccomp is applied (check annotations/security context)
kubectl -n web-apps get pod web-server -o jsonpath='{.spec.securityContext.seccompProfile}' | jq .
# {"type":"RuntimeDefault"}Exam Tip
Setting seccomp at the pod level (spec.securityContext.seccompProfile) applies it to ALL containers in the pod. This is faster than setting it on each container individually.
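If a single container needs a different profile, a container-level securityContext.seccompProfile overrides the pod-level setting for that container only. A quick way to compare both levels (a sketch; with this manifest the per-container field is simply empty):
# Pod-level profile (inherited by every container unless overridden)
kubectl -n web-apps get pod web-server -o jsonpath='{.spec.securityContext.seccompProfile}{"\n"}'
# Per-container overrides, if any
kubectl -n web-apps get pod web-server -o jsonpath='{range .spec.containers[*]}{.name}{": "}{.securityContext.seccompProfile}{"\n"}{end}'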
Solution 2: Drop ALL Capabilities
Difficulty: Easy
Steps
# Save as legacy-app.yaml
apiVersion: v1
kind: Pod
metadata:
name: legacy-app
spec:
containers:
- name: app
image: busybox:1.36
command: ["sleep", "3600"]
securityContext:
capabilities:
drop:
- ALL
# Apply
kubectl apply -f legacy-app.yaml
# Verify pod is running
kubectl get pod legacy-app
Verification
# Check that capabilities are empty
kubectl exec legacy-app -- cat /proc/1/status | grep Cap
# CapPrm: 0000000000000000 (no capabilities)
# CapEff: 0000000000000000 (no capabilities)
# Verify -- trying a privileged operation should fail
kubectl exec legacy-app -- ping -c 1 8.8.8.8
# ping: permission denied (operation not permitted)
# (NET_RAW is needed for ping, and we dropped it)
Exam Tip
busybox with sleep is the simplest container for testing security contexts. It starts quickly and you can exec into it to test restrictions.
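To translate a Cap* bitmask from /proc/&lt;pid&gt;/status back into capability names, libcap's capsh can decode it. capsh is not part of busybox, so run it wherever the libcap tools are installed (for example on the node); a sketch using the NET_BIND_SERVICE bit:
capsh --decode=0000000000000400
# 0x0000000000000400=cap_net_bind_service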
Solution 3: Baseline Pod Security Standard
Difficulty: Easy
Steps
# Step 1: Create namespace
kubectl create namespace production
# Step 2: Label for baseline enforcement + restricted warning
kubectl label namespace production \
pod-security.kubernetes.io/enforce=baseline \
pod-security.kubernetes.io/enforce-version=latest \
pod-security.kubernetes.io/warn=restricted \
pod-security.kubernetes.io/warn-version=latest
# Step 3: Verify labels
kubectl get namespace production --show-labels
Verification
# Test: Try creating a privileged pod (should be REJECTED)
kubectl -n production run test-privileged --image=nginx \
--overrides='{"spec":{"containers":[{"name":"nginx","image":"nginx","securityContext":{"privileged":true}}]}}'
# Error from server (Forbidden): pods "test-privileged" is forbidden:
# violates PodSecurity "baseline:latest": privileged
# Test: Create a normal pod (should succeed with restricted warnings)
kubectl -n production run test-normal --image=nginx
# Warning: would violate PodSecurity "restricted:latest": ...
# pod/test-normal created
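You can also preview how a stricter level would treat the pods already in the namespace without changing anything, by dry-running a label update against the API server:
kubectl label --dry-run=server --overwrite namespace production \
pod-security.kubernetes.io/enforce=restricted
# Warning similar to: existing pods in namespace "production" violate the new PodSecurity enforce level "restricted:latest"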
# Clean up
kubectl -n production delete pod test-normal
Solution 4: Non-Root Execution
Difficulty: Easy
Steps
# Save as nonroot-app.yaml
apiVersion: v1
kind: Pod
metadata:
name: nonroot-app
spec:
securityContext:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
containers:
- name: app
image: python:3.12-slim
command: ["python", "-m", "http.server", "8080"]
ports:
- containerPort: 8080
securityContext:
allowPrivilegeEscalation: false
kubectl apply -f nonroot-app.yaml
Verification
# Check pod is running
kubectl get pod nonroot-app
# Verify UID
kubectl exec nonroot-app -- id
# uid=1000 gid=1000 groups=1000
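A common pitfall: if runAsNonRoot: true is set but no numeric runAsUser is given and the image's default user is root, the kubelet refuses to start the container and the pod sits in CreateContainerConfigError. When debugging that state, describe shows the reason (a sketch of that failure mode; this working pod will not show it):
kubectl describe pod nonroot-app | grep -i -A2 runasnonroot
# Error: container has runAsNonRoot and image will run as root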
# Verify the process is running as UID 1000
kubectl exec nonroot-app -- ps aux
# PID USER TIME COMMAND
# 1 1000 0:00 python -m http.server 8080
Solution 5: Read-Only Root Filesystem
Difficulty: Easy
Steps
# Save as readonly-nginx.yaml
apiVersion: v1
kind: Pod
metadata:
name: readonly-nginx
spec:
containers:
- name: nginx
image: nginx:1.27
ports:
- containerPort: 80
securityContext:
readOnlyRootFilesystem: true
volumeMounts:
- name: cache
mountPath: /var/cache/nginx
- name: run
mountPath: /var/run
- name: tmp
mountPath: /tmp
volumes:
- name: cache
emptyDir: {}
- name: run
emptyDir: {}
- name: tmp
emptyDir: {}
kubectl apply -f readonly-nginx.yaml
Verification
# Check pod is running
kubectl get pod readonly-nginx
# Verify nginx is serving
kubectl exec readonly-nginx -- curl -s localhost:80 | head -5
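If the writable emptyDir mounts are forgotten, nginx typically fails at startup because it cannot create its temp directories, and the pod logs point straight at the missing path (a sketch of that failure mode, not something you should see with this manifest):
kubectl logs readonly-nginx
# nginx: [emerg] mkdir() "/var/cache/nginx/client_temp" failed (30: Read-only file system)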
# Verify writes to /etc fail
kubectl exec readonly-nginx -- touch /etc/test-file
# touch: cannot touch '/etc/test-file': Read-only file system
# Verify writes to /tmp succeed (writable volume)
kubectl exec readonly-nginx -- touch /tmp/test-file
# (no error)
Solution 6: Custom Seccomp Profile (Block Networking)
Difficulty: Medium
Steps
# Step 1: Access the Kind node
docker exec -it cks-lab-worker bash
# Step 2: Create the seccomp profile directory
mkdir -p /var/lib/kubelet/seccomp/profiles
# Step 3: Create the profile
cat > /var/lib/kubelet/seccomp/profiles/block-networking.json << 'EOF'
{
"defaultAction": "SCMP_ACT_ALLOW",
"syscalls": [
{
"names": [
"socket",
"connect",
"bind",
"listen",
"accept",
"accept4",
"sendto",
"recvfrom"
],
"action": "SCMP_ACT_ERRNO"
}
]
}
EOF
# Step 4: Verify the file exists
cat /var/lib/kubelet/seccomp/profiles/block-networking.json
# Step 5: Exit the node
exit
# Step 6: Create the pod - save as no-net-pod.yaml
apiVersion: v1
kind: Pod
metadata:
name: no-net-pod
spec:
nodeName: cks-lab-worker
containers:
- name: app
image: busybox:1.36
command: ["sleep", "3600"]
securityContext:
seccompProfile:
type: Localhost
localhostProfile: profiles/block-networking.json
kubectl apply -f no-net-pod.yaml
Verification
# Check pod is running
kubectl get pod no-net-pod
# Test that network is blocked
kubectl exec no-net-pod -- wget -qO- -T 3 http://kubernetes.default.svc
# wget: can't connect to remote host (or similar error)
# Test that file operations still work
kubectl exec no-net-pod -- ls /
kubectl exec no-net-pod -- cat /etc/hostname
Exam Tip
For Kind clusters, use docker exec -it <node-name> bash to access nodes. For kubeadm clusters in the exam, use ssh <node>.
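localhostProfile paths are resolved relative to the kubelet's seccomp root, /var/lib/kubelet/seccomp. If the pod hangs in CreateContainerError, first confirm the file sits where the kubelet will look (node name as used in this lab):
docker exec cks-lab-worker ls -l /var/lib/kubelet/seccomp/profiles/block-networking.json
kubectl describe pod no-net-pod | grep -i seccomp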
Solution 7: Deployment Meeting Restricted PSS
Difficulty: Medium
Steps
# Step 1: Create namespace with restricted enforcement
kubectl create namespace restricted-ns
kubectl label namespace restricted-ns \
pod-security.kubernetes.io/enforce=restricted \
pod-security.kubernetes.io/enforce-version=latest
# Step 2: Create the deployment - save as secure-api.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: secure-api
namespace: restricted-ns
spec:
replicas: 2
selector:
matchLabels:
app: secure-api
template:
metadata:
labels:
app: secure-api
spec:
securityContext:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
fsGroup: 1000
seccompProfile:
type: RuntimeDefault
containers:
- name: api
image: python:3.12-slim
command: ["python", "-m", "http.server", "8080"]
ports:
- containerPort: 8080
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
volumeMounts:
- name: tmp
mountPath: /tmp
volumes:
- name: tmp
emptyDir: {}
kubectl apply -f secure-api.yaml
Verification
# Verify all pods are running
kubectl -n restricted-ns get pods
# NAME READY STATUS RESTARTS AGE
# secure-api-xxxx-xxxxx 1/1 Running 0 10s
# secure-api-xxxx-xxxxx 1/1 Running 0 10s
# Verify security context
kubectl -n restricted-ns get pod -l app=secure-api -o jsonpath='{.items[0].spec.securityContext}' | jq .
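To spot-check the running replicas directly, kubectl exec also accepts the Deployment name and picks one of its pods:
kubectl -n restricted-ns exec deploy/secure-api -- id
# uid=1000 gid=1000 groups=1000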
# Verify it meets restricted standard by dry-run testing
kubectl -n restricted-ns get deploy secure-api -o yaml | kubectl apply --dry-run=server -f -
Solution 8: AppArmor Profile Application
Difficulty: Medium
Steps
# Step 1: Access the Kind node
docker exec -it cks-lab-worker bash
# Step 2: Create the AppArmor profile
cat > /etc/apparmor.d/k8s-deny-write << 'PROFILE'
#include <tunables/global>
profile k8s-deny-write flags=(attach_disconnected) {
#include <abstractions/base>
file,
network,
deny /** w,
}
PROFILE
# Step 3: Load the profile
apparmor_parser /etc/apparmor.d/k8s-deny-write
# Step 4: Verify it's loaded
aa-status | grep k8s-deny-write
# Step 5: Exit node
exit
# Step 6: Create the pod - save as readonly-enforced.yaml
apiVersion: v1
kind: Pod
metadata:
name: readonly-enforced
spec:
nodeName: cks-lab-worker
containers:
- name: app
image: busybox:1.36
command: ["sleep", "3600"]
securityContext:
appArmorProfile:
type: Localhost
localhostProfile: k8s-deny-write
kubectl apply -f readonly-enforced.yaml
Verification
# Check pod is running
kubectl get pod readonly-enforced
# Verify AppArmor profile is active
kubectl exec readonly-enforced -- cat /proc/1/attr/current
# k8s-deny-write (enforce)
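The securityContext.appArmorProfile field used above requires Kubernetes v1.30 or newer. On older clusters the same profile is attached with a per-container annotation instead; a sketch of the equivalent older form (the annotation key ends with the container name):
# metadata:
#   annotations:
#     container.apparmor.security.beta.kubernetes.io/app: localhost/k8s-deny-write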
# Verify writes are denied
kubectl exec readonly-enforced -- touch /tmp/test
# touch: /tmp/test: Permission denied
kubectl exec readonly-enforced -- sh -c "echo test > /etc/test"
# sh: can't create /etc/test: Permission denied
# Verify reads work
kubectl exec readonly-enforced -- cat /etc/hostname
# (outputs hostname)
kubectl exec readonly-enforced -- ls /
# (outputs directory listing)
Solution 9: Multiple PSS Namespaces
Difficulty: Medium
Steps
# Step 1: Create namespaces with labels
kubectl create namespace ns-privileged
kubectl label namespace ns-privileged \
pod-security.kubernetes.io/enforce=privileged \
pod-security.kubernetes.io/enforce-version=latest
kubectl create namespace ns-baseline
kubectl label namespace ns-baseline \
pod-security.kubernetes.io/enforce=baseline \
pod-security.kubernetes.io/enforce-version=latest \
pod-security.kubernetes.io/warn=restricted \
pod-security.kubernetes.io/warn-version=latest
kubectl create namespace ns-restricted
kubectl label namespace ns-restricted \
pod-security.kubernetes.io/enforce=restricted \
pod-security.kubernetes.io/enforce-version=latest \
pod-security.kubernetes.io/warn=restricted \
pod-security.kubernetes.io/warn-version=latest \
pod-security.kubernetes.io/audit=restricted \
pod-security.kubernetes.io/audit-version=latest
Verification
# Test ns-privileged: privileged pod should work
kubectl -n ns-privileged run priv-test --image=nginx \
--overrides='{"spec":{"containers":[{"name":"nginx","image":"nginx","securityContext":{"privileged":true}}]}}'
# pod/priv-test created
# Test ns-baseline: privileged pod should be rejected
kubectl -n ns-baseline run priv-test --image=nginx \
--overrides='{"spec":{"containers":[{"name":"nginx","image":"nginx","securityContext":{"privileged":true}}]}}'
# Error from server (Forbidden): violates PodSecurity "baseline:latest": privileged
# Test ns-restricted: normal nginx should be rejected
kubectl -n ns-restricted run nginx-test --image=nginx
# Error from server (Forbidden): violates PodSecurity "restricted:latest": ...
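For a column-per-mode view that is easier to read than --show-labels, kubectl's -L flag prints the label values as columns:
kubectl get ns ns-privileged ns-baseline ns-restricted \
-L pod-security.kubernetes.io/enforce \
-L pod-security.kubernetes.io/warn \
-L pod-security.kubernetes.io/audit
# One column per label; the cell is empty where that mode is not set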
# Verify labels
kubectl get ns ns-privileged ns-baseline ns-restricted --show-labels
Solution 10: Combined Security Hardening
Difficulty: Medium
Steps
# Save as hardened-app.yaml
apiVersion: v1
kind: Pod
metadata:
name: hardened-app
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: nginx
image: nginx:1.27
ports:
- containerPort: 80
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
volumeMounts:
- name: cache
mountPath: /var/cache/nginx
- name: run
mountPath: /var/run
- name: tmp
mountPath: /tmp
volumes:
- name: cache
emptyDir: {}
- name: run
emptyDir: {}
- name: tmp
emptyDir: {}
kubectl apply -f hardened-app.yaml
Verification
# Check pod is running
kubectl get pod hardened-app
# Verify nginx is serving
kubectl exec hardened-app -- curl -s localhost:80 | head -3
# Verify seccomp
kubectl get pod hardened-app -o jsonpath='{.spec.securityContext.seccompProfile}'
# {"type":"RuntimeDefault"}
# Verify capabilities
kubectl exec hardened-app -- cat /proc/1/status | grep CapEff
# CapEff: 0000000000000400 (only NET_BIND_SERVICE)
# Verify read-only filesystem
kubectl exec hardened-app -- touch /etc/test
# touch: cannot touch '/etc/test': Read-only file system
Solution 11: Audit Seccomp Profile
Difficulty: Medium
Steps
# Step 1: Access Kind node
docker exec -it cks-lab-worker bash
# Step 2: Create audit profile
mkdir -p /var/lib/kubelet/seccomp/profiles
cat > /var/lib/kubelet/seccomp/profiles/audit-all.json << 'EOF'
{
"defaultAction": "SCMP_ACT_LOG"
}
EOF
# Step 3: Exit
exit
# Step 4: Create the pod - save as audit-pod.yaml
apiVersion: v1
kind: Pod
metadata:
name: audit-pod
spec:
nodeName: cks-lab-worker
containers:
- name: app
image: busybox:1.36
command: ["sh", "-c", "wget -qO- http://kubernetes.default.svc 2>&1; sleep 3600"]
securityContext:
seccompProfile:
type: Localhost
localhostProfile: profiles/audit-all.json
kubectl apply -f audit-pod.yaml
Verification
# Check pod is running
kubectl get pod audit-pod
# The pod should be running -- SCMP_ACT_LOG allows all syscalls
# but logs them to the kernel audit log
# To see logged syscalls (on the node):
docker exec cks-lab-worker dmesg | grep SECCOMP | tail -20
Exam Tip
SCMP_ACT_LOG is useful for building custom profiles. Run your app with this profile, collect the logged syscalls, then create an allowlist profile from the results.
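The SECCOMP lines in dmesg report syscall numbers rather than names. A sketch of collecting the unique numbers and translating one (ausyscall comes from the auditd package and may need to be installed on the node):
docker exec cks-lab-worker sh -c "dmesg | grep SECCOMP | grep -oE 'syscall=[0-9]+' | sort -u"
# syscall=41
docker exec cks-lab-worker ausyscall 41
# socket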
Solution 12: Fix Dangerous Pod Settings
Difficulty: Medium
Steps
# Step 1: Create the dangerous pod - save as dangerous-pod.yaml
apiVersion: v1
kind: Pod
metadata:
name: dangerous-pod
spec:
hostNetwork: true
hostPID: true
containers:
- name: nginx
image: nginx:1.27
securityContext:
privileged: true
kubectl apply -f dangerous-pod.yaml
# Step 2: Create the fixed version - save as safe-pod.yaml
apiVersion: v1
kind: Pod
metadata:
name: safe-pod
spec:
hostNetwork: false
hostPID: false
hostIPC: false
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: nginx
image: nginx:1.27
ports:
- containerPort: 80
securityContext:
privileged: false
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
volumeMounts:
- name: cache
mountPath: /var/cache/nginx
- name: run
mountPath: /var/run
- name: tmp
mountPath: /tmp
volumes:
- name: cache
emptyDir: {}
- name: run
emptyDir: {}
- name: tmp
emptyDir: {}
kubectl apply -f safe-pod.yaml
Security Violations Identified
| Setting | Violation | Risk |
|---|---|---|
| hostNetwork: true | Shares host network namespace | Network sniffing, port binding, bypass NetworkPolicies |
| hostPID: true | Shares host PID namespace | See all host processes, read their environments |
| privileged: true | Full host access | Container escape, kernel module loading, device access |
Verification
# Verify safe-pod is running
kubectl get pod safe-pod
# Verify it cannot see host processes
kubectl exec safe-pod -- ps aux
# Should only show nginx processes, not host processes
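Another quick check that safe-pod no longer shares the host network namespace: its pod IP should come from the cluster's pod network rather than matching the node's address:
kubectl get pod safe-pod -o wide
# The IP column shows a pod-network address, not the node IP (compare with kubectl get node -o wide)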
# Verify it cannot write to root filesystem
kubectl exec safe-pod -- touch /etc/test
# Read-only file system
Solution 13: Complete Hardened Nginx Stack
Difficulty: Hard
Steps
Important Note
Standard nginx runs its master process as root to bind to port 80. The Restricted PSS requires runAsNonRoot: true, which conflicts with standard nginx. We have two options:
- Use a namespace with baseline enforcement (the practical approach shown below)
- Use an nginx-unprivileged image with restricted enforcement
We will demonstrate both approaches.
Approach A: Baseline Namespace with AppArmor
# Step 1: Create namespace with baseline enforcement
kubectl create namespace hardened-web
kubectl label namespace hardened-web \
pod-security.kubernetes.io/enforce=baseline \
pod-security.kubernetes.io/enforce-version=latest \
pod-security.kubernetes.io/warn=restricted \
pod-security.kubernetes.io/warn-version=latest
# Step 2: Create and load AppArmor profile on node
docker exec -it cks-lab-worker bash
cat > /etc/apparmor.d/k8s-nginx-restricted << 'PROFILE'
#include <tunables/global>
profile k8s-nginx-restricted flags=(attach_disconnected) {
#include <abstractions/base>
/** r,
/var/cache/nginx/** rw,
/var/run/** rw,
/tmp/** rw,
/dev/null rw,
/proc/** r,
network tcp,
capability net_bind_service,
capability setuid,
capability setgid,
capability chown,
capability dac_override,
deny capability sys_admin,
}
PROFILE
apparmor_parser /etc/apparmor.d/k8s-nginx-restricted
aa-status | grep k8s-nginx-restricted
exit
# Step 3: Create the pod - save as nginx-fortress.yaml
apiVersion: v1
kind: Pod
metadata:
name: nginx-fortress
namespace: hardened-web
spec:
nodeName: cks-lab-worker
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: nginx
image: nginx:1.27
ports:
- containerPort: 80
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
- CHOWN
- SETUID
- SETGID
- DAC_OVERRIDE
appArmorProfile:
type: Localhost
localhostProfile: k8s-nginx-restricted
volumeMounts:
- name: cache
mountPath: /var/cache/nginx
- name: run
mountPath: /var/run
- name: tmp
mountPath: /tmp
volumes:
- name: cache
emptyDir: {}
- name: run
emptyDir: {}
- name: tmp
emptyDir: {}
kubectl apply -f nginx-fortress.yaml
Approach B: Restricted Namespace with nginx-unprivileged
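Approach B assumes a second namespace that enforces the restricted level; a sketch of creating it first (the name hardened-web-restricted matches the manifest below, which you would save and apply like the others):
kubectl create namespace hardened-web-restricted
kubectl label namespace hardened-web-restricted \
pod-security.kubernetes.io/enforce=restricted \
pod-security.kubernetes.io/enforce-version=latest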
# Alternative for truly restricted PSS compliance
apiVersion: v1
kind: Pod
metadata:
name: nginx-fortress-restricted
namespace: hardened-web-restricted
spec:
securityContext:
runAsNonRoot: true
runAsUser: 101
runAsGroup: 101
seccompProfile:
type: RuntimeDefault
containers:
- name: nginx
image: nginxinc/nginx-unprivileged:1.27
ports:
- containerPort: 8080
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
volumeMounts:
- name: cache
mountPath: /var/cache/nginx
- name: run
mountPath: /var/run
- name: tmp
mountPath: /tmp
volumes:
- name: cache
emptyDir: {}
- name: run
emptyDir: {}
- name: tmp
emptyDir: {}
Verification
# Check pod is running
kubectl -n hardened-web get pod nginx-fortress
# Verify AppArmor
kubectl -n hardened-web exec nginx-fortress -- cat /proc/1/attr/current
# k8s-nginx-restricted (enforce)
# Verify nginx is serving
kubectl -n hardened-web exec nginx-fortress -- curl -s localhost:80 | head -3
Solution 14: Strict Allowlist Seccomp Profile
Difficulty: Hard
Steps
# Step 1: Access Kind node
docker exec -it cks-lab-worker bash
# Step 2: Create the strict profile
mkdir -p /var/lib/kubelet/seccomp/profiles
cat > /var/lib/kubelet/seccomp/profiles/nginx-strict.json << 'SECCOMP'
{
"defaultAction": "SCMP_ACT_ERRNO",
"architectures": [
"SCMP_ARCH_X86_64",
"SCMP_ARCH_X86",
"SCMP_ARCH_X32"
],
"syscalls": [
{
"names": [
"accept4",
"access",
"arch_prctl",
"bind",
"brk",
"capget",
"capset",
"chown",
"clone",
"clone3",
"close",
"connect",
"dup2",
"epoll_create1",
"epoll_ctl",
"epoll_pwait",
"epoll_wait",
"eventfd2",
"execve",
"exit",
"exit_group",
"fchmod",
"fchown",
"fcntl",
"fstat",
"fstatfs",
"futex",
"getdents64",
"getegid",
"geteuid",
"getgid",
"getpid",
"getppid",
"getrandom",
"getuid",
"ioctl",
"listen",
"lseek",
"madvise",
"mmap",
"mprotect",
"munmap",
"nanosleep",
"newfstatat",
"openat",
"pipe2",
"prctl",
"pread64",
"prlimit64",
"pwrite64",
"read",
"recvfrom",
"recvmsg",
"rseq",
"rt_sigaction",
"rt_sigprocmask",
"rt_sigreturn",
"sched_getaffinity",
"sendfile",
"sendmsg",
"sendto",
"set_robust_list",
"set_tid_address",
"setgid",
"setgroups",
"setitimer",
"setsockopt",
"setuid",
"sigaltstack",
"socket",
"socketpair",
"stat",
"statfs",
"sysinfo",
"tgkill",
"uname",
"unlink",
"wait4",
"write",
"writev"
],
"action": "SCMP_ACT_ALLOW"
}
]
}
SECCOMP
# Step 3: Exit
exit
# Step 4: Create the pod - save as strict-nginx.yaml
apiVersion: v1
kind: Pod
metadata:
name: strict-nginx
spec:
nodeName: cks-lab-worker
containers:
- name: nginx
image: nginx:1.27
ports:
- containerPort: 80
securityContext:
seccompProfile:
type: Localhost
localhostProfile: profiles/nginx-strict.json
kubectl apply -f strict-nginx.yaml
Verification
# Check pod is running
kubectl get pod strict-nginx
# Verify nginx is functional
kubectl exec strict-nginx -- curl -s localhost:80 | head -5
# Verify the seccomp profile type
kubectl get pod strict-nginx -o jsonpath='{.spec.containers[0].securityContext.seccompProfile}'Exam Tip
If the pod fails to start with a strict allowlist, temporarily switch the profile's defaultAction to SCMP_ACT_LOG, recreate the pod, and check dmesg on the node for SECCOMP messages to identify which syscalls are missing. Add them to the allowlist, restore SCMP_ACT_ERRNO, and update the profile.
Solution 15: Audit and Fix Insecure Pods
Difficulty: Hard
Steps
# Step 1: Create insecure pods - save as insecure-pods.yaml
---
apiVersion: v1
kind: Pod
metadata:
name: pod-a
spec:
containers:
- name: nginx
image: nginx:1.27
securityContext:
privileged: true
---
apiVersion: v1
kind: Pod
metadata:
name: pod-b
spec:
hostPID: true
hostNetwork: true
containers:
- name: app
image: busybox:1.36
command: ["sleep", "3600"]
---
apiVersion: v1
kind: Pod
metadata:
name: pod-c
spec:
containers:
- name: nginx
image: nginx:1.27
securityContext:
capabilities:
add:
- SYS_ADMIN
- NET_ADMIN
kubectl apply -f insecure-pods.yaml
# Step 2: Audit commands
# Find privileged pods
kubectl get pods -o json | jq -r '
.items[] |
select(.spec.containers[].securityContext.privileged==true) |
.metadata.name'
# Output: pod-a
# Find pods with hostPID
kubectl get pods -o json | jq -r '
.items[] | select(.spec.hostPID==true) | .metadata.name'
# Output: pod-b
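Note that these queries only inspect .spec.containers; init containers can carry the same risky settings. A sketch widening the privileged check to include them (jq's // operator supplies an empty list when initContainers is absent):
kubectl get pods -o json | jq -r '
.items[] |
select([.spec.containers[], (.spec.initContainers // [])[]]
| any(.securityContext.privileged==true)) |
.metadata.name'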
# Find pods with SYS_ADMIN capability
kubectl get pods -o json | jq -r '
.items[] |
select(.spec.containers[].securityContext.capabilities.add[]? == "SYS_ADMIN") |
.metadata.name'
# Output: pod-c
# Step 3: Create fixed pods - save as fixed-pods.yaml
---
apiVersion: v1
kind: Pod
metadata:
name: pod-a-fixed
spec:
containers:
- name: nginx
image: nginx:1.27
securityContext:
privileged: false
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
---
apiVersion: v1
kind: Pod
metadata:
name: pod-b-fixed
spec:
hostPID: false
hostNetwork: false
hostIPC: false
containers:
- name: app
image: busybox:1.36
command: ["sleep", "3600"]
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
---
apiVersion: v1
kind: Pod
metadata:
name: pod-c-fixed
spec:
containers:
- name: nginx
image: nginx:1.27
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
kubectl apply -f fixed-pods.yaml
Verification
# Verify all fixed pods are running
kubectl get pods pod-a-fixed pod-b-fixed pod-c-fixed
# Re-run audit -- should find no issues in fixed pods
kubectl get pods -l '!batch.kubernetes.io/job-name' -o json | jq -r '
.items[] |
select(.metadata.name | endswith("-fixed")) |
select(
.spec.hostNetwork==true or
.spec.hostPID==true or
(.spec.containers[].securityContext.privileged==true) or
(.spec.containers[].securityContext.capabilities.add[]? == "SYS_ADMIN")
) | .metadata.name'
# (no output -- all fixed pods are clean)
Solution 16: PSS Boundary Testing
Difficulty: Hard
Steps
# Step 1: Create namespace
kubectl create namespace pss-test
kubectl label namespace pss-test \
pod-security.kubernetes.io/enforce=baseline \
pod-security.kubernetes.io/enforce-version=latest \
pod-security.kubernetes.io/warn=restricted \
pod-security.kubernetes.io/warn-version=latest
# Step 2: Pod that passes baseline - save as test-baseline-pass.yaml
apiVersion: v1
kind: Pod
metadata:
name: test-baseline-pass
namespace: pss-test
spec:
containers:
- name: nginx
image: nginx:1.27
ports:
- containerPort: 80
kubectl apply -f test-baseline-pass.yaml
# Warning: would violate PodSecurity "restricted:latest":
# allowPrivilegeEscalation != false
# unrestricted capabilities
# runAsNonRoot != true
# seccompProfile
# pod/test-baseline-pass created
# Step 3: Pod that fails baseline (should be rejected)
kubectl -n pss-test run test-baseline-fail --image=nginx \
--overrides='{"spec":{"containers":[{"name":"nginx","image":"nginx","securityContext":{"privileged":true}}]}}'
# Error from server (Forbidden): violates PodSecurity "baseline:latest": privileged
# Step 4: Document restricted violations from the warning message
# The warning tells us test-baseline-pass violates restricted because:
# 1. allowPrivilegeEscalation != false (containers "nginx" must set to false)
# 2. unrestricted capabilities (containers "nginx" must set drop: ALL)
# 3. runAsNonRoot != true (pod or containers "nginx" must set runAsNonRoot: true)
# 4. seccompProfile (pod or containers "nginx" must set RuntimeDefault or Localhost)
# Step 5: Pod that passes restricted - save as test-restricted-pass.yaml
apiVersion: v1
kind: Pod
metadata:
name: test-restricted-pass
namespace: pss-test
spec:
securityContext:
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
containers:
- name: app
image: python:3.12-slim
command: ["python", "-m", "http.server", "8080"]
ports:
- containerPort: 8080
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
kubectl apply -f test-restricted-pass.yaml
# pod/test-restricted-pass created (no warnings!)
Verification
kubectl -n pss-test get pods
# test-baseline-pass 1/1 Running 0 ...
# test-restricted-pass 1/1 Running 0 ...
# (test-baseline-fail was rejected, does not exist)
Solution 17: AppArmor Network Denial
Difficulty: Hard
Steps
# Step 1: Create and load AppArmor profile
docker exec -it cks-lab-worker bash
cat > /etc/apparmor.d/k8s-deny-network << 'PROFILE'
#include <tunables/global>
profile k8s-deny-network flags=(attach_disconnected) {
#include <abstractions/base>
file,
deny network,
}
PROFILE
apparmor_parser /etc/apparmor.d/k8s-deny-network
aa-status | grep k8s-deny-network
exit
# Step 2: Create the pod - save as isolated-app.yaml
apiVersion: v1
kind: Pod
metadata:
name: isolated-app
spec:
nodeName: cks-lab-worker
containers:
- name: app
image: busybox:1.36
command: ["sleep", "3600"]
securityContext:
appArmorProfile:
type: Localhost
localhostProfile: k8s-deny-network
kubectl apply -f isolated-app.yaml
Verification
# Check pod is running
kubectl get pod isolated-app
# Verify file operations work
kubectl exec isolated-app -- ls /
# bin dev etc home lib ...
kubectl exec isolated-app -- cat /etc/hostname
# isolated-app
# Verify network operations fail
kubectl exec isolated-app -- wget -qO- -T 3 http://kubernetes.default.svc
# wget: can't connect ... (Permission denied or similar)
kubectl exec isolated-app -- nslookup kubernetes.default
# nslookup: ... Permission denied (or similar network error)
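The kernel log on the node records each blocked attempt, which is handy when you need to prove the AppArmor profile (and not something else) caused the failure; a sketch:
docker exec cks-lab-worker sh -c "dmesg | grep -i apparmor | grep DENIED | tail -5"
# apparmor="DENIED" ... profile="k8s-deny-network" ... (one entry per blocked network operation)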
# Verify AppArmor profile
kubectl exec isolated-app -- cat /proc/1/attr/current
# k8s-deny-network (enforce)
Solution 18: Multi-Tier Application Security
Difficulty: Hard
Steps
# Step 1: Create namespace with baseline enforcement
kubectl create namespace multi-tier
kubectl label namespace multi-tier \
pod-security.kubernetes.io/enforce=baseline \
pod-security.kubernetes.io/enforce-version=latest
# Step 2: Create all three pods - save as multi-tier.yaml
---
apiVersion: v1
kind: Pod
metadata:
name: frontend
namespace: multi-tier
labels:
tier: frontend
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: nginx
image: nginx:1.27
ports:
- containerPort: 80
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
volumeMounts:
- name: cache
mountPath: /var/cache/nginx
- name: run
mountPath: /var/run
- name: tmp
mountPath: /tmp
volumes:
- name: cache
emptyDir: {}
- name: run
emptyDir: {}
- name: tmp
emptyDir: {}
---
apiVersion: v1
kind: Pod
metadata:
name: backend
namespace: multi-tier
labels:
tier: backend
spec:
securityContext:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
containers:
- name: api
image: python:3.12-slim
command: ["python", "-m", "http.server", "8080"]
ports:
- containerPort: 8080
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
volumeMounts:
- name: tmp
mountPath: /tmp
volumes:
- name: tmp
emptyDir: {}
---
apiVersion: v1
kind: Pod
metadata:
name: worker
namespace: multi-tier
labels:
tier: worker
spec:
securityContext:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
containers:
- name: worker
image: busybox:1.36
command: ["sh", "-c", "while true; do echo working; sleep 60; done"]
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
volumeMounts:
- name: tmp
mountPath: /tmp
volumes:
- name: tmp
emptyDir: {}
kubectl apply -f multi-tier.yaml
Verification
# All three pods should be running
kubectl -n multi-tier get pods
# NAME READY STATUS RESTARTS AGE
# backend 1/1 Running 0 10s
# frontend 1/1 Running 0 10s
# worker 1/1 Running 0 10s
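A quick side-by-side of the identity each tier runs as (custom-columns output; frontend shows &lt;none&gt; because it keeps nginx's root master process under the baseline level):
kubectl -n multi-tier get pods \
-o custom-columns=NAME:.metadata.name,RUNASUSER:.spec.securityContext.runAsUser,NONROOT:.spec.securityContext.runAsNonRoot
# NAME       RUNASUSER   NONROOT
# backend    1000        true
# frontend   <none>      <none>
# worker     1000        true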
# Verify frontend nginx is serving
kubectl -n multi-tier exec frontend -- curl -s localhost:80 | head -3
# Verify backend is serving
kubectl -n multi-tier exec backend -- python -c "import urllib.request; print(urllib.request.urlopen('http://localhost:8080').status)"
# 200
# Verify worker identity
kubectl -n multi-tier exec worker -- id
# uid=1000 gid=1000
# Verify backend identity
kubectl -n multi-tier exec backend -- id
# uid=1000 gid=1000 groups=1000
Solution 19: Double Hardened (Seccomp + Capabilities)
Difficulty: Hard
Steps
# Step 1: Create seccomp profile on node
docker exec -it cks-lab-worker bash
mkdir -p /var/lib/kubelet/seccomp/profiles
cat > /var/lib/kubelet/seccomp/profiles/hardened-default.json << 'EOF'
{
"defaultAction": "SCMP_ACT_ALLOW",
"syscalls": [
{
"names": [
"ptrace",
"mount",
"umount2",
"kexec_load",
"reboot",
"setns",
"unshare",
"bpf"
],
"action": "SCMP_ACT_ERRNO"
}
]
}
EOF
exit
# Step 2: Create the pod - save as double-hardened.yaml
apiVersion: v1
kind: Pod
metadata:
name: double-hardened
spec:
nodeName: cks-lab-worker
securityContext:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
containers:
- name: app
image: busybox:1.36
command: ["sleep", "3600"]
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
seccompProfile:
type: Localhost
localhostProfile: profiles/hardened-default.json
volumeMounts:
- name: tmp
mountPath: /tmp
volumes:
- name: tmp
emptyDir: {}
kubectl apply -f double-hardened.yaml
Verification
# Check pod is running
kubectl get pod double-hardened
# Verify mount fails (both capability and seccomp block it)
kubectl exec double-hardened -- mount /dev/null /mnt 2>&1
# mount: permission denied (or operation not permitted)
# Verify ptrace fails
kubectl exec double-hardened -- ls /proc/1/maps 2>&1
# (may fail due to capabilities, ptrace also blocked by seccomp)
# Verify file reads work
kubectl exec double-hardened -- cat /etc/hostname
# double-hardened
# Verify /tmp is writable
kubectl exec double-hardened -- touch /tmp/test-file
kubectl exec double-hardened -- ls /tmp/test-file
# /tmp/test-file
# Verify /etc is NOT writable (read-only root filesystem)
kubectl exec double-hardened -- touch /etc/test-file
# touch: /etc/test-file: Read-only file system
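Two kernel-side flags confirm the layered hardening took hold: allowPrivilegeEscalation: false sets the no_new_privs bit, and the Localhost seccomp profile puts PID 1 into filter mode (value 2):
kubectl exec double-hardened -- grep -E '^(NoNewPrivs|Seccomp):' /proc/1/status
# NoNewPrivs: 1
# Seccomp: 2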
# Verify running as non-root
kubectl exec double-hardened -- id
# uid=1000 gid=1000
Solution 20: Full Security Audit and Remediation
Difficulty: Hard
Steps
# Step 1: Create namespace and insecure pods
kubectl create namespace audit-ns
# Save as audit-pods.yaml
---
apiVersion: v1
kind: Pod
metadata:
name: web-exposed
namespace: audit-ns
spec:
hostNetwork: true
containers:
- name: nginx
image: nginx:1.27
---
apiVersion: v1
kind: Pod
metadata:
name: db-privileged
namespace: audit-ns
spec:
containers:
- name: db
image: busybox:1.36
command: ["sleep", "3600"]
securityContext:
privileged: true
---
apiVersion: v1
kind: Pod
metadata:
name: app-overcapped
namespace: audit-ns
spec:
containers:
- name: app
image: busybox:1.36
command: ["sleep", "3600"]
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_PTRACE
- NET_ADMIN
---
apiVersion: v1
kind: Pod
metadata:
name: worker-ok
namespace: audit-ns
spec:
securityContext:
runAsUser: 1000
runAsNonRoot: true
containers:
- name: worker
image: busybox:1.36
command: ["sleep", "3600"]
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
kubectl apply -f audit-pods.yaml
# Step 2: Run audit commands
# Find pods with host namespace sharing
echo "=== Pods with Host Namespaces ==="
kubectl -n audit-ns get pods -o json | jq -r '
.items[] |
select(.spec.hostNetwork==true or .spec.hostPID==true or .spec.hostIPC==true) |
.metadata.name'
# web-exposed
# Find privileged pods
echo "=== Privileged Pods ==="
kubectl -n audit-ns get pods -o json | jq -r '
.items[] |
select(.spec.containers[].securityContext.privileged==true) |
.metadata.name'
# db-privileged
# Find pods with dangerous capabilities
echo "=== Pods with Dangerous Capabilities ==="
kubectl -n audit-ns get pods -o json | jq -r '
.items[] |
select(.spec.containers[].securityContext.capabilities.add != null) |
"\(.metadata.name): \(.spec.containers[].securityContext.capabilities.add)"'
# app-overcapped: ["SYS_ADMIN","SYS_PTRACE","NET_ADMIN"]
# Identify the properly hardened pod
echo "=== Properly Hardened Pods ==="
kubectl -n audit-ns get pods -o json | jq -r '
.items[] |
select(
.spec.hostNetwork!=true and
.spec.hostPID!=true and
(.spec.containers | all(.securityContext.privileged!=true)) and
(.spec.containers | all(.securityContext.capabilities.add==null or (.securityContext.capabilities.add | length)==0))
) | .metadata.name'
# worker-ok
# Step 3: Delete insecure pods
kubectl -n audit-ns delete pod web-exposed db-privileged app-overcapped
# Step 4: Create hardened replacements - save as fixed-audit-pods.yaml
---
apiVersion: v1
kind: Pod
metadata:
name: web-fixed
namespace: audit-ns
spec:
hostNetwork: false
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: nginx
image: nginx:1.27
ports:
- containerPort: 80
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
---
apiVersion: v1
kind: Pod
metadata:
name: db-fixed
namespace: audit-ns
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: db
image: busybox:1.36
command: ["sleep", "3600"]
securityContext:
privileged: false
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
---
apiVersion: v1
kind: Pod
metadata:
name: app-fixed
namespace: audit-ns
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: app
image: busybox:1.36
command: ["sleep", "3600"]
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
kubectl apply -f fixed-audit-pods.yaml
# Step 5: Apply baseline PSS enforcement
kubectl label namespace audit-ns \
pod-security.kubernetes.io/enforce=baseline \
pod-security.kubernetes.io/enforce-version=latest
# Step 6: Verify enforcement -- privileged pod should be rejected
kubectl -n audit-ns run enforcement-test --image=nginx \
--overrides='{"spec":{"containers":[{"name":"nginx","image":"nginx","securityContext":{"privileged":true}}]}}'
# Error from server (Forbidden): violates PodSecurity "baseline:latest": privileged
Final Verification
# All pods should be running
kubectl -n audit-ns get pods
# NAME READY STATUS RESTARTS AGE
# app-fixed 1/1 Running 0 ...
# db-fixed 1/1 Running 0 ...
# web-fixed 1/1 Running 0 ...
# worker-ok 1/1 Running 0 ...
# Re-run audit -- no issues should be found
kubectl -n audit-ns get pods -o json | jq -r '
.items[] |
select(
.spec.hostNetwork==true or
.spec.hostPID==true or
(.spec.containers[].securityContext.privileged==true) or
(.spec.containers[].securityContext.capabilities.add[]? | IN("SYS_ADMIN","SYS_PTRACE","NET_ADMIN"))
) | .metadata.name'
# (no output -- all clean)
# Verify namespace labels
kubectl get ns audit-ns --show-labels | grep pod-security
General Exam Tips for System Hardening
- Memorize the minimum pod spec for Restricted PSS -- you will need it frequently:
  securityContext:
    runAsNonRoot: true
    seccompProfile:
      type: RuntimeDefault
  containers:
  - securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop: ["ALL"]
- Use kubectl explain during the exam:
  kubectl explain pod.spec.securityContext
  kubectl explain pod.spec.containers.securityContext.capabilities
  kubectl explain pod.spec.containers.securityContext.seccompProfile
- Use --dry-run=server to test PSS compliance before creating pods
- For AppArmor: the profile must be loaded on the node where the pod is scheduled, not on the control plane
- For seccomp: profiles go in /var/lib/kubelet/seccomp/ and localhostProfile paths are relative to that directory
- Practice jq queries for auditing pods -- they save significant time
- Know the difference between securityContext at the pod level and at the container level -- capabilities are container-level only