Whenever I install a cluster and run it for a while, the "Completed" pod references start to pile up in the GUI. Here's a quick and dirty way to clean up old pods.
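If you just want a one-off sweep rather than anything automated, the same idea works straight from the command line (assuming you're logged in as a user allowed to delete pods cluster-wide):

oc delete pods -A --field-selector=status.phase=Succeeded
oc delete pods -A --field-selector=status.phase=Failed

To have the cluster do this on a schedule instead, the manifest below sets up a service account with just enough RBAC to list and delete pods, and a CronJob that runs the cleanup nightly.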
apiVersion: v1
kind: ServiceAccount
metadata:
  name: pod-cleaner
  namespace: openshift
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: pod-cleaner-role
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["list", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: pod-cleaner-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: pod-cleaner-role
subjects:
  - kind: ServiceAccount
    name: pod-cleaner
    namespace: openshift
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: cleanup-completed-pods
  namespace: openshift
spec:
  schedule: "0 2 * * *" # daily at 02:00
  jobTemplate:
    spec:
      template:
        spec:
          serviceAccountName: pod-cleaner
          restartPolicy: Never
          containers:
            - name: pod-cleaner
              image: registry.redhat.io/openshift4/ose-cli:latest
              env:
                # Inline your target namespaces here (one per line)
                - name: NAMESPACES
                  value: |
                    openshift
                    openshift-marketplace
                    openshift-operator-lifecycle-manager
                    openshift-kube-scheduler
                    openshift-etcd
                    openshift-kube-apiserver
                    openshift-storage
              command:
                - /bin/sh
                - -c
                - |
                  set -eu
                  echo "Starting pod cleanup run at $(date -Is)"
                  # Walk the namespace list, skipping blank lines
                  printf "%s\n" "$NAMESPACES" | while read -r ns; do
                    [ -z "$ns" ] && continue
                    echo "Cleaning namespace: $ns"
                    # Delete finished pods, whether they succeeded or failed;
                    # "|| true" keeps one bad namespace from aborting the run
                    oc delete pods -n "$ns" --field-selector=status.phase=Succeeded || true
                    oc delete pods -n "$ns" --field-selector=status.phase=Failed || true
                  done
                  echo "Cleanup complete at $(date -Is)"
              resources:
                requests:
                  cpu: 50m
                  memory: 64Mi
                limits:
                  cpu: 200m
                  memory: 256Mi
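Save the manifest to a file (pod-cleanup.yaml here is just a placeholder name, as is the manual-run job name), apply it, and you can kick off a run right away instead of waiting for the 02:00 schedule:

oc apply -f pod-cleanup.yaml
oc create job --from=cronjob/cleanup-completed-pods manual-run -n openshift
oc logs -n openshift job/manual-run

Afterwards, a quick oc get pods -A --field-selector=status.phase=Succeeded should come back empty for the namespaces you listed.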