From 17f5633a8df403a2e6de066406fcdef087f8a57b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pedro=20Mart=C3=ADn?= Date: Tue, 13 Jan 2026 10:16:28 +0100 Subject: [PATCH] feat(compliance): add CIS 1.12 for Kubernetes (#9778) --- dashboard/compliance/cis_1_12_kubernetes.py | 24 + prowler/CHANGELOG.md | 1 + .../kubernetes/cis_1.12_kubernetes.json | 2915 +++++++++++++++++ 3 files changed, 2940 insertions(+) create mode 100644 dashboard/compliance/cis_1_12_kubernetes.py create mode 100644 prowler/compliance/kubernetes/cis_1.12_kubernetes.json diff --git a/dashboard/compliance/cis_1_12_kubernetes.py b/dashboard/compliance/cis_1_12_kubernetes.py new file mode 100644 index 0000000000..94558f33ad --- /dev/null +++ b/dashboard/compliance/cis_1_12_kubernetes.py @@ -0,0 +1,24 @@ +import warnings + +from dashboard.common_methods import get_section_containers_cis + +warnings.filterwarnings("ignore") + + +def get_table(data): + aux = data[ + [ + "REQUIREMENTS_ID", + "REQUIREMENTS_DESCRIPTION", + "REQUIREMENTS_ATTRIBUTES_SECTION", + "CHECKID", + "STATUS", + "REGION", + "ACCOUNTID", + "RESOURCEID", + ] + ].copy() + + return get_section_containers_cis( + aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION" + ) diff --git a/prowler/CHANGELOG.md b/prowler/CHANGELOG.md index c0f05bc50c..0f96bbfd6c 100644 --- a/prowler/CHANGELOG.md +++ b/prowler/CHANGELOG.md @@ -17,6 +17,7 @@ All notable changes to the **Prowler SDK** are documented in this file. - `compute_instance_group_load_balancer_attached` check for GCP provider [(#9695)](https://github.com/prowler-cloud/prowler/pull/9695) - `compute_instance_single_network_interface` check for GCP provider [(#9702)](https://github.com/prowler-cloud/prowler/pull/9702) - `compute_image_not_publicly_shared` check for GCP provider [(#9718)](https://github.com/prowler-cloud/prowler/pull/9718) +- CIS 1.12 compliance framework for Kubernetes [(#9778)](https://github.com/prowler-cloud/prowler/pull/9778) - CIS 6.0 for M365 provider [(#9779)](https://github.com/prowler-cloud/prowler/pull/9779) - CIS 5.0 compliance framework for the Azure provider [(#9777)](https://github.com/prowler-cloud/prowler/pull/9777) diff --git a/prowler/compliance/kubernetes/cis_1.12_kubernetes.json b/prowler/compliance/kubernetes/cis_1.12_kubernetes.json new file mode 100644 index 0000000000..1ba1e55a88 --- /dev/null +++ b/prowler/compliance/kubernetes/cis_1.12_kubernetes.json @@ -0,0 +1,2915 @@ +{ + "Framework": "CIS", + "Name": "CIS Kubernetes Benchmark v1.12.0", + "Version": "1.12.0", + "Provider": "Kubernetes", + "Description": "This CIS Kubernetes Benchmark provides prescriptive guidance for establishing a secure configuration posture for Kubernetes v1.32 - v1.34", + "Requirements": [ + { + "Id": "1.1.1", + "Description": "Ensure that the API server pod specification file permissions are set to 600 or more restrictive", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", + "RationaleStatement": "The API server pod specification file controls various parameters that set the behavior of the API server. You should restrict its file permissions to maintain the integrity of the file. 
The file should be writable by only the administrators on the system.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %a /etc/kubernetes/manifests/kube-apiserver.yaml ``` Verify that the permissions are `600` or more restrictive.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-apiserver/", + "DefaultValue": "By default, the `kube-apiserver.yaml` file has permissions of `640`." + } + ] + }, + { + "Id": "1.1.2", + "Description": "Ensure that the API server pod specification file ownership is set to root:root", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the API server pod specification file ownership is set to `root:root`.", + "RationaleStatement": "The API server pod specification file controls various parameters that set the behavior of the API server. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chown root:root /etc/kubernetes/manifests/kube-apiserver.yaml ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml ``` Verify that the ownership is set to `root:root`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-apiserver/", + "DefaultValue": "By default, the `kube-apiserver.yaml` file ownership is set to `root:root`." + } + ] + }, + { + "Id": "1.1.3", + "Description": "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", + "RationaleStatement": "The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example, ``` stat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml ``` Verify that the permissions are `600` or more restrictive.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-apiserver/", + "DefaultValue": "By default, the `kube-controller-manager.yaml` file has permissions of `640`." + } + ] + }, + { + "Id": "1.1.4", + "Description": "Ensure that the controller manager pod specification file ownership is set to root:root", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", + "RationaleStatement": "The controller manager pod specification file controls various parameters that set the behavior of various components of the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml ``` Verify that the ownership is set to `root:root`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-controller-manager", + "DefaultValue": "By default, `kube-controller-manager.yaml` file ownership is set to `root:root`." + } + ] + }, + { + "Id": "1.1.5", + "Description": "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", + "RationaleStatement": "The scheduler pod specification file controls various parameters that set the behavior of the Scheduler service in the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %a /etc/kubernetes/manifests/kube-scheduler.yaml ``` Verify that the permissions are `600` or more restrictive.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-scheduler/", + "DefaultValue": "By default, `kube-scheduler.yaml` file has permissions of `640`." 
+ } + ] + }, + { + "Id": "1.1.6", + "Description": "Ensure that the scheduler pod specification file ownership is set to root:root", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", + "RationaleStatement": "The scheduler pod specification file controls various parameters that set the behavior of the `kube-scheduler` service in the master node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chown root:root /etc/kubernetes/manifests/kube-scheduler.yaml ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml ``` Verify that the ownership is set to `root:root`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-scheduler/", + "DefaultValue": "By default, `kube-scheduler.yaml` file ownership is set to `root:root`." + } + ] + }, + { + "Id": "1.1.7", + "Description": "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", + "RationaleStatement": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chmod 600 /etc/kubernetes/manifests/etcd.yaml ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %a /etc/kubernetes/manifests/etcd.yaml ``` Verify that the permissions are `600` or more restrictive.", + "AdditionalInformation": "", + "References": "https://coreos.com/etcd:https://kubernetes.io/docs/admin/etcd/", + "DefaultValue": "By default, `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `640`." 
+ } + ] + }, + { + "Id": "1.1.8", + "Description": "Ensure that the etcd pod specification file ownership is set to root:root", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "RationaleStatement": "The etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` controls various parameters that set the behavior of the `etcd` service in the master node. etcd is a highly-available key-value store which Kubernetes uses for persistent storage of all of its REST API object. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chown root:root /etc/kubernetes/manifests/etcd.yaml ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %U:%G /etc/kubernetes/manifests/etcd.yaml ``` Verify that the ownership is set to `root:root`.", + "AdditionalInformation": "", + "References": "https://coreos.com/etcd:https://kubernetes.io/docs/admin/etcd/", + "DefaultValue": "By default, `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`." + } + ] + }, + { + "Id": "1.1.9", + "Description": "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", + "RationaleStatement": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. Those files should be writable by only the administrators on the system.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chmod 600 ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %a ``` Verify that the permissions are `600` or more restrictive.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/cluster-administration/networking/", + "DefaultValue": "NA" + } + ] + }, + { + "Id": "1.1.10", + "Description": "Ensure that the Container Network Interface file ownership is set to root:root", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", + "RationaleStatement": "Container Network Interface provides various networking options for overlay networking. You should consult their documentation and restrict their respective file permissions to maintain the integrity of those files. 
Those files should be owned by `root:root`.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chown root:root ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %U:%G ``` Verify that the ownership is set to `root:root`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/cluster-administration/networking/", + "DefaultValue": "NA" + } + ] + }, + { + "Id": "1.1.11", + "Description": "Ensure that the etcd data directory permissions are set to 700 or more restrictive", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", + "RationaleStatement": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world.", + "ImpactStatement": "None", + "RemediationProcedure": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: ``` ps -ef | grep etcd ``` Run the below command (based on the etcd data directory found above). For example, ``` chmod 700 /var/lib/etcd ```", + "AuditProcedure": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: ``` ps -ef | grep etcd ``` Run the below command (based on the etcd data directory found above). For example, ``` stat -c %a /var/lib/etcd ``` Verify that the permissions are `700` or more restrictive.", + "AdditionalInformation": "", + "References": "https://coreos.com/etcd/docs/latest/op-guide/configuration.html#data-dir:https://kubernetes.io/docs/admin/etcd/", + "DefaultValue": "By default, etcd data directory has permissions of `755`." + } + ] + }, + { + "Id": "1.1.12", + "Description": "Ensure that the etcd data directory ownership is set to etcd:etcd", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", + "RationaleStatement": "etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`.", + "ImpactStatement": "None", + "RemediationProcedure": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: ``` ps -ef | grep etcd ``` Run the below command (based on the etcd data directory found above). For example, ``` chown etcd:etcd /var/lib/etcd ```", + "AuditProcedure": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: ``` ps -ef | grep etcd ``` Run the below command (based on the etcd data directory found above). 
For example, ``` stat -c %U:%G /var/lib/etcd ``` Verify that the ownership is set to `etcd:etcd`.", + "AdditionalInformation": "", + "References": "https://coreos.com/etcd/docs/latest/op-guide/configuration.html#data-dir:https://kubernetes.io/docs/admin/etcd/", + "DefaultValue": "By default, etcd data directory ownership is set to `etcd:etcd`." + } + ] + }, + { + "Id": "1.1.13", + "Description": "Ensure that the default administrative credential file permissions are set to 600", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the `admin.conf` file (and `super-admin.conf` file, where it exists) have permissions of `600`.", + "RationaleStatement": "As part of initial cluster setup, default kubeconfig files are created to be used by the administrator of the cluster. These files contain private keys and certificates which allow for privileged access to the cluster. You should restrict their file permissions to maintain the integrity and confidentiality of the file(s). The file(s) should be readable and writable by only the administrators on the system.", + "ImpactStatement": "None.", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chmod 600 /etc/kubernetes/admin.conf ``` On Kubernetes 1.29+ the `super-admin.conf` file should also be modified, if present. For example, ``` chmod 600 /etc/kubernetes/super-admin.conf ```", + "AuditProcedure": "Run the following command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %a /etc/kubernetes/admin.conf ``` On Kubernetes version 1.29 and higher run the following command as well :- ``` stat -c %a /etc/kubernetes/super-admin.conf ``` Verify that the permissions are `600` or more restrictive.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/:https://raesene.github.io/blog/2024/01/06/when-is-admin-not-admin/", + "DefaultValue": "By default, admin.conf and super-admin.conf have permissions of `600`." + } + ] + }, + { + "Id": "1.1.14", + "Description": "Ensure that the default administrative credential file ownership is set to root:root", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the `admin.conf` (and `super-admin.conf` file, where it exists) file ownership is set to `root:root`.", + "RationaleStatement": "As part of initial cluster setup, default kubeconfig files are created to be used by the administrator of the cluster. These files contain private keys and certificates which allow for privileged access to the cluster. You should set their file ownership to maintain the integrity and confidentiality of the file. The file(s) should be owned by `root:root`.", + "ImpactStatement": "None.", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chown root:root /etc/kubernetes/admin.conf ``` On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present. 
For example, ``` chown root:root /etc/kubernetes/super-admin.conf ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %U:%G /etc/kubernetes/admin.conf ``` On Kubernetes version 1.29 and higher run the following command as well :- ``` stat -c %U:%G /etc/kubernetes/super-admin.conf ``` Verify that the ownership is set to `root:root`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kubeadm/:https://raesene.github.io/blog/2024/01/06/when-is-admin-not-admin/", + "DefaultValue": "By default, `admin.conf` and `super-admin.conf` file ownership is set to `root:root`." + } + ] + }, + { + "Id": "1.1.15", + "Description": "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", + "RationaleStatement": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chmod 600 /etc/kubernetes/scheduler.conf ```", + "AuditProcedure": "Run the following command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %a /etc/kubernetes/scheduler.conf ``` Verify that the permissions are `600` or more restrictive.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/", + "DefaultValue": "By default, `scheduler.conf` has permissions of `640`." + } + ] + }, + { + "Id": "1.1.16", + "Description": "Ensure that the scheduler.conf file ownership is set to root:root", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", + "RationaleStatement": "The `scheduler.conf` file is the kubeconfig file for the Scheduler. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chown root:root /etc/kubernetes/scheduler.conf ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %U:%G /etc/kubernetes/scheduler.conf ``` Verify that the ownership is set to `root:root`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kubeadm/", + "DefaultValue": "By default, `scheduler.conf` file ownership is set to `root:root`." 
+ } + ] + }, + { + "Id": "1.1.17", + "Description": "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", + "RationaleStatement": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chmod 600 /etc/kubernetes/controller-manager.conf ```", + "AuditProcedure": "Run the following command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %a /etc/kubernetes/controller-manager.conf ``` Verify that the permissions are `600` or more restrictive.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-controller-manager/", + "DefaultValue": "By default, `controller-manager.conf` has permissions of `640`." + } + ] + }, + { + "Id": "1.1.18", + "Description": "Ensure that the controller-manager.conf file ownership is set to root:root", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", + "RationaleStatement": "The `controller-manager.conf` file is the kubeconfig file for the Controller Manager. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chown root:root /etc/kubernetes/controller-manager.conf ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %U:%G /etc/kubernetes/controller-manager.conf ``` Verify that the ownership is set to `root:root`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-controller-manager/", + "DefaultValue": "By default, `controller-manager.conf` file ownership is set to `root:root`." + } + ] + }, + { + "Id": "1.1.19", + "Description": "Ensure that the Kubernetes PKI directory and file ownership is set to root:root", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", + "RationaleStatement": "Kubernetes makes use of a number of certificates as part of its operation. You should set the ownership of the directory containing the PKI information and all files in that directory to maintain their integrity. 
The directory and files should be owned by `root:root`.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chown -R root:root /etc/kubernetes/pki/ ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` ls -laR /etc/kubernetes/pki/ ``` Verify that the ownership of all files and directories in this hierarchy is set to `root:root`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-apiserver/", + "DefaultValue": "By default, the /etc/kubernetes/pki/ directory and all of the files and directories contained within it are set to be owned by the root user." + } + ] + }, + { + "Id": "1.1.20", + "Description": "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Ensure that Kubernetes PKI certificate files have permissions of `644` or more restrictive.", + "RationaleStatement": "Kubernetes makes use of a number of certificate files as part of the operation of its components. The permissions on these files should be set to `644` or more restrictive to protect their integrity and confidentiality.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chmod -R 644 /etc/kubernetes/pki/*.crt ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c '%a' /etc/kubernetes/pki/*.crt ``` Verify that the permissions are `644` or more restrictive, or ``` ls -l /etc/kubernetes/pki/*.crt ``` Verify that the permissions are `-rw-r--r--` or more restrictive.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-apiserver/", + "DefaultValue": "By default, the certificates used by Kubernetes are set to have permissions of `644`" + } + ] + }, + { + "Id": "1.1.21", + "Description": "Ensure that the Kubernetes PKI key file permissions are set to 600", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.1 Control Plane Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Ensure that Kubernetes PKI key files have permissions of `600`.", + "RationaleStatement": "Kubernetes makes use of a number of key files as part of the operation of its components. The permissions on these files should be set to `600` to protect their integrity and confidentiality.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chmod -R 600 /etc/kubernetes/pki/*.key ```", + "AuditProcedure": "Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c '%a' /etc/kubernetes/pki/*.key ``` Verify that the permissions are `600` or more restrictive, or ``` ls -l /etc/kubernetes/pki/*.key ``` Verify that the permissions are `-rw-------`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-apiserver/", + "DefaultValue": "By default, the keys used by Kubernetes are set to have permissions of `600`" + } + ] + }, + { + "Id": "1.2.1", + "Description": "Ensure that the --anonymous-auth argument is set to false", + "Checks": [ + "apiserver_anonymous_requests" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Disable anonymous requests to the API server.", + "RationaleStatement": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the API server. You should rely on authentication to authorize access and disallow anonymous requests. If you are using RBAC authorization, it is generally considered reasonable to allow anonymous access to the API Server for health checks and discovery purposes, and hence this recommendation is not scored. However, you should consider whether anonymous discovery is an acceptable risk for your purposes.", + "ImpactStatement": "Anonymous requests will be rejected.", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter. ``` --anonymous-auth=false ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--anonymous-auth` argument is set to `false`. Alternative Audit Method ``` kubectl get pod -nkube-system -lcomponent=kube-apiserver -o=jsonpath='{range .items[*]}{.spec.containers[*].command} {\\}{end}' | grep '--anonymous-auth' | grep -i false ``` If the exit code is '1', `--anonymous-auth` is not set to `false` and the control has failed.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-apiserver/:https://kubernetes.io/docs/admin/authentication/#anonymous-requests", + "DefaultValue": "By default, anonymous access is enabled." + } + ] + }, + { + "Id": "1.2.2", + "Description": "Ensure that the --token-auth-file parameter is not set", + "Checks": [ + "apiserver_no_token_auth_file" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Do not use token based authentication.", + "RationaleStatement": "Token-based authentication utilizes static tokens to authenticate requests to the apiserver. The tokens are stored in clear-text in a file on the apiserver, and cannot be revoked or rotated without restarting the apiserver. Hence, do not use static token-based authentication.", + "ImpactStatement": "You will have to configure and use alternate authentication mechanisms such as certificates. Static token-based authentication cannot be used.", + "RemediationProcedure": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--token-auth-file` argument does not exist. Alternative Audit Method ``` kubectl get pod -nkube-system -lcomponent=kube-apiserver -o=jsonpath='{range .items[*]}{.spec.containers[*].command} {\\}{end}' | grep '--token-auth-file' ``` If the exit code is '0', the `--token-auth-file` argument is present and the control has failed.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/authentication/#static-token-file:https://kubernetes.io/docs/admin/kube-apiserver/", + "DefaultValue": "By default, the `--token-auth-file` argument is not set." + } + ] + }, + { + "Id": "1.2.3", + "Description": "Ensure that the DenyServiceExternalIPs is set", + "Checks": [ + "apiserver_deny_service_external_ips" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "RationaleStatement": "Most users do not need the ability to set the `externalIPs` field for a `Service` at all, and cluster admins should consider disabling this functionality by enabling the `DenyServiceExternalIPs` admission controller. Clusters that do need to allow this functionality should consider using some custom policy to manage its usage.", + "ImpactStatement": "When enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and append the Kubernetes API server flag --enable-admission-plugins with the DenyServiceExternalIPs plugin. Note, the Kubernetes API server flag --enable-admission-plugins takes a comma-delimited list of admission control plugins to be enabled, even if they are in the list of plugins enabled by default. ``` kube-apiserver --enable-admission-plugins=DenyServiceExternalIPs ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that `DenyServiceExternalIPs` exists as a string value in `--enable-admission-plugins`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/:https://kubernetes.io/docs/admin/kube-apiserver/", + "DefaultValue": "By default, the --enable-admission-plugins=DenyServiceExternalIPs argument is not set, and the use of externalIPs is authorized." + } + ] + }, + { + "Id": "1.2.4", + "Description": "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", + "Checks": [ + "apiserver_kubelet_tls_auth" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Enable certificate-based kubelet authentication.", + "RationaleStatement": "The apiserver, by default, does not authenticate itself to the kubelet's HTTPS endpoints. The requests from the apiserver are treated anonymously. You should set up certificate-based kubelet authentication to ensure that the apiserver authenticates itself to kubelets when submitting requests.", + "ImpactStatement": "You require TLS to be configured on apiserver as well as kubelets.", + "RemediationProcedure": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below. ``` --kubelet-client-certificate= --kubelet-client-key= ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments exist and are set as appropriate. Alternative Audit ``` kubectl get pod -nkube-system -lcomponent=kube-apiserver -o=jsonpath='{range .items[*]}{.spec.containers[*].command} {\\}{end}' | grep '--kubelet-client-certificate' ``` If the exit code is '1', the `--kubelet-client-certificate` argument is not present and the control has failed.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-apiserver/:https://kubernetes.io/docs/admin/kubelet-authentication-authorization/:https://kubernetes.io/docs/concepts/cluster-administration/master-node-communication/#apiserver---kubelet", + "DefaultValue": "By default, certificate-based kubelet authentication is not set." + } + ] + }, + { + "Id": "1.2.5", + "Description": "Ensure that the --kubelet-certificate-authority argument is set as appropriate", + "Checks": [ + "apiserver_kubelet_cert_auth" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Verify kubelet's certificate before establishing connection.", + "RationaleStatement": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet’s port-forwarding functionality. These connections terminate at the kubelet’s HTTPS endpoint. By default, the apiserver does not verify the kubelet’s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", + "ImpactStatement": "You require TLS to be configured on apiserver as well as kubelets.", + "RemediationProcedure": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. ``` --kubelet-certificate-authority= ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--kubelet-certificate-authority` argument exists and is set as appropriate. Alternative Audit ``` kubectl get pod -nkube-system -lcomponent=kube-apiserver -o=jsonpath='{range .items[*]}{.spec.containers[*].command} {\\}{end}' | grep '--kubelet-certificate-authority' ``` If the exit code is '1', the `--kubelet-certificate-authority` argument is not present and the control has failed.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-apiserver/:https://kubernetes.io/docs/admin/kubelet-authentication-authorization/:https://kubernetes.io/docs/concepts/cluster-administration/master-node-communication/#apiserver---kubelet", + "DefaultValue": "By default, `--kubelet-certificate-authority` argument is not set."
+ } + ] + }, + { + "Id": "1.2.6", + "Description": "Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "Checks": [ + "apiserver_auth_mode_not_always_allow" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Do not always authorize all requests.", + "RationaleStatement": "The API Server can be configured to allow all requests. This mode should not be used on any production cluster.", + "ImpactStatement": "Only authorized requests will be served.", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. One such example could be as below. ``` --authorization-mode=RBAC ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--authorization-mode` argument exists and is not set to `AlwaysAllow`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://kubernetes.io/docs/admin/authorization/", + "DefaultValue": "By default, `AlwaysAllow` is not enabled." + } + ] + }, + { + "Id": "1.2.7", + "Description": "Ensure that the --authorization-mode argument includes Node", + "Checks": [ + "apiserver_auth_mode_include_node" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Restrict kubelet nodes to reading only objects associated with them.", + "RationaleStatement": "The `Node` authorization mode only allows kubelets to read `Secret`, `ConfigMap`, `PersistentVolume`, and `PersistentVolumeClaim` objects associated with their nodes.", + "ImpactStatement": "None", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`. ``` --authorization-mode=Node,RBAC ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--authorization-mode` argument exists and is set to a value that includes `Node`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-apiserver/:https://kubernetes.io/docs/admin/authorization/node/:https://github.com/kubernetes/kubernetes/pull/46076:https://acotten.com/post/kube17-security", + "DefaultValue": "By default, `Node` authorization is not enabled." + } + ] + }, + { + "Id": "1.2.8", + "Description": "Ensure that the --authorization-mode argument includes RBAC", + "Checks": [ + "apiserver_auth_mode_include_rbac" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Turn on Role Based Access Control.", + "RationaleStatement": "Role Based Access Control (RBAC) allows fine-grained control over the operations that different entities can perform on different objects in the cluster. It is recommended to use the RBAC authorization mode.", + "ImpactStatement": "When RBAC is enabled, you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example: ``` --authorization-mode=Node,RBAC ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--authorization-mode` argument exists and is set to a value that includes `RBAC`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/access-authn-authz/rbac/", + "DefaultValue": "By default, `RBAC` authorization is not enabled." + } + ] + }, + { + "Id": "1.2.9", + "Description": "Ensure that the admission control plugin EventRateLimit is set", + "Checks": [ + "apiserver_event_rate_limit" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Limit the rate at which the API server accepts requests.", + "RationaleStatement": "Using `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time slice. A misbehaving workload could overwhelm and DoS the API Server, making it unavailable. This particularly applies to a multi-tenant cluster, where there might be a small percentage of misbehaving tenants which could have a significant impact on the performance of the cluster overall. Hence, it is recommended to limit the rate of events that the API server will accept.", + "ImpactStatement": "You need to carefully tune limits as per your environment.", + "RemediationProcedure": "Follow the Kubernetes documentation and set the desired limits in a configuration file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters. ``` --enable-admission-plugins=...,EventRateLimit,... --admission-control-config-file= ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--enable-admission-plugins` argument is set to a value that includes `EventRateLimit`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#eventratelimit:https://github.com/staebler/community/blob/9873b632f4d99b5d99c38c9b15fe2f8b93d0a746/contributors/design-proposals/admission_control_event_rate_limit.md", + "DefaultValue": "By default, `EventRateLimit` is not set." + } + ] + }, + { + "Id": "1.2.10", + "Description": "Ensure that the admission control plugin AlwaysAdmit is not set", + "Checks": [ + "apiserver_no_always_admit_plugin" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Do not allow all requests.", + "RationaleStatement": "Setting the admission control plugin `AlwaysAdmit` allows all requests and does not filter any requests. The `AlwaysAdmit` admission controller was deprecated in Kubernetes v1.13. Its behavior was equivalent to turning off all admission controllers.", + "ImpactStatement": "Only requests explicitly allowed by the admission control plugins would be served.", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that if the `--enable-admission-plugins` argument is set, its value does not include `AlwaysAdmit`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#alwaysadmit", + "DefaultValue": "`AlwaysAdmit` is not in the list of default admission plugins." + } + ] + }, + { + "Id": "1.2.11", + "Description": "Ensure that the admission control plugin AlwaysPullImages is set", + "Checks": [ + "apiserver_always_pull_images_plugin" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Always pull images.", + "RationaleStatement": "Setting admission control policy to `AlwaysPullImages` forces every new pod to pull the required images every time. In a multi-tenant cluster, users can be assured that their private images can only be used by those who have the credentials to pull them. Without this admission control policy, once an image has been pulled to a node, any pod from any user can use it simply by knowing the image’s name, without any authorization check against the image ownership. When this plug-in is enabled, images are always pulled prior to starting containers, which means valid credentials are required.", + "ImpactStatement": "Credentials would be required to pull the private images every time. Also, in trusted environments, this might increase load on the network and registry, and decrease speed. This setting could impact offline or isolated clusters, which have images preloaded and do not have access to a registry to pull in-use images; this setting is not appropriate for such clusters.", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`. ``` --enable-admission-plugins=...,AlwaysPullImages,... ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--enable-admission-plugins` argument is set to a value that includes `AlwaysPullImages`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages", + "DefaultValue": "By default, `AlwaysPullImages` is not set." + } + ] + }, + { + "Id": "1.2.12", + "Description": "Ensure that the admission control plugin ServiceAccount is set", + "Checks": [ + "apiserver_service_account_plugin" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "Automate service account management.", + "RationaleStatement": "When you create a pod, if you do not specify a service account, it is automatically assigned the `default` service account in the same namespace. You should create your own service account and let the API server manage its security tokens.", + "ImpactStatement": "None.", + "RemediationProcedure": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--disable-admission-plugins` argument is set to a value that does not include `ServiceAccount`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#serviceaccount:https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + "DefaultValue": "By default, `ServiceAccount` is set." + } + ] + }, + { + "Id": "1.2.13", + "Description": "Ensure that the admission control plugin NamespaceLifecycle is set", + "Checks": [ + "apiserver_namespace_lifecycle_plugin" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "Reject creating objects in a namespace that is undergoing termination.", + "RationaleStatement": "Setting admission control policy to `NamespaceLifecycle` ensures that objects cannot be created in non-existent namespaces, and that namespaces undergoing termination are not used for creating new objects. This is recommended to enforce the integrity of the namespace termination process and also for the availability of new objects.", + "ImpactStatement": "None", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--disable-admission-plugins` argument is set to a value that does not include `NamespaceLifecycle`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#namespacelifecycle", + "DefaultValue": "By default, `NamespaceLifecycle` is set."
+ } + ] + }, + { + "Id": "1.2.14", + "Description": "Ensure that the admission control plugin NodeRestriction is set", + "Checks": [ + "apiserver_node_restriction_plugin" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", + "RationaleStatement": "Using the `NodeRestriction` plug-in ensures that the kubelet is restricted to the `Node` and `Pod` objects that it could modify as defined. Such kubelets will only be allowed to modify their own `Node` API object, and only modify `Pod` API objects that are bound to their node.", + "ImpactStatement": "None", + "RemediationProcedure": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`. ``` --enable-admission-plugins=...,NodeRestriction,... ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--enable-admission-plugins` argument is set to a value that includes `NodeRestriction`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction:https://kubernetes.io/docs/reference/access-authn-authz/node/", + "DefaultValue": "By default, `NodeRestriction` is not set." + } + ] + }, + { + "Id": "1.2.15", + "Description": "Ensure that the --profiling argument is set to false", + "Checks": [ + "apiserver_disable_profiling" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Disable profiling, if not needed.", + "RationaleStatement": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "ImpactStatement": "Profiling information would not be available.", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter. ``` --profiling=false ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--profiling` argument is set to `false`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/", + "DefaultValue": "By default, profiling is enabled." 
+ } + ] + }, + { + "Id": "1.2.16", + "Description": "Ensure that the --audit-log-path argument is set", + "Checks": [ + "apiserver_audit_log_path_set" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", + "RationaleStatement": "Auditing the Kubernetes API Server provides a security-relevant chronological set of records documenting the sequence of activities that have affected the system by individual users, administrators or other components of the system. Even though Kubernetes currently provides only basic audit capabilities, it should be enabled. You can enable it by setting an appropriate audit log path.", + "ImpactStatement": "None", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example: ``` --audit-log-path=/var/log/apiserver/audit.log ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--audit-log-path` argument is set as appropriate.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/:https://github.com/kubernetes/enhancements/issues/22", + "DefaultValue": "By default, auditing is not enabled." + } + ] + }, + { + "Id": "1.2.17", + "Description": "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate", + "Checks": [ + "apiserver_audit_log_maxage_set" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Retain the logs for at least 30 days or as appropriate.", + "RationaleStatement": "Retaining logs for at least 30 days ensures that you can go back in time and investigate or correlate any events. Set your audit log retention period to 30 days or as per your business requirements.", + "ImpactStatement": "None", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days: ``` --audit-log-maxage=30 ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--audit-log-maxage` argument is set to `30` or as appropriate.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/:https://github.com/kubernetes/enhancements/issues/22", + "DefaultValue": "By default, auditing is not enabled."
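A combined sketch for the two audit checks above, again assuming the kubeadm layout and illustrative labels:

```bash
# Sketch: CIS 1.2.16 / 1.2.17 - auditing enabled and retention configured.
flags=$(ps -ef | grep '[k]ube-apiserver')
echo "$flags" | grep -q -- '--audit-log-path=' \
  && echo "PASS: --audit-log-path set" || echo "FAIL: --audit-log-path missing"
echo "$flags" | grep -o -- '--audit-log-maxage=[0-9]*' \
  || echo "FAIL: --audit-log-maxage missing"
```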
+ } + ] + }, + { + "Id": "1.2.18", + "Description": "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate", + "Checks": [ + "apiserver_audit_log_maxbackup_set" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Retain 10 or an appropriate number of old log files.", + "RationaleStatement": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. For example, if you have set the file size to 100 MB and the number of old log files to keep as 10, you would have approximately 1 GB of log data that you could potentially use for your analysis.", + "ImpactStatement": "None", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value. ``` --audit-log-maxbackup=10 ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--audit-log-maxbackup` argument is set to `10` or as appropriate.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/:https://github.com/kubernetes/enhancements/issues/22", + "DefaultValue": "By default, auditing is not enabled." + } + ] + }, + { + "Id": "1.2.19", + "Description": "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate", + "Checks": [ + "apiserver_audit_log_maxsize_set" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Rotate log files on reaching 100 MB or as appropriate.", + "RationaleStatement": "Kubernetes automatically rotates the log files. Retaining old log files ensures that you would have sufficient log data available for carrying out any investigation or correlation. If you have set the file size to 100 MB and the number of old log files to keep as 10, you would have approximately 1 GB of log data that you could potentially use for your analysis.", + "ImpactStatement": "None", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB: ``` --audit-log-maxsize=100 ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--audit-log-maxsize` argument is set to `100` or as appropriate.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/:https://github.com/kubernetes/enhancements/issues/22", + "DefaultValue": "By default, auditing is not enabled."
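Since the four audit-log flags (1.2.16 through 1.2.19) are normally set together, it may help to see them side by side. A hypothetical excerpt of the kube-apiserver static-pod manifest, using the benchmark's example values rather than mandated ones:

```bash
# Hypothetical entries under the kube-apiserver container's command: list
# in /etc/kubernetes/manifests/kube-apiserver.yaml.
#   - --audit-log-path=/var/log/apiserver/audit.log   # 1.2.16: enable auditing
#   - --audit-log-maxage=30                           # 1.2.17: days to retain
#   - --audit-log-maxbackup=10                        # 1.2.18: rotated files kept
#   - --audit-log-maxsize=100                         # 1.2.19: rotate at 100 MB
# One-shot view of whatever is currently configured:
grep -- '--audit-log' /etc/kubernetes/manifests/kube-apiserver.yaml
```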
+ } + ] + }, + { + "Id": "1.2.20", + "Description": "Ensure that the --request-timeout argument is set as appropriate", + "Checks": [ + "apiserver_request_timeout_set" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Set global request timeout for API server requests as appropriate.", + "RationaleStatement": "Setting global request timeout allows extending the API server request timeout limit to a duration appropriate to the user's connection speed. By default, it is set to 60 seconds, which might be problematic on slower connections, making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. However, setting this timeout limit too large can exhaust the API server resources, making it prone to denial-of-service attacks. Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed.", + "ImpactStatement": "None", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. For example, ``` --request-timeout=300s ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--request-timeout` argument is either not set or set to an appropriate value.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://github.com/kubernetes/kubernetes/pull/51415", + "DefaultValue": "By default, `--request-timeout` is set to 60 seconds." + } + ] + }, + { + "Id": "1.2.21", + "Description": "Ensure that the --service-account-lookup argument is set to true", + "Checks": [ + "apiserver_service_account_lookup_true" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Validate service account before validating token.", + "RationaleStatement": "If `--service-account-lookup` is not enabled, the apiserver only verifies that the authentication token is valid, and does not validate that the service account token mentioned in the request is actually present in etcd. This allows using a service account token even after the corresponding service account is deleted. This is an example of a time-of-check to time-of-use security issue.", + "ImpactStatement": "None", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter. ``` --service-account-lookup=true ``` Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that if the `--service-account-lookup` argument exists it is set to `true`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://github.com/kubernetes/kubernetes/issues/24167:https://en.wikipedia.org/wiki/Time-of-check_to_time-of-use", + "DefaultValue": "By default, `--service-account-lookup` argument is set to `true`."
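Both of these checks tolerate an absent flag, since the defaults are acceptable; a sketch reflecting that, with illustrative labels:

```bash
# Sketch: CIS 1.2.20 / 1.2.21. An absent --request-timeout is acceptable
# (the 60s default applies); --service-account-lookup must not be false.
flags=$(ps -ef | grep '[k]ube-apiserver')
echo "$flags" | grep -o -- '--request-timeout=[^ ]*' \
  || echo "INFO: default 60s request timeout in effect"
echo "$flags" | grep -q -- '--service-account-lookup=false' \
  && echo "FAIL: service account lookup disabled" \
  || echo "PASS: service account lookup enabled (explicitly or by default)"
```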
+ } + ] + }, + { + "Id": "1.2.22", + "Description": "Ensure that the --service-account-key-file argument is set as appropriate", + "Checks": [ + "apiserver_service_account_key_file_set" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Explicitly set a service account public key file for service accounts on the apiserver.", + "RationaleStatement": "By default, if no `--service-account-key-file` is specified to the apiserver, it uses the private key from the TLS serving certificate to verify service account tokens. To ensure that the keys for service account tokens could be rotated as needed, a separate public/private key pair should be used for signing service account tokens. Hence, the public key should be specified to the apiserver with `--service-account-key-file`.", + "ImpactStatement": "The corresponding private key must be provided to the controller manager. You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", + "RemediationProcedure": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts: ``` --service-account-key-file= ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--service-account-key-file` argument exists and is set as appropriate.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://github.com/kubernetes/kubernetes/issues/24167", + "DefaultValue": "By default, `--service-account-key-file` argument is not set." + } + ] + }, + { + "Id": "1.2.23", + "Description": "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate", + "Checks": [ + "apiserver_etcd_tls_config" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "etcd should be configured to make use of TLS encryption for client connections.", + "RationaleStatement": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using a client certificate and key.", + "ImpactStatement": "TLS and client certificate authentication must be configured for etcd.", + "RemediationProcedure": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters. 
``` --etcd-certfile= --etcd-keyfile= ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--etcd-certfile` and `--etcd-keyfile` arguments exist and are set as appropriate.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/", + "DefaultValue": "By default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set." + } + ] + }, + { + "Id": "1.2.24", + "Description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "Checks": [ + "apiserver_tls_config" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Set up a TLS connection on the API server.", + "RationaleStatement": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic.", + "ImpactStatement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", + "RemediationProcedure": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters. ``` --tls-cert-file= --tls-private-key-file= ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and are set as appropriate.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://github.com/kelseyhightower/docker-kubernetes-tls-guide", + "DefaultValue": "By default, `--tls-cert-file` and `--tls-private-key-file` are presented and created for use." + } + ] + }, + { + "Id": "1.2.25", + "Description": "Ensure that the --client-ca-file argument is set as appropriate", + "Checks": [ + "apiserver_client_ca_file_set" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Set up a TLS connection on the API server.", + "RationaleStatement": "API server communication contains sensitive parameters that should remain encrypted in transit. Configure the API server to serve only HTTPS traffic. If the `--client-ca-file` argument is set, any request presenting a client certificate signed by one of the authorities in the `client-ca-file` is authenticated with an identity corresponding to the CommonName of the client certificate.", + "ImpactStatement": "TLS and client certificate authentication must be configured for your Kubernetes cluster deployment.", + "RemediationProcedure": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.
``` --client-ca-file= ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--client-ca-file` argument exists and is set as appropriate.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://github.com/kelseyhightower/docker-kubernetes-tls-guide", + "DefaultValue": "By default, `--client-ca-file` argument is not set." + } + ] + }, + { + "Id": "1.2.26", + "Description": "Ensure that the --etcd-cafile argument is set as appropriate", + "Checks": [ + "apiserver_etcd_cafile_set" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "etcd should be configured to make use of TLS encryption for client connections.", + "RationaleStatement": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be protected by client authentication. This requires the API server to identify itself to the etcd server using an SSL Certificate Authority file.", + "ImpactStatement": "TLS and client certificate authentication must be configured for etcd.", + "RemediationProcedure": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter. ``` --etcd-cafile= ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--etcd-cafile` argument exists and is set as appropriate.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/", + "DefaultValue": "By default, `--etcd-cafile` is not set." + } + ] + }, + { + "Id": "1.2.27", + "Description": "Ensure that the --encryption-provider-config argument is set as appropriate", + "Checks": [ + "apiserver_encryption_provider_config_set" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Encrypt etcd key-value store.", + "RationaleStatement": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted at rest to avoid any disclosures.", + "ImpactStatement": "None", + "RemediationProcedure": "Follow the Kubernetes documentation and configure an `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file: ``` --encryption-provider-config= ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--encryption-provider-config` argument is set to an `EncryptionConfig` file.
Additionally, ensure that the `EncryptionConfig` file has all the desired `resources` covered, especially any secrets.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/:https://acotten.com/post/kube17-security:https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://github.com/kubernetes/enhancements/issues/92", + "DefaultValue": "By default, `--encryption-provider-config` is not set." + } + ] + }, + { + "Id": "1.2.28", + "Description": "Ensure that encryption providers are appropriately configured", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Where `etcd` encryption is used, appropriate providers should be configured.", + "RationaleStatement": "Where `etcd` encryption is used, it is important to ensure that the appropriate set of encryption providers is used. Currently, `aescbc`, `kms`, and `secretbox` are likely to be appropriate options.", + "ImpactStatement": "None", + "RemediationProcedure": "Follow the Kubernetes documentation and configure an `EncryptionConfig` file. In this file, choose `aescbc`, `kms`, or `secretbox` as the encryption provider.", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Get the `EncryptionConfig` file set for `--encryption-provider-config` argument. Verify that `aescbc`, `kms`, or `secretbox` is set as the encryption provider for all the desired `resources`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/:https://acotten.com/post/kube17-security:https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://github.com/kubernetes/enhancements/issues/92:https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#providers", + "DefaultValue": "By default, no encryption provider is set." + } + ] + }, + { + "Id": "1.2.29", + "Description": "Ensure that the API Server only makes use of Strong Cryptographic Ciphers", + "Checks": [ + "apiserver_strong_ciphers_only" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "RationaleStatement": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them. By default Kubernetes supports a number of TLS cipher suites including some that have security concerns, weakening the protection provided.", + "ImpactStatement": "API server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.", + "RemediationProcedure": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.
``` --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the `--tls-cipher-suites` argument is set as outlined in the remediation procedure above.", + "AdditionalInformation": "Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_RC4_128_SHA.", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/:https://github.com/ssllabs/research/wiki/SSL-and-TLS-Deployment-Best-Practices#23-use-secure-cipher-suites", + "DefaultValue": "By default, the Kubernetes API server supports a wide range of TLS ciphers." + } + ] + }, + { + "Id": "1.2.30", + "Description": "Ensure that the --service-account-extend-token-expiration parameter is set to false", + "Checks": [], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.2 API Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "By default, Kubernetes extends service account token lifetimes to one year to aid in the transition from legacy token settings.", + "RationaleStatement": "This default setting is not ideal for security as it ignores other settings related to maximum token lifetime and means that a lost or stolen credential could be valid for an extended period of time.", + "ImpactStatement": "Disabling this setting means that the service account token expiry set in the cluster will be enforced, and service account tokens will expire at the end of that time frame.", + "RemediationProcedure": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the --service-account-extend-token-expiration parameter to false.
``` --service-account-extend-token-expiration=false ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-apiserver ``` Verify that the --service-account-extend-token-expiration argument is set to false.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/", + "DefaultValue": "By default, this parameter is set to true." + } + ] + }, + { + "Id": "1.3.1", + "Description": "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate", + "Checks": [ + "controllermanager_garbage_collection" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.3 Controller Manager", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Activate garbage collector on pod termination, as appropriate.", + "RationaleStatement": "Garbage collection is important to ensure sufficient resource availability and to avoid degraded performance and availability. In the worst case, the system might crash or just be unusable for a long period of time. The current setting for garbage collection is 12,500 terminated pods, which might be too high for your system to sustain. Based on your system resources and tests, choose an appropriate threshold value to activate garbage collection.", + "ImpactStatement": "None", + "RemediationProcedure": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example: ``` --terminated-pod-gc-threshold=10 ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-controller-manager ``` Verify that the `--terminated-pod-gc-threshold` argument is set as appropriate.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-controller-manager/:https://github.com/kubernetes/kubernetes/issues/28484", + "DefaultValue": "By default, `--terminated-pod-gc-threshold` is set to `12500`." + } + ] + }, + { + "Id": "1.3.2", + "Description": "Ensure that the --profiling argument is set to false", + "Checks": [ + "controllermanager_disable_profiling" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.3 Controller Manager", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Disable profiling, if not needed.", + "RationaleStatement": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "ImpactStatement": "Profiling information would not be available.", + "RemediationProcedure": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.
``` --profiling=false ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-controller-manager ``` Verify that the `--profiling` argument is set to `false`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-controller-manager/:https://github.com/kubernetes/community/blob/master/contributors/devel/profiling.md", + "DefaultValue": "By default, profiling is enabled." + } + ] + }, + { + "Id": "1.3.3", + "Description": "Ensure that the --use-service-account-credentials argument is set to true", + "Checks": [ + "controllermanager_service_account_credentials" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.3 Controller Manager", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Use individual service account credentials for each controller.", + "RationaleStatement": "The controller manager creates a service account per controller in the `kube-system` namespace, generates a credential for it, and builds a dedicated API client with that service account credential for each controller loop to use. Setting the `--use-service-account-credentials` to `true` runs each control loop within the controller manager using a separate service account credential. When used in combination with RBAC, this ensures that the control loops run with the minimum permissions required to perform their intended tasks.", + "ImpactStatement": "Whatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup. If using other authorization methods (ABAC, Webhook, etc.), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles).", + "RemediationProcedure": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter. ``` --use-service-account-credentials=true ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-controller-manager ``` Verify that the `--use-service-account-credentials` argument is set to `true`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-controller-manager/:https://kubernetes.io/docs/admin/service-accounts-admin/:https://github.com/kubernetes/kubernetes/blob/release-1.6/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml:https://github.com/kubernetes/kubernetes/blob/release-1.6/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml:https://kubernetes.io/docs/admin/authorization/rbac/#controller-roles", + "DefaultValue": "By default, `--use-service-account-credentials` is set to false."
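The two controller-manager checks above follow the same pattern as the API server ones; a minimal sketch, assuming the kubeadm static-pod layout and illustrative labels:

```bash
# Sketch: CIS 1.3.2 / 1.3.3 - controller manager profiling off and
# per-controller service account credentials on.
flags=$(ps -ef | grep '[k]ube-controller-manager')
echo "$flags" | grep -q -- '--profiling=false' \
  && echo "PASS: profiling disabled" || echo "FAIL: --profiling not set to false"
echo "$flags" | grep -q -- '--use-service-account-credentials=true' \
  && echo "PASS: per-controller credentials in use" \
  || echo "FAIL: --use-service-account-credentials not true"
```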
+ } + ] + }, + { + "Id": "1.3.4", + "Description": "Ensure that the --service-account-private-key-file argument is set as appropriate", + "Checks": [ + "controllermanager_service_account_private_key_file" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.3 Controller Manager", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Explicitly set a service account private key file for service accounts on the controller manager.", + "RationaleStatement": "To ensure that keys for service account tokens can be rotated as needed, a separate public/private key pair should be used for signing service account tokens. The private key should be specified to the controller manager with `--service-account-private-key-file` as appropriate.", + "ImpactStatement": "You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.", + "RemediationProcedure": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts. ``` --service-account-private-key-file= ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-controller-manager ``` Verify that the `--service-account-private-key-file` argument is set as appropriate.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-controller-manager/", + "DefaultValue": "By default, `--service-account-private-key-file` is not set." + } + ] + }, + { + "Id": "1.3.5", + "Description": "Ensure that the --root-ca-file argument is set as appropriate", + "Checks": [ + "controllermanager_root_ca_file_set" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.3 Controller Manager", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Allow pods to verify the API server's serving certificate before establishing connections.", + "RationaleStatement": "Processes running within pods that need to contact the API server must verify the API server's serving certificate. Failing to do so could leave them subject to man-in-the-middle attacks. Providing the root certificate for the API server's serving certificate to the controller manager with the `--root-ca-file` argument allows the controller manager to inject the trusted bundle into pods so that they can verify TLS connections to the API server.", + "ImpactStatement": "You need to set up and maintain the root certificate authority file.", + "RemediationProcedure": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file. ``` --root-ca-file= ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-controller-manager ``` Verify that the `--root-ca-file` argument exists and is set to a certificate bundle file containing the root certificate for the API server's serving certificate.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-controller-manager/:https://github.com/kubernetes/kubernetes/issues/11000", + "DefaultValue": "By default, `--root-ca-file` is not set."
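For 1.3.4 and 1.3.5 it is worth checking not only that the flags are present but that the referenced files exist on disk. A hedged sketch (the PASS/FAIL labels are illustrative):

```bash
# Sketch: CIS 1.3.4 / 1.3.5 - the controller manager must point at a
# service-account signing key and a root CA bundle, and both files must exist.
flags=$(ps -ef | grep '[k]ube-controller-manager')
for opt in service-account-private-key-file root-ca-file; do
  path=$(echo "$flags" | grep -o -- "--${opt}=[^ ]*" | cut -d= -f2)
  if [ -n "$path" ] && [ -f "$path" ]; then
    echo "PASS: --${opt} -> ${path}"
  else
    echo "FAIL: --${opt} missing or file not found"
  fi
done
```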
+ } + ] + }, + { + "Id": "1.3.6", + "Description": "Ensure that the RotateKubeletServerCertificate argument is set to true", + "Checks": [ + "controllermanager_rotate_kubelet_server_cert" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.3 Controller Manager", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Enable kubelet server certificate rotation on controller-manager.", + "RationaleStatement": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates, thus addressing availability in the CIA security triad. Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", + "ImpactStatement": "None", + "RemediationProcedure": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`. ``` --feature-gates=RotateKubeletServerCertificate=true ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-controller-manager ``` Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#approval-controller:https://github.com/kubernetes/features/issues/267:https://github.com/kubernetes/kubernetes/pull/45059:https://kubernetes.io/docs/admin/kube-controller-manager/", + "DefaultValue": "By default, `RotateKubeletServerCertificate` is set to `true`; this recommendation verifies that it has not been disabled." + } + ] + }, + { + "Id": "1.3.7", + "Description": "Ensure that the --bind-address argument is set to 127.0.0.1", + "Checks": [ + "controllermanager_bind_address" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.3 Controller Manager", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", + "RationaleStatement": "The Controller Manager API service, which runs on port 10252/TCP by default, is used for health and metrics information and is available without authentication or encryption.
As such, it should only be bound to a localhost interface to minimize the cluster's attack surface.", + "ImpactStatement": "None", + "RemediationProcedure": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter.", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-controller-manager ``` Verify that the `--bind-address` argument is set to 127.0.0.1.", + "AdditionalInformation": "Although the current Kubernetes documentation site says that `--address` is deprecated in favour of `--bind-address`, kubeadm 1.11 still makes use of `--address`.", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/", + "DefaultValue": "By default, the `--bind-address` parameter is set to 0.0.0.0." + } + ] + }, + { + "Id": "1.4.1", + "Description": "Ensure that the --profiling argument is set to false", + "Checks": [ + "scheduler_profiling" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.4 Scheduler", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Disable profiling, if not needed.", + "RationaleStatement": "Profiling allows for the identification of specific performance bottlenecks. It generates a significant amount of program data that could potentially be exploited to uncover system and program details. If you are not experiencing any bottlenecks and do not need the profiler for troubleshooting purposes, it is recommended to turn it off to reduce the potential attack surface.", + "ImpactStatement": "Profiling information would not be available.", + "RemediationProcedure": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and set the below parameter. ``` --profiling=false ```", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-scheduler ``` Verify that the `--profiling` argument is set to `false`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-scheduler/:https://github.com/kubernetes/community/blob/master/contributors/devel/profiling.md", + "DefaultValue": "By default, profiling is enabled." + } + ] + }, + { + "Id": "1.4.2", + "Description": "Ensure that the --bind-address argument is set to 127.0.0.1", + "Checks": [ + "scheduler_bind_address" + ], + "Attributes": [ + { + "Section": "1 Control Plane Components", + "SubSection": "1.4 Scheduler", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Do not bind the scheduler service to non-loopback insecure addresses.", + "RationaleStatement": "The Scheduler API service, which runs on port 10251/TCP by default, is used for health and metrics information and is available without authentication or encryption.
As such, it should only be bound to a localhost interface to minimize the cluster's attack surface.", + "ImpactStatement": "None", + "RemediationProcedure": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter.", + "AuditProcedure": "Run the following command on the Control Plane node: ``` ps -ef | grep kube-scheduler ``` Verify that the `--bind-address` argument is set to 127.0.0.1.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/", + "DefaultValue": "By default, the `--bind-address` parameter is set to 0.0.0.0." + } + ] + }, + { + "Id": "2.1", + "Description": "Ensure that the --cert-file and --key-file arguments are set as appropriate", + "Checks": [ + "etcd_tls_encryption" + ], + "Attributes": [ + { + "Section": "2 etcd", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Configure TLS encryption for the etcd service.", + "RationaleStatement": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit.", + "ImpactStatement": "Only client connections over TLS would be served.", + "RemediationProcedure": "Follow the etcd service documentation and configure TLS encryption. Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters. ``` --cert-file= --key-file= ```", + "AuditProcedure": "Run the following command on the etcd server node ``` ps -ef | grep etcd ``` Verify that the `--cert-file` and the `--key-file` arguments are set as appropriate.", + "AdditionalInformation": "", + "References": "https://coreos.com/etcd/docs/latest/op-guide/security.html:https://kubernetes.io/docs/admin/etcd/", + "DefaultValue": "By default, TLS encryption is not set." + } + ] + }, + { + "Id": "2.2", + "Description": "Ensure that the --client-cert-auth argument is set to true", + "Checks": [ + "etcd_client_cert_auth" + ], + "Attributes": [ + { + "Section": "2 etcd", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Enable client authentication on etcd service.", + "RationaleStatement": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", + "ImpactStatement": "All clients attempting to access the etcd server will require a valid client certificate.", + "RemediationProcedure": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. ``` --client-cert-auth=true ```", + "AuditProcedure": "Run the following command on the etcd server node: ``` ps -ef | grep etcd ``` Verify that the `--client-cert-auth` argument is set to `true`.", + "AdditionalInformation": "", + "References": "https://coreos.com/etcd/docs/latest/op-guide/security.html:https://kubernetes.io/docs/admin/etcd/:https://coreos.com/etcd/docs/latest/op-guide/configuration.html#client-cert-auth", + "DefaultValue": "By default, the etcd service can be queried by unauthenticated clients."
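For the etcd checks, note that a plain `ps -ef | grep etcd` also matches the API server's `--etcd-*` flags, so a sketch might read the target process's command line directly (assuming a single local etcd, as on a kubeadm control plane; labels are illustrative):

```bash
# Sketch: CIS 2.1 / 2.2 - etcd serves TLS and requires client certificates.
pid=$(pgrep -x etcd) || { echo "etcd not running locally"; exit 1; }
flags=$(tr '\0' ' ' < "/proc/${pid}/cmdline")
for opt in cert-file key-file; do
  # Leading space keeps --cert-file from also matching --peer-cert-file.
  echo "$flags" | grep -q -- " --${opt}=" \
    && echo "PASS: --${opt} set" || echo "FAIL: --${opt} missing"
done
echo "$flags" | grep -q -- '--client-cert-auth=true' \
  && echo "PASS: client cert auth enforced" || echo "FAIL: client cert auth not enforced"
```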
+ } + ] + }, + { + "Id": "2.3", + "Description": "Ensure that the --auto-tls argument is not set to true", + "Checks": [ + "etcd_no_auto_tls" + ], + "Attributes": [ + { + "Section": "2 etcd", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Do not use self-signed certificates for TLS.", + "RationaleStatement": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should not be available to unauthenticated clients. You should enable the client authentication via valid certificates to secure the access to the etcd service.", + "ImpactStatement": "Clients will not be able to use self-signed certificates for TLS.", + "RemediationProcedure": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`. ``` --auto-tls=false ```", + "AuditProcedure": "Run the following command on the etcd server node: ``` ps -ef | grep etcd ``` Verify that if the `--auto-tls` argument exists, it is not set to `true`.", + "AdditionalInformation": "", + "References": "https://coreos.com/etcd/docs/latest/op-guide/security.html:https://kubernetes.io/docs/admin/etcd/:https://coreos.com/etcd/docs/latest/op-guide/configuration.html#auto-tls", + "DefaultValue": "By default, `--auto-tls` is set to `false`." + } + ] + }, + { + "Id": "2.4", + "Description": "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", + "Checks": [ + "etcd_peer_tls_config" + ], + "Attributes": [ + { + "Section": "2 etcd", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "etcd should be configured to make use of TLS encryption for peer connections.", + "RationaleStatement": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted in transit and also amongst peers in the etcd clusters.", + "ImpactStatement": "etcd cluster peers would need to set up TLS for their communication.", + "RemediationProcedure": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster. Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters. ``` --peer-cert-file= --peer-key-file= ```", + "AuditProcedure": "Run the following command on the etcd server node: ``` ps -ef | grep etcd ``` Verify that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate. **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "AdditionalInformation": "", + "References": "https://coreos.com/etcd/docs/latest/op-guide/security.html:https://kubernetes.io/docs/admin/etcd/", + "DefaultValue": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, peer communication over TLS is not configured."
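A companion sketch for the auto-TLS and peer-TLS checks, under the same single-local-etcd assumption:

```bash
# Sketch: CIS 2.3 / 2.4 - no auto-generated self-signed TLS, peer TLS set.
# Peer checks only apply to multi-node etcd clusters.
pid=$(pgrep -x etcd) || { echo "etcd not running locally"; exit 1; }
flags=$(tr '\0' ' ' < "/proc/${pid}/cmdline")
echo "$flags" | grep -q -- '--auto-tls=true' \
  && echo "FAIL: --auto-tls=true (self-signed certs)" || echo "PASS: auto TLS not enabled"
for opt in peer-cert-file peer-key-file; do
  echo "$flags" | grep -q -- " --${opt}=" \
    && echo "PASS: --${opt} set" || echo "FAIL: --${opt} missing (N/A for single-node etcd)"
done
```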
+ } + ] + }, + { + "Id": "2.5", + "Description": "Ensure that the --peer-client-cert-auth argument is set to true", + "Checks": [ + "etcd_peer_client_cert_auth" + ], + "Attributes": [ + { + "Section": "2 etcd", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "etcd should be configured for peer authentication.", + "RationaleStatement": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster.", + "ImpactStatement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", + "RemediationProcedure": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. ``` --peer-client-cert-auth=true ```", + "AuditProcedure": "Run the following command on the etcd server node: ``` ps -ef | grep etcd ``` Verify that the `--peer-client-cert-auth` argument is set to `true`. **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "AdditionalInformation": "", + "References": "https://coreos.com/etcd/docs/latest/op-guide/security.html:https://kubernetes.io/docs/admin/etcd/:https://coreos.com/etcd/docs/latest/op-guide/configuration.html#peer-client-cert-auth", + "DefaultValue": "**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-client-cert-auth` argument is set to `false`." + } + ] + }, + { + "Id": "2.6", + "Description": "Ensure that the --peer-auto-tls argument is not set to true", + "Checks": [ + "etcd_no_peer_auto_tls" + ], + "Attributes": [ + { + "Section": "2 etcd", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", + "RationaleStatement": "etcd is a highly-available key value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be accessible only by authenticated etcd peers in the etcd cluster. Hence, do not use self-signed certificates for authentication.", + "ImpactStatement": "All peers attempting to communicate with the etcd server will require a valid client certificate for authentication.", + "RemediationProcedure": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`. ``` --peer-auto-tls=false ```", + "AuditProcedure": "Run the following command on the etcd server node: ``` ps -ef | grep etcd ``` Verify that if the `--peer-auto-tls` argument exists, it is not set to `true`. **Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.", + "AdditionalInformation": "", + "References": "https://coreos.com/etcd/docs/latest/op-guide/security.html:https://kubernetes.io/docs/admin/etcd/:https://coreos.com/etcd/docs/latest/op-guide/configuration.html#peer-auto-tls", + "DefaultValue": "**Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable. By default, `--peer-auto-tls` argument is set to `false`." + } + ] + }, + { + "Id": "2.7", + "Description": "Ensure that a unique Certificate Authority is used for etcd", + "Checks": [ + "etcd_unique_ca" + ], + "Attributes": [ + { + "Section": "2 etcd", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "Use a different certificate authority for etcd from the one used for Kubernetes.", + "RationaleStatement": "etcd is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. Its access should be restricted to specifically designated clients and peers only. Authentication to etcd is based on whether the certificate presented was issued by a trusted certificate authority. There is no checking of certificate attributes such as common name or subject alternative name. As such, if any attackers were able to gain access to any certificate issued by the trusted certificate authority, they would be able to gain full access to the etcd database.", + "ImpactStatement": "Additional management of the certificates and keys for the dedicated certificate authority will be required.", + "RemediationProcedure": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service. Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. ``` --trusted-ca-file= ```", + "AuditProcedure": "Review the CA used by the etcd environment and ensure that it does not match the CA certificate file used for the management of the overall Kubernetes cluster. Run the following command on the master node: ``` ps -ef | grep etcd ``` Note the file referenced by the `--trusted-ca-file` argument. Run the following command on the master node: ``` ps -ef | grep apiserver ``` Verify that the file referenced by the `--client-ca-file` for apiserver is different from the `--trusted-ca-file` used by etcd.", + "AdditionalInformation": "", + "References": "https://coreos.com/etcd/docs/latest/op-guide/security.html", + "DefaultValue": "By default, no etcd certificate is created and used." + } + ] + }, + { + "Id": "3.1.1", + "Description": "Client certificate authentication should not be used for users", + "Checks": [], + "Attributes": [ + { + "Section": "3 Control Plane Configuration", + "SubSection": "3.1 Authentication and Authorization", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Kubernetes provides the option to use client certificates for user authentication. However as there is no way to revoke these certificates when a user leaves an organization or loses their credential, they are not suitable for this purpose. It is not possible to fully disable client certificate use within a cluster as it is used for component to component authentication.", + "RationaleStatement": "With any authentication mechanism the ability to revoke credentials if they are compromised or no longer required, is a key control. 
Kubernetes client certificate authentication does not allow for this due to a lack of support for certificate revocation.", + "ImpactStatement": "External mechanisms for authentication generally require additional software to be deployed.", + "RemediationProcedure": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of client certificates.", + "AuditProcedure": "Review user access to the cluster and ensure that users are not making use of Kubernetes client certificate authentication.", + "AdditionalInformation": "The lack of certificate revocation was flagged as a high-risk issue in the recent Kubernetes security audit. Without this feature, client certificate authentication is not suitable for end users.", + "References": "", + "DefaultValue": "Client certificate authentication is enabled by default." + } + ] + }, + { + "Id": "3.1.2", + "Description": "Service account token authentication should not be used for users", + "Checks": [], + "Attributes": [ + { + "Section": "3 Control Plane Configuration", + "SubSection": "3.1 Authentication and Authorization", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Kubernetes provides service account tokens which are intended for use by workloads running in the Kubernetes cluster, for authentication to the API server. These tokens are not designed for use by end-users and do not provide for features such as revocation or expiry, making them insecure. A newer version of the feature (Bound service account token volumes) does introduce expiry but still does not allow for specific revocation.", + "RationaleStatement": "With any authentication mechanism, the ability to revoke credentials if they are compromised or no longer required is a key control. Service account token authentication does not allow for this due to the use of JWT tokens as an underlying technology.", + "ImpactStatement": "External mechanisms for authentication generally require additional software to be deployed.", + "RemediationProcedure": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of service account tokens.", + "AuditProcedure": "Review user access to the cluster and ensure that users are not making use of service account token authentication.", + "AdditionalInformation": "", + "References": "", + "DefaultValue": "Service account token authentication is enabled by default." + } + ] + }, + { + "Id": "3.1.3", + "Description": "Bootstrap token authentication should not be used for users", + "Checks": [], + "Attributes": [ + { + "Section": "3 Control Plane Configuration", + "SubSection": "3.1 Authentication and Authorization", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Kubernetes provides bootstrap tokens, which are intended for use by new nodes joining the cluster. These tokens are not designed for use by end-users; they are specifically designed for bootstrapping new nodes, not for general authentication.", + "RationaleStatement": "Bootstrap tokens are not intended for use as a general authentication mechanism and impose constraints on user and group naming that do not facilitate good RBAC design.
They also cannot be used with MFA, resulting in a weak authentication mechanism being available.", + "ImpactStatement": "External mechanisms for authentication generally require additional software to be deployed.", + "RemediationProcedure": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of bootstrap tokens.", + "AuditProcedure": "Review user access to the cluster and ensure that users are not making use of bootstrap token authentication.", + "AdditionalInformation": "", + "References": "", + "DefaultValue": "Bootstrap token authentication is not enabled by default and requires an API server parameter to be set." + } + ] + }, + { + "Id": "3.2.1", + "Description": "Ensure that a minimal audit policy is created", + "Checks": [], + "Attributes": [ + { + "Section": "3 Control Plane Configuration", + "SubSection": "3.2 Logging", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", + "RationaleStatement": "Logging is an important detective control for all systems, to detect potential unauthorised access.", + "ImpactStatement": "Audit logs will be created on the master nodes, which will consume disk space. Care should be taken to avoid generating overly large volumes of log information, as this could impact the availability of the cluster nodes.", + "RemediationProcedure": "Create an audit policy file for your cluster.", + "AuditProcedure": "Run the following command on one of the cluster master nodes: ``` ps -ef | grep kube-apiserver ``` Verify that the `--audit-policy-file` flag is set. Review the contents of the file specified and ensure that it contains a valid audit policy.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/tasks/debug-application-cluster/audit/", + "DefaultValue": "Unless the `--audit-policy-file` flag is specified, no auditing will be carried out." + } + ] + }, + { + "Id": "3.2.2", + "Description": "Ensure that the audit policy covers key security concerns", + "Checks": [], + "Attributes": [ + { + "Section": "3 Control Plane Configuration", + "SubSection": "3.2 Logging", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "Ensure that the audit policy created for the cluster covers key security concerns.", + "RationaleStatement": "Security audit logs should cover access and modification of key resources in the cluster, to enable them to form an effective part of a security environment.", + "ImpactStatement": "Increasing audit logging will consume resources on the nodes or other log destination.", + "RemediationProcedure": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", + "AuditProcedure": "Review the audit policy provided for the cluster and ensure that it covers at least the following areas: - Access to Secrets managed by the cluster. Care should be taken to only log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in order to avoid the risk of logging sensitive data. - Modification of `pod` and `deployment` objects. - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
For most requests, minimally logging at the Metadata level is recommended (the most basic level of logging).", + "AdditionalInformation": "", + "References": "https://github.com/k8scop/k8s-security-dashboard/blob/master/configs/kubernetes/adv-audit.yaml:https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy:https://github.com/kubernetes/kubernetes/blob/master/cluster/gce/gci/configure-helper.sh#L735", + "DefaultValue": "By default Kubernetes clusters do not log audit information." + } + ] + }, + { + "Id": "4.1.1", + "Description": "Ensure that the kubelet service file permissions are set to 600 or more restrictive", + "Checks": [ + "kubelet_service_file_permissions" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.1 Worker Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", + "RationaleStatement": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on each worker node. For example, ``` chmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf ```", + "AuditProcedure": "Automated AAC auditing has been modified to allow CIS-CAT to input a variable for the location of the kubelet service config file. Please set $kubelet_service_config= based on the file location on your system, for example: ``` export kubelet_service_config=/etc/systemd/system/kubelet.service.d/kubeadm.conf ``` To perform the audit manually: Run the below command (based on the file location on your system) on each worker node. For example, ``` stat -c %a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf ``` Verify that the permissions are `600` or more restrictive.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kubelet/:https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#44-joining-your-nodes:https://kubernetes.io/docs/admin/kubeadm/#kubelet-drop-in", + "DefaultValue": "By default, the `kubelet` service file has permissions of `640`." + } + ] + }, + { + "Id": "4.1.2", + "Description": "Ensure that the kubelet service file ownership is set to root:root", + "Checks": [ + "kubelet_service_file_ownership_root" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.1 Worker Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", + "RationaleStatement": "The `kubelet` service file controls various parameters that set the behavior of the `kubelet` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on each worker node. For example, ``` chown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf ```", + "AuditProcedure": "Automated AAC auditing has been modified to allow CIS-CAT to input a variable for the location of the kubelet service config file.
Please set $kubelet_service_config= based on the file location on your system, for example: ``` export kubelet_service_config=/etc/systemd/system/kubelet.service.d/kubeadm.conf ``` To perform the audit manually: Run the below command (based on the file location on your system) on each worker node. For example, ``` stat -c %U:%G /etc/systemd/system/kubelet.service.d/10-kubeadm.conf ``` Verify that the ownership is set to `root:root`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kubelet/:https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#44-joining-your-nodes:https://kubernetes.io/docs/admin/kubeadm/#kubelet-drop-in", + "DefaultValue": "By default, `kubelet` service file ownership is set to `root:root`." + } + ] + }, + { + "Id": "4.1.3", + "Description": "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", + "Checks": [], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.1 Worker Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", + "RationaleStatement": "The `kube-proxy` kubeconfig file controls various parameters of the `kube-proxy` service in the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system. It is possible to run `kube-proxy` with the kubeconfig parameters configured as a Kubernetes ConfigMap instead of a file. In this case, there is no proxy kubeconfig file.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on each worker node. For example, ``` chmod 600 ```", + "AuditProcedure": "Find the kubeconfig file being used by `kube-proxy` by running the following command: ``` ps -ef | grep kube-proxy ``` If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter. To perform the audit: Run the below command (based on the file location on your system) on each worker node. For example, ``` stat -c %a ``` Verify that a file is specified and that it exists with permissions of `600` or more restrictive.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-proxy/", + "DefaultValue": "By default, proxy file has permissions of `640`." + } + ] + }, + { + "Id": "4.1.4", + "Description": "If proxy kubeconfig file exists ensure ownership is set to root:root", + "Checks": [], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.1 Worker Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "RationaleStatement": "The kubeconfig file for `kube-proxy` controls various parameters for the `kube-proxy` service in the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on each worker node.
For example, ``` chown root:root ```", + "AuditProcedure": "Find the kubeconfig file being used by `kube-proxy` by running the following command: ``` ps -ef | grep kube-proxy ``` If `kube-proxy` is running, get the kubeconfig file location from the `--kubeconfig` parameter. To perform the audit: Run the below command (based on the file location on your system) on each worker node. For example, ``` stat -c %U:%G ``` Verify that the ownership is set to `root:root`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kube-proxy/", + "DefaultValue": "By default, `proxy` file ownership is set to `root:root`." + } + ] + }, + { + "Id": "4.1.5", + "Description": "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", + "Checks": [ + "kubelet_conf_file_permissions" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.1 Worker Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", + "RationaleStatement": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on each worker node. For example, ``` chmod 600 /etc/kubernetes/kubelet.conf ```", + "AuditProcedure": "Automated AAC auditing has been modified to allow CIS-CAT to input a variable for the location of the kubelet config file. Please set $kubelet_config= based on the file location on your system, for example: ``` export kubelet_config=/etc/kubernetes/kubelet.conf ``` To perform the audit manually: Run the below command (based on the file location on your system) on each worker node. For example, ``` stat -c %a /etc/kubernetes/kubelet.conf ``` Verify that the permissions are `600` or more restrictive.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kubelet/", + "DefaultValue": "By default, `kubelet.conf` file has permissions of `600`." + } + ] + }, + { + "Id": "4.1.6", + "Description": "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", + "Checks": [ + "kubelet_conf_file_ownership" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.1 Worker Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "RationaleStatement": "The `kubelet.conf` file is the kubeconfig file for the node, and controls various parameters that set the behavior and identity of the worker node. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the below command (based on the file location on your system) on each worker node. For example, ``` chown root:root /etc/kubernetes/kubelet.conf ```", + "AuditProcedure": "Automated AAC auditing has been modified to allow CIS-CAT to input a variable for the location of the kubelet config file.
Please set $kubelet_config= based on the file location on your system, for example: ``` export kubelet_config=/etc/kubernetes/kubelet.conf ``` To perform the audit manually: Run the below command (based on the file location on your system) on each worker node. For example, ``` stat -c %U:%G /etc/kubernetes/kubelet.conf ``` Verify that the ownership is set to `root:root`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kubelet/", + "DefaultValue": "By default, `kubelet.conf` file ownership is set to `root:root`." + } + ] + }, + { + "Id": "4.1.7", + "Description": "Ensure that the certificate authorities file permissions are set to 644 or more restrictive", + "Checks": [], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.1 Worker Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Ensure that the certificate authorities file has permissions of `644` or more restrictive.", + "RationaleStatement": "The certificate authorities file controls the authorities used to validate API requests. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the following command to modify the file permissions of the `--client-ca-file` ``` chmod 644 ```", + "AuditProcedure": "Run the following command: ``` ps -ef | grep kubelet ``` Find the file specified by the `--client-ca-file` argument. Run the following command: ``` stat -c %a ``` Verify that the permissions are `644` or more restrictive.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/authentication/#x509-client-certs", + "DefaultValue": "By default, no `--client-ca-file` is specified." + } + ] + }, + { + "Id": "4.1.8", + "Description": "Ensure that the client certificate authorities file ownership is set to root:root", + "Checks": [], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.1 Worker Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Ensure that the certificate authorities file ownership is set to `root:root`.", + "RationaleStatement": "The certificate authorities file controls the authorities used to validate API requests. You should set its file ownership to maintain the integrity of the file. The file should be owned by `root:root`.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the following command to modify the ownership of the `--client-ca-file`. ``` chown root:root ```", + "AuditProcedure": "Run the following command: ``` ps -ef | grep kubelet ``` Find the file specified by the `--client-ca-file` argument. Run the following command: ``` stat -c %U:%G ``` Verify that the ownership is set to `root:root`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/authentication/#x509-client-certs", + "DefaultValue": "By default, no `--client-ca-file` is specified."
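The 4.1.x controls above all reduce to the same pattern: compare a file's mode against a ceiling and its owner against `root:root`. As a minimal illustration only (this is not Prowler's check implementation; the path, mode ceiling, and expected owner are assumptions that vary per control), such an audit could be scripted as:

```python
import grp
import os
import pwd
import stat


def audit_file(path, max_mode=0o600, owner="root", group="root"):
    """Sketch of a CIS 4.1.x-style file audit: mode plus ownership.

    Returns a list of findings; an empty list means the file passes.
    """
    findings = []
    try:
        st = os.stat(path)
    except FileNotFoundError:
        # Several 4.1.x controls are not applicable when the file is absent.
        return [f"{path}: not present (control may be not applicable)"]
    mode = stat.S_IMODE(st.st_mode)
    # "600 or more restrictive" means no permission bit beyond rw for the owner.
    if mode & ~max_mode:
        findings.append(f"{path}: mode {oct(mode)} is wider than {oct(max_mode)}")
    if (pwd.getpwuid(st.st_uid).pw_name, grp.getgrgid(st.st_gid).gr_name) != (owner, group):
        findings.append(f"{path}: not owned by {owner}:{group}")
    return findings


if __name__ == "__main__":
    # Hypothetical invocation in the spirit of 4.1.5/4.1.6; adjust the path
    # per the audit step on your system.
    for finding in audit_file("/etc/kubernetes/kubelet.conf"):
        print("FAIL:", finding)
```

The bitmask test (`mode & ~max_mode`) is what "or more restrictive" means in practice: any bit set outside the ceiling fails, while narrower modes such as `400` pass.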
+ } + ] + }, + { + "Id": "4.1.9", + "Description": "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", + "Checks": [ + "kubelet_config_yaml_permissions" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.1 Worker Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", + "RationaleStatement": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the following command (using the config file location identified in the Audit step) ``` chmod 600 /var/lib/kubelet/config.yaml ```", + "AuditProcedure": "Automated AAC auditing has been modified to allow CIS-CAT to input a variable for the location of the kubelet config yaml file. Please set $kubelet_config_yaml= based on the file location on your system, for example: ``` export kubelet_config_yaml=/var/lib/kubelet/config.yaml ``` To perform the audit manually: Run the below command (based on the file location on your system) on each worker node. For example, ``` stat -c %a /var/lib/kubelet/config.yaml ``` Verify that the permissions are `600` or more restrictive.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/", + "DefaultValue": "By default, the /var/lib/kubelet/config.yaml file as set up by `kubeadm` has permissions of 600." + } + ] + }, + { + "Id": "4.1.10", + "Description": "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", + "Checks": [ + "kubelet_config_yaml_ownership" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.1 Worker Node Configuration Files", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "RationaleStatement": "The kubelet reads various parameters, including security settings, from a config file specified by the `--config` argument. If this file is specified you should restrict its file permissions to maintain the integrity of the file. The file should be owned by root:root.", + "ImpactStatement": "None", + "RemediationProcedure": "Run the following command (using the config file location identified in the Audit step) ``` chown root:root /var/lib/kubelet/config.yaml ```", + "AuditProcedure": "Automated AAC auditing has been modified to allow CIS-CAT to input a variable for the location of the kubelet config yaml file. Please set $kubelet_config_yaml= based on the file location on your system, for example: ``` export kubelet_config_yaml=/var/lib/kubelet/config.yaml ``` To perform the audit manually: Run the below command (based on the file location on your system) on each worker node.
For example, ``` stat -c %U:%G /var/lib/kubelet/config.yaml ```Verify that the ownership is set to `root:root`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/", + "DefaultValue": "By default, `/var/lib/kubelet/config.yaml` file as set up by `kubeadm` is owned by `root:root`." + } + ] + }, + { + "Id": "4.2.1", + "Description": "Ensure that the --anonymous-auth argument is set to false", + "Checks": [ + "kubelet_disable_anonymous_auth" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.2 Kubelet", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Disable anonymous requests to the Kubelet server.", + "RationaleStatement": "When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests. These requests are then served by the Kubelet server. You should rely on authentication to authorize access and disallow anonymous requests.", + "ImpactStatement": "Anonymous requests will be rejected.", + "RemediationProcedure": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`. If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. ``` --anonymous-auth=false ``` Based on your system, restart the `kubelet` service. For example: ``` systemctl daemon-reload systemctl restart kubelet.service ```", + "AuditProcedure": "If using a Kubelet configuration file, check that there is an entry for `authentication: anonymous: enabled` set to `false`. Run the following command on each node: ``` ps -ef | grep kubelet ``` Verify that the `--anonymous-auth` argument is set to `false`. This executable argument may be omitted, provided there is a corresponding entry set to `false` in the Kubelet config file.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kubelet/:https://kubernetes.io/docs/admin/kubelet-authentication-authorization/#kubelet-authentication", + "DefaultValue": "By default, anonymous access is enabled." + } + ] + }, + { + "Id": "4.2.2", + "Description": "Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "Checks": [ + "kubelet_authorization_mode" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.2 Kubelet", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Do not allow all requests. Enable explicit authorization.", + "RationaleStatement": "Kubelets, by default, allow all authenticated requests (even anonymous ones) without needing explicit authorization checks from the apiserver. You should restrict this behavior and only allow explicitly authorized requests.", + "ImpactStatement": "Unauthorized requests will be denied.", + "RemediationProcedure": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`. If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable. ``` --authorization-mode=Webhook ``` Based on your system, restart the `kubelet` service. 
For example: ``` systemctl daemon-reload systemctl restart kubelet.service ```", + "AuditProcedure": "Run the following command on each node: ``` ps -ef | grep kubelet ``` If the `--authorization-mode` argument is present check that it is not set to `AlwaysAllow`. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `authorization: mode` to something other than `AlwaysAllow`. It is also possible to review the running configuration of a Kubelet via the `/configz` endpoint on the Kubelet API port (typically `10250/TCP`). Accessing these with appropriate credentials will provide details of the Kubelet's configuration.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kubelet/:https://kubernetes.io/docs/admin/kubelet-authentication-authorization/#kubelet-authentication", + "DefaultValue": "By default, `--authorization-mode` argument is set to `AlwaysAllow`." + } + ] + }, + { + "Id": "4.2.3", + "Description": "Ensure that the --client-ca-file argument is set as appropriate", + "Checks": [ + "kubelet_client_ca_file_set" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.2 Kubelet", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Enable Kubelet authentication using certificates.", + "RationaleStatement": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet’s port-forwarding functionality. These connections terminate at the kubelet’s HTTPS endpoint. By default, the apiserver does not verify the kubelet’s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks. Enabling Kubelet certificate authentication ensures that the apiserver could authenticate the Kubelet before submitting any requests.", + "ImpactStatement": "You require TLS to be configured on apiserver as well as kubelets.", + "RemediationProcedure": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file. If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable. ``` --client-ca-file= ``` Based on your system, restart the `kubelet` service. For example: ``` systemctl daemon-reload systemctl restart kubelet.service ```", + "AuditProcedure": "Run the following command on each node: ``` ps -ef | grep kubelet ``` Verify that the `--client-ca-file` argument exists and is set to the location of the client certificate authority file. If the `--client-ca-file` argument is not present, check that there is a Kubelet config file specified by `--config`, and that the file sets `authentication: x509: clientCAFile` to the location of the client certificate authority file.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kubelet/:https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/", + "DefaultValue": "By default, `--client-ca-file` argument is not set." 
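The three kubelet checks above (4.2.1 through 4.2.3) share one evaluation rule: a command-line flag wins, otherwise the `--config` file is consulted, otherwise the documented default applies. A hedged sketch of that precedence logic follows; the function names are hypothetical, PyYAML is assumed to be available, and this is not how Prowler's `kubelet_*` checks are actually written:

```python
import shlex

import yaml  # PyYAML, assumed available


def kubelet_flag(cmdline, name):
    """Return the value of --<name>=<value> or --<name> <value>, else None."""
    args = shlex.split(cmdline)
    for i, arg in enumerate(args):
        if arg.startswith(f"--{name}="):
            return arg.split("=", 1)[1]
        if arg == f"--{name}" and i + 1 < len(args):
            return args[i + 1]
    return None


def check_kubelet(cmdline, config_path=None):
    config = {}
    if config_path:
        with open(config_path) as f:
            config = yaml.safe_load(f) or {}
    # Flags take precedence over the config file, mirroring kubelet behaviour;
    # the fallback defaults mirror the DefaultValue fields of these controls.
    anon = kubelet_flag(cmdline, "anonymous-auth")
    if anon is None:
        anon = str(config.get("authentication", {}).get("anonymous", {}).get("enabled", True))
    authz = kubelet_flag(cmdline, "authorization-mode") or config.get(
        "authorization", {}).get("mode", "AlwaysAllow")
    ca = kubelet_flag(cmdline, "client-ca-file") or config.get(
        "authentication", {}).get("x509", {}).get("clientCAFile")
    return {
        "4.2.1 anonymous auth disabled": anon.lower() == "false",
        "4.2.2 authorization-mode is not AlwaysAllow": authz != "AlwaysAllow",
        "4.2.3 client-ca-file configured": bool(ca),
    }
```

In practice the `cmdline` argument would come from the `ps -ef | grep kubelet` output that each audit procedure describes, and `config_path` from that command line's `--config` value.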
+ } + ] + }, + { + "Id": "4.2.4", + "Description": "Verify that if defined, readOnlyPort is set to 0", + "Checks": [ + "kubelet_disable_read_only_port" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.2 Kubelet", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Disable the read-only port.", + "RationaleStatement": "The Kubelet process provides a read-only API in addition to the main Kubelet API. Unauthenticated access is provided to this read-only API which could possibly retrieve potentially sensitive information about the cluster.", + "ImpactStatement": "Removal of the read-only port will require that any service which made use of it will need to be re-configured to use the main Kubelet API.", + "RemediationProcedure": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`. If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. ``` --read-only-port=0 ``` Based on your system, restart the `kubelet` service. For example: ``` systemctl daemon-reload systemctl restart kubelet.service ```", + "AuditProcedure": "Run the following command on each node: ``` ps -ef | grep kubelet ``` Verify that the `--read-only-port` argument exists and is set to `0`. If the `--read-only-port` argument is not present, check that there is a Kubelet config file specified by `--config`. Check that if there is a `readOnlyPort` entry in the file, it is set to `0`.", + "AdditionalInformation": "https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/", + "References": "https://kubernetes.io/docs/admin/kubelet/:https://github.com/kubernetes/kubernetes/blob/6cedc0853faa118df0ba3d41b48b993422ad3df6/staging/src/k8s.io/kubelet/config/v1beta1/types.go#L142", + "DefaultValue": "" + } + ] + }, + { + "Id": "4.2.5", + "Description": "Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "Checks": [ + "kubelet_streaming_connection_timeout" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.2 Kubelet", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Do not disable timeouts on streaming connections.", + "RationaleStatement": "Setting idle timeouts ensures that you are protected against Denial-of-Service attacks, inactive connections and running out of ephemeral ports. **Note:** By default, `--streaming-connection-idle-timeout` is set to 4 hours which might be too high for your environment. Setting this as appropriate would additionally ensure that such streaming connections are timed out after serving legitimate use cases.", + "ImpactStatement": "Long-lived connections could be interrupted.", + "RemediationProcedure": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0. If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. ``` --streaming-connection-idle-timeout=5m ``` Based on your system, restart the `kubelet` service. For example: ``` systemctl daemon-reload systemctl restart kubelet.service ```", + "AuditProcedure": "Run the following command on each node: ``` ps -ef | grep kubelet ``` Verify that the `--streaming-connection-idle-timeout` argument is not set to `0`. 
If the argument is not present, and there is a Kubelet config file specified by `--config`, check that it does not set `streamingConnectionIdleTimeout` to 0.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kubelet/:https://github.com/kubernetes/kubernetes/pull/18552", + "DefaultValue": "By default, `--streaming-connection-idle-timeout` is set to 4 hours." + } + ] + }, + { + "Id": "4.2.6", + "Description": "Ensure that the --make-iptables-util-chains argument is set to true", + "Checks": [ + "kubelet_manage_iptables" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.2 Kubelet", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Allow Kubelet to manage iptables.", + "RationaleStatement": "Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with pods networking configuration. Manually configuring iptables with dynamic pod network configuration changes might hamper the communication between pods/containers and to the outside world. You might have iptables rules too restrictive or too open.", + "ImpactStatement": "Kubelet would manage the iptables on the system and keep it in sync. If you are using any other iptables management solution, then there might be some conflicts.", + "RemediationProcedure": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`. If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable. Based on your system, restart the `kubelet` service. For example: ``` systemctl daemon-reload systemctl restart kubelet.service ```", + "AuditProcedure": "Run the following command on each node: ``` ps -ef | grep kubelet ``` Verify that if the `--make-iptables-util-chains` argument exists then it is set to `true`. If the `--make-iptables-util-chains` argument does not exist, and there is a Kubelet config file specified by `--config`, verify that the file does not set `makeIPTablesUtilChains` to `false`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kubelet/", + "DefaultValue": "By default, `--make-iptables-util-chains` argument is set to `true`." + } + ] + }, + { + "Id": "4.2.7", + "Description": "Ensure that the --hostname-override argument is not set", + "Checks": [], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.2 Kubelet", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Do not override node hostnames.", + "RationaleStatement": "Overriding hostnames could potentially break TLS setup between the kubelet and the apiserver. Additionally, with overridden hostnames, it becomes increasingly difficult to associate logs with a particular node and process them for security analytics. Hence, you should setup your kubelet nodes with resolvable FQDNs and avoid overriding the hostnames with IPs.", + "ImpactStatement": "Some cloud providers may require this flag to ensure that hostname matches names issued by the cloud provider. 
In these environments, this recommendation should not apply.", + "RemediationProcedure": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable. Based on your system, restart the `kubelet` service. For example: ``` systemctl daemon-reload systemctl restart kubelet.service ```", + "AuditProcedure": "Run the following command on each node: ``` ps -ef | grep kubelet ``` Verify that `--hostname-override` argument does not exist. **Note** This setting is not configurable via the Kubelet config file.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kubelet/:https://github.com/kubernetes/kubernetes/issues/22063", + "DefaultValue": "By default, `--hostname-override` argument is not set." + } + ] + }, + { + "Id": "4.2.8", + "Description": "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture", + "Checks": [ + "kubelet_event_record_qps" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.2 Kubelet", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "Security relevant information should be captured. The eventRecordQPS setting on the Kubelet configuration can be used to limit the rate at which events are gathered and sets the maximum event creations per second. Setting this too low could result in relevant events not being logged; however, the unlimited setting of `0` could result in a denial of service on the kubelet.", + "RationaleStatement": "It is important to capture all events and not restrict event creation. Events are an important source of security information and analytics that ensure that your environment is consistently monitored using the event data.", + "ImpactStatement": "Setting this parameter to `0` could result in a denial of service condition due to excessive events being created. The cluster's event processing and storage systems should be scaled to handle expected event loads.", + "RemediationProcedure": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level. If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in the `KUBELET_ARGS` variable. Based on your system, restart the `kubelet` service. For example: ``` systemctl daemon-reload systemctl restart kubelet.service ```", + "AuditProcedure": "Run the following command on each node: ``` sudo grep eventRecordQPS /etc/systemd/system/kubelet.service.d/10-kubeadm.conf ``` or, if using command line arguments, the kubelet service file is located at /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf: ``` sudo grep eventRecordQPS /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf ``` Review the value set for the argument and determine whether this has been set to an appropriate level for the cluster. If the argument does not exist, check that there is a Kubelet config file specified by `--config` and review the value in this location.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/kubelet/:https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go", + "DefaultValue": "By default, the eventRecordQPS argument is set to `5`."
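Because 4.2.8 is a judgment call rather than a pass/fail threshold, a scanner can at most flag the unlimited setting of `0` and surface the effective value for review. A small sketch under the same assumptions as the previous example (PyYAML available, default config path `/var/lib/kubelet/config.yaml`, helper name hypothetical):

```python
import yaml  # PyYAML, assumed available


def audit_event_record_qps(config_path="/var/lib/kubelet/config.yaml"):
    """Sketch for CIS 4.2.8: flag eventRecordQPS=0 and report the effective value."""
    with open(config_path) as f:
        config = yaml.safe_load(f) or {}
    qps = config.get("eventRecordQPS", 5)  # the kubelet defaults to 5 when unset
    if qps == 0:
        return "FAIL: eventRecordQPS=0 removes the rate limit (DoS risk on the kubelet)"
    return f"MANUAL: eventRecordQPS={qps}; confirm this captures enough events for the cluster"


if __name__ == "__main__":
    print(audit_event_record_qps())
```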
+ } + ] + }, + { + "Id": "4.2.9", + "Description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "Checks": [ + "kubelet_tls_cert_and_key" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.2 Kubelet", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Set up TLS connections on the Kubelets.", + "RationaleStatement": "The connections from the apiserver to the kubelet are used for fetching logs for pods, attaching (through kubectl) to running pods, and using the kubelet’s port-forwarding functionality. These connections terminate at the kubelet’s HTTPS endpoint. By default, the apiserver does not verify the kubelet’s serving certificate, which makes the connection subject to man-in-the-middle attacks, and unsafe to run over untrusted and/or public networks.", + "ImpactStatement": "", + "RemediationProcedure": "If using a Kubelet config file, edit the file to set `tlsCertFile` to the location of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` to the location of the corresponding private key file. If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameters in the `KUBELET_CERTIFICATE_ARGS` variable. ``` --tls-cert-file= --tls-private-key-file= ``` Based on your system, restart the `kubelet` service. For example: ``` systemctl daemon-reload systemctl restart kubelet.service ```", + "AuditProcedure": "Run the following command on each node: ``` ps -ef | grep kubelet ``` Verify that the `--tls-cert-file` and `--tls-private-key-file` arguments exist and are set as appropriate. If these arguments are not present, check that there is a Kubelet config specified by `--config` and that it contains appropriate settings for `tlsCertFile` and `tlsPrivateKeyFile`.", + "AdditionalInformation": "", + "References": "", + "DefaultValue": "" + } + ] + }, + { + "Id": "4.2.10", + "Description": "Ensure that the --rotate-certificates argument is not set to false", + "Checks": [ + "kubelet_rotate_certificates" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.2 Kubelet", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Enable kubelet client certificate rotation.", + "RationaleStatement": "The `--rotate-certificates` setting causes the kubelet to rotate its client certificates by creating new CSRs as its existing credentials expire. This automated periodic rotation ensures that there is no downtime due to expired certificates, thus addressing availability in the CIA security triad. **Note:** This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself. **Note:** This feature also requires the `RotateKubeletClientCertificate` feature gate to be enabled (which is the default since Kubernetes v1.7).", + "ImpactStatement": "None", + "RemediationProcedure": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value. If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable, or set `--rotate-certificates=true`. Based on your system, restart the `kubelet` service.
For example: ``` systemctl daemon-reload systemctl restart kubelet.service ```", + "AuditProcedure": "Run the following command on each node: ``` ps -ef | grep kubelet ``` Verify that the `--rotate-certificates` argument is not present, or is not set to `false`. If the `--rotate-certificates` argument is not present, verify that if there is a Kubelet config file specified by `--config`, that file does not contain `rotateCertificates: false`.", + "AdditionalInformation": "", + "References": "https://github.com/kubernetes/kubernetes/pull/41912:https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration:https://kubernetes.io/docs/imported/release/notes/:https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/", + "DefaultValue": "By default, kubelet client certificate rotation is enabled." + } + ] + }, + { + "Id": "4.2.11", + "Description": "Verify that the RotateKubeletServerCertificate argument is set to true", + "Checks": [], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.2 Kubelet", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Enable kubelet server certificate rotation.", + "RationaleStatement": "`RotateKubeletServerCertificate` causes the kubelet to both request a serving certificate after bootstrapping its client credentials and rotate the certificate as its existing credentials expire. This automated periodic rotation ensures that there are no downtimes due to expired certificates, thus addressing availability in the CIA security triad. Note: This recommendation only applies if you let kubelets get their certificates from the API server. In case your kubelet certificates come from an outside authority/tool (e.g. Vault) then you need to take care of rotation yourself.", + "ImpactStatement": "None", + "RemediationProcedure": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in the `KUBELET_CERTIFICATE_ARGS` variable. ``` --feature-gates=RotateKubeletServerCertificate=true ``` Based on your system, restart the `kubelet` service. For example: ``` systemctl daemon-reload systemctl restart kubelet.service ```", + "AuditProcedure": "Ignore this check if `serverTLSBootstrap` is true in the kubelet config file or if the `--rotate-server-certificates` parameter is set on the kubelet. Run the following command on each node: ``` ps -ef | grep kubelet ``` Verify that `RotateKubeletServerCertificate` argument exists and is set to `true`.", + "AdditionalInformation": "", + "References": "https://github.com/kubernetes/kubernetes/pull/45059:https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/#kubelet-configuration", + "DefaultValue": "By default, kubelet server certificate rotation is enabled." + } + ] + }, + { + "Id": "4.2.12", + "Description": "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", + "Checks": [ + "kubelet_strong_ciphers_only" + ], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.2 Kubelet", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", + "RationaleStatement": "TLS ciphers have had a number of known vulnerabilities and weaknesses, which can reduce the protection provided by them.
By default Kubernetes supports a number of TLS ciphersuites including some that have security concerns, weakening the protection provided.", + "ImpactStatement": "Kubelet clients that cannot support modern cryptographic ciphers will not be able to make connections to the Kubelet API.", + "RemediationProcedure": "If using a Kubelet config file, edit the file to set `tlsCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values. If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values. ``` --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 ``` Based on your system, restart the `kubelet` service. For example: ``` systemctl daemon-reload systemctl restart kubelet.service ```", + "AuditProcedure": "The set of cryptographic ciphers currently considered secure is the following: - `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` - `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` - `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` - `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384` - `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` - `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` - `TLS_RSA_WITH_AES_256_GCM_SHA384` - `TLS_RSA_WITH_AES_128_GCM_SHA256` Run the following command on each node: ``` ps -ef | grep kubelet ``` If the `--tls-cipher-suites` argument is present, ensure it only contains values included in this set. If it is not present check that there is a Kubelet config file specified by `--config`, and that file sets `tlsCipherSuites:` to only include values from this set.", + "AdditionalInformation": "The list chosen above should be fine for modern clients. It's essentially the list from the Mozilla Modern cipher option with the ciphersuites supporting CBC mode removed, as CBC has traditionally had a lot of issues", + "References": "", + "DefaultValue": "By default the Kubernetes API server supports a wide range of TLS ciphers" + } + ] + }, + { + "Id": "4.2.13", + "Description": "Ensure that a limit is set on pod PIDs", + "Checks": [], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.2 Kubelet", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Ensure that the Kubelet sets limits on the number of PIDs that can be created by pods running on the node.", + "RationaleStatement": "By default pods running in a cluster can consume any number of PIDs, potentially exhausting the resources available on the node. Setting an appropriate limit reduces the risk of a denial of service attack on cluster nodes.", + "ImpactStatement": "Setting this value will restrict the number of processes per pod. 
If this limit is lower than the number of PIDs required by a pod, it will not operate.", + "RemediationProcedure": "Decide on an appropriate level for this parameter and set it, either via the `--pod-max-pids` command line parameter or the `PodPidsLimit` configuration file setting.", + "AuditProcedure": "Review the Kubelet's start-up parameters for the value of `--pod-max-pids`, and check the Kubelet configuration file for the `PodPidsLimit` setting. If neither of these values is set, then there is no limit in place.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/policy/pid-limiting/#pod-pid-limits", + "DefaultValue": "By default the number of PIDs is not limited." + } + ] + }, + { + "Id": "4.2.14", + "Description": "Ensure that the --seccomp-default parameter is set to true", + "Checks": [], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.2 Kubelet", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Ensure that the Kubelet enforces the use of the RuntimeDefault seccomp profile.", + "RationaleStatement": "By default, Kubernetes disables the seccomp profile which ships with most container runtimes. Setting this parameter will ensure workloads running on the node are protected by the runtime's seccomp profile.", + "ImpactStatement": "Setting this will remove some rights from pods running on the node.", + "RemediationProcedure": "Set the parameter, either via the `--seccomp-default` command line parameter or the `seccompDefault` configuration file setting.", + "AuditProcedure": "Review the Kubelet's start-up parameters for the value of `--seccomp-default`, and check the Kubelet configuration file for the `seccompDefault` setting. If neither of these values is set, then the seccomp profile is not in use.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/tutorials/security/seccomp/#enable-the-use-of-runtimedefault-as-the-default-seccomp-profile-for-all-workloads", + "DefaultValue": "By default the seccomp profile is not enabled." + } + ] + }, + { + "Id": "4.3.1", + "Description": "Ensure that the kube-proxy metrics service is bound to localhost", + "Checks": [], + "Attributes": [ + { + "Section": "4 Worker Nodes", + "SubSection": "4.3 kube-proxy", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Do not bind the kube-proxy metrics port to non-loopback addresses.", + "RationaleStatement": "kube-proxy has two APIs which provide access to information about the service and can be bound to network ports. The metrics API service includes endpoints (`/metrics` and `/configz`) which disclose information about the configuration and operation of kube-proxy. These endpoints should not be exposed to untrusted networks as they do not support encryption or authentication to restrict access to the data they provide.", + "ImpactStatement": "Third-party services which try to access metrics or configuration information related to kube-proxy will require access to the localhost interface of the node.", + "RemediationProcedure": "Modify or remove any values which bind the metrics service to a non-localhost address.", + "AuditProcedure": "Review the start-up flags provided to kube-proxy. Run the following command on each node: ``` ps -ef | grep -i kube-proxy ``` Ensure that the `--metrics-bind-address` parameter is not set to a value other than 127.0.0.1. From the output of this command, gather the location specified in the `--config` parameter.
Review any file stored at that location and ensure that it does not specify a value other than 127.0.0.1 for `metricsBindAddress`.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/", + "DefaultValue": "The default value is `127.0.0.1:10249`" + } + ] + }, + { + "Id": "5.1.1", + "Description": "Ensure that the cluster-admin role is only used where required", + "Checks": [ + "rbac_cluster_admin_usage" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.1 RBAC and Service Accounts", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "RationaleStatement": "Kubernetes provides a set of default roles where RBAC is used. Some of these roles such as `cluster-admin` provide wide-ranging privileges which should only be applied where absolutely necessary. Roles such as `cluster-admin` allow super-user access to perform any action on any resource. When used in a `ClusterRoleBinding`, it gives full control over every resource in the cluster and in all namespaces. When used in a `RoleBinding`, it gives full control over every resource in the rolebinding's namespace, including the namespace itself.", + "ImpactStatement": "Care should be taken before removing any `clusterrolebindings` from the environment to ensure they were not required for operation of the cluster. Specifically, modifications should not be made to `clusterrolebindings` with the `system:` prefix as they are required for the operation of system components.", + "RemediationProcedure": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges. Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role : ``` kubectl delete clusterrolebinding [name] ```", + "AuditProcedure": "Obtain a list of the principals who have access to the `cluster-admin` role by reviewing the `clusterrolebinding` output for each role binding that has access to the `cluster-admin` role. ``` kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name ``` Review each principal listed and ensure that `cluster-admin` privilege is required for it.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/authorization/rbac/#user-facing-roles", + "DefaultValue": "By default a single `clusterrolebinding` called `cluster-admin` is provided with the `system:masters` group as its principal." + } + ] + }, + { + "Id": "5.1.2", + "Description": "Minimize access to secrets", + "Checks": [ + "rbac_minimize_secret_access" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.1 RBAC and Service Accounts", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. 
Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "RationaleStatement": "Inappropriate access to secrets stored within the Kubernetes cluster can allow for an attacker to gain additional access to the Kubernetes cluster or external resources whose credentials are stored as secrets.", + "ImpactStatement": "Care should be taken not to remove access to secrets to system components which require this for their operation", + "RemediationProcedure": "Where possible, restrict access to secret objects in the cluster by removing get, list, and watch permissions.", + "AuditProcedure": "Review the users who have `get`, `list`, or `watch` access to `secrets` objects in the Kubernetes API.", + "AdditionalInformation": "", + "References": "", + "DefaultValue": "By default in a kubeadm cluster the following list of principals have `get` privileges on `secret` objects ``` CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE cluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system system:controller:expand-controller expand-controller ServiceAccount kube-system system:controller:generic-garbage-collector generic-garbage-collector ServiceAccount kube-system system:controller:namespace-controller namespace-controller ServiceAccount kube-system system:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system system:kube-controller-manager system:kube-controller-manager User ```" + } + ] + }, + { + "Id": "5.1.3", + "Description": "Minimize wildcard use in Roles and ClusterRoles", + "Checks": [ + "rbac_minimize_wildcard_use_roles" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.1 RBAC and Service Accounts", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard * which matches all items. Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "RationaleStatement": "The principle of least privilege recommends that users are provided only the access required for their role and nothing more. 
The use of wildcard rights grants is likely to provide excessive rights to the Kubernetes API.", + "ImpactStatement": "", + "RemediationProcedure": "Where possible, replace any use of wildcards in ClusterRoles and Roles with specific objects or actions.", + "AuditProcedure": "Retrieve the roles defined in each namespace in the cluster and review them for wildcards: ``` kubectl get roles --all-namespaces -o yaml ``` Retrieve the cluster roles defined in the cluster and review them for wildcards: ``` kubectl get clusterroles -o yaml ```", + "AdditionalInformation": "", + "References": "", + "DefaultValue": "" + } + ] + }, + { + "Id": "5.1.4", + "Description": "Minimize access to create pods", + "Checks": [ + "rbac_minimize_pod_creation_access" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.1 RBAC and Service Accounts", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access). As such, access to create new pods should be restricted to the smallest possible group of users.", + "RationaleStatement": "The ability to create pods in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "ImpactStatement": "Care should be taken not to remove access to pods from system components which require this for their operation.", + "RemediationProcedure": "Where possible, remove `create` access to `pod` objects in the cluster.", + "AuditProcedure": "Review the users who have create access to pod objects in the Kubernetes API.", + "AdditionalInformation": "", + "References": "", + "DefaultValue": "By default in a kubeadm cluster the following list of principals have `create` privileges on `pod` objects ``` CLUSTERROLEBINDING SUBJECT TYPE SA-NAMESPACE cluster-admin system:masters Group system:controller:clusterrole-aggregation-controller clusterrole-aggregation-controller ServiceAccount kube-system system:controller:daemon-set-controller daemon-set-controller ServiceAccount kube-system system:controller:job-controller job-controller ServiceAccount kube-system system:controller:persistent-volume-binder persistent-volume-binder ServiceAccount kube-system system:controller:replicaset-controller replicaset-controller ServiceAccount kube-system system:controller:replication-controller replication-controller ServiceAccount kube-system system:controller:statefulset-controller statefulset-controller ServiceAccount kube-system ```" + } + ] + }, + { + "Id": "5.1.5", + "Description": "Ensure that default service accounts are not actively used.", + "Checks": [], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.1 RBAC and Service Accounts", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "RationaleStatement": "Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.
The default service account should be configured to ensure that it does not automatically provide a service account token, and it must not have any non-default role bindings or custom role assignments.", + "ImpactStatement": "All workloads which require access to the Kubernetes API will require an explicit service account to be created.", + "RemediationProcedure": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server. Modify the configuration of each default service account to include this value: ``` automountServiceAccountToken: false ```", + "AuditProcedure": "For each namespace in the cluster, review the rights assigned to the default service account and ensure that it has no roles or cluster roles bound to it apart from the defaults. Additionally, ensure that the `automountServiceAccountToken: false` setting is in place for each default service account.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + "DefaultValue": "By default, the `default` service account allows its service account token to be mounted in pods in its namespace." + } + ] + }, + { + "Id": "5.1.6", + "Description": "Ensure that Service Account Tokens are only mounted where necessary", + "Checks": [], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.1 RBAC and Service Accounts", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Service account tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server.", + "RationaleStatement": "Mounting service account tokens inside pods can provide an avenue for privilege escalation attacks where an attacker is able to compromise a single pod in the cluster. Avoiding mounting these tokens removes this attack avenue.", + "ImpactStatement": "Pods without mounted service account tokens will not be able to communicate with the API server, except where the resource is available to unauthenticated principals.", + "RemediationProcedure": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "AuditProcedure": "Review pod and service account objects in the cluster and ensure that the option below is set, unless the resource explicitly requires this access. ``` automountServiceAccountToken: false ```", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + "DefaultValue": "By default, all pods get a service account token mounted in them." + } + ] + }, + { + "Id": "5.1.7", + "Description": "Avoid use of system:masters group", + "Checks": [], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.1 RBAC and Service Accounts", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. bootstrapping access prior to RBAC being fully available).", + "RationaleStatement": "The `system:masters` group has unrestricted access to the Kubernetes API hard-coded into the API server source code. An authenticated user who is a member of this group cannot have their access reduced, even if all bindings and cluster role bindings which mention it are removed.
When combined with client certificate authentication, use of this group can allow for irrevocable cluster-admin level credentials to exist for a cluster.", + "ImpactStatement": "Once the RBAC system is operational in a cluster, `system:masters` should not be specifically required, as ordinary bindings from principals to the `cluster-admin` cluster role can be made where unrestricted access is required.", + "RemediationProcedure": "Remove the `system:masters` group from all users in the cluster.", + "AuditProcedure": "Review a list of all credentials which have access to the cluster and ensure that the group `system:masters` is not used.", + "AdditionalInformation": "", + "References": "https://github.com/kubernetes/kubernetes/blob/master/pkg/registry/rbac/escalation_check.go#L38", + "DefaultValue": "By default, some clusters will create a break-glass client certificate which is a member of this group. Access to this client certificate should be carefully controlled, and it should not be used for general cluster operations." + } + ] + }, + { + "Id": "5.1.8", + "Description": "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "Checks": [], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.1 RBAC and Service Accounts", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allows a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators.", + "RationaleStatement": "The impersonate privilege allows a subject to impersonate other users, gaining their rights to the cluster. The bind privilege allows the subject to add a binding to a cluster role or role which escalates their effective permissions in the cluster. The escalate privilege allows a subject to modify cluster roles to which they are bound, increasing their rights to that level. Each of these permissions has the potential to allow for privilege escalation to cluster-admin level.", + "ImpactStatement": "There are some cases where these permissions are required for cluster service operation, and care should be taken before removing these permissions from system service accounts.", + "RemediationProcedure": "Where possible, remove the impersonate, bind, and escalate rights from subjects.", + "AuditProcedure": "Review the users who have access to cluster roles or roles which provide the impersonate, bind, or escalate privileges.", + "AdditionalInformation": "", + "References": "https://raesene.github.io/blog/2020/12/12/Escalating_Away/:https://raesene.github.io/blog/2021/01/16/Getting-Into-A-Bind-with-Kubernetes/", + "DefaultValue": "In a default kubeadm cluster, the `system:masters` group and the `clusterrole-aggregation-controller` service account have access to the escalate privilege. The `system:masters` group also has access to bind and impersonate." + } + ] + }, + { + "Id": "5.1.9", + "Description": "Minimize access to create persistent volumes", + "Checks": [ + "rbac_minimize_pv_creation_access" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.1 RBAC and Service Accounts", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "The ability to create persistent volumes in a cluster can provide an opportunity for privilege escalation, via the creation of `hostPath` volumes.
As persistent volumes are not covered by Pod Security Admission, a user with access to create persistent volumes may be able to get access to sensitive files from the underlying host, even where restrictive Pod Security Admission policies are in place.", + "RationaleStatement": "The ability to create persistent volumes in a cluster opens up possibilities for privilege escalation and should be restricted, where possible.", + "ImpactStatement": "", + "RemediationProcedure": "Where possible, remove `create` access to `PersistentVolume` objects in the cluster.", + "AuditProcedure": "Review the users who have `create` access to `PersistentVolume` objects in the Kubernetes API.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/rbac-good-practices/#persistent-volume-creation", + "DefaultValue": "" + } + ] + }, + { + "Id": "5.1.10", + "Description": "Minimize access to the proxy sub-resource of nodes", + "Checks": [ + "rbac_minimize_node_proxy_subresource_access" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.1 RBAC and Service Accounts", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Users with access to the `proxy` sub-resource of `Node` objects automatically have permissions to use the kubelet API, which may allow for privilege escalation or the bypassing of cluster security controls such as audit logs. The kubelet provides an API which includes rights to execute commands in any container running on the node. Access to this API is covered by permissions to the main Kubernetes API via the `node` object. The `proxy` sub-resource specifically allows wide-ranging access to the kubelet API. Direct access to the kubelet API bypasses controls like audit logging (there is no audit log of kubelet API access) and admission control.", + "RationaleStatement": "The ability to use the `proxy` sub-resource of `node` objects opens up possibilities for privilege escalation and should be restricted, where possible.", + "ImpactStatement": "", + "RemediationProcedure": "Where possible, remove access to the `proxy` sub-resource of `node` objects.", + "AuditProcedure": "Review the users who have access to the `proxy` sub-resource of `node` objects in the Kubernetes API.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/rbac-good-practices/#access-to-proxy-subresource-of-nodes:https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authorization", + "DefaultValue": "" + } + ] + }, + { + "Id": "5.1.11", + "Description": "Minimize access to the approval sub-resource of certificatesigningrequests objects", + "Checks": [ + "rbac_minimize_csr_approval_access" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.1 RBAC and Service Accounts", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Users with access to update the `approval` sub-resource of `CertificateSigningRequests` objects can approve new client certificates for the Kubernetes API, effectively allowing them to create new high-privileged user accounts.
This can allow for privilege escalation to full cluster administrator, depending on the users configured in the cluster.", + "RationaleStatement": "The ability to update certificate signing requests should be limited.", + "ImpactStatement": "", + "RemediationProcedure": "Where possible, remove access to the `approval` sub-resource of `CertificateSigningRequests` objects.", + "AuditProcedure": "Review the users who have access to update the `approval` sub-resource of `CertificateSigningRequests` objects in the Kubernetes API.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/rbac-good-practices/#csrs-and-certificate-issuing", + "DefaultValue": "" + } + ] + }, + { + "Id": "5.1.12", + "Description": "Minimize access to webhook configuration objects", + "Checks": [ + "rbac_minimize_webhook_config_access" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.1 RBAC and Service Accounts", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Users with rights to create/modify/delete `validatingwebhookconfigurations` or `mutatingwebhookconfigurations` can control webhooks that can read any object admitted to the cluster, and in the case of mutating webhooks, also mutate admitted objects. This could allow for privilege escalation or disruption of the operation of the cluster.", + "RationaleStatement": "The ability to manage webhook configuration should be limited.", + "ImpactStatement": "", + "RemediationProcedure": "Where possible, remove access to the `validatingwebhookconfigurations` or `mutatingwebhookconfigurations` objects.", + "AuditProcedure": "Review the users who have access to `validatingwebhookconfigurations` or `mutatingwebhookconfigurations` objects in the Kubernetes API.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/rbac-good-practices/#control-admission-webhooks", + "DefaultValue": "" + } + ] + }, + { + "Id": "5.1.13", + "Description": "Minimize access to the service account token creation", + "Checks": [ + "rbac_minimize_service_account_token_creation" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.1 RBAC and Service Accounts", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Users with rights to create new service account tokens at a cluster level can create long-lived privileged credentials in the cluster. This could allow for privilege escalation and persistent access to the cluster, even if the user's account has been revoked.", + "RationaleStatement": "The ability to create service account tokens should be limited.", + "ImpactStatement": "", + "RemediationProcedure": "Where possible, remove access to the `token` sub-resource of `serviceaccount` objects.", + "AuditProcedure": "Review the users who have access to create the `token` sub-resource of `serviceaccount` objects in the Kubernetes API.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/rbac-good-practices/#token-request", + "DefaultValue": "" + } + ] + }, + { + "Id": "5.2.1", + "Description": "Ensure that the cluster has at least one active policy control mechanism in place", + "Checks": [], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.2 Pod Security Standards", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section.
This could be the in-built Pod Security Admission controller, or a third-party policy control system.", + "RationaleStatement": "Without an active policy control mechanism, it is not possible to limit the use of containers with access to the underlying cluster nodes, via mechanisms like privileged containers, or the use of hostPath volume mounts.", + "ImpactStatement": "Where policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.", + "RemediationProcedure": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", + "AuditProcedure": "Review the workloads deployed to the cluster to understand if Pod Security Admission or external admission control systems are in place.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/pod-security-admission", + "DefaultValue": "By default, Pod Security Admission is enabled but no policies are in place." + } + ] + }, + { + "Id": "5.2.2", + "Description": "Minimize the admission of privileged containers", + "Checks": [ + "core_minimize_privileged_containers" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.2 Pod Security Standards", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "RationaleStatement": "Privileged containers have access to all Linux Kernel capabilities and devices. A container running with full privileges can do almost everything that the host can do. This flag exists to allow special use-cases, like manipulating the network stack and accessing devices. There should be at least one admission control policy defined which does not permit privileged containers. If you need to run privileged containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "ImpactStatement": "Pods defined with `spec.containers[].securityContext.privileged: true`, `spec.initContainers[].securityContext.privileged: true` and `spec.ephemeralContainers[].securityContext.privileged: true` will not be permitted.", + "RemediationProcedure": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", + "AuditProcedure": "Run the following command: `kubectl get pods -A -o=jsonpath='{range .items[*]}{@.metadata.name}: {@..securityContext}{end}'`. It will produce an inventory of all privileged use on the cluster, if any (please refer to the sample below). Further grepping can be done to automate detection of each specific violation. calico-kube-controllers-57b57c56f-jtmk4: {} << No Elevated Privileges calico-node-c4xv4: {} {privileged:true} {privileged:true} {privileged:true} {privileged:true} << Violates 5.2.2 dashboard-metrics-scraper-7bc864c59-2m2xw: {seccompProfile:{type:RuntimeDefault}} {allowPrivilegeEscalation:false,readOnlyRootFilesystem:true,runAsGroup:2001,runAsUser:1001}", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/pod-security-standards/", + "DefaultValue": "By default, there are no restrictions on the creation of privileged containers."
+ } + ] + }, + { + "Id": "5.2.3", + "Description": "Minimize the admission of containers wishing to share the host process ID namespace", + "Checks": [ + "core_minimize_hostPID_containers" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.2 Pod Security Standards", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "RationaleStatement": "A container running in the host's PID namespace can inspect processes running outside the container. If the container also has access to ptrace capabilities, this can be used to escalate privileges outside of the container. There should be at least one admission control policy defined which does not permit containers to share the host PID namespace. If you need to run containers which require hostPID, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "ImpactStatement": "Pods defined with `spec.hostPID: true` will not be permitted unless they are run under a specific policy.", + "RemediationProcedure": "Configure the Admission Controller to restrict the admission of `hostPID` containers.", + "AuditProcedure": "Fetch `hostPID` from each pod with `kubectl get pods -A -o=jsonpath='{range .items[*]}{@.metadata.name}: {@.spec.hostPID}{end}'`", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/pod-security-standards/", + "DefaultValue": "By default, there are no restrictions on the creation of `hostPID` containers." + } + ] + }, + { + "Id": "5.2.4", + "Description": "Minimize the admission of containers wishing to share the host IPC namespace", + "Checks": [ + "core_minimize_hostIPC_containers" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.2 Pod Security Standards", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "RationaleStatement": "A container running in the host's IPC namespace can use IPC to interact with processes outside the container. There should be at least one admission control policy defined which does not permit containers to share the host IPC namespace. If you need to run containers which require hostIPC, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "ImpactStatement": "Pods defined with `spec.hostIPC: true` will not be permitted unless they are run under a specific policy.", + "RemediationProcedure": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", + "AuditProcedure": "Fetch `hostIPC` from each pod with `kubectl get pods -A -o=jsonpath='{range .items[*]}{@.metadata.name}: {@.spec.hostIPC}{end}'`", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/pod-security-standards/", + "DefaultValue": "By default, there are no restrictions on the creation of `hostIPC` containers."
+ } + ] + }, + { + "Id": "5.2.5", + "Description": "Minimize the admission of containers wishing to share the host network namespace", + "Checks": [ + "core_minimize_hostNetwork_containers" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.2 Pod Security Standards", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "RationaleStatement": "A container running in the host's network namespace could access the local loopback device, and could access network traffic to and from other pods. There should be at least one admission control policy defined which does not permit containers to share the host network namespace. If you need to run containers which require access to the host's network namespaces, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "ImpactStatement": "Pods defined with `spec.hostNetwork: true` will not be permitted unless they are run under a specific policy.", + "RemediationProcedure": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", + "AuditProcedure": "Fetch `hostNetwork` from each pod with `kubectl get pods -A -o=jsonpath='{range .items[*]}{@.metadata.name}: {@.spec.hostNetwork}{end}'`", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/pod-security-standards/", + "DefaultValue": "By default, there are no restrictions on the creation of `hostNetwork` containers." + } + ] + }, + { + "Id": "5.2.6", + "Description": "Minimize the admission of containers with allowPrivilegeEscalation", + "Checks": [ + "core_minimize_allowPrivilegeEscalation_containers" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.2 Pod Security Standards", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true. Allowing this right can lead to a process running in a container getting more rights than it started with. It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", + "RationaleStatement": "A container running with the `allowPrivilegeEscalation` flag set to `true` may have processes that can gain more privileges than their parent. There should be at least one admission control policy defined which does not permit containers to allow privilege escalation. The option exists (and defaults to `true`) to permit setuid binaries to run.
If you need to run containers which use setuid binaries or require privilege escalation, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "ImpactStatement": "Pods defined with `securityContext: allowPrivilegeEscalation: true` will not be permitted unless they are run under a specific policy.", + "RemediationProcedure": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `securityContext: allowPrivilegeEscalation: true`.", + "AuditProcedure": "List the policies in use for each namespace in the cluster and ensure that each policy disallows the admission of containers which allow privilege escalation. To fetch a list of pods which set `allowPrivilegeEscalation`, run this command: `kubectl get pods -A -o=jsonpath='{range .items[*]}{@.metadata.name}: {@..securityContext}{end}'`", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/pod-security-standards/", + "DefaultValue": "By default, there are no restrictions on a contained process's ability to escalate privileges within the context of the container." + } + ] + }, + { + "Id": "5.2.7", + "Description": "Minimize the admission of root containers", + "Checks": [ + "core_minimize_root_containers_admission" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.2 Pod Security Standards", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "Do not generally permit containers to be run as the root user.", + "RationaleStatement": "Containers may run as any Linux user. Containers which run as the root user, whilst constrained by Container Runtime security features, still have an escalated likelihood of container breakout. Ideally, all containers should run as a defined non-UID 0 user. There should be at least one admission control policy defined which does not permit root containers. If you need to run root containers, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "ImpactStatement": "Pods with containers which run as the root user will not be permitted.", + "RemediationProcedure": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with a range of UIDs not including 0, is set.", + "AuditProcedure": "List the policies in use for each namespace in the cluster and ensure that each policy restricts the use of root containers by setting `MustRunAsNonRoot` or `MustRunAs` with a range of UIDs not including 0.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/pod-security-standards/", + "DefaultValue": "By default, there are no restrictions on the use of root containers, and if a user is not specified in the image, the container will run as root."
+ } + ] + }, + { + "Id": "5.2.8", + "Description": "Minimize the admission of containers with the NET_RAW capability", + "Checks": [ + "core_minimize_net_raw_capability_admission" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.2 Pod Security Standards", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Do not generally permit containers with the potentially dangerous `NET_RAW` capability.", + "RationaleStatement": "Containers run with a default set of capabilities as assigned by the Container Runtime. By default, this can include potentially dangerous capabilities. With Docker as the container runtime, the `NET_RAW` capability is enabled, which may be misused by malicious containers. Ideally, all containers should drop this capability. There should be at least one admission control policy defined which does not permit containers with the `NET_RAW` capability. If you need to run containers with this capability, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "ImpactStatement": "Pods with containers which run with the `NET_RAW` capability will not be permitted.", + "RemediationProcedure": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", + "AuditProcedure": "List the policies in use for each namespace in the cluster and ensure that at least one policy disallows the admission of containers with the `NET_RAW` capability.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/pod-security-standards/:https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/", + "DefaultValue": "By default, there are no restrictions on the creation of containers with the `NET_RAW` capability." + } + ] + }, + { + "Id": "5.2.9", + "Description": "Minimize the admission of containers with capabilities assigned", + "Checks": [ + "core_minimize_containers_capabilities_assigned" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.2 Pod Security Standards", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "RationaleStatement": "Containers run with a default set of capabilities as assigned by the Container Runtime. Capabilities outside this set can be added to containers, which could expose them to risks of container breakout attacks. There should be at least one policy defined which prevents containers with capabilities beyond the default set from launching. If you need to run containers with additional capabilities, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "ImpactStatement": "Pods with containers which require capabilities outside the default set will not be permitted.", + "RemediationProcedure": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", + "AuditProcedure": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.
Run: `kubectl get pods -A -o=jsonpath='{range .items[*]}{@.metadata.name}: {@..securityContext}{end}'`", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/pod-security-standards/:https://www.nccgroup.trust/uk/our-research/abusing-privileged-and-unprivileged-linux-containers/", + "DefaultValue": "By default, there are no restrictions on adding capabilities to containers." + } + ] + }, + { + "Id": "5.2.10", + "Description": "Minimize the admission of Windows HostProcess Containers", + "Checks": [ + "core_minimize_admission_windows_hostprocess_containers" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.2 Pod Security Standards", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", + "RationaleStatement": "A Windows container making use of the `hostProcess` flag can interact with the underlying Windows cluster node. As per the Kubernetes documentation, this provides privileged access to the Windows node. Where Windows containers are used inside a Kubernetes cluster, there should be at least one admission control policy which does not permit `hostProcess` Windows containers. If you need to run Windows containers which require `hostProcess`, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "ImpactStatement": "Pods defined with `securityContext.windowsOptions.hostProcess: true` will not be permitted unless they are run under a specific policy.", + "RemediationProcedure": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", + "AuditProcedure": "List the policies in use for each namespace in the cluster and ensure that each policy disallows the admission of `hostProcess` containers.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/tasks/configure-pod-container/create-hostprocess-pod/:https://kubernetes.io/docs/concepts/security/pod-security-standards/", + "DefaultValue": "By default, there are no restrictions on the creation of `hostProcess` containers." + } + ] + }, + { + "Id": "5.2.11", + "Description": "Minimize the admission of HostPath volumes", + "Checks": [], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.2 Pod Security Standards", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Do not generally admit containers which make use of `hostPath` volumes.", + "RationaleStatement": "A container which mounts a `hostPath` volume as part of its specification will have access to the filesystem of the underlying cluster node. The use of `hostPath` volumes may allow containers access to privileged areas of the node filesystem. There should be at least one admission control policy defined which does not permit containers to mount `hostPath` volumes.
If you need to run containers which require `hostPath` volumes, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "ImpactStatement": "Pods which make use of `hostPath` volumes will not be permitted unless they are run under a specific policy.", + "RemediationProcedure": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", + "AuditProcedure": "List the policies in use for each namespace in the cluster and ensure that each policy disallows the admission of containers with `hostPath` volumes.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/pod-security-standards/", + "DefaultValue": "By default, there are no restrictions on the creation of `hostPath` volumes." + } + ] + }, + { + "Id": "5.2.12", + "Description": "Minimize the admission of containers which use HostPorts", + "Checks": [ + "core_minimize_admission_hostport_containers" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.2 Pod Security Standards", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Do not generally permit containers which require the use of HostPorts.", + "RationaleStatement": "Host ports connect containers directly to the host's network. This can bypass controls such as network policy. There should be at least one admission control policy defined which does not permit containers which require the use of HostPorts. If you need to run containers which require HostPorts, this should be defined in a separate policy and you should carefully check to ensure that only limited service accounts and users are given permission to use that policy.", + "ImpactStatement": "Pods defined with `hostPort` settings in either the container, initContainer or ephemeralContainer sections will not be permitted unless they are run under a specific policy.", + "RemediationProcedure": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", + "AuditProcedure": "List the policies in use for each namespace in the cluster and ensure that each policy disallows the admission of containers which have `hostPort` sections.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/security/pod-security-standards/", + "DefaultValue": "By default, there are no restrictions on the use of HostPorts." + } + ] + }, + { + "Id": "5.3.1", + "Description": "Ensure that the CNI in use supports Network Policies", + "Checks": [], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.3 Network Policies and CNI", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies, it may not be possible to effectively restrict traffic in the cluster.", + "RationaleStatement": "Kubernetes network policies are enforced by the CNI plugin in use.
As such, it is important to ensure that the CNI plugin supports both Ingress and Egress network policies.", + "ImpactStatement": "None", + "RemediationProcedure": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "AuditProcedure": "Review the documentation of the CNI plugin in use by the cluster, and confirm that it supports Ingress and Egress network policies.", + "AdditionalInformation": "One example here is Flannel (https://github.com/coreos/flannel), which does not support network policies unless Calico is also in use.", + "References": "https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/", + "DefaultValue": "This will depend on the CNI plugin in use." + } + ] + }, + { + "Id": "5.3.2", + "Description": "Ensure that all Namespaces have Network Policies defined", + "Checks": [], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.3 Network Policies and CNI", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "Use network policies to isolate traffic in your cluster network.", + "RationaleStatement": "Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints. Network Policies are namespace-scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. However, if there are no network policies in a namespace, all traffic will be allowed into and out of the pods in that namespace.", + "ImpactStatement": "Once network policies are in use within a given namespace, traffic not explicitly allowed by a network policy will be denied. As such, it is important to ensure that, when introducing network policies, legitimate traffic is not blocked.", + "RemediationProcedure": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "AuditProcedure": "Run the below command and review the `NetworkPolicy` objects created in the cluster. ``` kubectl get networkpolicy --all-namespaces ``` Ensure that each namespace defined in the cluster has at least one Network Policy.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/services-networking/networkpolicies/:https://octetz.com/posts/k8s-network-policy-apis:https://kubernetes.io/docs/tasks/configure-pod-container/declare-network-policy/", + "DefaultValue": "By default, network policies are not created." + } + ] + }, + { + "Id": "5.4.1", + "Description": "Prefer using secrets as files over secrets as environment variables", + "Checks": [ + "core_no_secrets_envs" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.4 Secrets Management", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "RationaleStatement": "It is reasonably common for application code to log out its environment (particularly in the event of an error).
This will include any secret values passed in as environment variables, so secrets can easily be exposed to any user or entity who has access to the logs.", + "ImpactStatement": "Application code which expects to read secrets in the form of environment variables would need modification.", + "RemediationProcedure": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "AuditProcedure": "Run the following command to find references to objects which use environment variables defined from secrets. ``` kubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]}{.kind}{@.metadata.name}{end}' -A ```", + "AdditionalInformation": "Mounting secrets as volumes has the additional benefit that secret values can be updated without restarting the pod.", + "References": "https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets", + "DefaultValue": "By default, secrets are not defined." + } + ] + }, + { + "Id": "5.4.2", + "Description": "Consider external secret storage", + "Checks": [], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.4 Secrets Management", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "RationaleStatement": "Kubernetes supports secrets as first-class objects, but care needs to be taken to ensure that access to secrets is carefully limited. Using an external secrets provider can ease the management of access to secrets, especially where secrets are used across both Kubernetes and non-Kubernetes environments.", + "ImpactStatement": "None", + "RemediationProcedure": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "AuditProcedure": "Review your secrets management implementation.", + "AdditionalInformation": "", + "References": "", + "DefaultValue": "By default, no external secret management is configured." + } + ] + }, + { + "Id": "5.5.1", + "Description": "Configure Image Provenance using ImagePolicyWebhook admission controller", + "Checks": [], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.5 Extensible Admission Control", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "Configure Image Provenance for your deployment.", + "RationaleStatement": "Kubernetes supports plugging in provenance rules to accept or reject the images in your deployments.
You could configure such rules to ensure that only approved images are deployed in the cluster.", + "ImpactStatement": "You need to regularly maintain your provenance configuration based on container image updates.", + "RemediationProcedure": "Follow the Kubernetes documentation and set up image provenance.", + "AuditProcedure": "Review the pod definitions in your cluster and verify that image provenance is configured as appropriate.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/admin/admission-controllers/#imagepolicywebhook:https://github.com/kubernetes/community/blob/master/contributors/design-proposals/image-provenance.md:https://hub.docker.com/r/dnurmi/anchore-toolbox/:https://github.com/kubernetes/kubernetes/issues/22888", + "DefaultValue": "By default, image provenance is not set." + } + ] + }, + { + "Id": "5.6.1", + "Description": "Create administrative boundaries between resources using namespaces", + "Checks": [], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.6 General Policies", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Use namespaces to isolate your Kubernetes objects.", + "RationaleStatement": "Limiting the scope of user permissions can reduce the impact of mistakes or malicious activities. A Kubernetes namespace allows you to partition created resources into logically named groups. Resources created in one namespace can be hidden from other namespaces. By default, each resource created by a user in a Kubernetes cluster runs in a default namespace, called `default`. You can create additional namespaces and attach resources and users to them. You can use Kubernetes Authorization plugins to create policies that segregate access to namespace resources between different users.", + "ImpactStatement": "You need to switch between namespaces for administration.", + "RemediationProcedure": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "AuditProcedure": "Run the below command and review the namespaces created in the cluster. ``` kubectl get namespaces ``` Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/#viewing-namespaces:http://blog.kubernetes.io/2016/08/security-best-practices-kubernetes-deployment.html:https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/589-efficient-node-heartbeats", + "DefaultValue": "By default, Kubernetes starts with 4 initial namespaces: 1. `default` - The default namespace for objects with no other namespace 2. `kube-system` - The namespace for objects created by the Kubernetes system 3. `kube-node-lease` - Namespace used for node heartbeats 4.
`kube-public` - Namespace used for public information in a cluster" + } + ] + }, + { + "Id": "5.6.2", + "Description": "Ensure that the seccomp profile is set to docker/default in your pod definitions", + "Checks": [ + "core_seccomp_profile_docker_default" + ], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.6 General Policies", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "Enable the `docker/default` seccomp profile in your pod definitions.", + "RationaleStatement": "Seccomp (secure computing mode) is used to restrict the set of system calls applications can make, allowing cluster administrators greater control over the security of workloads running in the cluster. Kubernetes disables seccomp profiles by default for historical reasons. You should enable it to ensure that the workloads have restricted actions available within the container.", + "ImpactStatement": "If the `docker/default` seccomp profile is too restrictive for you, you would have to create/manage your own seccomp profiles.", + "RemediationProcedure": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. An example is as below: ``` securityContext: seccompProfile: type: RuntimeDefault ```", + "AuditProcedure": "Review the pod definitions in your cluster. They should include a line as below: ``` securityContext: seccompProfile: type: RuntimeDefault ```", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/tutorials/clusters/seccomp/:https://docs.docker.com/engine/security/seccomp/", + "DefaultValue": "By default, the seccomp profile is set to `unconfined`, which means that no seccomp profiles are enabled." + } + ] + }, + { + "Id": "5.6.3", + "Description": "Apply Security Context to Your Pods and Containers", + "Checks": [], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.6 General Policies", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "Apply Security Context to Your Pods and Containers", + "RationaleStatement": "A security context defines the operating system security settings (uid, gid, capabilities, SELinux role, etc.) applied to a container. When designing your containers and pods, make sure that you configure the security context for your pods, containers, and volumes. A security context is a property defined in the deployment YAML. It controls the security parameters that will be assigned to the pod/container/volume. There are two levels of security context: pod-level security context and container-level security context.", + "ImpactStatement": "If you incorrectly apply security contexts, you may have trouble running the pods.", + "RemediationProcedure": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "AuditProcedure": "Review the pod definitions in your cluster and verify that you have security contexts defined as appropriate.", + "AdditionalInformation": "", + "References": "https://kubernetes.io/docs/concepts/policy/security-context/:https://learn.cisecurity.org/benchmarks", + "DefaultValue": "By default, no security contexts are automatically applied to pods."
+ } + ] + }, + { + "Id": "5.6.4", + "Description": "The default namespace should not be used", + "Checks": [], + "Attributes": [ + { + "Section": "5 Policies", + "SubSection": "5.6 General Policies", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "RationaleStatement": "Resources in a Kubernetes cluster should be segregated by namespace, to allow for security controls to be applied at that level and to make it easier to manage resources.", + "ImpactStatement": "None", + "RemediationProcedure": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "AuditProcedure": "Run this command to list objects in the default namespace ``` kubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default ``` The only entries there should be system-managed resources such as the `kubernetes` service.", + "AdditionalInformation": "", + "References": "", + "DefaultValue": "Unless a namespace is specified on object creation, the `default` namespace will be used." + } + ] + } + ] +}