feat(compliance): add new Prowler Threat Score Compliance Framework (#7603)

Co-authored-by: MrCloudSec <hello@mistercloudsec.com>
Authored by Pedro Martín on 2025-04-28 09:57:52 +02:00, committed by GitHub
parent b8836c6404
commit 06f94f884f
26 changed files with 5528 additions and 2 deletions

@@ -0,0 +1,24 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )

@@ -0,0 +1,24 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )

@@ -0,0 +1,24 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )
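
The three new dashboard modules above appear to be identical per-provider adapters (the diff viewer omits their file paths; presumably they cover the AWS, Azure, and GCP ThreatScore outputs). Each one expects the findings DataFrame to carry the eight columns it selects before delegating to get_section_containers_cis. A minimal sketch of that input contract, using pandas and purely hypothetical values:

import pandas as pd

# Hypothetical findings row; real data comes from the Prowler ThreatScore compliance CSV.
findings = pd.DataFrame(
    [
        {
            "REQUIREMENTS_ID": "1.1.1",
            "REQUIREMENTS_DESCRIPTION": "User-managed service account keys rotated every 90 days",
            "REQUIREMENTS_ATTRIBUTES_SECTION": "1. IAM",
            "CHECKID": "iam_sa_user_managed_key_rotate_90_days",
            "STATUS": "FAIL",
            "REGION": "global",
            "ACCOUNTID": "my-gcp-project",  # hypothetical project id
            "RESOURCEID": "sa-1@my-gcp-project.iam.gserviceaccount.com",
        }
    ]
)

# Same column selection the get_table() adapters perform before handing the
# frame to get_section_containers_cis().
aux = findings[
    [
        "REQUIREMENTS_ID",
        "REQUIREMENTS_DESCRIPTION",
        "REQUIREMENTS_ATTRIBUTES_SECTION",
        "CHECKID",
        "STATUS",
        "REGION",
        "ACCOUNTID",
        "RESOURCEID",
    ]
].copy()
print(aux.shape)  # (1, 8)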

@@ -398,6 +398,10 @@ def display_data(
f"dashboard.compliance.{current}"
)
data.drop_duplicates(keep="first", inplace=True)
if "threatscore" in analytics_input:
data = get_threatscore_mean_by_pillar(data)
table = compliance_module.get_table(data)
except ModuleNotFoundError:
table = html.Div(
@@ -430,6 +434,9 @@ def display_data(
if "pci" in analytics_input:
pie_2 = get_bar_graph(df, "REQUIREMENTS_ID")
current_filter = "req_id"
elif "threatscore" in analytics_input:
pie_2 = get_table_prowler_threatscore(df)
current_filter = "threatscore"
elif (
"REQUIREMENTS_ATTRIBUTES_SECTION" in df.columns
and not df["REQUIREMENTS_ATTRIBUTES_SECTION"].isnull().values.any()
@@ -488,6 +495,13 @@ def display_data(
pie_2, f"Top 5 failed {current_filter} by requirements"
)
if "threatscore" in analytics_input:
security_level_graph = get_graph(
pie_2,
"Pillar Score by requirements (1 = Lowest Risk, 5 = Highest Risk)",
margin_top=0,
)
return (
table_output,
overall_status_result_graph,
@@ -501,7 +515,7 @@ def display_data(
)
def get_graph(pie, title):
def get_graph(pie, title, margin_top=7):
return [
html.Span(
title,
@@ -514,7 +528,7 @@ def get_graph(pie, title):
"display": "flex",
"justify-content": "center",
"align-items": "center",
"margin-top": "7%",
"margin-top": f"{margin_top}%",
},
),
]
@@ -618,3 +632,87 @@ def get_table(current_compliance, table):
className="relative flex flex-col bg-white shadow-provider rounded-xl px-4 py-3 flex-wrap w-full",
),
]


def get_threatscore_mean_by_pillar(df):
modified_df = df[df["STATUS"] == "FAIL"].copy()
modified_df["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"] = pd.to_numeric(
modified_df["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"], errors="coerce"
)
pillar_means = (
modified_df.groupby("REQUIREMENTS_ATTRIBUTES_SECTION")[
"REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"
]
.mean()
.round(2)
)
output = []
for pillar, mean in pillar_means.items():
output.append(f"{pillar} - [{mean}]")
for value in output:
if value.split(" - ")[0] in df["REQUIREMENTS_ATTRIBUTES_SECTION"].values:
df.loc[
df["REQUIREMENTS_ATTRIBUTES_SECTION"] == value.split(" - ")[0],
"REQUIREMENTS_ATTRIBUTES_SECTION",
] = value
return df


def get_table_prowler_threatscore(df):
df = df[df["STATUS"] == "FAIL"].copy()
# Keep only the pillar name (drop any " - [score]" suffix appended upstream)
df["REQUIREMENTS_ATTRIBUTES_SECTION"] = (
df["REQUIREMENTS_ATTRIBUTES_SECTION"].str.split(" - ").str[0]
)
df["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"] = pd.to_numeric(
df["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"], errors="coerce"
)
score_df = (
df.groupby("REQUIREMENTS_ATTRIBUTES_SECTION")[
"REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"
]
.mean()
.reset_index()
.rename(
columns={
"REQUIREMENTS_ATTRIBUTES_SECTION": "Pillar",
"REQUIREMENTS_ATTRIBUTES_LEVELOFRISK": "Score",
}
)
)
fig = px.bar(
score_df,
x="Pillar",
y="Score",
color="Score",
color_continuous_scale=[
"#45cc6e",
"#f4d44d",
"#e77676",
], # green → yellow → red
hover_data={"Score": True, "Pillar": True},
labels={"Score": "Average Risk Score", "Pillar": "Section"},
height=400,
)
fig.update_layout(
xaxis_title="Pillar",
yaxis_title="Level of Risk",
margin=dict(l=20, r=20, t=30, b=20),
plot_bgcolor="rgba(0,0,0,0)",
paper_bgcolor="rgba(0,0,0,0)",
coloraxis_colorbar=dict(title="Risk"),
)
return dcc.Graph(
figure=fig,
style={"height": "25rem", "width": "40rem"},
)
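
To make the aggregation above concrete, here is a minimal, self-contained sketch of the per-pillar mean over failed findings, using pandas only and hypothetical data; the real frame is the compliance output loaded by the dashboard:

import pandas as pd

# Hypothetical findings: only FAIL rows count towards the pillar score.
df = pd.DataFrame(
    {
        "STATUS": ["FAIL", "FAIL", "FAIL", "PASS"],
        "REQUIREMENTS_ATTRIBUTES_SECTION": [
            "1. IAM",
            "1. IAM",
            "2. Attack Surface",
            "2. Attack Surface",
        ],
        "REQUIREMENTS_ATTRIBUTES_LEVELOFRISK": ["5", "4", "2", "5"],
    }
)

failed = df[df["STATUS"] == "FAIL"].copy()
failed["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"] = pd.to_numeric(
    failed["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"], errors="coerce"
)
pillar_means = (
    failed.groupby("REQUIREMENTS_ATTRIBUTES_SECTION")[
        "REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"
    ]
    .mean()
    .round(2)
)
print(pillar_means.to_dict())  # {'1. IAM': 4.5, '2. Attack Surface': 2.0}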

@@ -35,6 +35,7 @@ All notable changes to the **Prowler SDK** are documented in this file.
- Add new check `teams_meeting_recording_disabled` [(#7607)](https://github.com/prowler-cloud/prowler/pull/7607)
- Add new check `teams_meeting_presenters_restricted` [(#7613)](https://github.com/prowler-cloud/prowler/pull/7613)
- Add new check `teams_meeting_chat_anonymous_users_disabled` [(#7579)](https://github.com/prowler-cloud/prowler/pull/7579)
- Add Prowler Threat Score Compliance Framework [(#7603)](https://github.com/prowler-cloud/prowler/pull/7603)
### Fixed

@@ -70,6 +70,15 @@ from prowler.lib.outputs.compliance.mitre_attack.mitre_attack_azure import (
AzureMitreAttack,
)
from prowler.lib.outputs.compliance.mitre_attack.mitre_attack_gcp import GCPMitreAttack
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_aws import (
ProwlerThreatScoreAWS,
)
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_azure import (
ProwlerThreatScoreAzure,
)
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_gcp import (
ProwlerThreatScoreGCP,
)
from prowler.lib.outputs.csv.csv import CSV
from prowler.lib.outputs.finding import Finding
from prowler.lib.outputs.html.html import HTML
@@ -478,6 +487,18 @@ def prowler():
)
generated_outputs["compliance"].append(kisa_ismsp)
kisa_ismsp.batch_write_data_to_file()
elif compliance_name == "prowler_threatscore_aws":
filename = (
f"{output_options.output_directory}/compliance/"
f"{output_options.output_filename}_{compliance_name}.csv"
)
prowler_threatscore = ProwlerThreatScoreAWS(
findings=finding_outputs,
compliance=bulk_compliance_frameworks[compliance_name],
file_path=filename,
)
generated_outputs["compliance"].append(prowler_threatscore)
prowler_threatscore.batch_write_data_to_file()
else:
filename = (
f"{output_options.output_directory}/compliance/"
@@ -545,6 +566,18 @@ def prowler():
)
generated_outputs["compliance"].append(iso27001)
iso27001.batch_write_data_to_file()
elif compliance_name == "prowler_threatscore_azure":
filename = (
f"{output_options.output_directory}/compliance/"
f"{output_options.output_filename}_{compliance_name}.csv"
)
prowler_threatscore = ProwlerThreatScoreAzure(
findings=finding_outputs,
compliance=bulk_compliance_frameworks[compliance_name],
file_path=filename,
)
generated_outputs["compliance"].append(prowler_threatscore)
prowler_threatscore.batch_write_data_to_file()
else:
filename = (
f"{output_options.output_directory}/compliance/"
@@ -612,6 +645,18 @@ def prowler():
)
generated_outputs["compliance"].append(iso27001)
iso27001.batch_write_data_to_file()
elif compliance_name == "prowler_threatscore_gcp":
filename = (
f"{output_options.output_directory}/compliance/"
f"{output_options.output_filename}_{compliance_name}.csv"
)
prowler_threatscore = ProwlerThreatScoreGCP(
findings=finding_outputs,
compliance=bulk_compliance_frameworks[compliance_name],
file_path=filename,
)
generated_outputs["compliance"].append(prowler_threatscore)
prowler_threatscore.batch_write_data_to_file()
else:
filename = (
f"{output_options.output_directory}/compliance/"

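For reference, each new prowler_threatscore_* branch above writes its CSV next to the other compliance outputs. A tiny sketch of the resulting path, with hypothetical values standing in for output_options:

# Hypothetical values; the f-string mirrors the filename pattern in the diff above.
output_directory = "/tmp/prowler-output"
output_filename = "prowler-output-my-gcp-project-20250428120000"
compliance_name = "prowler_threatscore_gcp"

filename = (
    f"{output_directory}/compliance/"
    f"{output_filename}_{compliance_name}.csv"
)
print(filename)
# /tmp/prowler-output/compliance/prowler-output-my-gcp-project-20250428120000_prowler_threatscore_gcp.csv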
File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,977 @@
{
"Framework": "ProwlerThreatScore",
"Version": "1.0",
"Provider": "GCP",
"Description": "Prowler ThreatScore Compliance Framework for GCP ensures that the GCP project is compliant taking into account four main pillars: Identity and Access Management, Attack Surface, Forensic Readiness and Encryption",
"Requirements": [
{
"Id": "1.1.1",
"Description": "Ensure User-Managed/External Keys for Service Accounts Are Rotated Every 90 Days or Fewer",
"Checks": [
"iam_sa_user_managed_key_rotate_90_days"
],
"Attributes": [
{
"Title": "User-Managed/External Keys for Service Accounts Are Rotated Every 90 Days or Fewer",
"Section": "1. IAM",
"SubSection": "1.1 Authentication",
"AttributeDescription": "Service account keys consist of a key ID (private_key_id) and a private key, which are used to authenticate programmatic requests to Google Cloud services. It is recommended to regularly rotate service account keys to enhance security and reduce the risk of unauthorized access.",
"AdditionalInformation": "Regularly rotating service account keys minimizes the risk of a compromised, lost, or stolen key being used to access cloud resources. Google-managed keys are automatically rotated daily for internal authentication, ensuring strong security. For user-managed (external) keys, users are responsible for key security, storage, and rotation. Since Google does not retain private keys once generated, proper key management practices must be followed. Google Cloud allows up to 10 external keys per service account, making it easier to rotate them without disruption. Implementing regular key rotation ensures that old keys are not left active, reducing the potential attack surface.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.1.2",
"Description": "Ensure API Keys Only Exist for Active Services",
"Checks": [
"apikeys_key_exists"
],
"Attributes": [
{
"Title": "API Keys Only Exist for Active Services",
"Section": "1. IAM",
"SubSection": "1.1 Authentication",
"AttributeDescription": "API keys should only be used when no other authentication method is available, as they pose significant security risks. Unused API keys with active permissions may still exist within a project, potentially exposing resources to unauthorized access. It is recommended to use standard authentication flows such as OAuth 2.0 or service account authentication instead.",
"AdditionalInformation": "API keys are inherently insecure because they: are simple encrypted strings that can be easily exposed in browsers, client-side applications, or devices; do not authenticate users or applications making API requests; and can be accidentally leaked in logs, repositories, or web traffic. To enhance security, API keys should be avoided when possible, and unused keys should be deleted to minimize the risk of unauthorized access.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.1.3",
"Description": "Ensure API Keys Are Rotated Every 90 Days",
"Checks": [
"apikeys_key_rotated_in_90_days"
],
"Attributes": [
{
"Title": "API Keys Are Rotated Every 90 Days",
"Section": "1. IAM",
"SubSection": "1.1 Authentication",
"AttributeDescription": "API keys should only be used when no other authentication method is available. If API keys are in use, it is recommended to rotate them every 90 days to minimize security risks.",
"AdditionalInformation": "API keys are inherently insecure because: They are simple encrypted strings that can be easily exposed. They do not authenticate users or applications making API requests. They are often accessible to clients, increasing the risk of theft and misuse. Unlike credentials with expiration policies, stolen API keys remain valid indefinitely unless revoked or regenerated. Regularly rotating API keys reduces the risk of unauthorized access by ensuring that compromised keys cannot be used for extended periods. To enhance security, API keys should be rotated every 90 days or as part of a proactive security policy.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.2.1",
"Description": "Ensure That There Are Only GCP-Managed Service Account Keys for Each Service Account",
"Checks": [
"iam_sa_no_user_managed_keys"
],
"Attributes": [
{
"Title": "Only GCP-Managed Service Account Keys for Each Service Account",
"Section": "1. IAM",
"SubSection": "1.2 Authorization",
"AttributeDescription": "Service accounts should not use user-managed keys, as they introduce security risks and require manual management. Instead, use Google Cloud-managed keys, which are automatically rotated and secured by Google.",
"AdditionalInformation": "User-managed keys are downloadable and manually managed, making them vulnerable to leaks, mismanagement, and unauthorized access. In contrast, GCP-managed keys are non-downloadable, automatically rotated weekly, and securely handled by Google Cloud services like App Engine and Compute Engine. Managing user-generated keys requires key storage, distribution, rotation, revocation, and protection, all of which introduce potential security gaps. Common risks include keys being exposed in source code repositories, left in unsecured locations, or unintentionally shared. To minimize security risks, it is recommended to disable user-managed service account keys and rely on GCP-managed keys instead.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.2.2",
"Description": "Ensure That Service Account Has No Admin Privileges",
"Checks": [
"iam_sa_no_administrative_privileges"
],
"Attributes": [
{
"Title": "SA Has No Admin Privileges",
"Section": "1. IAM",
"SubSection": "1.2 Authorization",
"AttributeDescription": "A service account is a special Google account assigned to an application or virtual machine (VM) rather than an individual user. It is used to authenticate API requests on behalf of the application. Service accounts should not be granted admin privileges to minimize security risks.",
"AdditionalInformation": "Service accounts control resource access based on their assigned roles. Granting admin privileges to a service account allows full control over applications or VMs, enabling actions like deletion, updates, and configuration changes without user intervention. This increases the risk of misconfigurations, privilege escalation, or potential security breaches. To follow the principle of least privilege, it is recommended to restrict admin access for service accounts and assign only the necessary permissions.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.2.3",
"Description": "Ensure That Cloud KMS Cryptokeys Are Not Anonymously or Publicly Accessible",
"Checks": [
"kms_key_not_publicly_accessible"
],
"Attributes": [
{
"Title": "Cloud KMS Cryptokeys Are Not Anonymously or Publicly Accessible",
"Section": "1. IAM",
"SubSection": "1.2 Authorization",
"AttributeDescription": "The IAM policy on Cloud KMS cryptographic keys should not allow anonymous (allUsers) or public (allAuthenticatedUsers) access to prevent unauthorized key usage.",
"AdditionalInformation": "Granting permissions to allUsers or allAuthenticatedUsers allows anyone to access the cryptographic keys, which can lead to data exposure, unauthorized encryption/decryption operations, or potential key compromise. This is particularly critical if sensitive data is protected using these keys. To maintain data security and compliance, ensure that Cloud KMS cryptographic keys are only accessible to authorized users, groups, or service accounts and do not have public or anonymous access permissions.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.2.4",
"Description": "Ensure KMS Encryption Keys Are Rotated Within a Period of 90 Days",
"Checks": [
"kms_key_rotation_enabled"
],
"Attributes": [
{
"Title": "KMS Encryption Keys Are Rotated Within a Period of 90 Days",
"Section": "1. IAM",
"SubSection": "1.2 Authorization",
"AttributeDescription": "Google Cloud Key Management Service (KMS) organizes cryptographic keys in a hierarchical structure to facilitate secure and efficient access control. Keys should be configured with a defined rotation schedule to ensure their cryptographic strength is maintained over time.",
"AdditionalInformation": "Key rotation ensures that new key versions are automatically generated at regular intervals, reducing the risk of key compromise and unauthorized access. The key material (actual encryption bits) changes over time, even though the key's logical identity remains the same. Since cryptographic keys protect sensitive data, setting a specific rotation period ensures that encrypted data remains secure, minimizes the impact of a potential key leak, and aligns with best security practices.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.2.5",
"Description": "Ensure That Separation of Duties Is Enforced While Assigning KMS Related Roles to Users",
"Checks": [
"iam_role_kms_enforce_separation_of_duties"
],
"Attributes": [
{
"Title": "Separation of Duties Is Enforced While Assigning KMS Related Roles to Users",
"Section": "1. IAM",
"SubSection": "1.2 Authorization",
"AttributeDescription": "The principle of Separation of Duties should be enforced when assigning Google Cloud Key Management Service (KMS) roles to users. This prevents excessive privileges and reduces security risks.",
"AdditionalInformation": "The Cloud KMS Admin role grants the ability to create, delete, and manage keys, while the Cloud KMS CryptoKey Encrypter/Decrypter, Encrypter, and Decrypter roles control encryption and decryption of data. Granting both administrative and cryptographic privileges to the same user violates the Separation of Duties principle, potentially allowing unauthorized access to sensitive data. To mitigate risks and prevent privilege escalation, no user should hold the Cloud KMS Admin role along with any of the CryptoKey Encrypter/Decrypter roles. Enforcing Separation of Duties helps ensure secure key management and aligns with security best practices.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.2.6",
"Description": "Ensure API Keys Are Restricted to Only APIs That Application Needs Access",
"Checks": [
"apikeys_api_restrictions_configured"
],
"Attributes": [
{
"Title": "API Keys Are Restricted to Only APIs That Application Needs Access",
"Section": "1. IAM",
"SubSection": "1.2 Authorization",
"AttributeDescription": "API keys should only be used when no other authentication method is available, as they pose a higher security risk due to their public visibility. To minimize exposure, API keys should be restricted to access only the specific APIs required by an application.",
"AdditionalInformation": "API keys present several security risks, including: They are simple encrypted strings that can be easily exposed in client-side applications or browsers. They do not authenticate the user or application making API requests. They are often accessible to clients, making them susceptible to discovery and theft. Google recommends using standard authentication methods instead of API keys whenever possible. However, in limited scenarios where API keys are necessary (e.g., mobile applications using Google Cloud Translation API without a backend server), restricting API key access to only the required APIs helps enforce least privilege access and reduces attack surfaces.",
"LevelOfRisk": 4
}
]
},
{
"Id": "1.3.1",
"Description": "Ensure That IAM Users Are Not Assigned the Service Account User or Service Account Token Creator Roles at Project Level",
"Checks": [
"iam_no_service_roles_at_project_level"
],
"Attributes": [
{
"Title": "IAM Users Are Not Assigned the SA User or SA Token Creator Roles at Project Level",
"Section": "1. IAM",
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "It is recommended to assign the Service Account User (iam.serviceAccountUser) and Service Account Token Creator (iam.serviceAccountTokenCreator) roles to users at the service account level rather than granting them project-wide access.",
"AdditionalInformation": "Service accounts are identities used by applications and virtual machines (VMs) to interact with Google Cloud APIs. They also function as resources with IAM policies defining who can use them. Granting service account permissions at the project level allows users to access all service accounts within the project, including any created in the future. This increases the risk of privilege escalation, as users with Compute Instance Admin or App Engine Deployer roles could execute code as a service account, gaining access to additional resources. To enforce the principle of least privilege, users should be assigned service account roles at the specific service account level rather than at the project level. This ensures that each user has access only to the necessary service accounts while preventing unintended privilege escalation.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.3.2",
"Description": "Ensure That Separation of Duties Is Enforced While Assigning Service Account Related Roles to Users",
"Checks": [
"iam_role_kms_enforce_separation_of_duties"
],
"Attributes": [
{
"Title": "Separation of Duties Is Enforced While Assigning Service Account Related Roles to Users",
"Section": "1. IAM",
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "It is recommended to enforce the principle of Separation of Duties when assigning service account-related IAM roles to users to prevent excessive privileges and security risks.",
"AdditionalInformation": "The Service Account Admin role allows a user to create, delete, and manage service accounts, while the Service Account User role allows a user to assign service accounts to applications or compute instances. Granting both roles to the same user violates the Separation of Duties principle, as it would allow an individual to create and assign service accounts, potentially leading to unauthorized access or privilege escalation. To minimize security risks, no user should be assigned both Service Account Admin and Service Account User roles simultaneously. Enforcing Separation of Duties ensures better access control, reduces the risk of privilege abuse, and aligns with security best practices.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.3.3",
"Description": "Ensure That Cloud Audit Logging Is Configured Properly",
"Checks": [
"iam_audit_logs_enabled"
],
"Attributes": [
{
"Title": "Cloud Audit Logging Is Configured Properly",
"Section": "1. IAM",
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "Cloud Audit Logging should be configured to track all administrative activities and read/write access to user data. This ensures comprehensive visibility into who accessed or modified resources within a project, folder, or organization.",
"AdditionalInformation": "Cloud Audit Logging maintains two types of audit logs: 1. Admin Activity Logs: captures API calls and administrative actions that modify configurations or metadata. These logs are enabled by default and cannot be disabled. 2. Data Access Logs: tracks API calls that create, modify, or read user data. These logs are disabled by default and should be enabled for better monitoring. Data Access Logs provide three types of visibility: Admin Read tracks metadata or configuration reads, Data Read logs operations where user-provided data is accessed, and Data Write captures modifications to user-provided data. To ensure effective logging, it is recommended to: 1. Enable DATA_READ logs (for user activity tracking) and DATA_WRITE logs (to track modifications). 2. Apply audit logging to all supported services where Data Access logs are available. 3. Avoid exempting users from audit logs to maintain full tracking capabilities. Properly configuring Cloud Audit Logging helps strengthen security, detect unauthorized access, and ensure compliance with security policies.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.3.4",
"Description": "Ensure Log Metric Filter and Alerts Exist for Project Ownership Assignments/Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_project_ownership_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for Project Ownership Assignments/Changes",
"Section": "1. IAM",
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "In order to prevent unnecessary project ownership assignments to users or service accounts and mitigate potential misuse of projects and resources, all role assignments to roles/Owner should be monitored. Users or service accounts assigned the roles/Owner primitive role are considered project owners. The Owner role grants full control over the project, including: full viewer permissions on all GCP services, permissions to modify the state of all services, manage roles and permissions for the project and its resources, and set up billing for the project. Granting the Owner role allows the member to modify the IAM policy, which contains sensitive access control data. To minimize security risks, the Owner role should only be assigned when strictly necessary, and the number of users with this role should be kept to a minimum.",
"AdditionalInformation": "Project ownership has the highest level of privileges within a project, making it a high-risk role if misused. To reduce potential security risks, all project ownership assignments and changes should be monitored and alerted to security teams or relevant recipients. Critical events to monitor include: sending project ownership invitations, acceptance or rejection of ownership invites, assigning the roles/Owner role to a user or service account, and removing a user or service account from the roles/Owner role. Monitoring these activities helps prevent unauthorized access, enforces least privilege principles, and improves security auditing and compliance.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.3.5",
"Description": "Ensure That the Log Metric Filter and Alerts Exist for Audit Configuration Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_audit_configuration_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for Audit Configuration Changes",
"Section": "1. IAM",
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "Google Cloud Platform (GCP) services generate audit log entries in the Admin Activity and Data Access logs, providing visibility into who performed what action, where, and when within GCP projects. These logs capture key details such as the identity of the API caller, timestamp, source IP address, request parameters, and response data. Cloud audit logging records API calls made through the GCP Console, SDKs, command-line tools, and other GCP services, offering a comprehensive activity history for security monitoring and compliance.",
"AdditionalInformation": "Admin activity and data access logs play a critical role in security analysis, resource change tracking, and compliance auditing. Configuring metric filters and alerts for audit configuration changes ensures that audit logging remains in its recommended state, allowing organizations to detect and respond to unauthorized modifications while ensuring all project activities remain fully auditable at any time.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.3.6",
"Description": "Ensure That the Log Metric Filter and Alerts Exist for Custom Role Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_custom_role_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for Custom Role Changes",
"Section": "1. IAM",
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "It is recommended to set up a metric filter and alarm to track changes to Identity and Access Management (IAM) roles, including their creation, deletion, and updates. Google Cloud IAM provides predefined roles for granular access control but also allows organizations to create custom roles to meet specific needs.",
"AdditionalInformation": "IAM role modifications can impact security by granting excessive privileges if not properly managed. Monitoring role creation, deletion, and updates helps detect potential misconfigurations or over-privileged roles early, ensuring that only intended access permissions are assigned within the organization.",
"LevelOfRisk": 4
}
]
},
{
"Id": "2.1.1",
"Description": "Ensure That the Default Network Does Not Exist in a Project ",
"Checks": [
"compute_network_default_in_use"
],
"Attributes": [
{
"Title": "Default Network Does Not Exist in a Project ",
"Section": "2. Attack Surface",
"SubSection": "2.1 Network",
"AttributeDescription": "A project should not have a default network to prevent the use of preconfigured and potentially insecure network settings.",
"AdditionalInformation": "The default network automatically creates permissive firewall rules, including unrestricted internal traffic, SSH, RDP, and ICMP access, which increases the risk of unauthorized access. Additionally, it is an auto mode network, limiting flexibility in subnet configuration and restricting the use of Cloud VPN or VPC Network Peering. Organizations should create a custom network tailored to their security and networking needs and remove the default network to minimize exposure.",
"LevelOfRisk": 5
}
]
},
{
"Id": "2.1.2",
"Description": "Ensure Legacy Networks Do Not Exist for Older Projects",
"Checks": [
"compute_network_not_legacy"
],
"Attributes": [
{
"Title": "Legacy Networks Do Not Exist for Older Projects",
"Section": "2. Attack Surface",
"SubSection": "2.1 Network",
"AttributeDescription": "Projects should not have a legacy network configured to prevent the use of outdated and inflexible networking models. While new projects can no longer create legacy networks, older projects should be checked to ensure they are not still using them.",
"AdditionalInformation": "Legacy networks use a single global IPv4 prefix and a single gateway IP for the entire network, lacking subnetting capabilities. This design limits flexibility, prevents migration to auto or custom subnet networks, and can create performance bottlenecks or single points of failure for high-traffic workloads. Removing legacy networks and transitioning to modern networking models improves scalability, security, and resilience.",
"LevelOfRisk": 1
}
]
},
{
"Id": "2.1.4",
"Description": "Ensure That SSH Access Is Restricted From the Internet",
"Checks": [
"compute_firewall_ssh_access_from_the_internet_allowed"
],
"Attributes": [
{
"Title": "SSH Access Is Restricted From the Internet",
"Section": "2. Attack Surface",
"SubSection": "2.1 Network",
"AttributeDescription": "GCP Firewall Rules control ingress and egress traffic within a VPC Network. These rules define traffic conditions such as ports, protocols, and source/destination IPs. Firewall rules operate at the VPC level and cannot be shared across networks. Only IPv4 addresses are supported, and it is crucial to restrict generic (0.0.0.0/0) incoming traffic, particularly for SSH on Port 22, to prevent unauthorized access.",
"AdditionalInformation": "Firewall rules regulate traffic flow between instances and external networks. Allowing unrestricted inbound SSH access (0.0.0.0/0 on port 22) increases security risks by exposing instances to unauthorized access and brute-force attacks. To minimize threats, internet-facing access should be limited by specifying granular IP ranges and enforcing least privilege access.",
"LevelOfRisk": 5
}
]
},
{
"Id": "2.1.5",
"Description": "Ensure That RDP Access Is Restricted From the Internet",
"Checks": [
"compute_firewall_rdp_access_from_the_internet_allowed"
],
"Attributes": [
{
"Title": "RDP Access Is Restricted From the Internet",
"Section": "2. Attack Surface",
"SubSection": "2.1 Network",
"AttributeDescription": "GCP Firewall Rules control incoming (ingress) and outgoing (egress) traffic within a VPC Network. Each rule specifies traffic conditions, including ports, protocols, and source/destination IPs. These rules operate at the VPC level, cannot be shared across networks, and support only IPv4 addresses. To enhance security, unrestricted RDP access (0.0.0.0/0 on port 3389) should be avoided to prevent unauthorized remote connections.",
"AdditionalInformation": "Firewall rules regulate traffic flow between instances and external networks. Allowing unrestricted RDP access from the Internet exposes virtual machines (VMs) to unauthorized access and brute-force attacks. To mitigate risks, internet-facing access should be restricted by enforcing least privilege access, defining specific IP ranges, and implementing secure remote access solutions such as Bastion hosts or VPNs.",
"LevelOfRisk": 5
}
]
},
{
"Id": "2.2.1",
"Description": "Ensure That Cloud Storage Bucket Is Not Anonymously or Publicly Accessible",
"Checks": [
"cloudstorage_bucket_public_access"
],
"Attributes": [
{
"Title": "Cloud Storage Bucket Is Not Anonymously or Publicly Accessible",
"Section": "2. Attack Surface",
"SubSection": "2.2 Storage",
"AttributeDescription": "IAM policies on Cloud Storage buckets should not allow anonymous or public access to prevent unauthorized data exposure.",
"AdditionalInformation": "Granting public or anonymous access allows anyone to access the bucket's contents, posing a security risk, especially if sensitive data is stored. Restricting access ensures that only authorized users can interact with the bucket, reducing the risk of data breaches.",
"LevelOfRisk": 5
}
]
},
{
"Id": "2.2.2",
"Description": "Ensure 'user Connections' Database Flag for Cloud Sql Sql Server Instance Is Set to a Non-limiting Value",
"Checks": [
"cloudsql_instance_sqlserver_user_connections_flag"
],
"Attributes": [
{
"Title": "user Connections Database Flag for Cloud Sql Sql Server Instance Is Set to a Non-limiting Value",
"Section": "2. Attack Surface",
"SubSection": "2.2 Storage",
"AttributeDescription": "Verify the user connection limits for Cloud SQL SQL Server instances to ensure they are not unnecessarily restricting the number of simultaneous connections.",
"AdditionalInformation": "The user connections setting controls the maximum number of concurrent user connections allowed on an SQL Server instance. By default, SQL Server dynamically adjusts the number of connections as needed, up to a maximum of 32,767. Setting an artificial limit may prevent new connections from being established, leading to potential data loss or service outages. It is recommended to review and adjust this setting as necessary to avoid disruptions.",
"LevelOfRisk": 2
}
]
},
{
"Id": "2.2.3",
"Description": "Ensure 'remote access' database flag for Cloud SQL SQL Server instance is set to 'off'",
"Checks": [
"cloudsql_instance_sqlserver_remote_access_flag"
],
"Attributes": [
{
"Title": "remote access database flag for Cloud SQL SQL Server instance is set to off",
"Section": "2. Attack Surface",
"SubSection": "2.2 Storage",
"AttributeDescription": "Disable the remote access database flag for Cloud SQL SQL Server instances to prevent execution of stored procedures from remote servers.",
"AdditionalInformation": "The remote access option allows stored procedures to be executed from or on remote SQL Server instances. By default, this setting is enabled, which could be exploited for unauthorized query execution or Denial-of-Service (DoS) attacks by offloading processing to a target server. Disabling remote access enhances security by restricting stored procedure execution to the local server, reducing potential attack vectors. This recommendation applies to SQL Server database instances.",
"LevelOfRisk": 2
}
]
},
{
"Id": "2.2.5",
"Description": "Ensure That Cloud SQL Database Instances Do Not Implicitly Whitelist All Public IP Addresses",
"Checks": [
"cloudsql_instance_public_access"
],
"Attributes": [
{
"Title": "Cloud SQL Database Instances Do Not Implicitly Whitelist All Public IP Addresses",
"Section": "2. Attack Surface",
"SubSection": "2.2 Storage",
"AttributeDescription": "Restrict database server access to only trusted networks and IP addresses, preventing connections from public IPs.",
"AdditionalInformation": "Allowing unrestricted access to a database server increases the risk of unauthorized access and attacks. To minimize the attack surface, only trusted and necessary IP addresses should be whitelisted. Authorized networks should not be set to 0.0.0.0/0, which permits connections from anywhere. This control applies specifically to instances with public IP addresses.",
"LevelOfRisk": 5
}
]
},
{
"Id": "2.2.6",
"Description": "Ensure That Cloud SQL Database Instances Do Not Have Public IPs",
"Checks": [
"cloudsql_instance_public_access"
],
"Attributes": [
{
"Title": "Cloud SQL Database Instances Do Not Have Public IPs",
"Section": "2. Attack Surface",
"SubSection": "2.2 Storage",
"AttributeDescription": "Configure Second Generation Cloud SQL instances to use private IPs instead of public IPs.",
"AdditionalInformation": "Using private IPs for Cloud SQL databases enhances security by reducing exposure to external threats. It also improves network performance and lowers latency by keeping traffic within the internal network, minimizing the attack surface of the database.",
"LevelOfRisk": 1
}
]
},
{
"Id": "2.2.7",
"Description": "Ensure That BigQuery Datasets Are Not Anonymously or Publicly Accessible",
"Checks": [
"bigquery_dataset_public_access"
],
"Attributes": [
{
"Title": "BigQuery Datasets Are Not Anonymously or Publicly Accessible",
"Section": "2. Attack Surface",
"SubSection": "2.2 Storage",
"AttributeDescription": "Ensure that IAM policies on BigQuery datasets do not allow anonymous or public access.",
"AdditionalInformation": "Granting access to allUsers or allAuthenticatedUsers permits unrestricted access to the dataset, which can lead to unauthorized data exposure. To protect sensitive information, public or anonymous access should be strictly prohibited.",
"LevelOfRisk": 5
}
]
},
{
"Id": "2.3.3",
"Description": "Ensure That Instances Are Not Configured To Use the Default Service Account With Full Access to All Cloud APIs",
"Checks": [
"compute_instance_default_service_account_in_use_with_full_api_access"
],
"Attributes": [
{
"Title": "Instances Are Not Configured To Use the Default Service Account With Full Access to All Cloud APIs",
"Section": "2. Attack Surface",
"SubSection": "2.3 Application",
"AttributeDescription": "To enforce the principle of least privilege and prevent potential privilege escalation, instances should not be assigned the Compute Engine default service account with the scope Allow full access to all Cloud APIs.",
"AdditionalInformation": "Google Compute Engine provides a default service account for instances to access necessary cloud services. This default service account has the Project Editor role, granting broad permissions over most cloud services except billing. When assigned to an instance, it can operate in three modes: 1. Allow default access: grants minimal required permissions (recommended). 2. Allow full access to all Cloud APIs: grants excessive access to all cloud services (not recommended). 3. Set access for each API: allows administrators to specify required APIs (preferred for least privilege). Assigning an instance the Compute Engine default service account with full access to all APIs can expose cloud operations to unauthorized users based on IAM roles. To reduce security risks, instances should use custom service accounts with minimal required permissions.",
"LevelOfRisk": 4
}
]
},
{
"Id": "2.3.4",
"Description": "Ensure Block Project-Wide SSH Keys Is Enabled for VM Instances ",
"Checks": [
"compute_instance_block_project_wide_ssh_keys_disabled"
],
"Attributes": [
{
"Title": "Block Project-Wide SSH Keys Is Enabled for VM Instances ",
"Section": "2. Attack Surface",
"SubSection": "2.3 Application",
"AttributeDescription": "Instances should use instance-specific SSH keys instead of project-wide SSH keys to enhance security and reduce the risk of unauthorized access.",
"AdditionalInformation": "Project-wide SSH keys are stored in Compute Project metadata and can be used to access all instances within a project. While this simplifies SSH key management, it also increases security risks: if a project-wide SSH key is compromised, all instances in the project could be affected. Using instance-specific SSH keys provides better security by limiting access to individual instances, reducing the attack surface in case of key compromise.",
"LevelOfRisk": 5
}
]
},
{
"Id": "2.3.5",
"Description": "Ensure Enable Connecting to Serial Ports Is Not Enabled for VM Instance",
"Checks": [
"compute_instance_serial_ports_in_use"
],
"Attributes": [
{
"Title": "Enable Connecting to Serial Ports Is Not Enabled for VM Instance",
"Section": "2. Attack Surface",
"SubSection": "2.3 Application",
"AttributeDescription": "The interactive serial console allows direct access to a virtual machine's serial ports, similar to using a terminal window. When enabled, it allows connections from any IP address, creating a potential security risk. It is recommended to disable interactive serial console support.",
"AdditionalInformation": "A virtual machine instance has four virtual serial ports, often used by the operating system, BIOS, or other system-level entities for input and output. The first serial port (serial port 1) is commonly referred to as the serial console. Unlike SSH, the interactive serial console does not support IP-based access restrictions, meaning anyone with the correct SSH key, username, project ID, zone, and instance name could gain access. This exposes the instance to unauthorized access. To mitigate this risk, interactive serial console support should be disabled unless absolutely necessary.",
"LevelOfRisk": 5
}
]
},
{
"Id": "2.3.6",
"Description": "Ensure That IP Forwarding Is Not Enabled on Instances",
"Checks": [
"compute_instance_ip_forwarding_is_enabled"
],
"Attributes": [
{
"Title": "IP Forwarding Is Not Enabled on Instances",
"Section": "2. Attack Surface",
"SubSection": "2.3 Application",
"AttributeDescription": "Google Compute Engine instances should not forward data packets unless explicitly required for routing purposes. By default, an instance cannot forward packets unless the source IP matches the instance's IP address. Similarly, GCP won't deliver packets if the destination IP does not match the instance. To prevent unauthorized data forwarding, it is recommended to disable IP forwarding.",
"AdditionalInformation": "When IP forwarding is enabled (canIpForward field), an instance can send and receive packets with non-matching source or destination IPs, effectively allowing it to act as a network router. This can lead to data loss, information disclosure, or unauthorized traffic routing. To maintain security and prevent misuse, IP forwarding should be disabled unless explicitly required for network routing configurations.",
"LevelOfRisk": 2
}
]
},
{
"Id": "2.3.7",
"Description": "Ensure Compute Instances Are Launched With Shielded VM Enabled",
"Checks": [
"compute_instance_shielded_vm_enabled"
],
"Attributes": [
{
"Title": "Compute Instances Are Launched With Shielded VM Enabled",
"Section": "2. Attack Surface",
"SubSection": "2.3 Application",
"AttributeDescription": "Shielded VMs are hardened virtual machines on Google Cloud Platform (GCP) designed to protect against rootkits, bootkits, and other low-level attacks. They ensure verifiable integrity using Secure Boot, virtual Trusted Platform Module (vTPM)-enabled Measured Boot, and integrity monitoring.",
"AdditionalInformation": "Shielded VMs use signed and verified firmware from Google's Certificate Authority to establish a root of trust. Secure Boot ensures only authentic software runs by verifying digital signatures, preventing unauthorized modifications. Integrity monitoring helps detect unexpected changes in the VM's boot process, while vTPM-enabled Measured Boot provides a baseline to compare against future boots. Enabling Shielded VMs enhances security by protecting against malware, unauthorized firmware changes, and persistent threats.",
"LevelOfRisk": 4
}
]
},
{
"Id": "2.3.8",
"Description": "Ensure That Compute Instances Do Not Have Public IP Addresses",
"Checks": [
"compute_instance_public_ip"
],
"Attributes": [
{
"Title": "Compute Instances Do Not Have Public IP Addresses",
"Section": "2. Attack Surface",
"SubSection": "2.3 Application",
"AttributeDescription": "Compute instances should not be assigned external IP addresses to minimize exposure to the internet and reduce security risks.",
"AdditionalInformation": "Public IP addresses increase the attack surface of Compute instances, making them more vulnerable to threats. Instead, instances should be placed behind load balancers or use private networking to control access and reduce the risk of unauthorized exposure.",
"LevelOfRisk": 1
}
]
},
{
"Id": "3.1.1",
"Description": "Ensure That Sinks Are Configured for All Log Entries",
"Checks": [
"cloudstorage_bucket_log_retention_policy_lock"
],
"Attributes": [
{
"Title": "Sinks Are Configured for All Log Entries",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "It is recommended to create a log sink to export and store copies of all log entries. This enables log aggregation across multiple projects and allows integration with a Security Information and Event Management (SIEM) system for centralized monitoring.",
"AdditionalInformation": "Cloud Logging retains logs for a limited period. To ensure long-term storage and better security analysis, logs should be exported to a destination such as Cloud Storage, BigQuery, or Cloud Pub/Sub. A log sink allows you to aggregate logs from multiple projects, folders, or billing accounts; extend log retention beyond Cloud Logging's default retention period; and send logs to a SIEM system for real-time monitoring and threat detection. To ensure all logs are captured and exported: 1. Create a sink without filters to capture all log entries. 2. Choose an appropriate destination (e.g., Cloud Storage for long-term storage, BigQuery for analysis, or Pub/Sub for real-time processing). 3. Apply logging at the organization level to cover all associated projects. Implementing log sinks enhances security visibility, forensic capabilities, and compliance adherence across cloud environments.",
"LevelOfRisk": 4
}
]
},
{
"Id": "3.1.2",
"Description": "Ensure That the Log Metric Filter and Alerts Exist for VPC Network Firewall Rule Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_vpc_firewall_rule_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for VPC Network Firewall Rule Changes",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "It is recommended to configure a metric filter and alarm to monitor Virtual Private Cloud (VPC) Network Firewall rule changes. Tracking modifications to firewall rules helps ensure that unauthorized or unintended changes do not compromise network security.",
"AdditionalInformation": "Firewall rules control ingress and egress traffic within a VPC. Monitoring create or update events provides visibility into network access changes and helps quickly detect potential security threats or misconfigurations, reducing the risk of unauthorized access.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.3",
"Description": "Ensure That the Log Metric Filter and Alerts Exist for VPC Network Route Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_vpc_network_route_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for VPC Network Route Changes",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "It is recommended to configure a metric filter and alarm to monitor Virtual Private Cloud (VPC) network route changes. Keeping track of modifications ensures that unauthorized or unintended changes do not disrupt expected network traffic flow.",
"AdditionalInformation": "GCP routes define how network traffic is directed between VM instances and external destinations. Monitoring route table changes helps ensure that traffic follows the intended path, preventing misconfigurations or malicious alterations that could lead to data exposure or connectivity issues.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.4",
"Description": "Ensure That the Log Metric Filter and Alerts Exist for VPC Network Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_vpc_network_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for VPC Network Changes",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "It is recommended to configure a metric filter and alarm to monitor Virtual Private Cloud (VPC) network changes. This helps track modifications to VPC configurations and peer connections, ensuring that network traffic remains secure and follows the intended paths.",
"AdditionalInformation": "It is recommended to configure a metric filter and alarm to monitor Virtual Private Cloud (VPC) network changes. This helps track modifications to VPC configurations and peer connections, ensuring that network traffic remains secure and follows the intended paths.",
"LevelOfRisk": 4
}
]
},
{
"Id": "3.1.5",
"Description": "Ensure That the Log Metric Filter and Alerts Exist for Cloud Storage IAM Permission Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_bucket_permission_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for Cloud Storage IAM Permission Changes",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "It is recommended to set up a metric filter and alarm to monitor Cloud Storage Bucket IAM changes. This ensures that any modifications to bucket permissions are tracked and reviewed in a timely manner.",
"AdditionalInformation": "Monitoring changes to Cloud Storage IAM policies helps detect and correct unauthorized access or overly permissive configurations. This reduces the risk of data exposure or breaches by ensuring that sensitive storage buckets and their contents remain properly secured.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.6",
"Description": "Ensure That the Log Metric Filter and Alerts Exist for SQL Instance Configuration Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_sql_instance_configuration_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for SQL Instance Configuration Changes",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "It is recommended to configure a metric filter and alarm to track SQL instance configuration changes. This helps in detecting and addressing misconfigurations that may impact security, availability, and compliance.",
"AdditionalInformation": "Monitoring SQL instance configuration changes ensures that critical security settings remain properly configured. Misconfigurations, such as disabling auto backups, allowing untrusted networks, or modifying high availability settings, can lead to data loss, security vulnerabilities, or operational disruptions. Early detection of such changes helps maintain a secure and resilient SQL environment.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.7",
"Description": "Ensure Logging is enabled for HTTP(S) Load Balancer ",
"Checks": [
"compute_loadbalancer_logging_enabled"
],
"Attributes": [
{
"Title": "Logging is enabled for HTTP(S) Load Balancer ",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "Enabling logging on an HTTPS Load Balancer captures all network traffic and its destination, providing visibility into requests made to your web applications.",
"AdditionalInformation": "Logging HTTPS network traffic helps monitor access patterns, troubleshoot issues, and enhance security by detecting suspicious activity or unauthorized access attempts.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.8",
"Description": "Ensure that VPC Flow Logs is Enabled for Every Subnet in a VPC Network",
"Checks": [
"compute_subnet_flow_logs_enabled"
],
"Attributes": [
{
"Title": "VPC Flow Logs is Enabled for Every Subnet in a VPC Network",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "Flow Logs capture and record IP traffic to and from network interfaces within VPC subnets. These logs are stored in Stackdriver Logging, allowing users to analyze traffic patterns, detect anomalies, and optimize network performance. It is recommended to enable Flow Logs for all critical VPC subnets to enhance network visibility and security.",
"AdditionalInformation": "VPC Flow Logs provide detailed insights into inbound and outbound traffic for virtual machines (VMs), whether they communicate with other VMs, on-premises data centers, Google services, or external networks. Enabling Flow Logs supports network monitoring, traffic analysis and cost optimization, incident investigation and forensics, and real-time security threat detection. For effective monitoring, Flow Logs should be configured to capture all traffic, use granular logging intervals, avoid log filtering, and include metadata for detailed investigations. Note that subnets reserved for internal HTTP(S) load balancing do not support Flow Logs.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.9",
"Description": "Ensure That the Log_connections Database Flag for Cloud SQL PostgreSQL Instance Is Set to On",
"Checks": [
"cloudsql_instance_postgres_log_connections_flag"
],
"Attributes": [
{
"Title": "Log_connections Database Flag for Cloud SQL PostgreSQL Instance Is Set to On",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "The log_connections setting should be enabled to log all attempted connections to the PostgreSQL server, including successful client authentication.",
"AdditionalInformation": "By default, PostgreSQL does not log connection attempts, making it harder to detect unauthorized access. Enabling log_connections provides visibility into all connection attempts, aiding in troubleshooting and identifying unusual or suspicious access patterns. This is particularly useful for security monitoring and incident response.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.10",
"Description": "Ensure That the Log_disconnections Database Flag for Cloud SQL PostgreSQL Instance Is Set to On",
"Checks": [
"cloudsql_instance_postgres_log_disconnections_flag"
],
"Attributes": [
{
"Title": "Log_disconnections Database Flag for Cloud SQL PostgreSQL Instance Is Set to On",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "The log_disconnections setting should be enabled to log the end of each PostgreSQL session, including session duration.",
"AdditionalInformation": "By default, PostgreSQL does not log session termination details, making it difficult to track session activity. Enabling log_disconnections helps monitor session durations and detect unusual activity. Combined with log_connections, it provides a complete audit trail of user access, aiding in troubleshooting and security monitoring.",
"LevelOfRisk": 4
}
]
},
{
"Id": "3.1.11",
"Description": "Ensure Log_statement Database Flag for Cloud SQL PostgreSQL Instance Is Set Appropriately",
"Checks": [
"cloudsql_instance_postgres_log_statement_flag"
],
"Attributes": [
{
"Title": "Log_statement Database Flag for Cloud SQL PostgreSQL Instance Is Set Appropriately",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "The log_statement setting in PostgreSQL determines which SQL statements are logged. Acceptable values include none, ddl, mod, and all. A recommended setting is ddl, which logs all data definition statements unless otherwise specified by the organization's logging policy.",
"AdditionalInformation": "Proper SQL statement logging is crucial for auditing and forensic analysis. If too many statements are logged, it can become difficult to extract relevant information; if too few are logged, critical details may be missing. Setting log_statement to an appropriate value, such as ddl, ensures a balance between comprehensive auditing and log manageability, aiding in database security and compliance.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.12",
"Description": "Ensure that the Log_min_messages Flag for a Cloud SQL PostgreSQL Instance is set at minimum to 'Warning'",
"Checks": [
"cloudsql_instance_postgres_log_min_messages_flag"
],
"Attributes": [
{
"Title": "Log_min_messages Flag for a Cloud SQL PostgreSQL Instance is set at minimum to Warning",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "The log_min_messages setting in PostgreSQL defines the minimum severity level for messages to be logged as errors. Accepted values range from DEBUG5 (least severe) to PANIC (most severe). Best practice is to set this value to ERROR, ensuring that only critical issues are logged unless an organization's policy requires a different threshold.",
"AdditionalInformation": "Proper logging is essential for troubleshooting and forensic analysis. If log_min_messages is not configured correctly, important error messages may be missed or unnecessary logs may clutter records. Setting this parameter to ERROR helps maintain a balance between capturing relevant issues and avoiding excessive log noise, improving system monitoring and security.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.13",
"Description": "Ensure Log_min_error_statement Database Flag for Cloud SQL PostgreSQL Instance Is Set to Error or Stricter",
"Checks": [
"cloudsql_instance_postgres_log_min_error_statement_flag"
],
"Attributes": [
{
"Title": "Log_min_error_statement Database Flag for Cloud SQL PostgreSQL Instance Is Set to Error or Stricter",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "The log_min_error_statement setting in PostgreSQL defines the minimum severity level for statements to be logged as errors. Valid values range from DEBUG5 (least severe) to PANIC (most severe). It is recommended to set this value to ERROR or stricter to ensure only relevant error statements are logged.",
"AdditionalInformation": "Proper logging aids in troubleshooting and forensic analysis. If log_min_error_statement is set too leniently, excessive log entries may make it difficult to identify actual errors. Conversely, if it is set too strictly, important errors may be missed. Setting this parameter to ERROR or higher ensures that significant issues are recorded while avoiding unnecessary log clutter.",
"LevelOfRisk": 3
}
]
},
{
"Id": "3.1.14",
"Description": "Ensure That the Log_min_duration_statement Database Flag for Cloud SQL PostgreSQL Instance Is Set to '-1' (Disabled) ",
"Checks": [
"cloudsql_instance_postgres_log_min_duration_statement_flag"
],
"Attributes": [
{
"Title": "Log_min_duration_statement Database Flag for Cloud SQL PostgreSQL Instance Is Set to -1 (Disabled) ",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "The log_min_duration_statement setting in PostgreSQL determines the minimum execution time (in milliseconds) required for a statement to be logged. It is recommended to disable this setting by setting its value to -1.",
"AdditionalInformation": "Logging SQL statements may expose sensitive information, which could lead to security risks if recorded in logs. Disabling this setting ensures that confidential data is not inadvertently captured. This recommendation applies to PostgreSQL database instances.",
"LevelOfRisk": 3
}
]
},
{
"Id": "3.1.15",
"Description": "Ensure That 'cloudsql.enable_pgaudit' Database Flag for each Cloud Sql Postgresql Instance Is Set to 'on' For Centralized Logging",
"Checks": [
"cloudsql_instance_postgres_enable_pgaudit_flag"
],
"Attributes": [
{
"Title": "cloudsql.enable_pgaudit Database Flag for each Cloud Sql Postgresql Instance Is Set to on For Centralized Logging",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "Ensure that the cloudsql.enable_pgaudit database flag is set to on for Cloud SQL PostgreSQL instances to enable centralized logging and auditing.",
"AdditionalInformation": "Enabling the pgaudit extension provides detailed session and object-level logging, which helps organizations comply with security standards such as government, financial, and ISO regulations. This logging capability enhances threat detection by monitoring security events on the database instance. Additionally, enabling this flag allows logs to be sent to Google Logs Explorer for centralized access and monitoring. This recommendation applies specifically to PostgreSQL database instances.",
"LevelOfRisk": 4
}
]
},
{
"Id": "3.2.1",
"Description": "Ensure That Retention Policies on Cloud Storage Buckets Used for Exporting Logs Are Configured Using Bucket Lock",
"Checks": [
"cloudstorage_bucket_log_retention_policy_lock"
],
"Attributes": [
{
"Title": "Retention Policies on Cloud Storage Buckets Used for Exporting Logs Are Configured Using Bucket Lock ",
"Section": "3. Logging and Monitoring",
"SubSection": "3.2 Retention",
"AttributeDescription": "Enabling retention policies on log storage buckets prevents logs from being overwritten or accidentally deleted. It is recommended to configure retention policies and enable Bucket Lock for all storage buckets used as log sinks.",
"AdditionalInformation": "Cloud Logging allows logs to be exported to storage buckets through sinks. Without a retention policy, logs can be altered or deleted, making it difficult to perform security investigations or comply with audit requirements. To ensure logs remain intact for forensics and security analysis: 1.Set a retention policy on log storage buckets to prevent early deletion. 2.Enable Bucket Lock to make the policy immutable, ensuring logs cannot be altered even by privileged users. 3.Apply appropriate access controls to protect logs from unauthorized access. By implementing retention policies and Bucket Lock, organizations preserve critical security logs, prevent attackers from covering their tracks, and enhance compliance with security regulations.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.3.1",
"Description": "Ensure Cloud Asset Inventory Is Enabled",
"Checks": [
"iam_cloud_asset_inventory_enabled"
],
"Attributes": [
{
"Title": "Cloud Asset Inventory Enabled",
"Section": "3. Logging and Monitoring",
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Google Cloud Asset Inventory provides a historical view of GCP resources and IAM policies using a time-series database. It captures metadata on cloud resources, policy configurations, and runtime data. Enabling Cloud Asset Inventory allows for efficient searching and exporting of asset data.",
"AdditionalInformation": "Cloud Asset Inventory enhances security analysis, resource change tracking, and compliance auditing by maintaining a detailed history of GCP resources and their configurations. Enabling it across all GCP projects ensures visibility into changes, helping organizations detect misconfigurations, track policy changes, and strengthen security posture.",
"LevelOfRisk": 4
}
]
},
{
"Id": "3.3.2",
"Description": "Ensure 'Access Approval' is 'Enabled'",
"Checks": [
"iam_account_access_approval_enabled"
],
"Attributes": [
{
"Title": "Access Aproval Enabled",
"Section": "3. Logging and Monitoring",
"SubSection": "3.3 Monitoring",
"AttributeDescription": "GCP Access Approval allows organizations to require explicit approval before Google support personnel can access their projects. Administrators can assign security roles in IAM to specific users who can review and approve these requests. Notifications of access requests, including the requesting Google employees details, are sent via email or Pub/Sub messages, providing transparency and control.",
"AdditionalInformation": "Managing who accesses your organizations data is critical for information security. While Google support may require access for troubleshooting, Access Approval ensures that access is only granted when explicitly authorized. This feature adds an additional layer of security and logging, ensuring that only approved Google personnel can access sensitive information.",
"LevelOfRisk": 5
}
]
},
{
"Id": "4.1.1",
"Description": "Ensure That DNSSEC Is Enabled for Cloud DNS ",
"Checks": [
"dns_dnssec_disabled"
],
"Attributes": [
{
"Title": "DNSSEC Is Enabled for Cloud DNS ",
"Section": "4. Encryption",
"SubSection": "4.1 In-Transit",
"AttributeDescription": "Cloud DNS provides a scalable and reliable domain name system (DNS) service. Domain Name System Security Extensions (DNSSEC) enhance DNS security by protecting domains against DNS hijacking, man-in-the-middle attacks, and other threats.",
"AdditionalInformation": "DNSSEC cryptographically signs DNS records, ensuring the integrity and authenticity of DNS responses. Without DNSSEC, attackers can manipulate DNS lookups, redirecting users to malicious websites through DNS hijacking or spoofing attacks. Enabling DNSSEC helps prevent unauthorized modifications to DNS records, reducing the risk of phishing, malware distribution, and other cyber threats.",
"LevelOfRisk": 1
}
]
},
{
"Id": "4.1.2",
"Description": "Ensure That the Cloud SQL Database Instance Requires All Incoming Connections To Use SSL",
"Checks": [
"cloudsql_instance_ssl_connections"
],
"Attributes": [
{
"Title": "Cloud SQL Database Instance Requires All Incoming Connections To Use SSL",
"Section": "4. Encryption",
"SubSection": "4.1 In-Transit",
"AttributeDescription": "Require all incoming connections to SQL database instances to use SSL encryption.",
"AdditionalInformation": "Unencrypted SQL database connections are vulnerable to man-in-the-middle (MITM) attacks, which can expose sensitive data such as credentials, queries, and results. Enforcing SSL ensures secure communication by encrypting data in transit, protecting against interception and unauthorized access. This recommendation applies to PostgreSQL, MySQL (Generation 1 and 2), and SQL Server 2017 instances.",
"LevelOfRisk": 5
}
]
},
{
"Id": "4.1.3",
"Description": "Ensure That RSASHA1 Is Not Used for the Key-Signing Key in Cloud DNS DNSSEC",
"Checks": [
"dns_rsasha1_in_use_to_key_sign_in_dnssec"
],
"Attributes": [
{
"Title": "RSASHA1 Is Not Used for the Key-Signing Key in Cloud DNS DNSSEC",
"Section": "4. Encryption",
"SubSection": "4.1 In-Transit",
"AttributeDescription": "DNSSEC (Domain Name System Security Extensions) relies on cryptographic algorithms to ensure the integrity and authenticity of DNS responses. It is important to use strong and recommended algorithms for key signing to maintain robust security. SHA-1 is deprecated and requires explicit approval from Google if used.",
"AdditionalInformation": "DNSSEC signing algorithms play a critical role in securing DNS transactions. Using weak or outdated algorithms can expose DNS infrastructure to spoofing, hijacking, and other attacks. Organizations should select recommended and secure algorithms when enabling DNSSEC to protect DNS records from unauthorized modifications. If adjustments to DNSSEC settings are required, DNSSEC must be disabled and re-enabled with the updated configurations.",
"LevelOfRisk": 5
}
]
},
{
"Id": "4.1.4",
"Description": "Ensure That RSASHA1 Is Not Used for the Zone-Signing Key in Cloud DNS DNSSEC",
"Checks": [
"dns_rsasha1_in_use_to_zone_sign_in_dnssec"
],
"Attributes": [
{
"Title": "RSASHA1 Is Not Used for the Zone-Signing Key in Cloud DNS DNSSEC",
"Section": "4. Encryption",
"SubSection": "4.1 In-Transit",
"AttributeDescription": "DNSSEC (Domain Name System Security Extensions) enhances DNS security by using cryptographic algorithms for zone signing and transaction security. It is essential to use strong and recommended algorithms for key signing. SHA-1 has been deprecated and requires Googles explicit approval and a support contract if used.",
"AdditionalInformation": "Using weak or outdated cryptographic algorithms compromises DNS integrity and exposes systems to threats like spoofing and hijacking. Organizations should ensure that DNSSEC settings use strong, recommended algorithms. If DNSSEC is already enabled and changes are needed, it must be disabled and re-enabled with updated configurations to apply the changes effectively.",
"LevelOfRisk": 2
}
]
},
{
"Id": "4.2.1",
"Description": "Ensure VM Disks for Critical VMs Are Encrypted With Customer-Supplied Encryption Keys (CSEK)",
"Checks": [
"compute_instance_encryption_with_csek_enabled"
],
"Attributes": [
{
"Title": "VM Disks for Critical VMs Are Encrypted With Customer-Supplied Encryption Keys (CSEK)",
"Section": "4. Encryption",
"SubSection": "4.2 At-Rest",
"AttributeDescription": "Customer-Supplied Encryption Keys (CSEK) is a feature available in Google Cloud Storage and Google Compute Engine, allowing users to supply their own encryption keys. When you provide your key, Google uses it to protect the Google-generated keys that are responsible for encrypting and decrypting your data. By default, Google Compute Engine encrypts all data at rest automatically, managing this encryption for you with no additional action required. However, if you wish to have full control over the encryption process, you can choose to supply your own encryption keys.",
"AdditionalInformation": "By default, Compute Engine automatically encrypts all data at rest, with the service managing the encryption without any further input required from you or your application. However, if you require complete control over encryption, you have the option to provide your own encryption keys to manage the encryption of instance disks.",
"LevelOfRisk": 5
}
]
},
{
"Id": "4.2.2",
"Description": "Ensure that Dataproc Cluster is encrypted using Customer-Managed Encryption Key",
"Checks": [
"dataproc_encrypted_with_cmks_disabled"
],
"Attributes": [
{
"Title": "Dataproc Cluster is encrypted using Customer-Managed Encryption Key",
"Section": "4. Encryption",
"SubSection": "4.2 At-Rest",
"AttributeDescription": "When using Dataproc, the data associated with clusters and jobs is stored on Persistent Disks (PDs) linked to the Compute Engine VMs in your cluster and in a Cloud Storage staging bucket. This data is encrypted using a Google-generated Data Encryption Key (DEK) and a Key Encryption Key (KEK). The Customer-Managed Encryption Keys (CMEK) feature allows you to create, use, and revoke the KEK, although Google still controls the DEK used to encrypt the data.",
"AdditionalInformation": "Dataproc cluster data is encrypted using Google-managed keys: the Data Encryption Key (DEK) and the Key Encryption Key (KEK). If you wish to have control over the encryption of your cluster data, you can use your own Customer-Managed Keys (CMKs). Cloud KMS Customer-Managed Keys can add an extra layer of security and are commonly used in environments with strict compliance and security requirements.",
"LevelOfRisk": 4
}
]
},
{
"Id": "4.2.3",
"Description": "Ensure BigQuery datasets are encrypted with Customer-Managed Keys (CMKs).",
"Checks": [
"bigquery_dataset_cmk_encryption"
],
"Attributes": [
{
"Title": "BigQuery datasets are encrypted with Customer-Managed Keys (CMKs).",
"Section": "4. Encryption",
"SubSection": "4.2 At-Rest",
"AttributeDescription": "Ensure that BigQuery datasets are encrypted using Customer-Managed Keys (CMKs) to gain more granular control over the data encryption and decryption process.",
"AdditionalInformation": "For enhanced control over encryption, Customer-Managed Encryption Keys (CMEK) can be implemented as a key management solution for BigQuery datasets.",
"LevelOfRisk": 3
}
]
},
{
"Id": "4.2.4",
"Description": "Ensure BigQuery tables are encrypted with Customer-Managed Keys (CMKs).",
"Checks": [
"bigquery_table_cmk_encryption"
],
"Attributes": [
{
"Title": "BigQuery tables are encrypted with Customer-Managed Keys (CMKs).",
"Section": "4. Encryption",
"SubSection": "4.2 At-Rest",
"AttributeDescription": "Ensure that BigQuery tables are encrypted using Customer-Managed Keys (CMKs) for more granular control over the data encryption and decryption process.",
"AdditionalInformation": "For greater control over encryption, Customer-Managed Encryption Keys (CMEK) can be utilized as the key management solution for BigQuery tables.",
"LevelOfRisk": 3
}
]
}
]
}
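The file above is plain JSON, so the per-pillar LevelOfRisk values can be inspected directly. A minimal sketch, assuming the file is saved locally as prowler_threatscore_gcp.json and keeps the top-level Requirements list shown here:
import json
from collections import defaultdict

def mean_level_of_risk_per_pillar(path="prowler_threatscore_gcp.json"):
    # Average the LevelOfRisk declared for each Section (pillar) of the framework file.
    with open(path) as framework_file:
        framework = json.load(framework_file)
    totals = defaultdict(lambda: [0, 0])  # pillar -> [sum of LevelOfRisk, attribute count]
    for requirement in framework["Requirements"]:
        for attribute in requirement["Attributes"]:
            totals[attribute["Section"]][0] += attribute["LevelOfRisk"]
            totals[attribute["Section"]][1] += 1
    return {pillar: round(risk_sum / count, 2) for pillar, (risk_sum, count) in totals.items()}

print(mean_level_of_risk_per_pillar())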

View File

@@ -183,6 +183,18 @@ class KISA_ISMSP_Requirement_Attribute(BaseModel):
NonComplianceCases: Optional[list[str]]
# Prowler ThreatScore Requirement Attribute
class Prowler_ThreatScore_Requirement_Attribute(BaseModel):
"""Prowler ThreatScore Requirement Attribute"""
Title: str
Section: str
SubSection: str
AttributeDescription: str
AdditionalInformation: str
LevelOfRisk: int
# Base Compliance Model
# TODO: move this to compliance folder
class Compliance_Requirement(BaseModel):
@@ -198,6 +210,7 @@ class Compliance_Requirement(BaseModel):
ISO27001_2013_Requirement_Attribute,
AWS_Well_Architected_Requirement_Attribute,
KISA_ISMSP_Requirement_Attribute,
Prowler_ThreatScore_Requirement_Attribute,
# Generic_Compliance_Requirement_Attribute must be the last one since it is the fallback for generic compliance framework
Generic_Compliance_Requirement_Attribute,
]
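A quick sketch of what the new attribute model accepts; the import path matches the one used in the fixtures below, and the field values are illustrative only:
from prowler.lib.check.compliance_models import Prowler_ThreatScore_Requirement_Attribute

attribute = Prowler_ThreatScore_Requirement_Attribute(
    Title="MFA enabled for 'root'",
    Section="1. IAM",
    SubSection="1.1 Authentication",
    AttributeDescription="Illustrative attribute description.",
    AdditionalInformation="Illustrative additional information.",
    LevelOfRisk=5,
)
assert attribute.LevelOfRisk == 5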

View File

@@ -11,6 +11,9 @@ from prowler.lib.outputs.compliance.kisa_ismsp.kisa_ismsp import get_kisa_ismsp_
from prowler.lib.outputs.compliance.mitre_attack.mitre_attack import (
get_mitre_attack_table,
)
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore import (
get_prowler_threatscore_table,
)
def display_compliance_table(
@@ -72,6 +75,15 @@ def display_compliance_table(
output_directory,
compliance_overview,
)
elif "threatscore_" in compliance_framework:
get_prowler_threatscore_table(
findings,
bulk_checks_metadata,
compliance_framework,
output_filename,
output_directory,
compliance_overview,
)
else:
get_generic_compliance_table(
findings,

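The new branch keys off the substring in the framework name, so each of the three ThreatScore frameworks takes this path; a tiny sanity check:
for compliance_framework in (
    "prowler_threatscore_aws",
    "prowler_threatscore_azure",
    "prowler_threatscore_gcp",
):
    assert "threatscore_" in compliance_framework  # routed to get_prowler_threatscore_table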
View File

@@ -0,0 +1,81 @@
from typing import Optional
from pydantic import BaseModel
class ProwlerThreatScoreAWSModel(BaseModel):
"""
ProwlerThreatScoreAWSModel generates a finding's output in AWS Prowler ThreatScore Compliance format.
"""
Provider: str
Description: str
AccountId: str
Region: str
AssessmentDate: str
Requirements_Id: str
Requirements_Description: str
Requirements_Attributes_Title: str
Requirements_Attributes_Section: str
Requirements_Attributes_SubSection: Optional[str]
Requirements_Attributes_AttributeDescription: str
Requirements_Attributes_AdditionalInformation: str
Requirements_Attributes_LevelOfRisk: int
Status: str
StatusExtended: str
ResourceId: str
ResourceName: str
CheckId: str
Muted: bool
class ProwlerThreatScoreAzureModel(BaseModel):
"""
ProwlerThreatScoreAzureModel generates a finding's output in Azure Prowler ThreatScore Compliance format.
"""
Provider: str
Description: str
SubscriptionId: str
Location: str
AssessmentDate: str
Requirements_Id: str
Requirements_Description: str
Requirements_Attributes_Title: str
Requirements_Attributes_Section: str
Requirements_Attributes_SubSection: Optional[str]
Requirements_Attributes_AttributeDescription: str
Requirements_Attributes_AdditionalInformation: str
Requirements_Attributes_LevelOfRisk: int
Status: str
StatusExtended: str
ResourceId: str
ResourceName: str
CheckId: str
Muted: bool
class ProwlerThreatScoreGCPModel(BaseModel):
"""
ProwlerThreatScoreGCPModel generates a finding's output in GCP Prowler ThreatScore Compliance format.
"""
Provider: str
Description: str
ProjectId: str
Location: str
AssessmentDate: str
Requirements_Id: str
Requirements_Description: str
Requirements_Attributes_Title: str
Requirements_Attributes_Section: str
Requirements_Attributes_SubSection: Optional[str]
Requirements_Attributes_AttributeDescription: str
Requirements_Attributes_AdditionalInformation: str
Requirements_Attributes_LevelOfRisk: int
Status: str
StatusExtended: str
ResourceId: str
ResourceName: str
CheckId: str
Muted: bool
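A minimal sketch of filling one AWS row by hand; all values are illustrative, and it assumes pydantic's .dict() serializer is available to turn the row into the key/value pairs behind one CSV line:
from datetime import datetime, timezone

from prowler.lib.outputs.compliance.prowler_threatscore.models import (
    ProwlerThreatScoreAWSModel,
)

row = ProwlerThreatScoreAWSModel(
    Provider="aws",
    Description="Illustrative framework description",
    AccountId="123456789012",
    Region="eu-west-1",
    AssessmentDate=str(datetime.now(timezone.utc)),
    Requirements_Id="1.1.1",
    Requirements_Description="Ensure MFA is enabled for the 'root' user account",
    Requirements_Attributes_Title="MFA enabled for 'root'",
    Requirements_Attributes_Section="1. IAM",
    Requirements_Attributes_SubSection="1.1 Authentication",
    Requirements_Attributes_AttributeDescription="Illustrative attribute description.",
    Requirements_Attributes_AdditionalInformation="Illustrative additional information.",
    Requirements_Attributes_LevelOfRisk=5,
    Status="PASS",
    StatusExtended="",
    ResourceId="",
    ResourceName="",
    CheckId="iam_root_mfa_enabled",
    Muted=False,
)
print(row.dict())  # one row's worth of data, keyed by the column names above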

View File

@@ -0,0 +1,121 @@
from colorama import Fore, Style
from tabulate import tabulate
from prowler.config.config import orange_color
def get_prowler_threatscore_table(
findings: list,
bulk_checks_metadata: dict,
compliance_framework: str,
output_filename: str,
output_directory: str,
compliance_overview: bool,
):
pillar_table = {
"Provider": [],
"Pillar": [],
"Status": [],
"Score": [],
"Muted": [],
}
pass_count = []
fail_count = []
muted_count = []
pillars = {}
score_per_pillar = {}
number_findings_per_pillar = {}
for index, finding in enumerate(findings):
check = bulk_checks_metadata[finding.check_metadata.CheckID]
check_compliances = check.Compliance
for compliance in check_compliances:
if compliance.Framework == "ProwlerThreatScore":
for requirement in compliance.Requirements:
for attribute in requirement.Attributes:
pillar = attribute.Section
if pillar not in score_per_pillar.keys():
score_per_pillar[pillar] = 0
number_findings_per_pillar[pillar] = 0
if finding.status == "FAIL" and not finding.muted:
score_per_pillar[pillar] += attribute.LevelOfRisk
number_findings_per_pillar[pillar] += 1
if pillar not in pillars:
pillars[pillar] = {"FAIL": 0, "PASS": 0, "Muted": 0}
if finding.muted:
if index not in muted_count:
muted_count.append(index)
pillars[pillar]["Muted"] += 1
else:
if finding.status == "FAIL" and index not in fail_count:
fail_count.append(index)
pillars[pillar]["FAIL"] += 1
elif finding.status == "PASS" and index not in pass_count:
pass_count.append(index)
pillars[pillar]["PASS"] += 1
pillars = dict(sorted(pillars.items()))
for pillar in pillars:
pillar_table["Provider"].append(compliance.Provider)
pillar_table["Pillar"].append(pillar)
if number_findings_per_pillar[pillar] == 0:
pillar_table["Score"].append(
f"{Style.BRIGHT}{Fore.GREEN}0{Style.RESET_ALL}"
)
else:
pillar_table["Score"].append(
f"{Style.BRIGHT}{Fore.RED}{score_per_pillar[pillar] / number_findings_per_pillar[pillar]:.2f}/5{Style.RESET_ALL}"
)
if pillars[pillar]["FAIL"] > 0:
pillar_table["Status"].append(
f"{Fore.RED}FAIL({pillars[pillar]['FAIL']}){Style.RESET_ALL}"
)
else:
pillar_table["Status"].append(
f"{Fore.GREEN}PASS({pillars[pillar]['PASS']}){Style.RESET_ALL}"
)
pillar_table["Muted"].append(
f"{orange_color}{pillars[pillar]['Muted']}{Style.RESET_ALL}"
)
if (
len(fail_count) + len(pass_count) + len(muted_count) > 1
): # If there are no resources, don't print the compliance table
print(
f"\nCompliance Status of {Fore.YELLOW}{compliance_framework.upper()}{Style.RESET_ALL} Framework:"
)
total_findings_count = len(fail_count) + len(pass_count) + len(muted_count)
overview_table = [
[
f"{Fore.RED}{round(len(fail_count) / total_findings_count * 100, 2)}% ({len(fail_count)}) FAIL{Style.RESET_ALL}",
f"{Fore.GREEN}{round(len(pass_count) / total_findings_count * 100, 2)}% ({len(pass_count)}) PASS{Style.RESET_ALL}",
f"{orange_color}{round(len(muted_count) / total_findings_count * 100, 2)}% ({len(muted_count)}) MUTED{Style.RESET_ALL}",
]
]
print(tabulate(overview_table, tablefmt="rounded_grid"))
if not compliance_overview:
if len(fail_count) > 0 and len(pillar_table["Pillar"]) > 0:
print(
f"\nFramework {Fore.YELLOW}{compliance_framework.upper()}{Style.RESET_ALL} Results:"
)
print(
tabulate(
pillar_table,
tablefmt="rounded_grid",
headers="keys",
)
)
print(
f"{Style.BRIGHT}\n=== Risk Score Guide ===\nScore ranges from 1 (lowest risk) to 5 (highest risk), indicating the severity of the potential impact.{Style.RESET_ALL}"
)
print(
f"{Style.BRIGHT}(Only sections containing results appear, the score is calculated as the sum of the level of risk of the failed findings divided by the number of failed findings){Style.RESET_ALL}"
)
print(f"\nDetailed results of {compliance_framework.upper()} are in:")
print(
f" - CSV: {output_directory}/compliance/{output_filename}_{compliance_framework}.csv\n"
)
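Worked example of the score formula printed above, assuming a pillar with three failed findings of risk 5, 4 and 3:
failed_levels_of_risk = [5, 4, 3]
pillar_score = sum(failed_levels_of_risk) / len(failed_levels_of_risk)
print(f"{pillar_score:.2f}/5")  # 4.00/5, matching the formatting used in the pillar table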

View File

@@ -0,0 +1,91 @@
from prowler.lib.check.compliance_models import Compliance
from prowler.lib.outputs.compliance.compliance_output import ComplianceOutput
from prowler.lib.outputs.compliance.prowler_threatscore.models import (
ProwlerThreatScoreAWSModel,
)
from prowler.lib.outputs.finding import Finding
class ProwlerThreatScoreAWS(ComplianceOutput):
"""
This class represents the AWS Prowler ThreatScore compliance output.
Attributes:
- _data (list): A list to store transformed data from findings.
- _file_descriptor (TextIOWrapper): A file descriptor to write data to a file.
Methods:
- transform: Transforms findings into AWS Prowler ThreatScore compliance format.
"""
def transform(
self,
findings: list[Finding],
compliance: Compliance,
compliance_name: str,
) -> None:
"""
Transforms a list of findings into AWS Prowler ThreatScore compliance format.
Parameters:
- findings (list): A list of findings.
- compliance (Compliance): A compliance model.
- compliance_name (str): The name of the compliance model.
Returns:
- None
"""
for finding in findings:
# Get the compliance requirements for the finding
finding_requirements = finding.compliance.get(compliance_name, [])
for requirement in compliance.Requirements:
if requirement.Id in finding_requirements:
for attribute in requirement.Attributes:
compliance_row = ProwlerThreatScoreAWSModel(
Provider=finding.provider,
Description=compliance.Description,
AccountId=finding.account_uid,
Region=finding.region,
AssessmentDate=str(finding.timestamp),
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Title=attribute.Title,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_AttributeDescription=attribute.AttributeDescription,
Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
Requirements_Attributes_LevelOfRisk=attribute.LevelOfRisk,
Status=finding.status,
StatusExtended=finding.status_extended,
ResourceId=finding.resource_uid,
ResourceName=finding.resource_name,
CheckId=finding.check_id,
Muted=finding.muted,
)
self._data.append(compliance_row)
# Add manual requirements to the compliance output
for requirement in compliance.Requirements:
if not requirement.Checks:
for attribute in requirement.Attributes:
compliance_row = ProwlerThreatScoreAWSModel(
Provider=compliance.Provider.lower(),
Description=compliance.Description,
AccountId="",
Region="",
AssessmentDate=str(finding.timestamp),
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Title=attribute.Title,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_AttributeDescription=attribute.AttributeDescription,
Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
Requirements_Attributes_LevelOfRisk=attribute.LevelOfRisk,
Status="MANUAL",
StatusExtended="Manual check",
ResourceId="manual_check",
ResourceName="Manual check",
CheckId="manual",
Muted=False,
)
self._data.append(compliance_row)

View File

@@ -0,0 +1,91 @@
from prowler.lib.check.compliance_models import Compliance
from prowler.lib.outputs.compliance.compliance_output import ComplianceOutput
from prowler.lib.outputs.compliance.prowler_threatscore.models import (
ProwlerThreatScoreAzureModel,
)
from prowler.lib.outputs.finding import Finding
class ProwlerThreatScoreAzure(ComplianceOutput):
"""
This class represents the Azure Prowler ThreatScore compliance output.
Attributes:
- _data (list): A list to store transformed data from findings.
- _file_descriptor (TextIOWrapper): A file descriptor to write data to a file.
Methods:
- transform: Transforms findings into Azure Prowler ThreatScore compliance format.
"""
def transform(
self,
findings: list[Finding],
compliance: Compliance,
compliance_name: str,
) -> None:
"""
Transforms a list of findings into Azure Prowler ThreatScore compliance format.
Parameters:
- findings (list): A list of findings.
- compliance (Compliance): A compliance model.
- compliance_name (str): The name of the compliance model.
Returns:
- None
"""
for finding in findings:
# Get the compliance requirements for the finding
finding_requirements = finding.compliance.get(compliance_name, [])
for requirement in compliance.Requirements:
if requirement.Id in finding_requirements:
for attribute in requirement.Attributes:
compliance_row = ProwlerThreatScoreAzureModel(
Provider=finding.provider,
Description=compliance.Description,
SubscriptionId=finding.account_uid,
Location=finding.region,
AssessmentDate=str(finding.timestamp),
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Title=attribute.Title,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_AttributeDescription=attribute.AttributeDescription,
Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
Requirements_Attributes_LevelOfRisk=attribute.LevelOfRisk,
Status=finding.status,
StatusExtended=finding.status_extended,
ResourceId=finding.resource_uid,
ResourceName=finding.resource_name,
CheckId=finding.check_id,
Muted=finding.muted,
)
self._data.append(compliance_row)
# Add manual requirements to the compliance output
for requirement in compliance.Requirements:
if not requirement.Checks:
for attribute in requirement.Attributes:
compliance_row = ProwlerThreatScoreAzureModel(
Provider=compliance.Provider.lower(),
Description=compliance.Description,
SubscriptionId="",
Location="",
AssessmentDate=str(finding.timestamp),
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Title=attribute.Title,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_AttributeDescription=attribute.AttributeDescription,
Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
Requirements_Attributes_LevelOfRisk=attribute.LevelOfRisk,
Status="MANUAL",
StatusExtended="Manual check",
ResourceId="manual_check",
ResourceName="Manual check",
CheckId="manual",
Muted=False,
)
self._data.append(compliance_row)

View File

@@ -0,0 +1,91 @@
from prowler.lib.check.compliance_models import Compliance
from prowler.lib.outputs.compliance.compliance_output import ComplianceOutput
from prowler.lib.outputs.compliance.prowler_threatscore.models import (
ProwlerThreatScoreGCPModel,
)
from prowler.lib.outputs.finding import Finding
class ProwlerThreatScoreGCP(ComplianceOutput):
"""
This class represents the GCP Prowler ThreatScore compliance output.
Attributes:
- _data (list): A list to store transformed data from findings.
- _file_descriptor (TextIOWrapper): A file descriptor to write data to a file.
Methods:
- transform: Transforms findings into GCP Prowler ThreatScore compliance format.
"""
def transform(
self,
findings: list[Finding],
compliance: Compliance,
compliance_name: str,
) -> None:
"""
Transforms a list of findings into GCP Prowler ThreatScore compliance format.
Parameters:
- findings (list): A list of findings.
- compliance (Compliance): A compliance model.
- compliance_name (str): The name of the compliance model.
Returns:
- None
"""
for finding in findings:
# Get the compliance requirements for the finding
finding_requirements = finding.compliance.get(compliance_name, [])
for requirement in compliance.Requirements:
if requirement.Id in finding_requirements:
for attribute in requirement.Attributes:
compliance_row = ProwlerThreatScoreGCPModel(
Provider=finding.provider,
Description=compliance.Description,
ProjectId=finding.account_uid,
Location=finding.region,
AssessmentDate=str(finding.timestamp),
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Title=attribute.Title,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_AttributeDescription=attribute.AttributeDescription,
Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
Requirements_Attributes_LevelOfRisk=attribute.LevelOfRisk,
Status=finding.status,
StatusExtended=finding.status_extended,
ResourceId=finding.resource_uid,
ResourceName=finding.resource_name,
CheckId=finding.check_id,
Muted=finding.muted,
)
self._data.append(compliance_row)
# Add manual requirements to the compliance output
for requirement in compliance.Requirements:
if not requirement.Checks:
for attribute in requirement.Attributes:
compliance_row = ProwlerThreatScoreGCPModel(
Provider=compliance.Provider.lower(),
Description=compliance.Description,
ProjectId="",
Location="",
AssessmentDate=str(finding.timestamp),
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Title=attribute.Title,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_AttributeDescription=attribute.AttributeDescription,
Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
Requirements_Attributes_LevelOfRisk=attribute.LevelOfRisk,
Status="MANUAL",
StatusExtended="Manual check",
ResourceId="manual_check",
ResourceName="Manual check",
CheckId="manual",
Muted=False,
)
self._data.append(compliance_row)

0
prowler_threatscore_aws Normal file
View File

View File

0
prowler_threatscore_gcp Normal file
View File

View File

@@ -13,6 +13,7 @@ from prowler.lib.check.compliance_models import (
Mitre_Requirement_Attribute_AWS,
Mitre_Requirement_Attribute_Azure,
Mitre_Requirement_Attribute_GCP,
Prowler_ThreatScore_Requirement_Attribute,
)
CIS_1_4_AWS_NAME = "cis_1.4_aws"
@@ -803,3 +804,129 @@ KISA_ISMSP_AWS = Compliance(
),
],
)
PROWLER_THREATSCORE_AWS_NAME = "prowler_threatscore_aws"
PROWLER_THREATSCORE_AWS = Compliance(
Framework="ProwlerThreatScore",
Version="1.0",
Provider="AWS",
Description="Prowler ThreatScore Compliance Framework for AWS ensures that the AWS account is compliant taking into account four main pillars: Identity and Access Management, Attack Surface, Forensic Readiness and Encryption",
Requirements=[
Compliance_Requirement(
Id="1.1.1",
Description="Ensure MFA is enabled for the 'root' user account",
Attributes=[
Prowler_ThreatScore_Requirement_Attribute(
Title="MFA enabled for 'root'",
Section="1. IAM",
SubSection="1.1 Authentication",
AttributeDescription="The root user account holds the highest level of privileges within an AWS account. Enabling Multi-Factor Authentication (MFA) enhances security by adding an additional layer of protection beyond just a username and password. With MFA activated, users must provide their credentials (username and password) along with a unique authentication code generated by their AWS MFA device when signing into an AWS website.",
AdditionalInformation="Enabling MFA enhances console security by requiring the authenticating user to both possess a time-sensitive key-generating device and have knowledge of their credentials.",
LevelOfRisk=5,
)
],
Checks=[
"iam_root_mfa_enabled",
],
),
Compliance_Requirement(
Id="1.1.2",
Description="Ensure hardware MFA is enabled for the 'root' user account",
Attributes=[
Prowler_ThreatScore_Requirement_Attribute(
Title="CloudTrail logging enabled",
Section="1. IAM",
SubSection="1.1 Authentication",
AttributeDescription="The root user account in AWS has the highest level of privileges. Multi-Factor Authentication (MFA) enhances security by adding an extra layer of protection beyond a username and password. When MFA is enabled, users must enter their credentials along with a unique authentication code generated by their AWS MFA device when signing into an AWS website.",
AdditionalInformation="A hardware MFA has a smaller attack surface compared to a virtual MFA. Unlike a virtual MFA, which relies on a mobile device that may be vulnerable to malware or compromise, a hardware MFA operates independently, reducing exposure to potential security threats.",
LevelOfRisk=3,
)
],
Checks=[],
),
],
)
PROWLER_THREATSCORE_AZURE_NAME = "prowler_threatscore_azure"
PROWLER_THREATSCORE_AZURE = Compliance(
Framework="ProwlerThreatScore",
Version="1.0",
Provider="Azure",
Description="Prowler ThreatScore Compliance Framework for Azure ensures that the Azure account is compliant taking into account four main pillars: Identity and Access Management, Attack Surface, Forensic Readiness and Encryption",
Requirements=[
Compliance_Requirement(
Id="1.1.1",
Description="Ensure MFA is enabled for the 'root' user account",
Attributes=[
Prowler_ThreatScore_Requirement_Attribute(
Title="MFA enabled for 'root'",
Section="1. IAM",
SubSection="1.1 Authentication",
AttributeDescription="The root user account holds the highest level of privileges within an AWS account. Enabling Multi-Factor Authentication (MFA) enhances security by adding an additional layer of protection beyond just a username and password. With MFA activated, users must provide their credentials (username and password) along with a unique authentication code generated by their AWS MFA device when signing into an AWS website.",
AdditionalInformation="Enabling MFA enhances console security by requiring the authenticating user to both possess a time-sensitive key-generating device and have knowledge of their credentials.",
LevelOfRisk=5,
)
],
Checks=[
"iam_root_mfa_enabled",
],
),
Compliance_Requirement(
Id="1.1.2",
Description="Ensure hardware MFA is enabled for the 'root' user account",
Attributes=[
Prowler_ThreatScore_Requirement_Attribute(
Title="CloudTrail logging enabled",
Section="1. IAM",
SubSection="1.1 Authentication",
AttributeDescription="The root user account in AWS has the highest level of privileges. Multi-Factor Authentication (MFA) enhances security by adding an extra layer of protection beyond a username and password. When MFA is enabled, users must enter their credentials along with a unique authentication code generated by their AWS MFA device when signing into an AWS website.",
AdditionalInformation="A hardware MFA has a smaller attack surface compared to a virtual MFA. Unlike a virtual MFA, which relies on a mobile device that may be vulnerable to malware or compromise, a hardware MFA operates independently, reducing exposure to potential security threats.",
LevelOfRisk=3,
)
],
Checks=[],
),
],
)
PROWLER_THREATSCORE_GCP_NAME = "prowler_threatscore_gcp"
PROWLER_THREATSCORE_GCP = Compliance(
Framework="ProwlerThreatScore",
Version="1.0",
Provider="GCP",
Description="Prowler ThreatScore Compliance Framework for GCP ensures that the GCP account is compliant taking into account four main pillars: Identity and Access Management, Attack Surface, Forensic Readiness and Encryption",
Requirements=[
Compliance_Requirement(
Id="1.1.1",
Description="Ensure MFA is enabled for the 'root' user account",
Attributes=[
Prowler_ThreatScore_Requirement_Attribute(
Title="MFA enabled for 'root'",
Section="1. IAM",
SubSection="1.1 Authentication",
AttributeDescription="The root user account holds the highest level of privileges within an AWS account. Enabling Multi-Factor Authentication (MFA) enhances security by adding an additional layer of protection beyond just a username and password. With MFA activated, users must provide their credentials (username and password) along with a unique authentication code generated by their AWS MFA device when signing into an AWS website.",
AdditionalInformation="Enabling MFA enhances console security by requiring the authenticating user to both possess a time-sensitive key-generating device and have knowledge of their credentials.",
LevelOfRisk=5,
)
],
Checks=[
"iam_root_mfa_enabled",
],
),
Compliance_Requirement(
Id="1.1.2",
Description="Ensure hardware MFA is enabled for the 'root' user account",
Attributes=[
Prowler_ThreatScore_Requirement_Attribute(
Title="CloudTrail logging enabled",
Section="1. IAM",
SubSection="1.1 Authentication",
AttributeDescription="The root user account in AWS has the highest level of privileges. Multi-Factor Authentication (MFA) enhances security by adding an extra layer of protection beyond a username and password. When MFA is enabled, users must enter their credentials along with a unique authentication code generated by their AWS MFA device when signing into an AWS website.",
AdditionalInformation="A hardware MFA has a smaller attack surface compared to a virtual MFA. Unlike a virtual MFA, which relies on a mobile device that may be vulnerable to malware or compromise, a hardware MFA operates independently, reducing exposure to potential security threats.",
LevelOfRisk=3,
)
],
Checks=[],
),
],
)

View File

@@ -0,0 +1,140 @@
from datetime import datetime
from io import StringIO
from freezegun import freeze_time
from mock import patch
from prowler.lib.outputs.compliance.prowler_threatscore.models import (
ProwlerThreatScoreAWSModel,
)
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_aws import (
ProwlerThreatScoreAWS,
)
from tests.lib.outputs.compliance.fixtures import (
PROWLER_THREATSCORE_AWS,
PROWLER_THREATSCORE_AWS_NAME,
)
from tests.lib.outputs.fixtures.fixtures import generate_finding_output
from tests.providers.aws.utils import AWS_ACCOUNT_NUMBER, AWS_REGION_EU_WEST_1
class TestProwlerThreatScoreAWS:
def test_output_transform(self):
findings = [
generate_finding_output(compliance={"ProwlerThreatScore-1.0": "1.1.1"})
]
output = ProwlerThreatScoreAWS(
findings, PROWLER_THREATSCORE_AWS, PROWLER_THREATSCORE_AWS_NAME
)
output_data = output.data[0]
assert isinstance(output_data, ProwlerThreatScoreAWSModel)
assert output_data.Provider == "aws"
assert output_data.Description == PROWLER_THREATSCORE_AWS.Description
assert output_data.AccountId == AWS_ACCOUNT_NUMBER
assert output_data.Region == AWS_REGION_EU_WEST_1
assert output_data.Requirements_Id == PROWLER_THREATSCORE_AWS.Requirements[0].Id
assert (
output_data.Requirements_Description
== PROWLER_THREATSCORE_AWS.Requirements[0].Description
)
assert (
output_data.Requirements_Attributes_Title
== PROWLER_THREATSCORE_AWS.Requirements[0].Attributes[0].Title
)
assert (
output_data.Requirements_Attributes_Section
== PROWLER_THREATSCORE_AWS.Requirements[0].Attributes[0].Section
)
assert (
output_data.Requirements_Attributes_SubSection
== PROWLER_THREATSCORE_AWS.Requirements[0].Attributes[0].SubSection
)
assert (
output_data.Requirements_Attributes_AttributeDescription
== PROWLER_THREATSCORE_AWS.Requirements[0]
.Attributes[0]
.AttributeDescription
)
assert (
output_data.Requirements_Attributes_AdditionalInformation
== PROWLER_THREATSCORE_AWS.Requirements[0]
.Attributes[0]
.AdditionalInformation
)
assert (
output_data.Requirements_Attributes_LevelOfRisk
== PROWLER_THREATSCORE_AWS.Requirements[0].Attributes[0].LevelOfRisk
)
assert output_data.Status == "PASS"
assert output_data.StatusExtended == ""
assert output_data.ResourceId == ""
assert output_data.ResourceName == ""
assert output_data.CheckId == "test-check-id"
assert not output_data.Muted
# Test manual check
output_data_manual = output.data[1]
assert output_data_manual.Provider == "aws"
assert output_data_manual.AccountId == ""
assert output_data_manual.Region == ""
assert (
output_data_manual.Requirements_Id
== PROWLER_THREATSCORE_AWS.Requirements[1].Id
)
assert (
output_data_manual.Requirements_Description
== PROWLER_THREATSCORE_AWS.Requirements[1].Description
)
assert (
output_data_manual.Requirements_Attributes_Title
== PROWLER_THREATSCORE_AWS.Requirements[1].Attributes[0].Title
)
assert (
output_data_manual.Requirements_Attributes_Section
== PROWLER_THREATSCORE_AWS.Requirements[1].Attributes[0].Section
)
assert (
output_data_manual.Requirements_Attributes_SubSection
== PROWLER_THREATSCORE_AWS.Requirements[1].Attributes[0].SubSection
)
assert (
output_data_manual.Requirements_Attributes_AttributeDescription
== PROWLER_THREATSCORE_AWS.Requirements[1]
.Attributes[0]
.AttributeDescription
)
assert (
output_data_manual.Requirements_Attributes_AdditionalInformation
== PROWLER_THREATSCORE_AWS.Requirements[1]
.Attributes[0]
.AdditionalInformation
)
assert (
output_data_manual.Requirements_Attributes_LevelOfRisk
== PROWLER_THREATSCORE_AWS.Requirements[1].Attributes[0].LevelOfRisk
)
assert output_data_manual.Status == "MANUAL"
assert output_data_manual.StatusExtended == "Manual check"
assert output_data_manual.ResourceId == "manual_check"
assert output_data_manual.ResourceName == "Manual check"
assert output_data_manual.CheckId == "manual"
assert not output_data_manual.Muted
@freeze_time(datetime.now())
def test_batch_write_data_to_file(self):
mock_file = StringIO()
findings = [
generate_finding_output(compliance={"ProwlerThreatScore-1.0": "1.1.1"})
]
output = ProwlerThreatScoreAWS(
findings, PROWLER_THREATSCORE_AWS, PROWLER_THREATSCORE_AWS_NAME
)
output._file_descriptor = mock_file
with patch.object(mock_file, "close", return_value=None):
output.batch_write_data_to_file()
mock_file.seek(0)
content = mock_file.read()
expected_csv = f"PROVIDER;DESCRIPTION;ACCOUNTID;REGION;ASSESSMENTDATE;REQUIREMENTS_ID;REQUIREMENTS_DESCRIPTION;REQUIREMENTS_ATTRIBUTES_TITLE;REQUIREMENTS_ATTRIBUTES_SECTION;REQUIREMENTS_ATTRIBUTES_SUBSECTION;REQUIREMENTS_ATTRIBUTES_ATTRIBUTEDESCRIPTION;REQUIREMENTS_ATTRIBUTES_ADDITIONALINFORMATION;REQUIREMENTS_ATTRIBUTES_LEVELOFRISK;STATUS;STATUSEXTENDED;RESOURCEID;RESOURCENAME;CHECKID;MUTED\r\naws;Prowler ThreatScore Compliance Framework for AWS ensures that the AWS account is compliant taking into account four main pillars: Identity and Access Management, Attack Surface, Forensic Readiness and Encryption;123456789012;eu-west-1;{datetime.now()};1.1.1;Ensure MFA is enabled for the 'root' user account;MFA enabled for 'root';1. IAM;1.1 Authentication;The root user account holds the highest level of privileges within an AWS account. Enabling Multi-Factor Authentication (MFA) enhances security by adding an additional layer of protection beyond just a username and password. With MFA activated, users must provide their credentials (username and password) along with a unique authentication code generated by their AWS MFA device when signing into an AWS website.;Enabling MFA enhances console security by requiring the authenticating user to both possess a time-sensitive key-generating device and have knowledge of their credentials.;5;PASS;;;;test-check-id;False\r\naws;Prowler ThreatScore Compliance Framework for AWS ensures that the AWS account is compliant taking into account four main pillars: Identity and Access Management, Attack Surface, Forensic Readiness and Encryption;;;{datetime.now()};1.1.2;Ensure hardware MFA is enabled for the 'root' user account;CloudTrail logging enabled;1. IAM;1.1 Authentication;The root user account in AWS has the highest level of privileges. Multi-Factor Authentication (MFA) enhances security by adding an extra layer of protection beyond a username and password. When MFA is enabled, users must enter their credentials along with a unique authentication code generated by their AWS MFA device when signing into an AWS website.;A hardware MFA has a smaller attack surface compared to a virtual MFA. Unlike a virtual MFA, which relies on a mobile device that may be vulnerable to malware or compromise, a hardware MFA operates independently, reducing exposure to potential security threats.;3;MANUAL;Manual check;manual_check;Manual check;manual;False\r\n"
assert content == expected_csv

View File

@@ -0,0 +1,150 @@
from datetime import datetime
from io import StringIO
from freezegun import freeze_time
from mock import patch
from prowler.lib.outputs.compliance.prowler_threatscore.models import (
ProwlerThreatScoreAzureModel,
)
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_azure import (
ProwlerThreatScoreAzure,
)
from tests.lib.outputs.compliance.fixtures import (
PROWLER_THREATSCORE_AZURE,
PROWLER_THREATSCORE_AZURE_NAME,
)
from tests.lib.outputs.fixtures.fixtures import generate_finding_output
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
AZURE_SUBSCRIPTION_NAME,
)
class TestProwlerThreatScoreAzure:
def test_output_transform(self):
findings = [
generate_finding_output(
compliance={"ProwlerThreatScore-1.0": "1.1.1"},
provider="azure",
account_name=AZURE_SUBSCRIPTION_NAME,
account_uid=AZURE_SUBSCRIPTION_ID,
region="",
)
]
output = ProwlerThreatScoreAzure(
findings, PROWLER_THREATSCORE_AZURE, PROWLER_THREATSCORE_AZURE_NAME
)
output_data = output.data[0]
assert isinstance(output_data, ProwlerThreatScoreAzureModel)
assert output_data.Provider == "azure"
assert output_data.Description == PROWLER_THREATSCORE_AZURE.Description
assert output_data.SubscriptionId == AZURE_SUBSCRIPTION_ID
assert output_data.Location == ""
assert (
output_data.Requirements_Id == PROWLER_THREATSCORE_AZURE.Requirements[0].Id
)
assert (
output_data.Requirements_Description
== PROWLER_THREATSCORE_AZURE.Requirements[0].Description
)
assert (
output_data.Requirements_Attributes_Title
== PROWLER_THREATSCORE_AZURE.Requirements[0].Attributes[0].Title
)
assert (
output_data.Requirements_Attributes_Section
== PROWLER_THREATSCORE_AZURE.Requirements[0].Attributes[0].Section
)
assert (
output_data.Requirements_Attributes_SubSection
== PROWLER_THREATSCORE_AZURE.Requirements[0].Attributes[0].SubSection
)
assert (
output_data.Requirements_Attributes_AttributeDescription
== PROWLER_THREATSCORE_AZURE.Requirements[0]
.Attributes[0]
.AttributeDescription
)
assert (
output_data.Requirements_Attributes_AdditionalInformation
== PROWLER_THREATSCORE_AZURE.Requirements[0]
.Attributes[0]
.AdditionalInformation
)
assert (
output_data.Requirements_Attributes_LevelOfRisk
== PROWLER_THREATSCORE_AZURE.Requirements[0].Attributes[0].LevelOfRisk
)
assert output_data.Status == "PASS"
assert output_data.StatusExtended == ""
assert output_data.ResourceId == ""
assert output_data.ResourceName == ""
assert output_data.CheckId == "test-check-id"
assert not output_data.Muted
# Test manual check
output_data_manual = output.data[1]
assert output_data_manual.Provider == "azure"
assert output_data_manual.SubscriptionId == ""
assert output_data_manual.Location == ""
assert (
output_data_manual.Requirements_Id
== PROWLER_THREATSCORE_AZURE.Requirements[1].Id
)
assert (
output_data_manual.Requirements_Description
== PROWLER_THREATSCORE_AZURE.Requirements[1].Description
)
assert (
output_data_manual.Requirements_Attributes_Title
== PROWLER_THREATSCORE_AZURE.Requirements[1].Attributes[0].Title
)
assert (
output_data_manual.Requirements_Attributes_Section
== PROWLER_THREATSCORE_AZURE.Requirements[1].Attributes[0].Section
)
assert (
output_data_manual.Requirements_Attributes_SubSection
== PROWLER_THREATSCORE_AZURE.Requirements[1].Attributes[0].SubSection
)
assert (
output_data_manual.Requirements_Attributes_AttributeDescription
== PROWLER_THREATSCORE_AZURE.Requirements[1]
.Attributes[0]
.AttributeDescription
)
assert (
output_data_manual.Requirements_Attributes_AdditionalInformation
== PROWLER_THREATSCORE_AZURE.Requirements[1]
.Attributes[0]
.AdditionalInformation
)
assert (
output_data_manual.Requirements_Attributes_LevelOfRisk
== PROWLER_THREATSCORE_AZURE.Requirements[1].Attributes[0].LevelOfRisk
)
assert output_data_manual.Status == "MANUAL"
assert output_data_manual.StatusExtended == "Manual check"
assert output_data_manual.ResourceId == "manual_check"
assert output_data_manual.ResourceName == "Manual check"
assert output_data_manual.CheckId == "manual"
@freeze_time(datetime.now())
def test_batch_write_data_to_file(self):
mock_file = StringIO()
findings = [
generate_finding_output(compliance={"ProwlerThreatScore-1.0": "1.1.1"})
]
output = ProwlerThreatScoreAzure(
findings, PROWLER_THREATSCORE_AZURE, PROWLER_THREATSCORE_AZURE_NAME
)
output._file_descriptor = mock_file
with patch.object(mock_file, "close", return_value=None):
output.batch_write_data_to_file()
mock_file.seek(0)
content = mock_file.read()
expected_csv = f"PROVIDER;DESCRIPTION;SUBSCRIPTIONID;LOCATION;ASSESSMENTDATE;REQUIREMENTS_ID;REQUIREMENTS_DESCRIPTION;REQUIREMENTS_ATTRIBUTES_TITLE;REQUIREMENTS_ATTRIBUTES_SECTION;REQUIREMENTS_ATTRIBUTES_SUBSECTION;REQUIREMENTS_ATTRIBUTES_ATTRIBUTEDESCRIPTION;REQUIREMENTS_ATTRIBUTES_ADDITIONALINFORMATION;REQUIREMENTS_ATTRIBUTES_LEVELOFRISK;STATUS;STATUSEXTENDED;RESOURCEID;RESOURCENAME;CHECKID;MUTED\r\naws;Prowler ThreatScore Compliance Framework for Azure ensures that the Azure account is compliant taking into account four main pillars: Identity and Access Management, Attack Surface, Forensic Readiness and Encryption;123456789012;eu-west-1;{datetime.now()};1.1.1;Ensure MFA is enabled for the 'root' user account;MFA enabled for 'root';1. IAM;1.1 Authentication;The root user account holds the highest level of privileges within an AWS account. Enabling Multi-Factor Authentication (MFA) enhances security by adding an additional layer of protection beyond just a username and password. With MFA activated, users must provide their credentials (username and password) along with a unique authentication code generated by their AWS MFA device when signing into an AWS website.;Enabling MFA enhances console security by requiring the authenticating user to both possess a time-sensitive key-generating device and have knowledge of their credentials.;5;PASS;;;;test-check-id;False\r\nazure;Prowler ThreatScore Compliance Framework for Azure ensures that the Azure account is compliant taking into account four main pillars: Identity and Access Management, Attack Surface, Forensic Readiness and Encryption;;;{datetime.now()};1.1.2;Ensure hardware MFA is enabled for the 'root' user account;CloudTrail logging enabled;1. IAM;1.1 Authentication;The root user account in AWS has the highest level of privileges. Multi-Factor Authentication (MFA) enhances security by adding an extra layer of protection beyond a username and password. When MFA is enabled, users must enter their credentials along with a unique authentication code generated by their AWS MFA device when signing into an AWS website.;A hardware MFA has a smaller attack surface compared to a virtual MFA. Unlike a virtual MFA, which relies on a mobile device that may be vulnerable to malware or compromise, a hardware MFA operates independently, reducing exposure to potential security threats.;3;MANUAL;Manual check;manual_check;Manual check;manual;False\r\n"
assert content == expected_csv

View File

@@ -0,0 +1,147 @@
from datetime import datetime
from io import StringIO
from freezegun import freeze_time
from mock import patch
from prowler.lib.outputs.compliance.prowler_threatscore.models import (
ProwlerThreatScoreGCPModel,
)
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_gcp import (
ProwlerThreatScoreGCP,
)
from tests.lib.outputs.compliance.fixtures import (
PROWLER_THREATSCORE_GCP,
PROWLER_THREATSCORE_GCP_NAME,
)
from tests.lib.outputs.fixtures.fixtures import generate_finding_output
from tests.providers.gcp.gcp_fixtures import GCP_PROJECT_ID
class TestProwlerThreatScoreGCP:
def test_output_transform(self):
findings = [
generate_finding_output(
compliance={"ProwlerThreatScore-1.0": "1.1.1"},
provider="gcp",
account_name=GCP_PROJECT_ID,
account_uid=GCP_PROJECT_ID,
region="",
)
]
output = ProwlerThreatScoreGCP(
findings, PROWLER_THREATSCORE_GCP, PROWLER_THREATSCORE_GCP_NAME
)
output_data = output.data[0]
assert isinstance(output_data, ProwlerThreatScoreGCPModel)
assert output_data.Provider == "gcp"
assert output_data.Description == PROWLER_THREATSCORE_GCP.Description
assert output_data.ProjectId == GCP_PROJECT_ID
assert output_data.Location == ""
assert output_data.Requirements_Id == PROWLER_THREATSCORE_GCP.Requirements[0].Id
assert (
output_data.Requirements_Description
== PROWLER_THREATSCORE_GCP.Requirements[0].Description
)
assert (
output_data.Requirements_Attributes_Title
== PROWLER_THREATSCORE_GCP.Requirements[0].Attributes[0].Title
)
assert (
output_data.Requirements_Attributes_Section
== PROWLER_THREATSCORE_GCP.Requirements[0].Attributes[0].Section
)
assert (
output_data.Requirements_Attributes_SubSection
== PROWLER_THREATSCORE_GCP.Requirements[0].Attributes[0].SubSection
)
assert (
output_data.Requirements_Attributes_AttributeDescription
== PROWLER_THREATSCORE_GCP.Requirements[0]
.Attributes[0]
.AttributeDescription
)
assert (
output_data.Requirements_Attributes_AdditionalInformation
== PROWLER_THREATSCORE_GCP.Requirements[0]
.Attributes[0]
.AdditionalInformation
)
assert (
output_data.Requirements_Attributes_LevelOfRisk
== PROWLER_THREATSCORE_GCP.Requirements[0].Attributes[0].LevelOfRisk
)
assert output_data.Status == "PASS"
assert output_data.StatusExtended == ""
assert output_data.ResourceId == ""
assert output_data.ResourceName == ""
assert output_data.CheckId == "test-check-id"
assert not output_data.Muted
# Test manual check
output_data_manual = output.data[1]
assert output_data_manual.Provider == "gcp"
assert output_data_manual.ProjectId == ""
assert output_data_manual.Location == ""
assert (
output_data_manual.Requirements_Id
== PROWLER_THREATSCORE_GCP.Requirements[1].Id
)
assert (
output_data_manual.Requirements_Description
== PROWLER_THREATSCORE_GCP.Requirements[1].Description
)
assert (
output_data_manual.Requirements_Attributes_Title
== PROWLER_THREATSCORE_GCP.Requirements[1].Attributes[0].Title
)
assert (
output_data_manual.Requirements_Attributes_Section
== PROWLER_THREATSCORE_GCP.Requirements[1].Attributes[0].Section
)
assert (
output_data_manual.Requirements_Attributes_SubSection
== PROWLER_THREATSCORE_GCP.Requirements[1].Attributes[0].SubSection
)
assert (
output_data_manual.Requirements_Attributes_AttributeDescription
== PROWLER_THREATSCORE_GCP.Requirements[1]
.Attributes[0]
.AttributeDescription
)
assert (
output_data_manual.Requirements_Attributes_AdditionalInformation
== PROWLER_THREATSCORE_GCP.Requirements[1]
.Attributes[0]
.AdditionalInformation
)
assert (
output_data_manual.Requirements_Attributes_LevelOfRisk
== PROWLER_THREATSCORE_GCP.Requirements[1].Attributes[0].LevelOfRisk
)
assert output_data_manual.Status == "MANUAL"
assert output_data_manual.StatusExtended == "Manual check"
assert output_data_manual.ResourceId == "manual_check"
assert output_data_manual.ResourceName == "Manual check"
assert output_data_manual.CheckId == "manual"
assert not output_data_manual.Muted

    @freeze_time(datetime.now())
def test_batch_write_data_to_file(self):
mock_file = StringIO()
findings = [
generate_finding_output(compliance={"ProwlerThreatScore-1.0": "1.1.1"})
]
output = ProwlerThreatScoreGCP(
findings, PROWLER_THREATSCORE_GCP, PROWLER_THREATSCORE_GCP_NAME
)
output._file_descriptor = mock_file
with patch.object(mock_file, "close", return_value=None):
output.batch_write_data_to_file()
mock_file.seek(0)
content = mock_file.read()
expected_csv = f"PROVIDER;DESCRIPTION;PROJECTID;LOCATION;ASSESSMENTDATE;REQUIREMENTS_ID;REQUIREMENTS_DESCRIPTION;REQUIREMENTS_ATTRIBUTES_TITLE;REQUIREMENTS_ATTRIBUTES_SECTION;REQUIREMENTS_ATTRIBUTES_SUBSECTION;REQUIREMENTS_ATTRIBUTES_ATTRIBUTEDESCRIPTION;REQUIREMENTS_ATTRIBUTES_ADDITIONALINFORMATION;REQUIREMENTS_ATTRIBUTES_LEVELOFRISK;STATUS;STATUSEXTENDED;RESOURCEID;RESOURCENAME;CHECKID;MUTED\r\naws;Prowler ThreatScore Compliance Framework for GCP ensures that the GCP account is compliant taking into account four main pillars: Identity and Access Management, Attack Surface, Forensic Readiness and Encryption;123456789012;eu-west-1;{datetime.now()};1.1.1;Ensure MFA is enabled for the 'root' user account;MFA enabled for 'root';1. IAM;1.1 Authentication;The root user account holds the highest level of privileges within an AWS account. Enabling Multi-Factor Authentication (MFA) enhances security by adding an additional layer of protection beyond just a username and password. With MFA activated, users must provide their credentials (username and password) along with a unique authentication code generated by their AWS MFA device when signing into an AWS website.;Enabling MFA enhances console security by requiring the authenticating user to both possess a time-sensitive key-generating device and have knowledge of their credentials.;5;PASS;;;;test-check-id;False\r\ngcp;Prowler ThreatScore Compliance Framework for GCP ensures that the GCP account is compliant taking into account four main pillars: Identity and Access Management, Attack Surface, Forensic Readiness and Encryption;;;{datetime.now()};1.1.2;Ensure hardware MFA is enabled for the 'root' user account;CloudTrail logging enabled;1. IAM;1.1 Authentication;The root user account in AWS has the highest level of privileges. Multi-Factor Authentication (MFA) enhances security by adding an extra layer of protection beyond a username and password. When MFA is enabled, users must enter their credentials along with a unique authentication code generated by their AWS MFA device when signing into an AWS website.;A hardware MFA has a smaller attack surface compared to a virtual MFA. Unlike a virtual MFA, which relies on a mobile device that may be vulnerable to malware or compromise, a hardware MFA operates independently, reducing exposure to potential security threats.;3;MANUAL;Manual check;manual_check;Manual check;manual;False\r\n"
assert content == expected_csv

View File

@@ -0,0 +1,36 @@
import csv
import json
import sys
# Convert a CSV file following the ThreatScore CSV format into a Prowler Compliance JSON file
# CSV fields:
# Id, Title, Description, Section, SubSection, AttributeDescription, AdditionalInformation, LevelOfRisk, Checks
# get the CSV filename to convert from
file_name = sys.argv[1]
# read the CSV file rows and use the column fields to form the Prowler compliance JSON file 'prowler_threatscore_azure.json' (the CSV is expected to contain data rows only; no header line is skipped)
output = {"Framework": "ProwlerThreatScore", "Version": "1.0", "Requirements": []}
with open(file_name, newline="", encoding="utf-8") as f:
reader = csv.reader(f, delimiter=",")
for row in reader:
attribute = {
"Title": row[1],
"Section": row[3],
"SubSection": row[4],
"AttributeDescription": row[5],
"AdditionalInformation": row[6],
"LevelOfRisk": row[7],
}
output["Requirements"].append(
{
"Id": row[0],
"Description": row[2],
"Checks": list(map(str.strip, row[8].split(","))),
"Attributes": [attribute],
}
)
# Write the output Prowler compliance JSON file 'prowler_threatscore_azure.json' locally
with open("prowler_threatscore_azure.json", "w", encoding="utf-8") as outfile:
json.dump(output, outfile, indent=4, ensure_ascii=False)
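
For illustration, a minimal sketch of the requirement entry the converter above would emit for one data row; the values are taken from the sample requirement used elsewhere in this change, and the check id is the placeholder "test-check-id" rather than a real Prowler check:

# Hypothetical CSV data row (field order as listed above, no header line):
#   1.1.1,MFA enabled for 'root',Ensure MFA is enabled for the 'root' user account,1. IAM,1.1 Authentication,<attribute description>,<additional information>,5,test-check-id
# The script appends the following entry to output["Requirements"]:
example_requirement = {
    "Id": "1.1.1",
    "Description": "Ensure MFA is enabled for the 'root' user account",
    "Checks": ["test-check-id"],
    "Attributes": [
        {
            "Title": "MFA enabled for 'root'",
            "Section": "1. IAM",
            "SubSection": "1.1 Authentication",
            "AttributeDescription": "<attribute description>",
            "AdditionalInformation": "<additional information>",
            "LevelOfRisk": 5,
        }
    ],
}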

View File

@@ -0,0 +1,52 @@
import csv
import json
import sys
file_name_output = sys.argv[1] # It is the output CSV file
file_name_compliance = sys.argv[2] # It is the compliance JSON file
number_of_findings_per_pillar = {}
score_per_pillar = {}
# Read the compliance JSON file
with open(file_name_compliance, "r") as file:
data = json.load(file)
# Read the output CSV file
with open(file_name_output, "r") as file:
reader = csv.reader(file, delimiter=";")
headers = next(reader)
if "CHECK_ID" in headers:
check_id_index = headers.index("CHECK_ID")
if "STATUS" in headers:
status_index = headers.index("STATUS")
if "MUTED" in headers:
muted_index = headers.index("MUTED")
for row in reader:
for requirement in data["Requirements"]:
# Take the column that contains the CHECK_ID
if row[check_id_index] in requirement["Checks"]:
if (
requirement["Attributes"][0]["Section"]
not in number_of_findings_per_pillar.keys()
):
number_of_findings_per_pillar[
requirement["Attributes"][0]["Section"]
] = 0
if (
requirement["Attributes"][0]["Section"]
not in score_per_pillar.keys()
):
score_per_pillar[requirement["Attributes"][0]["Section"]] = 0
if row[status_index] == "FAIL" and row[muted_index] != "TRUE":
number_of_findings_per_pillar[
requirement["Attributes"][0]["Section"]
] += 1
score_per_pillar[
requirement["Attributes"][0]["Section"]
] += requirement["Attributes"][0]["LevelOfRisk"]
for key, value in number_of_findings_per_pillar.items():
    print("Pillar:", key)
    # Guard against pillars that matched checks but have no failed findings (avoid division by zero)
    print("Score:", score_per_pillar[key] / value if value else 0)
    print("--------------------------------")