Compare commits

...

8 Commits

Author SHA1 Message Date
Sergio Garcia
f2aa659bc7 chore(version): update Prowler version (#5679) 2024-11-07 14:53:57 -05:00
Prowler Bot
d619c6de73 chore(aws): deprecate glue_etl_jobs_logging_enabled check (#5677)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2024-11-07 11:54:23 -05:00
Prowler Bot
85d39b48a3 fix(aws): update EKS check in compliance frameworks (#5675)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2024-11-07 11:50:19 -05:00
Prowler Bot
bc70f5cacf fix(guardduty): fix guardduty_is_enabled_fixer test (#5678)
Co-authored-by: Daniel Barranquero <74871504+danibarranqueroo@users.noreply.github.com>
2024-11-07 11:49:46 -05:00
Prowler Bot
9b7ed481c0 fix(mutelist): set arguments while loading providers (#5673)
Co-authored-by: thomscode <thomscode@gmail.com>
2024-11-07 10:20:52 -05:00
Prowler Bot
fae8ad640e fix(docker): add g++ to Dockerfile for presidio-analyzer compatibility (#5648)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2024-11-06 12:19:08 -05:00
Sergio Garcia
5959d81972 chore(version): update Prowler version (#5643) 2024-11-06 08:11:25 +01:00
Sergio Garcia
d84d0e7693 chore(backport): master changes to v4.5 branch (#5641)
Co-authored-by: sansns-aws <107269923+sansns@users.noreply.github.com>
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
2024-11-05 14:13:11 -05:00
32 changed files with 662 additions and 89 deletions

View File

@@ -4,9 +4,9 @@ LABEL maintainer="https://github.com/prowler-cloud/prowler"
# Update system dependencies and install essential tools
#hadolint ignore=DL3018
RUN apk --no-cache upgrade && apk --no-cache add curl git
RUN apk --no-cache upgrade && apk --no-cache add curl git g++
# Create nonroot user
# Create non-root user
RUN mkdir -p /home/prowler && \
echo 'prowler:x:1000:1000:prowler:/home/prowler:' > /etc/passwd && \
echo 'prowler:x:1000:' > /etc/group && \

View File

@@ -63,9 +63,9 @@ It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, Fe
| Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/misc/#categories) |
|---|---|---|---|---|
| AWS | 457 | 67 -> `prowler aws --list-services` | 30 -> `prowler aws --list-compliance` | 9 -> `prowler aws --list-categories` |
| AWS | 553 | 77 -> `prowler aws --list-services` | 30 -> `prowler aws --list-compliance` | 9 -> `prowler aws --list-categories` |
| GCP | 77 | 13 -> `prowler gcp --list-services` | 2 -> `prowler gcp --list-compliance` | 2 -> `prowler gcp --list-categories`|
| Azure | 136 | 17 -> `prowler azure --list-services` | 3 -> `prowler azure --list-compliance` | 2 -> `prowler azure --list-categories` |
| Azure | 138 | 17 -> `prowler azure --list-services` | 3 -> `prowler azure --list-compliance` | 2 -> `prowler azure --list-categories` |
| Kubernetes | 83 | 7 -> `prowler kubernetes --list-services` | 1 -> `prowler kubernetes --list-compliance` | 7 -> `prowler kubernetes --list-categories` |
# 💻 Installation

View File

@@ -485,7 +485,7 @@
"codeartifact_packages_external_public_publishing_disabled",
"ecr_repositories_not_publicly_accessible",
"efs_not_publicly_accessible",
"eks_endpoints_not_publicly_accessible",
"eks_cluster_not_publicly_accessible",
"elb_internet_facing",
"elbv2_internet_facing",
"s3_account_level_public_access_blocks",
@@ -664,7 +664,7 @@
"awslambda_function_not_publicly_accessible",
"apigateway_restapi_waf_acl_attached",
"cloudfront_distributions_using_waf",
"eks_control_plane_endpoint_access_restricted",
"eks_cluster_not_publicly_accessible",
"sagemaker_models_network_isolation_enabled",
"sagemaker_models_vpc_settings_configured",
"sagemaker_notebook_instance_vpc_settings_configured",

View File

@@ -1509,9 +1509,9 @@
"iam_user_mfa_enabled_console_access",
"networkfirewall_in_all_vpc",
"eks_cluster_network_policy_enabled",
"eks_control_plane_endpoint_access_restricted",
"eks_cluster_not_publicly_accessible",
"eks_cluster_private_nodes_enabled",
"eks_endpoints_not_publicly_accessible",
"eks_cluster_not_publicly_accessible",
"kafka_cluster_is_public",
"kafka_cluster_unrestricted_access_disabled",
"vpc_peering_routing_tables_with_least_privilege",

View File

@@ -1509,9 +1509,9 @@
"iam_user_mfa_enabled_console_access",
"networkfirewall_in_all_vpc",
"eks_cluster_network_policy_enabled",
"eks_control_plane_endpoint_access_restricted",
"eks_cluster_not_publicly_accessible",
"eks_cluster_private_nodes_enabled",
"eks_endpoints_not_publicly_accessible",
"eks_cluster_not_publicly_accessible",
"kafka_cluster_is_public",
"kafka_cluster_unrestricted_access_disabled",
"vpc_peering_routing_tables_with_least_privilege",

View File

@@ -19,7 +19,7 @@
"ec2_ebs_public_snapshot",
"ec2_instance_profile_attached",
"ec2_instance_public_ip",
"eks_endpoints_not_publicly_accessible",
"eks_cluster_not_publicly_accessible",
"emr_cluster_master_nodes_no_public_ip",
"iam_aws_attached_policy_no_administrative_privileges",
"iam_customer_attached_policy_no_administrative_privileges",
@@ -61,7 +61,7 @@
"ec2_ebs_public_snapshot",
"ec2_instance_profile_attached",
"ec2_instance_public_ip",
"eks_endpoints_not_publicly_accessible",
"eks_cluster_not_publicly_accessible",
"emr_cluster_master_nodes_no_public_ip",
"iam_aws_attached_policy_no_administrative_privileges",
"iam_customer_attached_policy_no_administrative_privileges",
@@ -102,7 +102,7 @@
"Checks": [
"ec2_ebs_public_snapshot",
"ec2_instance_public_ip",
"eks_endpoints_not_publicly_accessible",
"eks_cluster_not_publicly_accessible",
"emr_cluster_master_nodes_no_public_ip",
"awslambda_function_not_publicly_accessible",
"awslambda_function_url_public",

View File

@@ -971,7 +971,7 @@
"Checks": [
"ec2_ebs_public_snapshot",
"ec2_instance_public_ip",
"eks_endpoints_not_publicly_accessible",
"eks_cluster_not_publicly_accessible",
"emr_cluster_master_nodes_no_public_ip",
"awslambda_function_url_public",
"rds_instance_no_public_access",

View File

@@ -3043,9 +3043,7 @@
{
"Id": "9.4",
"Description": "Ensure that Register with Entra ID is enabled on App Service",
"Checks": [
""
],
"Checks": [],
"Attributes": [
{
"Section": "9. AppService",
@@ -3175,9 +3173,7 @@
{
"Id": "9.10",
"Description": "Ensure Azure Key Vaults are Used to Store Secrets",
"Checks": [
""
],
"Checks": [],
"Attributes": [
{
"Section": "9. AppService",

View File

@@ -12,7 +12,7 @@ from prowler.lib.logger import logger
timestamp = datetime.today()
timestamp_utc = datetime.now(timezone.utc).replace(tzinfo=timezone.utc)
prowler_version = "4.5.0"
prowler_version = "4.5.1"
html_logo_url = "https://github.com/prowler-cloud/prowler/"
square_logo_img = "https://prowler.com/wp-content/uploads/logo-html.png"
aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"

View File

@@ -1262,7 +1262,9 @@ class AwsProvider(Provider):
logger.critical(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
raise error
if raise_on_exception:
raise error
return Connection(error=error)
@staticmethod
def create_sts_session(

View File

@@ -23,7 +23,9 @@
"Url": "https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-add-availability-zone.html"
}
},
"Categories": [],
"Categories": [
"redundancy"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -23,7 +23,9 @@
"Url": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/DMS/multi-az.html#"
}
},
"Categories": [],
"Categories": [
"redundancy"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -23,7 +23,9 @@
"Url": "https://redis.io/blog/highly-available-in-memory-cloud-datastores/"
}
},
"Categories": [],
"Categories": [
"redundancy"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -23,7 +23,9 @@
"Url": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/ElastiCache/elasticache-multi-az.html#"
}
},
"Categories": [],
"Categories": [
"redundancy"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -23,7 +23,9 @@
"Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html"
}
},
"Categories": [],
"Categories": [
"redundancy"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -23,7 +23,9 @@
"Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-subnets.html"
}
},
"Categories": [],
"Categories": [
"redundancy"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -1,7 +1,7 @@
{
"Provider": "aws",
"CheckID": "glue_etl_jobs_logging_enabled",
"CheckTitle": "Check if Glue ETL Jobs have logging enabled.",
"CheckTitle": "[DEPRECATED] Check if Glue ETL Jobs have logging enabled.",
"CheckType": [
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices"
],
@@ -10,7 +10,7 @@
"ResourceIdTemplate": "arn:partition:glue:region:account-id:job/job-name",
"Severity": "medium",
"ResourceType": "AwsGlueJob",
"Description": "Ensure that Glue ETL Jobs have CloudWatch logs enabled.",
"Description": "[DEPRECATED] Ensure that Glue ETL Jobs have CloudWatch logs enabled.",
"Risk": "Without logging enabled, AWS Glue jobs lack visibility into job activities and failures, making it difficult to detect unauthorized access, troubleshoot issues, and ensure compliance. This may result in untracked security incidents or operational issues that affect data processing.",
"RelatedUrl": "https://docs.aws.amazon.com/glue/latest/dg/monitor-continuous-logging.html",
"Remediation": {
@@ -28,5 +28,5 @@
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
"Notes": "This check is being removed since logs for all AWS Glue jobs are now always sent to Amazon CloudWatch."
}

View File

@@ -25,7 +25,9 @@
"Url": "https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/rabbitmq-broker-architecture.html#rabbitmq-broker-architecture-cluster"
}
},
"Categories": [],
"Categories": [
"redundancy"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -23,7 +23,9 @@
"Url": "https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-9"
}
},
"Categories": [],
"Categories": [
"redundancy"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -25,7 +25,9 @@
"Url": "https://aws.amazon.com/es/blogs/networking-and-content-delivery/deployment-models-for-aws-network-firewall/"
}
},
"Categories": [],
"Categories": [
"redundancy"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -23,7 +23,9 @@
"Url": "https://aws.amazon.com/rds/features/multi-az/"
}
},
"Categories": [],
"Categories": [
"redundancy"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -0,0 +1,32 @@
{
"Provider": "aws",
"CheckID": "rds_cluster_protected_by_backup_plan",
"CheckTitle": "Check if RDS clusters are protected by a backup plan.",
"CheckType": [
"Software and Configuration Checks, AWS Security Best Practices"
],
"ServiceName": "rds",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:rds:region:account-id:db-cluster",
"Severity": "medium",
"ResourceType": "AwsRdsDbInstance",
"Description": "Check if RDS clusters are protected by a backup plan.",
"Risk": "Without a backup plan, RDS clusters are vulnerable to data loss, accidental deletion, or corruption. This could lead to significant operational disruptions or loss of critical data.",
"RelatedUrl": "https://docs.aws.amazon.com/aws-backup/latest/devguide/assigning-resources.html",
"Remediation": {
"Code": {
"CLI": "aws backup create-backup-plan --backup-plan , aws backup tag-resource --resource-arn <rds-cluster-arn> --tags Key=backup,Value=true",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/rds-controls.html#rds-26",
"Terraform": ""
},
"Recommendation": {
"Text": "Create a backup plan for the RDS cluster to protect it from data loss, accidental deletion, or corruption.",
"Url": "https://docs.aws.amazon.com/aws-backup/latest/devguide/assigning-resources.html"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
}

View File

@@ -0,0 +1,33 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.backup.backup_client import backup_client
from prowler.providers.aws.services.rds.rds_client import rds_client
class rds_cluster_protected_by_backup_plan(Check):
    """Check whether each RDS cluster is covered by an AWS Backup plan."""

    def execute(self):
        """Produce one finding per RDS cluster.

        A cluster passes when its ARN — or a matching wildcard entry
        (`arn:<partition>:rds:*:*:cluster:*` or `*`) — appears in AWS
        Backup's protected resources; otherwise it fails.
        """
        findings = []
        # Wildcard ARN that AWS Backup uses to cover every RDS cluster
        # in the audited partition.
        cluster_wildcard_arn = (
            f"arn:{rds_client.audited_partition}:rds:*:*:cluster:*"
        )
        for arn, cluster in rds_client.db_clusters.items():
            report = Check_Report_AWS(self.metadata())
            report.region = cluster.region
            report.resource_id = cluster.id
            report.resource_arn = arn
            report.resource_tags = cluster.tags

            protected = (
                arn in backup_client.protected_resources
                or cluster_wildcard_arn in backup_client.protected_resources
                or "*" in backup_client.protected_resources
            )
            if protected:
                report.status = "PASS"
                report.status_extended = (
                    f"RDS Cluster {cluster.id} is protected by a backup plan."
                )
            else:
                report.status = "FAIL"
                report.status_extended = (
                    f"RDS Cluster {cluster.id} is not protected by a backup plan."
                )
            findings.append(report)
        return findings

View File

@@ -23,7 +23,9 @@
"Url": "https://aws.amazon.com/rds/features/multi-az/"
}
},
"Categories": [],
"Categories": [
"redundancy"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -25,7 +25,9 @@
"Url": "https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html"
}
},
"Categories": [],
"Categories": [
"redundancy"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -161,54 +161,54 @@ class Provider(ABC):
if not isinstance(Provider._global, provider_class):
if "aws" in provider_class_name.lower():
provider_class(
arguments.aws_retries_max_attempts,
arguments.role,
arguments.session_duration,
arguments.external_id,
arguments.role_session_name,
arguments.mfa,
arguments.profile,
set(arguments.region) if arguments.region else None,
arguments.organizations_role,
arguments.scan_unused_services,
arguments.resource_tag,
arguments.resource_arn,
arguments.config_file,
arguments.mutelist_file,
retries_max_attempts=arguments.aws_retries_max_attempts,
role_arn=arguments.role,
session_duration=arguments.session_duration,
external_id=arguments.external_id,
role_session_name=arguments.role_session_name,
mfa=arguments.mfa,
profile=arguments.profile,
regions=set(arguments.region) if arguments.region else None,
organizations_role_arn=arguments.organizations_role,
scan_unused_services=arguments.scan_unused_services,
resource_tags=arguments.resource_tag,
resource_arn=arguments.resource_arn,
config_path=arguments.config_file,
mutelist_path=arguments.mutelist_file,
fixer_config=fixer_config,
)
elif "azure" in provider_class_name.lower():
provider_class(
arguments.az_cli_auth,
arguments.sp_env_auth,
arguments.browser_auth,
arguments.managed_identity_auth,
arguments.tenant_id,
arguments.azure_region,
arguments.subscription_id,
arguments.config_file,
arguments.mutelist_file,
az_cli_auth=arguments.az_cli_auth,
sp_env_auth=arguments.sp_env_auth,
browser_auth=arguments.browser_auth,
managed_identity_auth=arguments.managed_identity_auth,
tenant_id=arguments.tenant_id,
region=arguments.azure_region,
subscription_ids=arguments.subscription_id,
config_path=arguments.config_file,
mutelist_path=arguments.mutelist_file,
fixer_config=fixer_config,
)
elif "gcp" in provider_class_name.lower():
provider_class(
arguments.organization_id,
arguments.project_id,
arguments.excluded_project_id,
arguments.credentials_file,
arguments.impersonate_service_account,
arguments.list_project_id,
arguments.config_file,
arguments.mutelist_file,
organization_id=arguments.organization_id,
project_ids=arguments.project_id,
excluded_project_ids=arguments.excluded_project_id,
credentials_file=arguments.credentials_file,
impersonate_service_account=arguments.impersonate_service_account,
list_project_ids=arguments.list_project_id,
config_path=arguments.config_file,
mutelist_path=arguments.mutelist_file,
fixer_config=fixer_config,
)
elif "kubernetes" in provider_class_name.lower():
provider_class(
arguments.kubeconfig_file,
arguments.context,
arguments.namespace,
arguments.config_file,
arguments.mutelist_file,
kubeconfig_file=arguments.kubeconfig_file,
context=arguments.context,
namespace=arguments.namespace,
config_path=arguments.config_file,
mutelist_path=arguments.mutelist_file,
fixer_config=fixer_config,
)

View File

@@ -411,7 +411,7 @@ class GcpProvider(Provider):
@staticmethod
def get_projects(
credentials: Credentials, organization_id: str
credentials: Credentials, organization_id: str = None
) -> dict[str, GCPProject]:
"""
Get the projects accessible by the provided credentials. If an organization ID is provided, only the projects under that organization are returned.

View File

@@ -23,7 +23,7 @@ packages = [
{include = "dashboard"}
]
readme = "README.md"
version = "4.5.0"
version = "4.5.1"
[tool.poetry.dependencies]
alive-progress = "3.1.5"

View File

@@ -1443,6 +1443,18 @@ aws:
)
assert connection.error.code == 1015
    @mock_aws
    def test_test_connection_generic_exception(self):
        """A generic error from setup_session is returned inside a
        Connection object — not raised — when raise_on_exception=False."""
        with patch(
            "prowler.providers.aws.aws_provider.AwsProvider.setup_session",
            side_effect=Exception(),
        ):
            connection = AwsProvider.test_connection(raise_on_exception=False)

            assert isinstance(connection, Connection)
            assert not connection.is_connected
            assert isinstance(connection.error, Exception)
@mock_aws
def test_create_sts_session(self):
current_session = session.Session()

View File

@@ -1,35 +1,93 @@
from unittest import mock
from uuid import uuid4
import botocore
import botocore.client
from moto import mock_aws
from tests.providers.aws.utils import (
AWS_ACCOUNT_ARN,
AWS_ACCOUNT_NUMBER,
AWS_REGION_EU_WEST_1,
set_mocked_aws_provider,
)
DETECTOR_ID = str(uuid4())
DETECTOR_ARN = f"arn:aws:guardduty:{AWS_REGION_EU_WEST_1}:{AWS_ACCOUNT_NUMBER}:detector/{DETECTOR_ID}"
mock_make_api_call = botocore.client.BaseClient._make_api_call
def mock_make_api_call_create_detector_success(self, operation_name, kwarg):
    """botocore `_make_api_call` replacement simulating a healthy GuardDuty:
    CreateDetector returns the fixed DETECTOR_ID and GetDetector reports the
    detector as ENABLED; every other operation falls through to the real
    (moto-intercepted) API call saved in `mock_make_api_call`."""
    if operation_name == "CreateDetector":
        return {"DetectorId": DETECTOR_ID}
    elif operation_name == "GetDetector":
        return {"Status": "ENABLED"}
    return mock_make_api_call(self, operation_name, kwarg)
def mock_make_api_call_create_detector_failure(self, operation_name, kwarg):
    """botocore `_make_api_call` replacement that denies detector creation:
    CreateDetector raises an AccessDeniedException ClientError; every other
    operation falls through to the real (moto-intercepted) API call."""
    if operation_name == "CreateDetector":
        raise botocore.exceptions.ClientError(
            {
                "Error": {
                    "Code": "AccessDeniedException",
                    "Message": "User: arn:aws:iam::012345678901:user/test is not authorized to perform: guardduty:CreateDetector",
                }
            },
            "CreateDetector",
        )
    return mock_make_api_call(self, operation_name, kwarg)
class Test_guardduty_is_enabled_fixer:
@mock_aws
def test_guardduty_is_enabled_fixer(self):
regional_client = mock.MagicMock()
guardduty_client = mock.MagicMock
guardduty_client.region = AWS_REGION_EU_WEST_1
guardduty_client.detectors = []
guardduty_client.audited_account_arn = AWS_ACCOUNT_ARN
regional_client.create_detector.return_value = None
guardduty_client.regional_clients = {AWS_REGION_EU_WEST_1: regional_client}
with mock.patch(
"prowler.providers.aws.services.guardduty.guardduty_service.GuardDuty",
guardduty_client,
"botocore.client.BaseClient._make_api_call",
new=mock_make_api_call_create_detector_success,
):
from prowler.providers.aws.services.guardduty.guardduty_is_enabled.guardduty_is_enabled_fixer import (
fixer,
from prowler.providers.aws.services.guardduty.guardduty_service import (
GuardDuty,
)
assert fixer(AWS_REGION_EU_WEST_1)
aws_provider = set_mocked_aws_provider([AWS_REGION_EU_WEST_1])
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
), mock.patch(
"prowler.providers.aws.services.guardduty.guardduty_is_enabled.guardduty_is_enabled_fixer.guardduty_client",
new=GuardDuty(aws_provider),
):
from prowler.providers.aws.services.guardduty.guardduty_is_enabled.guardduty_is_enabled_fixer import (
fixer,
)
assert fixer(AWS_REGION_EU_WEST_1)
    @mock_aws
    def test_guardduty_is_enabled_fixer_failure(self):
        """The fixer returns a falsy result when CreateDetector is denied
        (simulated via mock_make_api_call_create_detector_failure)."""
        with mock.patch(
            "botocore.client.BaseClient._make_api_call",
            new=mock_make_api_call_create_detector_failure,
        ):
            from prowler.providers.aws.services.guardduty.guardduty_service import (
                GuardDuty,
            )

            aws_provider = set_mocked_aws_provider([AWS_REGION_EU_WEST_1])
            with mock.patch(
                "prowler.providers.common.provider.Provider.get_global_provider",
                return_value=aws_provider,
            ), mock.patch(
                "prowler.providers.aws.services.guardduty.guardduty_is_enabled.guardduty_is_enabled_fixer.guardduty_client",
                new=GuardDuty(aws_provider),
            ):
                from prowler.providers.aws.services.guardduty.guardduty_is_enabled.guardduty_is_enabled_fixer import (
                    fixer,
                )

                assert not fixer(AWS_REGION_EU_WEST_1)

View File

@@ -0,0 +1,416 @@
from unittest import mock
from moto import mock_aws
from tests.providers.aws.utils import (
AWS_ACCOUNT_NUMBER,
AWS_REGION_US_EAST_1,
set_mocked_aws_provider,
)
class Test_rds_cluster_protected_by_backup_plan:
    """Tests for the rds_cluster_protected_by_backup_plan check.

    Each test wires mocked RDS and Backup clients into the check module,
    then asserts the finding's status, status_extended, and resource fields.
    """

    @mock_aws
    def test_rds_no_clusters(self):
        """No RDS clusters -> no findings."""
        from prowler.providers.aws.services.backup.backup_service import Backup
        from prowler.providers.aws.services.rds.rds_service import RDS

        aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])

        with mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=aws_provider,
        ):
            with mock.patch(
                "prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan.rds_client",
                new=RDS(aws_provider),
            ), mock.patch(
                "prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan.backup_client",
                new=Backup(aws_provider),
            ):
                # Test Check
                from prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan import (
                    rds_cluster_protected_by_backup_plan,
                )

                check = rds_cluster_protected_by_backup_plan()
                result = check.execute()

                assert len(result) == 0

    @mock_aws
    def test_rds_cluster_no_existing_backup_plans(self):
        """Cluster present, Backup client has no protected resources -> FAIL."""
        cluster = mock.MagicMock()
        backup = mock.MagicMock()
        from prowler.providers.aws.services.rds.rds_service import DBCluster

        arn = f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-cluster-1"
        cluster.db_clusters = {
            arn: DBCluster(
                id="db-cluster-1",
                arn=f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-cluster-1",
                endpoint="db-cluster-1.c9akciq32.rds.amazonaws.com",
                backtrack=1,
                parameter_group="test",
                engine_version="13.3",
                status="available",
                public=False,
                encrypted=True,
                deletion_protection=False,
                auto_minor_version_upgrade=True,
                multi_az=False,
                username="admin",
                iam_auth=False,
                name="db-cluster-1",
                region="us-east-1",
                cluster_class="db.m1.small",
                engine="aurora-postgres",
                allocated_storage=10,
                tags=[],
            )
        }

        aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])

        with mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=aws_provider,
        ):
            with mock.patch(
                "prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan.rds_client",
                new=cluster,
            ), mock.patch(
                "prowler.providers.aws.services.rds.rds_client.rds_client",
                new=cluster,
            ), mock.patch(
                "prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan.backup_client",
                new=backup,
            ), mock.patch(
                "prowler.providers.aws.services.backup.backup_client.backup_client",
                new=backup,
            ):
                # Test Check
                from prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan import (
                    rds_cluster_protected_by_backup_plan,
                )

                check = rds_cluster_protected_by_backup_plan()
                result = check.execute()

                assert len(result) == 1
                assert result[0].status == "FAIL"
                assert (
                    result[0].status_extended
                    == "RDS Cluster db-cluster-1 is not protected by a backup plan."
                )
                assert result[0].resource_id == "db-cluster-1"
                assert result[0].region == AWS_REGION_US_EAST_1
                assert (
                    result[0].resource_arn
                    == f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-cluster-1"
                )
                assert result[0].resource_tags == []

    def test_rds_cluster_without_backup_plan(self):
        """Backup plan protects a different resource ARN -> FAIL."""
        cluster = mock.MagicMock()
        backup = mock.MagicMock()
        from prowler.providers.aws.services.rds.rds_service import DBCluster

        arn = f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-cluster-1"
        cluster.db_clusters = {
            arn: DBCluster(
                id="db-cluster-1",
                arn=f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-cluster-1",
                endpoint="db-cluster-1.c9akciq32.rds.amazonaws.com",
                backtrack=1,
                parameter_group="test",
                engine_version="13.3",
                status="available",
                public=False,
                encrypted=True,
                deletion_protection=False,
                auto_minor_version_upgrade=True,
                multi_az=False,
                username="admin",
                iam_auth=False,
                name="db-cluster-1",
                region="us-east-1",
                cluster_class="db.m1.small",
                engine="aurora-postgres",
                allocated_storage=10,
                tags=[],
            )
        }
        # Protected resource ARN deliberately does not match the cluster.
        backup.protected_resources = [
            f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-master-2"
        ]

        aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])

        with mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=aws_provider,
        ):
            with mock.patch(
                "prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan.rds_client",
                new=cluster,
            ), mock.patch(
                "prowler.providers.aws.services.rds.rds_client.rds_client",
                new=cluster,
            ), mock.patch(
                "prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan.backup_client",
                new=backup,
            ), mock.patch(
                "prowler.providers.aws.services.backup.backup_client.backup_client",
                new=backup,
            ):
                # Test Check
                from prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan import (
                    rds_cluster_protected_by_backup_plan,
                )

                check = rds_cluster_protected_by_backup_plan()
                result = check.execute()

                assert len(result) == 1
                assert result[0].status == "FAIL"
                assert (
                    result[0].status_extended
                    == "RDS Cluster db-cluster-1 is not protected by a backup plan."
                )
                assert result[0].resource_id == "db-cluster-1"
                assert result[0].region == AWS_REGION_US_EAST_1
                assert (
                    result[0].resource_arn
                    == f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-cluster-1"
                )
                assert result[0].resource_tags == []

    def test_rds_cluster_with_backup_plan(self):
        """Cluster ARN listed in Backup's protected resources -> PASS."""
        cluster = mock.MagicMock()
        from prowler.providers.aws.services.rds.rds_service import DBCluster

        arn = f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-cluster-1"
        cluster.db_clusters = {
            arn: DBCluster(
                id="db-cluster-1",
                arn=f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-cluster-1",
                endpoint="db-cluster-1.c9akciq32.rds.amazonaws.com",
                backtrack=1,
                parameter_group="test",
                engine_version="13.3",
                status="available",
                public=False,
                encrypted=True,
                deletion_protection=False,
                auto_minor_version_upgrade=True,
                multi_az=False,
                username="admin",
                iam_auth=False,
                name="db-cluster-1",
                region="us-east-1",
                cluster_class="db.m1.small",
                engine="aurora-postgres",
                allocated_storage=10,
                tags=[],
            )
        }
        backup = mock.MagicMock()
        backup.protected_resources = [arn]

        aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])

        with mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=aws_provider,
        ):
            with mock.patch(
                "prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan.rds_client",
                new=cluster,
            ), mock.patch(
                "prowler.providers.aws.services.rds.rds_client.rds_client",
                new=cluster,
            ), mock.patch(
                "prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan.backup_client",
                new=backup,
            ), mock.patch(
                "prowler.providers.aws.services.backup.backup_client.backup_client",
                new=backup,
            ):
                # Test Check
                from prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan import (
                    rds_cluster_protected_by_backup_plan,
                )

                check = rds_cluster_protected_by_backup_plan()
                result = check.execute()

                assert len(result) == 1
                assert result[0].status == "PASS"
                assert (
                    result[0].status_extended
                    == "RDS Cluster db-cluster-1 is protected by a backup plan."
                )
                assert result[0].resource_id == "db-cluster-1"
                assert result[0].region == AWS_REGION_US_EAST_1
                assert (
                    result[0].resource_arn
                    == f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-cluster-1"
                )
                assert result[0].resource_tags == []

    def test_rds_cluster_with_backup_plan_via_cluster_wildcard(self):
        """Partition-wide cluster wildcard ARN in protected resources -> PASS."""
        cluster = mock.MagicMock()
        cluster.audited_partition = "aws"
        from prowler.providers.aws.services.rds.rds_service import DBCluster

        # Wildcard matching every RDS cluster in the aws partition.
        arn = "arn:aws:rds:*:*:cluster:*"
        cluster.db_clusters = {
            f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-cluster-1": DBCluster(
                id="db-cluster-1",
                arn=f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-cluster-1",
                endpoint="db-cluster-1.c9akciq32.rds.amazonaws.com",
                backtrack=1,
                parameter_group="test",
                engine_version="13.3",
                status="available",
                public=False,
                encrypted=True,
                deletion_protection=False,
                auto_minor_version_upgrade=True,
                multi_az=False,
                username="admin",
                iam_auth=False,
                name="db-cluster-1",
                region="us-east-1",
                cluster_class="db.m1.small",
                engine="aurora-postgres",
                allocated_storage=10,
                tags=[],
            )
        }
        backup = mock.MagicMock()
        backup.protected_resources = [arn]

        aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])

        with mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=aws_provider,
        ):
            with mock.patch(
                "prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan.rds_client",
                new=cluster,
            ), mock.patch(
                "prowler.providers.aws.services.rds.rds_client.rds_client",
                new=cluster,
            ), mock.patch(
                "prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan.backup_client",
                new=backup,
            ), mock.patch(
                "prowler.providers.aws.services.backup.backup_client.backup_client",
                new=backup,
            ):
                # Test Check
                from prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan import (
                    rds_cluster_protected_by_backup_plan,
                )

                check = rds_cluster_protected_by_backup_plan()
                result = check.execute()

                assert len(result) == 1
                assert result[0].status == "PASS"
                assert (
                    result[0].status_extended
                    == "RDS Cluster db-cluster-1 is protected by a backup plan."
                )
                assert result[0].resource_id == "db-cluster-1"
                assert result[0].region == AWS_REGION_US_EAST_1
                assert (
                    result[0].resource_arn
                    == f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-cluster-1"
                )
                assert result[0].resource_tags == []

    def test_rds_cluster_with_backup_plan_via_all_wildcard(self):
        """Bare "*" wildcard in protected resources -> PASS."""
        cluster = mock.MagicMock()
        from prowler.providers.aws.services.rds.rds_service import DBCluster

        arn = "*"
        cluster.db_clusters = {
            f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-cluster-1": DBCluster(
                id="db-cluster-1",
                arn=f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-cluster-1",
                endpoint="db-cluster-1.c9akciq32.rds.amazonaws.com",
                backtrack=1,
                parameter_group="test",
                engine_version="13.3",
                status="available",
                public=False,
                encrypted=True,
                deletion_protection=False,
                auto_minor_version_upgrade=True,
                multi_az=False,
                username="admin",
                iam_auth=False,
                name="db-cluster-1",
                region="us-east-1",
                cluster_class="db.m1.small",
                engine="aurora-postgres",
                allocated_storage=10,
                tags=[],
            )
        }
        backup = mock.MagicMock()
        backup.protected_resources = [arn]

        aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])

        with mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=aws_provider,
        ):
            with mock.patch(
                "prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan.rds_client",
                new=cluster,
            ), mock.patch(
                "prowler.providers.aws.services.rds.rds_client.rds_client",
                new=cluster,
            ), mock.patch(
                "prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan.backup_client",
                new=backup,
            ), mock.patch(
                "prowler.providers.aws.services.backup.backup_client.backup_client",
                new=backup,
            ):
                # Test Check
                from prowler.providers.aws.services.rds.rds_cluster_protected_by_backup_plan.rds_cluster_protected_by_backup_plan import (
                    rds_cluster_protected_by_backup_plan,
                )

                check = rds_cluster_protected_by_backup_plan()
                result = check.execute()

                assert len(result) == 1
                assert result[0].status == "PASS"
                assert (
                    result[0].status_extended
                    == "RDS Cluster db-cluster-1 is protected by a backup plan."
                )
                assert result[0].resource_id == "db-cluster-1"
                assert result[0].region == AWS_REGION_US_EAST_1
                assert (
                    result[0].resource_arn
                    == f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:db:db-cluster-1"
                )
                assert result[0].resource_tags == []