Mirror of https://github.com/prowler-cloud/prowler.git (synced 2026-01-25 02:08:11 +00:00)

Compare commits: dev-memory ... 4.3.1 (13 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 5256d29605 | |
| | 17012ec1a4 | |
| | 8461257428 | |
| | 26a5ffaf82 | |
| | 563ddb3707 | |
| | 2c11c3d6f9 | |
| | e050f44d63 | |
| | 4fd3405bbf | |
| | a1c2caa745 | |
| | f639dc8bf4 | |
| | 35325d9f40 | |
| | 71503b553a | |
| | d91a240ea8 | |
7  .github/pull_request_template.md (vendored)
@@ -2,11 +2,18 @@

Please include relevant motivation and context for this PR.

If fixes an issue please add it with `Fix #XXXX`

### Description

Please include a summary of the change and which issue is fixed. List any dependencies that are required for this change.

### Checklist

- Are there new checks included in this PR? Yes / No
- If so, do we need to update permissions for the provider? Please review this carefully.
- [ ] Review if the code is being covered by tests.
- [ ] Review if code is being documented following this specification https://github.com/google/styleguide/blob/gh-pages/pyguide.md#38-comments-and-docstrings

### License
2  .github/workflows/find-secrets.yml (vendored)
@@ -11,7 +11,7 @@ jobs:
        with:
          fetch-depth: 0
      - name: TruffleHog OSS
-       uses: trufflesecurity/trufflehog@v3.80.2
+       uses: trufflesecurity/trufflehog@3.80.4
        with:
          path: ./
          base: ${{ github.event.repository.default_branch }}
@@ -10,7 +10,7 @@ Execute Prowler in verbose mode (like in Version 2):
prowler <provider> --verbose
```

## Filter findings by status

-Prowler can filter the findings by their status:
+Prowler can filter the findings by their status, so you can see only in the CLI and in the reports the findings with a specific status:

```console
prowler <provider> --status [PASS, FAIL, MANUAL]
```
@@ -58,22 +58,28 @@ Resources:
                  - 'account:Get*'
                  - 'appstream:Describe*'
                  - 'appstream:List*'
                  - 'backup:List*'
                  - 'cloudtrail:GetInsightSelectors'
                  - 'codeartifact:List*'
                  - 'codebuild:BatchGet*'
                  - 'cognito-idp:GetUserPoolMfaConfig'
                  - 'dlm:Get*'
                  - 'drs:Describe*'
                  - 'ds:Get*'
                  - 'ds:Describe*'
                  - 'ds:List*'
                  - 'dynamodb:GetResourcePolicy'
                  - 'ec2:GetEbsEncryptionByDefault'
                  - 'ec2:GetSnapshotBlockPublicAccessState'
                  - 'ec2:GetInstanceMetadataDefaults'
                  - 'ecr:Describe*'
                  - 'ecr:GetRegistryScanningConfiguration'
                  - 'elasticfilesystem:DescribeBackupPolicy'
                  - 'glue:GetConnections'
                  - 'glue:GetSecurityConfiguration*'
                  - 'glue:SearchTables'
                  - 'lambda:GetFunction*'
                  - 'logs:FilterLogEvents'
                  - 'lightsail:GetRelationalDatabases'
                  - 'macie2:GetMacieSession'
                  - 's3:GetAccountPublicAccessBlock'

@@ -82,8 +88,10 @@ Resources:
                  - 'securityhub:BatchImportFindings'
                  - 'securityhub:GetFindings'
                  - 'ssm:GetDocument'
                  - 'ssm-incidents:List*'
                  - 'support:Describe*'
                  - 'tag:GetTagKeys'
                  - 'wellarchitected:List*'
                Resource: '*'
        - PolicyName: ProwlerScanRoleAdditionalViewPrivilegesApiGateway
          PolicyDocument:
@@ -18,6 +18,7 @@
                "ds:List*",
                "dynamodb:GetResourcePolicy",
                "ec2:GetEbsEncryptionByDefault",
                "ec2:GetSnapshotBlockPublicAccessState",
                "ec2:GetInstanceMetadataDefaults",
                "ecr:Describe*",
                "ecr:GetRegistryScanningConfiguration",
22  poetry.lock (generated)
@@ -712,17 +712,17 @@ files = [

[[package]]
name = "boto3"
-version = "1.34.149"
+version = "1.34.151"
description = "The AWS SDK for Python"
optional = false
python-versions = ">=3.8"
files = [
-    {file = "boto3-1.34.149-py3-none-any.whl", hash = "sha256:11edeeacdd517bda3b7615b754d8440820cdc9ddd66794cc995a9693ddeaa3be"},
-    {file = "boto3-1.34.149.tar.gz", hash = "sha256:f4e6489ba9dc7fb37d53e0e82dbc97f2cb0a4969ef3970e2c88b8f94023ae81a"},
+    {file = "boto3-1.34.151-py3-none-any.whl", hash = "sha256:35bc76faacf1667d3fbb66c1966acf2230ef26206557efc26d9d9d79337bef43"},
+    {file = "boto3-1.34.151.tar.gz", hash = "sha256:30498a76b6f651ee2af7ae8edc1704379279ab8b91f1a8dd1f4ddf51259b0bc2"},
]

[package.dependencies]
-botocore = ">=1.34.149,<1.35.0"
+botocore = ">=1.34.151,<1.35.0"
jmespath = ">=0.7.1,<2.0.0"
s3transfer = ">=0.10.0,<0.11.0"

@@ -731,13 +731,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]

[[package]]
name = "botocore"
-version = "1.34.150"
+version = "1.34.151"
description = "Low-level, data-driven core of boto 3."
optional = false
python-versions = ">=3.8"
files = [
-    {file = "botocore-1.34.150-py3-none-any.whl", hash = "sha256:b988d47f4d502df85befce11a48002421e4e6ea4289997b5e0261bac5fa76ce6"},
-    {file = "botocore-1.34.150.tar.gz", hash = "sha256:4d23387e0f076d87b637a2a35c0ff2b8daca16eace36b63ce27f65630c6b375a"},
+    {file = "botocore-1.34.151-py3-none-any.whl", hash = "sha256:9018680d7d4a8060c26d127ceec5ab5b270879f423ea39b863d8a46f3e34c404"},
+    {file = "botocore-1.34.151.tar.gz", hash = "sha256:0d0968e427a94378f295b49d59170dad539938487ec948de3d030f06092ec6dc"},
]

[package.dependencies]

@@ -1586,13 +1586,13 @@ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]

[[package]]
name = "google-api-python-client"
-version = "2.138.0"
+version = "2.139.0"
description = "Google API Client Library for Python"
optional = false
python-versions = ">=3.7"
files = [
-    {file = "google_api_python_client-2.138.0-py2.py3-none-any.whl", hash = "sha256:1dd279124e4e77cbda4769ffb4abe7e7c32528ef1e18739320fef2a07b750764"},
-    {file = "google_api_python_client-2.138.0.tar.gz", hash = "sha256:31080fbf0e64687876135cc23d1bec1ca3b80d7702177dd17b04131ea889eb70"},
+    {file = "google_api_python_client-2.139.0-py2.py3-none-any.whl", hash = "sha256:1850a92505d91a82e2ca1635ab2b8dff179f4b67082c2651e1db332e8039840c"},
+    {file = "google_api_python_client-2.139.0.tar.gz", hash = "sha256:ed4bc3abe2c060a87412465b4e8254620bbbc548eefc5388e2c5ff912d36a68b"},
]

[package.dependencies]

@@ -4898,4 +4898,4 @@ test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-it

[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<3.13"
-content-hash = "97181474bd8e13193f35529d5a173633b0c14079676c10536839912a136a95e3"
+content-hash = "324ea427d651cea1513f4a7be9b86f420eb75efcd4e54e7021835e517cd81525"
@@ -3,7 +3,5 @@ import sys

from prowler.__main__ import prowler

if __name__ == "__main__":
    sys.exit(prowler())
@@ -5,15 +5,6 @@ import sys
from os import environ

from colorama import Fore, Style
-from prowler.providers.aws.services.ec2.ec2_service import PaginatedDict, PaginatedList
-import pdb
-import psutil
-import os
-
-def check_memory_usage():
-    process = psutil.Process(os.getpid())
-    memory_info = process.memory_info()
-    return memory_info.rss  # Resident Set Size: memory in bytes

from prowler.config.config import (
    csv_file_suffix,

@@ -79,18 +70,9 @@ from prowler.providers.aws.lib.s3.s3 import S3
from prowler.providers.aws.lib.security_hub.security_hub import SecurityHub
from prowler.providers.common.provider import Provider
from prowler.providers.common.quick_inventory import run_provider_quick_inventory
-from memory_profiler import profile
-
-from pympler import asizeof
-from pympler import tracker
-from pympler import muppy
-from pympler import summary
-import objgraph
-
-from memory_profiler import profile


def prowler():
-    #tr = tracker.SummaryTracker()
    # Parse Arguments
    # Refactor(CLI)
    parser = ProwlerArgumentParser()

@@ -196,9 +178,7 @@ def prowler():
        categories,
        provider,
    )
-    #pdb.set_trace()  # Break
-    memory_usage = check_memory_usage()
-    print(f"Memory usage at checks_to_execute: {memory_usage / (1024 * 1024)} MB")

    # if --list-checks-json, dump a json file and exit
    if args.list_checks_json:
        print(list_checks_json(provider, sorted(checks_to_execute)))

@@ -213,10 +193,6 @@ def prowler():
    Provider.set_global_provider(args)
    global_provider = Provider.get_global_provider()

-    memory_usage = check_memory_usage()
-    print(f"Memory usage at global_provider = Provider. __main__.py:217 : {memory_usage / (1024 * 1024)} MB")
-    #pdb.set_trace()  # Break
-
    # Print Provider Credentials
    if not args.only_logs:
        global_provider.print_credentials()

@@ -266,11 +242,7 @@ def prowler():
        sys.exit()

    # Execute checks
-    paginated = 0
-    if paginated:
-        findings = PaginatedList()
-    else:
-        findings = []
+    findings = []

    if len(checks_to_execute):
        findings = execute_checks(

@@ -283,9 +255,7 @@ def prowler():
        logger.error(
            "There are no checks to execute. Please, check your input arguments"
        )
-    memory_usage = check_memory_usage()
-    print(f"Memory usage at execute_checks __main__.py:284 {memory_usage / (1024 * 1024)} MB")
-    #pdb.set_trace()  # Break

    # Prowler Fixer
    if global_provider.output_options.fixer:
        print(f"{Style.BRIGHT}\nRunning Prowler Fixer, please wait...{Style.RESET_ALL}")

@@ -337,11 +307,6 @@ def prowler():
    ]

    generated_outputs = {"regular": [], "compliance": []}
    logger.debug("Output generated")

-    #pdb.set_trace()  # Break
-    memory_usage = check_memory_usage()
-    print(f"Memory usage at findings_output: {memory_usage / (1024 * 1024)} MB")
-
    if args.output_formats:
        for mode in args.output_formats:

@@ -358,9 +323,7 @@ def prowler():
                generated_outputs["regular"].append(csv_output)
                # Write CSV Finding Object to file
                csv_output.batch_write_data_to_file()
-                #pdb.set_trace()  # Break
-                memory_usage = check_memory_usage()
-                print(f"Memory usage at csv_output_batch_write_data: {memory_usage / (1024 * 1024)} MB")

            if mode == "json-asff":
                asff_output = ASFF(
                    findings=finding_outputs,

@@ -389,9 +352,7 @@ def prowler():
                html_output.batch_write_data_to_file(
                    provider=global_provider, stats=stats
                )
-                #pdb.set_trace()  # Break
-                memory_usage = check_memory_usage()
-                print(f"Memory usage at html_output_batch_write: {memory_usage / (1024 * 1024)} MB")

        # Compliance Frameworks
        input_compliance_frameworks = set(
            global_provider.output_options.output_modes

@@ -632,7 +593,6 @@ def prowler():
                aws_partition=global_provider.identity.partition,
                aws_session=global_provider.session.current_session,
                findings=asff_output.data,
-                status=global_provider.output_options.status,
                send_only_fails=global_provider.output_options.send_sh_only_fails,
                aws_security_hub_available_regions=security_hub_regions,
            )

@@ -686,22 +646,12 @@ def prowler():
        print(
            f"\nDetailed compliance results are in {Fore.YELLOW}{global_provider.output_options.output_directory}/compliance/{Style.RESET_ALL}\n"
        )
-    # Print the memory usage of the largest objects
-    #all_objects = muppy.get_objects()
-    #sum1 = summary.summarize(all_objects)
-    #summary.print_(sum1)
-    #objgraph.show_most_common_types(limit=20)
-    #objgraph.show_growth()
-

    # If custom checks were passed, remove the modules
    if checks_folder:
        remove_custom_checks_module(checks_folder, provider)

    # If there are failed findings exit code 3, except if -z is input
-    #pdb.set_trace()  # Break
-    memory_usage = check_memory_usage()
-    print(f"Memory usage at ending: {memory_usage / (1024 * 1024)} MB")
    if (
        not args.ignore_exit_code_3
        and stats["total_fail"] > 0
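All of the instrumentation removed above follows one pattern: sample the process Resident Set Size with psutil around a step and print the delta. Below is a standalone, hedged sketch of that pattern; the `expensive_step` callable is a placeholder for illustration only, not part of Prowler.

```python
import os

import psutil


def rss_mb() -> float:
    """Current process Resident Set Size in MB."""
    return psutil.Process(os.getpid()).memory_info().rss / (1024 * 1024)


def measure(label: str, func, *args, **kwargs):
    """Run func and report how much RSS grew while it ran."""
    before = rss_mb()
    result = func(*args, **kwargs)
    print(f"{label}: {rss_mb() - before:.1f} MB RSS growth")
    return result


# Placeholder usage: wrap any phase you want to profile.
def expensive_step():
    return [b"x" * 1024 for _ in range(10_000)]


data = measure("expensive_step", expensive_step)
```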
@@ -10,7 +10,7 @@ from prowler.lib.logger import logger

timestamp = datetime.today()
timestamp_utc = datetime.now(timezone.utc).replace(tzinfo=timezone.utc)
-prowler_version = "4.3.0"
+prowler_version = "4.3.1"
html_logo_url = "https://github.com/prowler-cloud/prowler/"
square_logo_img = "https://prowler.com/wp-content/uploads/logo-html.png"
aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"
@@ -9,7 +9,6 @@ import traceback
from pkgutil import walk_packages
from types import ModuleType
from typing import Any
-from memory_profiler import profile

from alive_progress import alive_bar
from colorama import Fore, Style

@@ -24,14 +23,6 @@ from prowler.lib.outputs.outputs import report
from prowler.lib.utils.utils import open_file, parse_json_file, print_boxes
from prowler.providers.common.models import Audit_Metadata

-import pdb
-import psutil
-import os
-
-def check_memory_usage():
-    process = psutil.Process(os.getpid())
-    memory_info = process.memory_info()
-    return memory_info.rss  # Resident Set Size: memory in bytes

# Load all checks metadata
def bulk_load_checks_metadata(provider: str) -> dict:

@@ -442,14 +433,11 @@ def list_modules(provider: str, service: str):


# Import an input check using its path
def import_check(check_path: str) -> ModuleType:
-    print(f"{check_memory_usage() / (1024 * 1024)} MB : Memory usage before import {check_path}")
    lib = importlib.import_module(f"{check_path}")
-    print(f"{check_memory_usage() / (1024 * 1024)} MB : Memory usage after import {check_path}")
    return lib


def run_check(check: Check, verbose: bool = False, only_logs: bool = False) -> list:
    """
    Run the check and return the findings

@@ -718,6 +706,14 @@ def execute(
            check_class, verbose, global_provider.output_options.only_logs
        )

+        # Exclude findings per status
+        if global_provider.output_options.status:
+            check_findings = [
+                finding
+                for finding in check_findings
+                if finding.status in global_provider.output_options.status
+            ]
+
        # Update Audit Status
        services_executed.add(service)
        checks_executed.add(check_name)

@@ -94,7 +94,6 @@ class Check(ABC, Check_Metadata_Model):
        )
        # Store it to validate them with Pydantic
        data = Check_Metadata_Model.parse_file(metadata_file).dict()
-        # data = {}
        # Calls parents init function
        super().__init__(**data)
        # TODO: verify that the CheckID is the same as the filename and classname
@@ -86,7 +86,6 @@ class Finding(BaseModel):
    notes: str
    prowler_version: str = prowler_version

    @classmethod
    def generate_output(
        cls, provider: Provider, check_output: Check_Report

@@ -187,7 +186,7 @@ class Finding(BaseModel):
                f"prowler-{provider.type}-{check_output.check_metadata.CheckID}-{output_data['account_uid']}-"
                f"{output_data['region']}-{output_data['resource_name']}"
            )
            logger.debug("Generating finding: = %s", output_data["finding_uid"])

            return cls(**output_data)
        except Exception as error:
            logger.error(
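For reference, the `finding_uid` built above is plain string concatenation; with hypothetical values it renders like this.

```python
# Hypothetical values, only to illustrate the finding_uid format above.
provider_type = "aws"
check_id = "ec2_instance_older_than_specific_days"
account_uid = "123456789012"
region = "eu-west-1"
resource_name = "i-0abcd1234efgh5678"

finding_uid = (
    f"prowler-{provider_type}-{check_id}-{account_uid}-"
    f"{region}-{resource_name}"
)
print(finding_uid)
# prowler-aws-ec2_instance_older_than_specific_days-123456789012-eu-west-1-i-0abcd1234efgh5678
```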
@@ -25,6 +25,7 @@ def stdout_report(finding, color, verbose, status, fix):
    )


# TODO: Only pass check_findings, provider.output_options and provider.type
def report(check_findings, provider):
    try:
        output_options = provider.output_options
@@ -4,7 +4,6 @@ import sys
from argparse import Namespace
from datetime import datetime

from boto3 import client, session
from boto3.session import Session
from botocore.config import Config

@@ -50,6 +49,7 @@ from prowler.providers.aws.models import (
from prowler.providers.common.models import Audit_Metadata
from prowler.providers.common.provider import Provider


class AwsProvider(Provider):
    _type: str = "aws"
    _identity: AWSIdentityInfo

@@ -62,7 +62,7 @@ class AwsProvider(Provider):
    _output_options: AWSOutputOptions
    # TODO: this is not optional, enforce for all providers
    audit_metadata: Audit_Metadata

    def __init__(self, arguments: Namespace):
        logger.info("Initializing AWS provider ...")
        ######## Parse Arguments
@@ -2877,6 +2877,8 @@
        "ap-southeast-1",
        "ap-southeast-2",
        "eu-central-1",
        "eu-north-1",
        "eu-south-2",
        "eu-west-1",
        "eu-west-2",
        "eu-west-3",

@@ -7658,6 +7660,8 @@
  "payment-cryptography": {
    "regions": {
      "aws": [
        "ap-northeast-1",
        "eu-central-1",
        "us-east-1",
        "us-east-2",
        "us-west-2"
@@ -22,7 +22,7 @@ class SecurityHub:

    Methods:
        __init__: Initializes the SecurityHub object with necessary attributes.
-       filter: Filters findings based on region and status, returning a dictionary with findings per region.
+       filter: Filters findings based on region, returning a dictionary with findings per region.
        verify_enabled_per_region: Verifies and stores enabled regions with SecurityHub clients.
        batch_send_to_security_hub: Sends findings to Security Hub and returns the count of successfully sent findings.
        archive_previous_findings: Archives findings that are not present in the current execution.

@@ -41,7 +41,6 @@ class SecurityHub:
        aws_account_id: str,
        aws_partition: str,
        findings: list[AWSSecurityFindingFormat] = [],
-        status: list[str] = [],
        aws_security_hub_available_regions: list[str] = [],
        send_only_fails: bool = False,
    ) -> "SecurityHub":

@@ -50,20 +49,19 @@ class SecurityHub:
        self._aws_partition = aws_partition

        self._enabled_regions = None
-        self._findings_per_region = None
+        self._findings_per_region = {}

        if aws_security_hub_available_regions:
            self._enabled_regions = self.verify_enabled_per_region(
                aws_security_hub_available_regions
            )
        if findings and self._enabled_regions:
-            self._findings_per_region = self.filter(findings, send_only_fails, status)
+            self._findings_per_region = self.filter(findings, send_only_fails)

    def filter(
        self,
        findings: list[AWSSecurityFindingFormat],
        send_only_fails: bool,
-        status: list[str],
    ) -> dict:
        """
        Filters the given list of findings based on the provided criteria and returns a dictionary containing findings per region.

@@ -71,46 +69,38 @@ class SecurityHub:
        Args:
            findings (list[AWSSecurityFindingFormat]): List of findings to filter.
            send_only_fails (bool): Flag indicating whether to send only findings with status 'FAILED'.
-            status (list[str]): List of valid statuses to filter the findings.

        Returns:
            dict: A dictionary containing findings per region after applying the filtering criteria.
        """

        findings_per_region = {}
        try:
            # Create a key per audited region
            for region in self._enabled_regions.keys():
                findings_per_region[region] = []

            # Create a key per audited region
            for region in self._enabled_regions.keys():
                findings_per_region[region] = []

            for finding in findings:
                # We don't send findings to not enabled regions
                if finding.Resources[0].Region not in findings_per_region:
                    continue

                if (
                    finding.Compliance.Status != "FAILED"
                    or finding.Compliance.Status == "WARNING"
                ) and send_only_fails:
                    continue

                # SecurityHub valid statuses are: PASSED, FAILED, WARNING
                if status:
                    if finding.Compliance.Status == "PASSED" and "PASS" not in status:
                        continue
                    if finding.Compliance.Status == "FAILED" and "FAIL" not in status:
                        continue
                    # Check muted finding
                    if finding.Compliance.Status == "WARNING":
            for finding in findings:
                # We don't send findings to not enabled regions
                if finding.Resources[0].Region not in findings_per_region:
                    continue

                # Get the finding region
                # We can do that since the finding always stores just one finding
                region = finding.Resources[0].Region
                if (
                    finding.Compliance.Status != "FAILED"
                    or finding.Compliance.Status == "WARNING"
                ) and send_only_fails:
                    continue

                # Include that finding within their region
                findings_per_region[region].append(finding)
                # Get the finding region
                # We can do that since the finding always stores just one finding
                region = finding.Resources[0].Region

                # Include that finding within their region
                findings_per_region[region].append(finding)
        except Exception as error:
            logger.error(
                f"{error.__class__.__name__} -- [{error.__traceback__.tb_lineno}]: {error}"
            )
        return findings_per_region

    def verify_enabled_per_region(
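With `status` gone from the constructor and from `filter`, the class is driven only by the findings it receives and the `send_only_fails` flag. Below is a hedged usage sketch based on the signature and the `__main__.py` call shown above; the input values are hypothetical, and `batch_send_to_security_hub` is taken from the docstring, so its exact arguments may differ.

```python
import boto3

from prowler.providers.aws.lib.security_hub.security_hub import SecurityHub

# Hypothetical inputs -- a real run gets these from the audited AWS provider.
session = boto3.session.Session()
asff_findings = []                  # ASFF findings produced by the JSON-ASFF output
available_regions = ["eu-west-1"]   # regions where Security Hub is available

security_hub = SecurityHub(
    aws_session=session,
    aws_account_id="123456789012",
    aws_partition="aws",
    findings=asff_findings,
    send_only_fails=True,
    aws_security_hub_available_regions=available_regions,
)

# Findings are already bucketed per enabled region by filter(); send them.
sent_count = security_hub.batch_send_to_security_hub()
print(f"Sent {sent_count} findings to Security Hub")
```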
@@ -17,7 +17,7 @@ class documentdb_cluster_backup_enabled(Check):
            report.status_extended = (
                f"DocumentDB Cluster {cluster.id} does not have backup enabled."
            )
-            if cluster.backup_retention_period > documentdb_client.audit_config.get(
+            if cluster.backup_retention_period >= documentdb_client.audit_config.get(
                "minimum_backup_retention_period", 7
            ):
                report.status = "PASS"
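The operator change from `>` to `>=` means a retention period equal to the configured minimum now passes. A small illustration with the default of 7 days:

```python
minimum_backup_retention_period = 7  # default used by the check above

for retention in (1, 7, 14):
    old_pass = retention > minimum_backup_retention_period   # behaviour before this change
    new_pass = retention >= minimum_backup_retention_period  # behaviour after this change
    print(f"retention={retention}: old={old_pass}, new={new_pass}")
# retention=7 flips from FAIL to PASS; 1 and 14 are unchanged.
```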
@@ -1,6 +1,6 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
-from prowler.providers.aws.services.ec2.ec2_service import PaginatedDict, PaginatedList


class ec2_instance_internet_facing_with_instance_profile(Check):
    def execute(self):
@@ -1,10 +1,8 @@
from datetime import datetime, timezone
-from pympler import asizeof

from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
-import sys
-import gc


class ec2_instance_older_than_specific_days(Check):
    def execute(self):

@@ -14,10 +12,6 @@ class ec2_instance_older_than_specific_days(Check):
        max_ec2_instance_age_in_days = ec2_client.audit_config.get(
            "max_ec2_instance_age_in_days", 180
        )
-        size_bytes = asizeof.asizeof(ec2_client.instances)
-        size_mb = size_bytes / (1024 * 1024)
-        print("Size of dictionary:", size_mb, "MB")

        for instance in ec2_client.instances:
            report = Check_Report_AWS(self.metadata())
            report.region = instance.region

@@ -37,6 +31,4 @@ class ec2_instance_older_than_specific_days(Check):

            findings.append(report)

-        ec2_client.cleanup()
        return findings
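The check above compares each instance's age against `max_ec2_instance_age_in_days` (180 by default). A hedged sketch of that comparison; the launch time is hypothetical, and the real check reads it from the EC2 service model:

```python
from datetime import datetime, timezone

max_ec2_instance_age_in_days = 180
launch_time = datetime(2024, 1, 1, tzinfo=timezone.utc)  # hypothetical launch time

age_in_days = (datetime.now(timezone.utc) - launch_time).days
# Assumption: instances older than the threshold are reported as FAIL.
status = "FAIL" if age_in_days > max_ec2_instance_age_in_days else "PASS"
print(f"{age_in_days} days old -> {status}")
```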
@@ -3,430 +3,67 @@ from typing import Optional

from botocore.client import ClientError
from pydantic import BaseModel
-from pympler import asizeof

from prowler.lib.logger import logger
from prowler.lib.scan_filters.scan_filters import is_resource_filtered
from prowler.providers.aws.lib.service.service import AWSService
-import sys
-import gc
-
-import dill as pickle
-import os
-import atexit
-from collections import deque
-from sys import getsizeof
-import tempfile
-
-import boto3
-from moto import mock_aws
-from memory_profiler import profile
-import pdb
-import psutil
-import os
-
-def check_memory_usage():
-    process = psutil.Process(os.getpid())
-    memory_info = process.memory_info()
-    return memory_info.rss  # Resident Set Size: memory in bytes
-

class PaginatedList:
|
||||
instance_counter = 0
|
||||
|
||||
def __init__(self, page_size=1):
|
||||
self.page_size = page_size
|
||||
self.file_paths = []
|
||||
self.cache = {}
|
||||
self.length = 0 # Track the length dynamically
|
||||
self.instance_id = PaginatedList.instance_counter
|
||||
PaginatedList.instance_counter += 1
|
||||
self.temp_dir = tempfile.mkdtemp(prefix=f'paginated_list_{self.instance_id}_', dir='/Users/snaow/repos/prowler')
|
||||
atexit.register(self.cleanup)
|
||||
|
||||
def _save_page(self, page_data, page_num):
|
||||
file_path = os.path.join(self.temp_dir, f'page_{page_num}.pkl')
|
||||
with open(file_path, 'wb') as f:
|
||||
pickle.dump(page_data, f)
|
||||
if page_num >= len(self.file_paths):
|
||||
self.file_paths.append(file_path)
|
||||
else:
|
||||
self.file_paths[page_num] = file_path
|
||||
|
||||
def _load_page(self, page_num):
|
||||
if page_num in self.cache:
|
||||
return self.cache[page_num]
|
||||
with open(self.file_paths[page_num], 'rb') as f:
|
||||
page_data = pickle.load(f)
|
||||
self.cache[page_num] = page_data
|
||||
return page_data
|
||||
|
||||
def __getitem__(self, index):
|
||||
if index < 0 or index >= self.length:
|
||||
raise IndexError('Index out of range')
|
||||
page_num = index // self.page_size
|
||||
page_index = index % self.page_size
|
||||
page_data = self._load_page(page_num)
|
||||
return page_data[page_index]
|
||||
|
||||
def __setitem__(self, index, value):
|
||||
if index < 0 or index >= self.length:
|
||||
raise IndexError('Index out of range')
|
||||
page_num = index // self.page_size
|
||||
page_index = index % self.page_size
|
||||
page_data = self._load_page(page_num)
|
||||
page_data[page_index] = value
|
||||
self.cache[page_num] = page_data
|
||||
self._save_page(page_data, page_num)
|
||||
|
||||
def __delitem__(self, index):
|
||||
if index < 0 or index >= self.length:
|
||||
raise IndexError('Index out of range')
|
||||
page_num = index // self.page_size
|
||||
page_index = index % self.page_size
|
||||
page_data = self._load_page(page_num)
|
||||
del page_data[page_index]
|
||||
self.cache[page_num] = page_data
|
||||
self._save_page(page_data, page_num)
|
||||
self.length -= 1
|
||||
|
||||
# Shift subsequent elements
|
||||
for i in range(index, self.length):
|
||||
next_page_num = (i + 1) // self.page_size
|
||||
next_page_index = (i + 1) % self.page_size
|
||||
if next_page_index == 0:
|
||||
self._save_page(page_data, page_num)
|
||||
page_num = next_page_num
|
||||
page_data = self._load_page(page_num)
|
||||
page_data[page_index] = page_data.pop(next_page_index)
|
||||
page_index = next_page_index
|
||||
|
||||
# Save the last page
|
||||
self._save_page(page_data, page_num)
|
||||
|
||||
# Remove the last page if it's empty
|
||||
if self.length % self.page_size == 0:
|
||||
os.remove(self.file_paths.pop())
|
||||
self.cache.pop(page_num, None)
|
||||
|
||||
def __len__(self):
|
||||
return self.length
|
||||
|
||||
def __iter__(self):
|
||||
for page_num in range(len(self.file_paths)):
|
||||
page_data = self._load_page(page_num)
|
||||
for item in page_data:
|
||||
yield item
|
||||
|
||||
def append(self, value):
|
||||
page_num = self.length // self.page_size
|
||||
page_index = self.length % self.page_size
|
||||
if page_num >= len(self.file_paths):
|
||||
self._save_page([], page_num)
|
||||
page_data = self._load_page(page_num)
|
||||
page_data.append(value)
|
||||
self.cache[page_num] = page_data
|
||||
self._save_page(page_data, page_num)
|
||||
self.length += 1
|
||||
|
||||
def extend(self, values):
|
||||
for value in values:
|
||||
self.append(value)
|
||||
|
||||
def remove(self, value):
|
||||
for index, item in enumerate(self):
|
||||
if item == value:
|
||||
del self[index]
|
||||
return
|
||||
raise ValueError(f"{value} not in list")
|
||||
|
||||
def pop(self, index=-1):
|
||||
if self.length == 0:
|
||||
raise IndexError("pop from empty list")
|
||||
if index < 0:
|
||||
index += self.length
|
||||
value = self[index]
|
||||
del self[index]
|
||||
return value
|
||||
|
||||
def clear(self):
|
||||
self.cache.clear()
|
||||
self.file_paths = []
|
||||
self.length = 0
|
||||
|
||||
def index(self, value, start=0, stop=None):
|
||||
if stop is None:
|
||||
stop = self.length
|
||||
for i in range(start, stop):
|
||||
if self[i] == value:
|
||||
return i
|
||||
raise ValueError(f"{value} is not in list")
|
||||
|
||||
def get(self, index, default=None):
|
||||
try:
|
||||
return self[index]
|
||||
except IndexError:
|
||||
return default
|
||||
|
||||
def cleanup(self):
|
||||
for file_path in self.file_paths:
|
||||
if os.path.exists(file_path):
|
||||
os.remove(file_path)
|
||||
if os.path.exists(self.temp_dir):
|
||||
os.rmdir(self.temp_dir)
|
||||
|
||||
def __del__(self):
|
||||
self.cleanup()
|
||||
|
||||
|
||||
class PaginatedDict:
|
||||
instance_counter = 0
|
||||
|
||||
def __init__(self, page_size=1):
|
||||
self.page_size = page_size
|
||||
self.file_paths = []
|
||||
self.cache = {}
|
||||
self.key_to_page = {}
|
||||
self.length = 0 # Track the number of items
|
||||
self.instance_id = PaginatedDict.instance_counter
|
||||
PaginatedDict.instance_counter += 1
|
||||
self.temp_dir = tempfile.mkdtemp(prefix=f'paginated_dict_{self.instance_id}_', dir='/Users/snaow/repos/prowler')
|
||||
print(f"Temporary directory for instance {self.instance_id}: {self.temp_dir}")
|
||||
atexit.register(self.cleanup)
|
||||
|
||||
def _save_page(self, page_data, page_num):
|
||||
file_path = os.path.join(self.temp_dir, f'page_{page_num}.pkl')
|
||||
with open(file_path, 'wb') as f:
|
||||
pickle.dump(page_data, f)
|
||||
if page_num >= len(self.file_paths):
|
||||
self.file_paths.append(file_path)
|
||||
else:
|
||||
self.file_paths[page_num] = file_path
|
||||
|
||||
def _load_page(self, page_num):
|
||||
if page_num in self.cache:
|
||||
return self.cache[page_num]
|
||||
with open(self.file_paths[page_num], 'rb') as f:
|
||||
page_data = pickle.load(f)
|
||||
self.cache[page_num] = page_data
|
||||
return page_data
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
if key in self.key_to_page:
|
||||
page_num = self.key_to_page[key]
|
||||
page_data = self._load_page(page_num)
|
||||
page_data[key] = value
|
||||
else:
|
||||
page_num = self.length // self.page_size
|
||||
if page_num >= len(self.file_paths):
|
||||
self._save_page({}, page_num)
|
||||
page_data = self._load_page(page_num)
|
||||
page_data[key] = value
|
||||
self.key_to_page[key] = page_num
|
||||
self.length += 1
|
||||
self.cache[page_num] = page_data
|
||||
self._save_page(page_data, page_num)
|
||||
|
||||
def __getitem__(self, key):
|
||||
if key not in self.key_to_page:
|
||||
raise KeyError(f"Key {key} not found")
|
||||
page_num = self.key_to_page[key]
|
||||
page_data = self._load_page(page_num)
|
||||
return page_data[key]
|
||||
|
||||
def __delitem__(self, key):
|
||||
if key not in self.key_to_page:
|
||||
raise KeyError(f"Key {key} not found")
|
||||
page_num = self.key_to_page[key]
|
||||
page_data = self._load_page(page_num)
|
||||
del page_data[key]
|
||||
del self.key_to_page[key]
|
||||
self.cache[page_num] = page_data
|
||||
self._save_page(page_data, page_num)
|
||||
self.length -= 1
|
||||
|
||||
def __len__(self):
|
||||
return self.length
|
||||
|
||||
def __iter__(self):
|
||||
for page_num in range(len(self.file_paths)):
|
||||
page_data = self._load_page(page_num)
|
||||
for key in page_data:
|
||||
yield key
|
||||
|
||||
def get(self, key, default=None):
|
||||
try:
|
||||
return self[key]
|
||||
except KeyError:
|
||||
return default
|
||||
|
||||
def keys(self):
|
||||
for key in self:
|
||||
yield key
|
||||
|
||||
def values(self):
|
||||
for key in self:
|
||||
yield self[key]
|
||||
|
||||
def items(self):
|
||||
for key in self:
|
||||
yield (key, self[key])
|
||||
|
||||
def clear(self):
|
||||
self.cache.clear()
|
||||
self.key_to_page.clear()
|
||||
self.file_paths = []
|
||||
self.length = 0
|
||||
|
||||
def cleanup(self):
|
||||
for file_path in self.file_paths:
|
||||
if os.path.exists(file_path):
|
||||
os.remove(file_path)
|
||||
if os.path.exists(self.temp_dir):
|
||||
os.rmdir(self.temp_dir)
|
||||
|
||||
def __del__(self):
|
||||
self.cleanup()
|
||||
|
||||
################## EC2
|
||||
|
||||
class EC2(AWSService):
|
||||
|
||||
def __init__(self, provider):
|
||||
# Call AWSService's __init__
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"Memory usage at __init__ ec2_service.py : {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
super().__init__(__class__.__name__, provider)
|
||||
self.account_arn_template = f"arn:{self.audited_partition}:ec2:{self.region}:{self.audited_account}:account"
|
||||
paginated = 1
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"Memory usage at super() ec2_service.py : {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
if paginated:
|
||||
self.instances = PaginatedList()
|
||||
self.security_groups = PaginatedList()
|
||||
self.regions_with_sgs = PaginatedList()
|
||||
self.volumes_with_snapshots = PaginatedDict()
|
||||
self.regions_with_snapshots = PaginatedDict()
|
||||
self.network_acls = PaginatedList()
|
||||
self.snapshots = PaginatedList()
|
||||
self.network_interfaces = PaginatedList()
|
||||
self.images = PaginatedList()
|
||||
self.volumes = PaginatedList()
|
||||
self.attributes_for_regions = PaginatedDict()
|
||||
self.ebs_encryption_by_default = PaginatedList()
|
||||
self.elastic_ips = PaginatedList()
|
||||
self.ebs_block_public_access_snapshots_states = PaginatedList()
|
||||
self.instance_metadata_defaults = PaginatedList()
|
||||
self.launch_templates = PaginatedList()
|
||||
else:
|
||||
self.instances = []
|
||||
self.security_groups = []
|
||||
self.regions_with_sgs = []
|
||||
self.volumes_with_snapshots = {}
|
||||
self.regions_with_snapshots = {}
|
||||
self.network_acls = []
|
||||
self.snapshots = []
|
||||
self.network_interfaces = []
|
||||
self.images = []
|
||||
self.volumes = []
|
||||
self.attributes_for_regions = {}
|
||||
self.ebs_encryption_by_default = []
|
||||
self.elastic_ips = []
|
||||
self.ebs_block_public_access_snapshots_states = []
|
||||
self.instance_metadata_defaults = []
|
||||
self.launch_templates = []
|
||||
|
||||
|
||||
self.instances = []
|
||||
self.__threading_call__(self.__describe_instances__)
|
||||
#self.__describe_instances__(next(iter(self.regional_clients.values())))
|
||||
self.__threading_call__(self.__get_instance_user_data__, self.instances)
|
||||
self.security_groups = []
|
||||
self.regions_with_sgs = []
|
||||
self.__threading_call__(self.__describe_security_groups__)
|
||||
self.network_acls = []
|
||||
self.__threading_call__(self.__describe_network_acls__)
|
||||
self.snapshots = []
|
||||
self.volumes_with_snapshots = {}
|
||||
self.regions_with_snapshots = {}
|
||||
self.__threading_call__(self.__describe_snapshots__)
|
||||
self.__threading_call__(self.__determine_public_snapshots__, self.snapshots)
|
||||
self.network_interfaces = []
|
||||
self.__threading_call__(self.__describe_network_interfaces__)
|
||||
self.images = []
|
||||
self.__threading_call__(self.__describe_images__)
|
||||
self.volumes = []
|
||||
self.__threading_call__(self.__describe_volumes__)
|
||||
self.attributes_for_regions = {}
|
||||
self.__threading_call__(self.__get_resources_for_regions__)
|
||||
self.ebs_encryption_by_default = []
|
||||
self.__threading_call__(self.__get_ebs_encryption_settings__)
|
||||
self.elastic_ips = []
|
||||
self.__threading_call__(self.__describe_ec2_addresses__)
|
||||
self.ebs_block_public_access_snapshots_states = []
|
||||
self.__threading_call__(self.__get_snapshot_block_public_access_state__)
|
||||
self.instance_metadata_defaults = []
|
||||
self.__threading_call__(self.__get_instance_metadata_defaults__)
|
||||
self.launch_templates = []
|
||||
self.__threading_call__(self.__describe_launch_templates)
|
||||
self.__threading_call__(
|
||||
self.__get_launch_template_versions__, self.launch_templates
|
||||
)
|
||||
|
||||
print("MY DICT---<>")
|
||||
print(list(self.instances))
|
||||
def cleanup(self):
|
||||
del self.instances
|
||||
del self.security_groups
|
||||
del self.regions_with_sgs
|
||||
del self.volumes_with_snapshots
|
||||
del self.regions_with_snapshots
|
||||
del self.network_acls
|
||||
del self.snapshots
|
||||
del self.network_interfaces
|
||||
del self.images
|
||||
del self.volumes
|
||||
del self.attributes_for_regions
|
||||
del self.ebs_encryption_by_default
|
||||
del self.elastic_ips
|
||||
del self.ebs_block_public_access_snapshots_states
|
||||
del self.instance_metadata_defaults
|
||||
del self.launch_templates
|
||||
gc.collect()
|
||||
|
||||
def __get_volume_arn_template__(self, region):
|
||||
return (
|
||||
f"arn:{self.audited_partition}:ec2:{region}:{self.audited_account}:volume"
|
||||
)
|
||||
|
||||
#@mock_aws
|
||||
|
||||
def __describe_instances__(self, regional_client):
|
||||
try:
|
||||
mock_enabled = 0
|
||||
if mock_enabled:
|
||||
ec2 = boto3.resource('ec2', region_name='eu-west-1')
|
||||
instances = []
|
||||
counter = 0
|
||||
|
||||
instance = ec2.create_instances(
|
||||
ImageId='ami-12345678', # Example AMI ID, replace with a valid one if testing with real AWS
|
||||
MinCount=3000,
|
||||
MaxCount=3000,
|
||||
InstanceType='t2.micro'
|
||||
)[0]
|
||||
instance.wait_until_running()
|
||||
instance.reload()
|
||||
|
||||
describe_instances_paginator = regional_client.get_paginator(
|
||||
"describe_instances"
|
||||
)
|
||||
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"Memory usage at regional_client.get_paginator ({regional_client.region}) : {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
describe_instances_paginator_iterator = describe_instances_paginator.paginate(PaginationConfig={'MaxItems': 1})
|
||||
#describe_instances_paginator_iterator = describe_instances_paginator.paginate()
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"Memory usage at describe_instances_paginator.paginate() ({regional_client.region}) : {memory_usage / (1024 * 1024)} MB")
|
||||
|
||||
for page in describe_instances_paginator_iterator:
|
||||
size_bytes = asizeof.asizeof(page)
|
||||
size_mb = size_bytes / (1024 * 1024)
|
||||
print("\tMemory usage of page", size_mb, "MB")
|
||||
#for page in describe_instances_paginator.paginate():
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"\tMemory usage at describe_instances_paginator.paginate() start : {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
for page in describe_instances_paginator.paginate():
|
||||
for reservation in page["Reservations"]:
|
||||
for instance in reservation["Instances"]:
|
||||
arn = f"arn:{self.audited_partition}:ec2:{regional_client.region}:{self.audited_account}:instance/{instance['InstanceId']}"
|
||||
#print(arn)
|
||||
if not self.audit_resources or (
|
||||
is_resource_filtered(arn, self.audit_resources)
|
||||
):
|
||||
@@ -461,17 +98,6 @@ class EC2(AWSService):
|
||||
tags=instance.get("Tags"),
|
||||
)
|
||||
)
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"\t\tMemory usage at self.instances.append : {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"\tMemory usage at describe_instances_paginator.paginate() end : {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"Memory usage at the end of describe_instances_paginator ({regional_client.region}): {memory_usage / (1024 * 1024)} MB")
|
||||
|
||||
|
||||
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
|
||||
@@ -482,8 +108,7 @@ class EC2(AWSService):
|
||||
describe_security_groups_paginator = regional_client.get_paginator(
|
||||
"describe_security_groups"
|
||||
)
|
||||
describe_security_groups_iterator = describe_security_groups_paginator.paginate(PaginationConfig={'MaxItems': 1})
|
||||
for page in describe_security_groups_iterator:
|
||||
for page in describe_security_groups_paginator.paginate():
|
||||
for sg in page["SecurityGroups"]:
|
||||
arn = f"arn:{self.audited_partition}:ec2:{regional_client.region}:{self.audited_account}:security-group/{sg['GroupId']}"
|
||||
if not self.audit_resources or (
|
||||
@@ -510,9 +135,6 @@ class EC2(AWSService):
|
||||
)
|
||||
if sg["GroupName"] != "default":
|
||||
self.regions_with_sgs.append(regional_client.region)
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"Memory usage after __describe_security_groups__: {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
|
||||
@@ -523,7 +145,7 @@ class EC2(AWSService):
|
||||
describe_network_acls_paginator = regional_client.get_paginator(
|
||||
"describe_network_acls"
|
||||
)
|
||||
for page in describe_network_acls_paginator.paginate(PaginationConfig={'MaxItems': 1}):
|
||||
for page in describe_network_acls_paginator.paginate():
|
||||
for nacl in page["NetworkAcls"]:
|
||||
arn = f"arn:{self.audited_partition}:ec2:{regional_client.region}:{self.audited_account}:network-acl/{nacl['NetworkAclId']}"
|
||||
if not self.audit_resources or (
|
||||
@@ -543,9 +165,6 @@ class EC2(AWSService):
|
||||
tags=nacl.get("Tags"),
|
||||
)
|
||||
)
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"Memory usage after __describe_network_acls__: {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
|
||||
@@ -579,9 +198,6 @@ class EC2(AWSService):
|
||||
self.volumes_with_snapshots[snapshot["VolumeId"]] = True
|
||||
# Store that the region has at least one snapshot
|
||||
self.regions_with_snapshots[regional_client.region] = snapshots_in_region
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"Memory usage after describe_snapshots: {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
|
||||
@@ -640,9 +256,6 @@ class EC2(AWSService):
|
||||
self.__add_network_interfaces_to_security_groups__(
|
||||
eni, interface.get("Groups", [])
|
||||
)
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"Memory usage after network_interfaces: {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
@@ -670,9 +283,6 @@ class EC2(AWSService):
|
||||
)["UserData"]
|
||||
if "Value" in user_data:
|
||||
instance.user_data = user_data["Value"]
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"Memory usage after __get_instance_user_data__: {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
except ClientError as error:
|
||||
if error.response["Error"]["Code"] == "InvalidInstanceID.NotFound":
|
||||
logger.warning(
|
||||
@@ -700,9 +310,6 @@ class EC2(AWSService):
|
||||
tags=image.get("Tags"),
|
||||
)
|
||||
)
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"Memory usage after __describe_images__: {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
|
||||
@@ -728,9 +335,6 @@ class EC2(AWSService):
|
||||
tags=volume.get("Tags"),
|
||||
)
|
||||
)
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"Memory usage after __describe_volumes__: {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
|
||||
@@ -762,9 +366,6 @@ class EC2(AWSService):
|
||||
tags=address.get("Tags"),
|
||||
)
|
||||
)
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"Memory usage after __describe_ec2_addresses__: {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
|
||||
@@ -825,9 +426,6 @@ class EC2(AWSService):
|
||||
region=regional_client.region,
|
||||
)
|
||||
)
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"Memory usage after __get_instance_metadata_defaults__: {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
|
||||
@@ -855,9 +453,6 @@ class EC2(AWSService):
|
||||
"has_snapshots": has_snapshots,
|
||||
"has_volumes": has_volumes,
|
||||
}
|
||||
memory_usage = check_memory_usage()
|
||||
print(f"Memory usage after __get_resources_for_regions__: {memory_usage / (1024 * 1024)} MB")
|
||||
#pdb.set_trace() # Break
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
|
||||
|
||||
@@ -15,23 +15,27 @@ class ecr_repositories_scan_vulnerabilities_in_latest_image(Check):
            for repository in registry.repositories:
                # First check if the repository has images
                if len(repository.images_details) > 0:
-                    # We only want to check the latest image pushed
+                    # We only want to check the latest image pushed that is scannable
                    image = repository.images_details[-1]

                    report = Check_Report_AWS(self.metadata())
                    report.region = repository.region
                    report.resource_id = repository.name
                    report.resource_arn = repository.arn
                    report.resource_tags = repository.tags
                    report.status = "PASS"
-                    report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned without findings."
+                    status_extended_prefix = f"ECR repository '{repository.name}' has scanned the {image.type} container image with digest '{image.latest_digest}' and tag '{image.latest_tag}' "
+                    report.status_extended = (
+                        status_extended_prefix + "without findings."
+                    )
                    if not image.scan_findings_status:
                        report.status = "FAIL"
-                        report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} without a scan."
+                        report.status_extended = (
+                            status_extended_prefix + "without a scan."
+                        )
                    elif image.scan_findings_status == "FAILED":
                        report.status = "FAIL"
                        report.status_extended = (
-                            f"ECR repository {repository.name} with scan status FAILED."
+                            status_extended_prefix + "with scan status FAILED."
                        )
                    elif (
                        image.scan_findings_status != "FAILED"

@@ -42,20 +46,29 @@ class ecr_repositories_scan_vulnerabilities_in_latest_image(Check):
                        and image.scan_findings_severity_count.critical
                    ):
                        report.status = "FAIL"
-                        report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}."
+                        report.status_extended = (
+                            status_extended_prefix
+                            + f"with findings: CRITICAL->{image.scan_findings_severity_count.critical}."
+                        )
                    elif minimum_severity == "HIGH" and (
                        image.scan_findings_severity_count.critical
                        or image.scan_findings_severity_count.high
                    ):
                        report.status = "FAIL"
-                        report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}."
+                        report.status_extended = (
+                            status_extended_prefix
+                            + f"with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}."
+                        )
                    elif minimum_severity == "MEDIUM" and (
                        image.scan_findings_severity_count.critical
                        or image.scan_findings_severity_count.high
                        or image.scan_findings_severity_count.medium
                    ):
                        report.status = "FAIL"
-                        report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}, MEDIUM->{image.scan_findings_severity_count.medium}."
+                        report.status_extended = (
+                            status_extended_prefix
+                            + f"with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}, MEDIUM->{image.scan_findings_severity_count.medium}."
+                        )

                    findings.append(report)
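The `minimum_severity` threshold used above cascades downwards: `CRITICAL` fails only on critical findings, `HIGH` on critical or high, and `MEDIUM` on any of the three. A hedged sketch of that cascade with hypothetical counts:

```python
def fails(minimum_severity: str, critical: int, high: int, medium: int) -> bool:
    """Mirror of the cascade in the check above, for illustration only."""
    if minimum_severity == "CRITICAL":
        return critical > 0
    if minimum_severity == "HIGH":
        return critical > 0 or high > 0
    if minimum_severity == "MEDIUM":
        return critical > 0 or high > 0 or medium > 0
    return False


# Hypothetical scan result: no critical, two high, five medium findings.
print(fails("CRITICAL", 0, 2, 5))  # False
print(fails("HIGH", 0, 2, 5))      # True
```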
@@ -17,14 +17,14 @@ class ECR(AWSService):
        super().__init__(__class__.__name__, provider)
        self.registry_id = self.audited_account
        self.registries = {}
-        self.__threading_call__(self.__describe_registries_and_repositories__)
-        self.__threading_call__(self.__describe_repository_policies__)
-        self.__threading_call__(self.__get_image_details__)
-        self.__threading_call__(self.__get_repository_lifecycle_policy__)
-        self.__threading_call__(self.__get_registry_scanning_configuration__)
-        self.__threading_call__(self.__list_tags_for_resource__)
+        self.__threading_call__(self._describe_registries_and_repositories)
+        self.__threading_call__(self._describe_repository_policies)
+        self.__threading_call__(self._get_image_details)
+        self.__threading_call__(self._get_repository_lifecycle_policy)
+        self.__threading_call__(self._get_registry_scanning_configuration)
+        self.__threading_call__(self._list_tags_for_resource)

-    def __describe_registries_and_repositories__(self, regional_client):
+    def _describe_registries_and_repositories(self, regional_client):
        logger.info("ECR - Describing registries and repositories...")
        regional_registry_repositories = []
        try:

@@ -64,7 +64,7 @@ class ECR(AWSService):
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

-    def __describe_repository_policies__(self, regional_client):
+    def _describe_repository_policies(self, regional_client):
        logger.info("ECR - Describing repository policies...")
        try:
            if regional_client.region in self.registries:

@@ -91,7 +91,7 @@ class ECR(AWSService):
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

-    def __get_repository_lifecycle_policy__(self, regional_client):
+    def _get_repository_lifecycle_policy(self, regional_client):
        logger.info("ECR - Getting repository lifecycle policy...")
        try:
            if regional_client.region in self.registries:

@@ -119,7 +119,7 @@ class ECR(AWSService):
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

-    def __get_image_details__(self, regional_client):
+    def _get_image_details(self, regional_client):
        logger.info("ECR - Getting images details...")
        try:
            if regional_client.region in self.registries:
@@ -139,55 +139,108 @@ class ECR(AWSService):
|
||||
# The following condition is required since sometimes
|
||||
# the AWS ECR API returns None using the iterator
|
||||
if image is not None:
|
||||
severity_counts = None
|
||||
last_scan_status = None
|
||||
if "imageScanStatus" in image:
|
||||
last_scan_status = image["imageScanStatus"][
|
||||
"status"
|
||||
]
|
||||
|
||||
if "imageScanFindingsSummary" in image:
|
||||
severity_counts = FindingSeverityCounts(
critical=0, high=0, medium=0
)
finding_severity_counts = image[
artifact_media_type = image.get(
"artifactMediaType", None
)
tags = image.get("imageTags", [])
if ECR._is_artifact_scannable(
artifact_media_type, tags
):
severity_counts = None
last_scan_status = None
image_digest = image.get("imageDigest")
latest_tag = image.get("imageTags", ["None"])[0]
image_pushed_at = image.get("imagePushedAt")
image_scan_findings_field_name = (
"imageScanFindingsSummary"
]["findingSeverityCounts"]
if "CRITICAL" in finding_severity_counts:
severity_counts.critical = (
finding_severity_counts["CRITICAL"]
)
if "HIGH" in finding_severity_counts:
severity_counts.high = (
finding_severity_counts["HIGH"]
)
if "MEDIUM" in finding_severity_counts:
severity_counts.medium = (
finding_severity_counts["MEDIUM"]
)
latest_tag = "None"
if image.get("imageTags"):
latest_tag = image["imageTags"][0]
repository.images_details.append(
ImageDetails(
latest_tag=latest_tag,
image_pushed_at=image["imagePushedAt"],
latest_digest=image["imageDigest"],
scan_findings_status=last_scan_status,
scan_findings_severity_count=severity_counts,
)
)
# Sort the repository images by date pushed
repository.images_details.sort(
key=lambda image: image.image_pushed_at
)
if "docker" in artifact_media_type:
type = "Docker"
elif "oci" in artifact_media_type:
type = "OCI"
else:
type = ""

# If imageScanStatus is not present or imageScanFindingsSummary is missing,
# we need to call DescribeImageScanFindings because AWS' new version of
# basic scanning does not support imageScanFindingsSummary and imageScanStatus
# in the DescribeImages API.
if "imageScanStatus" not in image:
try:
# use "image" for scan findings to get data the same way as for an image
image = (
client.describe_image_scan_findings(
registryId=self.registries[
regional_client.region
].id,
repositoryName=repository.name,
imageId={
"imageDigest": image_digest
},
)
)
image_scan_findings_field_name = (
"imageScanFindings"
)
except (
client.exceptions.ImageNotFoundException
) as error:
logger.warning(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
continue
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
continue

if "imageScanStatus" in image:
last_scan_status = image["imageScanStatus"][
"status"
]

if image_scan_findings_field_name in image:
severity_counts = FindingSeverityCounts(
critical=0, high=0, medium=0
)
finding_severity_counts = image[
image_scan_findings_field_name
]["findingSeverityCounts"]
severity_counts.critical = (
finding_severity_counts.get(
"CRITICAL", 0
)
)
severity_counts.high = (
finding_severity_counts.get("HIGH", 0)
)
severity_counts.medium = (
finding_severity_counts.get("MEDIUM", 0)
)

repository.images_details.append(
ImageDetails(
latest_tag=latest_tag,
image_pushed_at=image_pushed_at,
latest_digest=image_digest,
scan_findings_status=last_scan_status,
scan_findings_severity_count=severity_counts,
artifact_media_type=artifact_media_type,
type=type,
)
)
# Sort the repository images by date pushed
repository.images_details.sort(
key=lambda image: image.image_pushed_at
)

except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)

def __list_tags_for_resource__(self, regional_client):
def _list_tags_for_resource(self, regional_client):
logger.info("ECR - List Tags...")
try:
if regional_client.region in self.registries:
@@ -215,7 +268,7 @@ class ECR(AWSService):
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)

def __get_registry_scanning_configuration__(self, regional_client):
def _get_registry_scanning_configuration(self, regional_client):
logger.info("ECR - Getting Registry Scanning Configuration...")
try:
if regional_client.region in self.registries:
@@ -251,6 +304,44 @@ class ECR(AWSService):
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)

@staticmethod
def _is_artifact_scannable(artifact_media_type: str, tags: list[str] = []) -> bool:
"""
Check if an artifact is scannable based on its media type and tags.

Args:
artifact_media_type (str): The media type of the artifact.
tags (list): The list of tags associated with the artifact.

Returns:
bool: True if the artifact is scannable, False otherwise.
"""
try:
if artifact_media_type is None:
return False

# Tools like GoogleContainerTools/jib use `application/vnd.oci.image.config.v1+json` also for signatures, which are not scannable.
# Luckily, these are tagged with sha256-<HASH-CODE>.sig, so that they can still be easily recognized.
for tag in tags:
if tag.startswith("sha256-") and tag.endswith(".sig"):
return False

scannable_artifact_media_types = [
"application/vnd.docker.container.image.v1+json", # Docker image configuration
"application/vnd.docker.image.rootfs.diff.tar", # Docker image layer as a tar archive
"application/vnd.docker.image.rootfs.diff.tar.gzip", # Docker image layer that is compressed using gzip
"application/vnd.oci.image.config.v1+json", # OCI image configuration, but also used by GoogleContainerTools/jib for signatures
"application/vnd.oci.image.layer.v1.tar", # Uncompressed OCI image layer
"application/vnd.oci.image.layer.v1.tar+gzip", # Compressed OCI image layer
]

return artifact_media_type in scannable_artifact_media_types
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
return False

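For reference, a minimal illustration of how this helper classifies artifacts under the rules above (the tag value below is hypothetical, the media types are the ones listed in the method):

# Docker image configurations are scannable
assert ECR._is_artifact_scannable("application/vnd.docker.container.image.v1+json")
# OCI image configs tagged as signatures (e.g. by jib/cosign) are skipped
assert not ECR._is_artifact_scannable("application/vnd.oci.image.config.v1+json", ["sha256-abcdef123456.sig"])
# Media types outside the allow-list, such as Notary signatures, are skipped
assert not ECR._is_artifact_scannable("application/vnd.cncf.notary.v2.signature")
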
class FindingSeverityCounts(BaseModel):
critical: int
@@ -264,6 +355,8 @@ class ImageDetails(BaseModel):
image_pushed_at: datetime
scan_findings_status: Optional[str]
scan_findings_severity_count: Optional[FindingSeverityCounts]
artifact_media_type: Optional[str]
type: str


class Repository(BaseModel):

@@ -15,7 +15,7 @@ class neptune_cluster_backup_enabled(Check):
report.status_extended = (
f"Neptune Cluster {cluster.name} does not have backup enabled."
)
if cluster.backup_retention_period > neptune_client.audit_config.get(
if cluster.backup_retention_period >= neptune_client.audit_config.get(
"minimum_backup_retention_period", 7
):
report.status = "PASS"

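The comparison change above (strict greater-than to greater-or-equal) means a retention period exactly equal to the configured minimum now passes the check; a rough sketch, assuming the default of 7 days used by the check:

minimum_backup_retention_period = 7  # default from audit_config above
backup_retention_period = 7
assert backup_retention_period >= minimum_backup_retention_period      # 4.3.1: PASS
assert not (backup_retention_period > minimum_backup_retention_period)  # before: FAIL
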
@@ -1,8 +1,6 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.s3.s3_client import s3_client
import importlib
import sys
import gc


class s3_bucket_object_lock(Check):
def execute(self):
@@ -25,7 +23,4 @@ class s3_bucket_object_lock(Check):
)
findings.append(report)


del sys.modules['prowler.providers.aws.services.s3.s3_client']
gc.collect()
return findings

@@ -7,273 +7,8 @@ from pydantic import BaseModel
from prowler.lib.logger import logger
from prowler.lib.scan_filters.scan_filters import is_resource_filtered
from prowler.providers.aws.lib.service.service import AWSService
import os
import dill as pickle
import os
import atexit
from collections import deque
from sys import getsizeof
import tempfile


class PaginatedList:
instance_counter = 0

def __init__(self, page_size=10):
self.page_size = page_size
self.file_paths = []
self.cache = {}
self.length = 0  # Track the length dynamically
self.instance_id = PaginatedList.instance_counter
PaginatedList.instance_counter += 1
self.temp_dir = tempfile.mkdtemp(prefix=f'paginated_list_{self.instance_id}_', dir='/Users/snaow/repos/prowler')
atexit.register(self.cleanup)

def _save_page(self, page_data, page_num):
file_path = os.path.join(self.temp_dir, f'page_{page_num}.pkl')
with open(file_path, 'wb') as f:
pickle.dump(page_data, f)
if page_num >= len(self.file_paths):
self.file_paths.append(file_path)
else:
self.file_paths[page_num] = file_path

def _load_page(self, page_num):
if page_num in self.cache:
return self.cache[page_num]
with open(self.file_paths[page_num], 'rb') as f:
page_data = pickle.load(f)
self.cache[page_num] = page_data
return page_data

def __getitem__(self, index):
if index < 0 or index >= self.length:
raise IndexError('Index out of range')
page_num = index // self.page_size
page_index = index % self.page_size
page_data = self._load_page(page_num)
return page_data[page_index]

def __setitem__(self, index, value):
if index < 0 or index >= self.length:
raise IndexError('Index out of range')
page_num = index // self.page_size
page_index = index % self.page_size
page_data = self._load_page(page_num)
page_data[page_index] = value
self.cache[page_num] = page_data
self._save_page(page_data, page_num)

def __delitem__(self, index):
if index < 0 or index >= self.length:
raise IndexError('Index out of range')
page_num = index // self.page_size
page_index = index % self.page_size
page_data = self._load_page(page_num)
del page_data[page_index]
self.cache[page_num] = page_data
self._save_page(page_data, page_num)
self.length -= 1

# Shift subsequent elements
for i in range(index, self.length):
next_page_num = (i + 1) // self.page_size
next_page_index = (i + 1) % self.page_size
if next_page_index == 0:
self._save_page(page_data, page_num)
page_num = next_page_num
page_data = self._load_page(page_num)
page_data[page_index] = page_data.pop(next_page_index)
page_index = next_page_index

# Save the last page
self._save_page(page_data, page_num)

# Remove the last page if it's empty
if self.length % self.page_size == 0:
os.remove(self.file_paths.pop())
self.cache.pop(page_num, None)

def __len__(self):
return self.length

def __iter__(self):
for page_num in range(len(self.file_paths)):
page_data = self._load_page(page_num)
for item in page_data:
yield item

def append(self, value):
page_num = self.length // self.page_size
page_index = self.length % self.page_size
if page_num >= len(self.file_paths):
self._save_page([], page_num)
page_data = self._load_page(page_num)
page_data.append(value)
self.cache[page_num] = page_data
self._save_page(page_data, page_num)
self.length += 1

def extend(self, values):
for value in values:
self.append(value)

def remove(self, value):
for index, item in enumerate(self):
if item == value:
del self[index]
return
raise ValueError(f"{value} not in list")

def pop(self, index=-1):
if self.length == 0:
raise IndexError("pop from empty list")
if index < 0:
index += self.length
value = self[index]
del self[index]
return value

def clear(self):
self.cache.clear()
self.file_paths = []
self.length = 0

def index(self, value, start=0, stop=None):
if stop is None:
stop = self.length
for i in range(start, stop):
if self[i] == value:
return i
raise ValueError(f"{value} is not in list")

def get(self, index, default=None):
try:
return self[index]
except IndexError:
return default

def cleanup(self):
if hasattr(self, 'file_paths'):
for file_path in self.file_paths:
if os.path.exists(file_path):
os.remove(file_path)
if os.path.exists(self.temp_dir):
os.rmdir(self.temp_dir)

def __del__(self):
self.cleanup()


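For context, a minimal sketch of how this disk-backed list would be exercised (values are illustrative only; the hard-coded dir= path above must exist and dill must be installed for the pages to be written):

buckets = PaginatedList(page_size=10)
buckets.extend(["bucket-1", "bucket-2", "bucket-3"])  # full pages are pickled to .pkl files
assert len(buckets) == 3
assert buckets[1] == "bucket-2"   # loads only the page holding index 1
for bucket in buckets:            # iteration streams one page at a time
    print(bucket)
buckets.cleanup()                 # delete the temporary page files
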
class PaginatedDict:
instance_counter = 0

def __init__(self, page_size=1):
self.page_size = page_size
self.file_paths = []
self.cache = {}
self.key_to_page = {}
self.length = 0  # Track the number of items
self.instance_id = PaginatedDict.instance_counter
PaginatedDict.instance_counter += 1
self.temp_dir = tempfile.mkdtemp(prefix=f'paginated_dict_{self.instance_id}_', dir='/Users/snaow/repos/prowler')
print(f"Temporary directory for instance {self.instance_id}: {self.temp_dir}")
atexit.register(self.cleanup)

def _save_page(self, page_data, page_num):
file_path = os.path.join(self.temp_dir, f'page_{page_num}.pkl')
with open(file_path, 'wb') as f:
pickle.dump(page_data, f)
if page_num >= len(self.file_paths):
self.file_paths.append(file_path)
else:
self.file_paths[page_num] = file_path

def _load_page(self, page_num):
if page_num in self.cache:
return self.cache[page_num]
with open(self.file_paths[page_num], 'rb') as f:
page_data = pickle.load(f)
self.cache[page_num] = page_data
return page_data

def __setitem__(self, key, value):
if key in self.key_to_page:
page_num = self.key_to_page[key]
page_data = self._load_page(page_num)
page_data[key] = value
else:
page_num = self.length // self.page_size
if page_num >= len(self.file_paths):
self._save_page({}, page_num)
page_data = self._load_page(page_num)
page_data[key] = value
self.key_to_page[key] = page_num
self.length += 1
self.cache[page_num] = page_data
self._save_page(page_data, page_num)

def __getitem__(self, key):
if key not in self.key_to_page:
raise KeyError(f"Key {key} not found")
page_num = self.key_to_page[key]
page_data = self._load_page(page_num)
return page_data[key]

def __delitem__(self, key):
if key not in self.key_to_page:
raise KeyError(f"Key {key} not found")
page_num = self.key_to_page[key]
page_data = self._load_page(page_num)
del page_data[key]
del self.key_to_page[key]
self.cache[page_num] = page_data
self._save_page(page_data, page_num)
self.length -= 1

def __len__(self):
return self.length

def __iter__(self):
for page_num in range(len(self.file_paths)):
page_data = self._load_page(page_num)
for key in page_data:
yield key

def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default

def keys(self):
for key in self:
yield key

def values(self):
for key in self:
yield self[key]

def items(self):
for key in self:
yield (key, self[key])

def clear(self):
self.cache.clear()
self.key_to_page.clear()
self.file_paths = []
self.length = 0

def cleanup(self):
for file_path in self.file_paths:
if os.path.exists(file_path):
os.remove(file_path)
if os.path.exists(self.temp_dir):
os.rmdir(self.temp_dir)

def __del__(self):
self.cleanup()

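And the dictionary counterpart, again as an illustrative sketch under the same assumptions (keys and values below are hypothetical):

tags = PaginatedDict(page_size=1)     # one key per page with the default above
tags["bucket-1"] = {"env": "prod"}    # new keys allocate (or reuse) a page on disk
tags["bucket-2"] = {"env": "dev"}
assert len(tags) == 2
assert tags.get("bucket-3") is None   # missing keys fall back to the default
for key, value in tags.items():       # keys stream page by page
    print(key, value)
tags.cleanup()
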
################## S3
class S3(AWSService):
def __init__(self, provider):
@@ -292,13 +27,9 @@ class S3(AWSService):
self.__threading_call__(self.__get_object_lock_configuration__, self.buckets)
self.__threading_call__(self.__get_bucket_tagging__, self.buckets)

def cleanup(self):
del self.regions_with_buckets
del self.buckets

def __list_buckets__(self, provider):
logger.info("S3 - Listing buckets...")
buckets = PaginatedList()
buckets = []
try:
list_buckets = self.client.list_buckets()
for bucket in list_buckets["Buckets"]:

@@ -10,114 +10,15 @@ from prowler.lib.logger import logger
|
||||
from prowler.lib.scan_filters.scan_filters import is_resource_filtered
|
||||
from prowler.providers.aws.lib.service.service import AWSService
|
||||
|
||||
import pickle
|
||||
import os
|
||||
import atexit
|
||||
from collections import deque
|
||||
from sys import getsizeof
|
||||
import tempfile
|
||||
from memory_profiler import profile
|
||||
|
||||
class PaginatedDict:
|
||||
instance_counter = 0
|
||||
|
||||
def __init__(self, page_size=100):
|
||||
self.page_size = page_size
|
||||
self.file_paths = []
|
||||
self.cache = {}
|
||||
self.key_to_page = {}
|
||||
self.length = 0 # Track the number of items
|
||||
self.instance_id = PaginatedDict.instance_counter
|
||||
PaginatedDict.instance_counter += 1
|
||||
self.temp_dir = tempfile.mkdtemp(prefix=f'paginated_dict_{self.instance_id}_', dir='/Users/snaow/repos/prowler')
|
||||
print(f"Temporary directory for instance {self.instance_id}: {self.temp_dir}")
|
||||
atexit.register(self.cleanup)
|
||||
|
||||
def _save_page(self, page_data, page_num):
|
||||
file_path = os.path.join(self.temp_dir, f'page_{page_num}.pkl')
|
||||
with open(file_path, 'wb') as f:
|
||||
pickle.dump(page_data, f)
|
||||
if page_num >= len(self.file_paths):
|
||||
self.file_paths.append(file_path)
|
||||
else:
|
||||
self.file_paths[page_num] = file_path
|
||||
|
||||
def _load_page(self, page_num):
|
||||
if page_num in self.cache:
|
||||
return self.cache[page_num]
|
||||
with open(self.file_paths[page_num], 'rb') as f:
|
||||
page_data = pickle.load(f)
|
||||
self.cache[page_num] = page_data
|
||||
return page_data
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
if key in self.key_to_page:
|
||||
page_num = self.key_to_page[key]
|
||||
page_data = self._load_page(page_num)
|
||||
page_data[key] = value
|
||||
else:
|
||||
page_num = self.length // self.page_size
|
||||
if page_num >= len(self.file_paths):
|
||||
self._save_page({}, page_num)
|
||||
page_data = self._load_page(page_num)
|
||||
page_data[key] = value
|
||||
self.key_to_page[key] = page_num
|
||||
self.length += 1
|
||||
self.cache[page_num] = page_data
|
||||
self._save_page(page_data, page_num)
|
||||
|
||||
def __getitem__(self, key):
|
||||
if key not in self.key_to_page:
|
||||
raise KeyError(f"Key {key} not found")
|
||||
page_num = self.key_to_page[key]
|
||||
page_data = self._load_page(page_num)
|
||||
return page_data[key]
|
||||
|
||||
def __delitem__(self, key):
|
||||
if key not in self.key_to_page:
|
||||
raise KeyError(f"Key {key} not found")
|
||||
page_num = self.key_to_page[key]
|
||||
page_data = self._load_page(page_num)
|
||||
del page_data[key]
|
||||
del self.key_to_page[key]
|
||||
self.cache[page_num] = page_data
|
||||
self._save_page(page_data, page_num)
|
||||
self.length -= 1
|
||||
|
||||
def __len__(self):
|
||||
return self.length
|
||||
|
||||
def __iter__(self):
|
||||
for page_num in range(len(self.file_paths)):
|
||||
page_data = self._load_page(page_num)
|
||||
for key in page_data:
|
||||
yield key
|
||||
|
||||
def cleanup(self):
|
||||
for file_path in self.file_paths:
|
||||
if os.path.exists(file_path):
|
||||
os.remove(file_path)
|
||||
if os.path.exists(self.temp_dir):
|
||||
os.rmdir(self.temp_dir)
|
||||
|
||||
def __del__(self):
|
||||
self.cleanup()
|
||||
|
||||
################## SSM
|
||||
class SSM(AWSService):
|
||||
def __init__(self, provider):
|
||||
# Call AWSService's __init__
|
||||
super().__init__(__class__.__name__, provider)
|
||||
paginated = 0
|
||||
if paginated == 1:
|
||||
self.documents = PaginatedDict()
|
||||
self.compliance_resources = PaginatedDict()
|
||||
self.managed_instances = PaginatedDict()
|
||||
else:
|
||||
self.documents = {}
|
||||
self.compliance_resources = {}
|
||||
self.managed_instances = {}
|
||||
|
||||
self.documents = {}
|
||||
self.compliance_resources = {}
|
||||
self.managed_instances = {}
|
||||
self.__threading_call__(self.__list_documents__)
|
||||
self.__threading_call__(self.__get_document__)
|
||||
self.__threading_call__(self.__describe_document_permission__)
|
||||
|
||||
@@ -23,7 +23,7 @@ packages = [
{include = "dashboard"}
]
readme = "README.md"
version = "4.3.0"
version = "4.3.1"

[tool.poetry.dependencies]
alive-progress = "3.1.5"
@@ -46,14 +46,14 @@ azure-mgmt-storage = "21.2.1"
azure-mgmt-subscription = "3.1.1"
azure-mgmt-web = "7.3.0"
azure-storage-blob = "12.21.0"
boto3 = "1.34.149"
botocore = "1.34.150"
boto3 = "1.34.151"
botocore = "1.34.151"
colorama = "0.4.6"
cryptography = "43.0.0"
dash = "2.17.1"
dash-bootstrap-components = "1.6.0"
detect-secrets = "1.5.0"
google-api-python-client = "2.138.0"
google-api-python-client = "2.139.0"
google-auth-httplib2 = ">=0.1,<0.3"
jsonschema = "4.23.0"
kubernetes = "30.1.0"

@@ -6,6 +6,7 @@ from argparse import Namespace
|
||||
from importlib.machinery import FileFinder
|
||||
from logging import DEBUG, ERROR
|
||||
from pkgutil import ModuleInfo
|
||||
from unittest import mock
|
||||
|
||||
from boto3 import client
|
||||
from colorama import Fore, Style
|
||||
@@ -15,6 +16,7 @@ from moto import mock_aws
|
||||
from prowler.lib.check.check import (
|
||||
exclude_checks_to_run,
|
||||
exclude_services_to_run,
|
||||
execute,
|
||||
list_categories,
|
||||
list_checks_json,
|
||||
list_modules,
|
||||
@@ -29,8 +31,16 @@ from prowler.lib.check.check import (
|
||||
)
|
||||
from prowler.lib.check.models import load_check_metadata
|
||||
from prowler.providers.aws.aws_provider import AwsProvider
|
||||
from prowler.providers.aws.services.accessanalyzer.accessanalyzer_service import (
|
||||
Analyzer,
|
||||
)
|
||||
from tests.lib.check.fixtures.bulk_checks_metadata import test_bulk_checks_metadata
|
||||
from tests.providers.aws.utils import AWS_REGION_US_EAST_1
|
||||
from tests.providers.aws.utils import (
|
||||
AWS_ACCOUNT_ARN,
|
||||
AWS_ACCOUNT_NUMBER,
|
||||
AWS_REGION_US_EAST_1,
|
||||
set_mocked_aws_provider,
|
||||
)
|
||||
|
||||
# AWS_ACCOUNT_NUMBER = "123456789012"
|
||||
# AWS_REGION = "us-east-1"
|
||||
@@ -792,6 +802,65 @@ class TestCheck:
|
||||
== '{\n "aws": [\n "awslambda_function_invoke_api_operations_cloudtrail_logging_enabled",\n "awslambda_function_no_secrets_in_code",\n "awslambda_function_no_secrets_in_variables",\n "awslambda_function_not_publicly_accessible",\n "awslambda_function_url_cors_policy",\n "awslambda_function_url_public",\n "awslambda_function_using_supported_runtimes"\n ]\n}'
|
||||
)
|
||||
|
||||
def test_execute(self):
|
||||
accessanalyzer_client = mock.MagicMock
|
||||
accessanalyzer_client.region = AWS_REGION_US_EAST_1
|
||||
accessanalyzer_client.analyzers = [
|
||||
Analyzer(
|
||||
arn=AWS_ACCOUNT_ARN,
|
||||
name=AWS_ACCOUNT_NUMBER,
|
||||
status="NOT_AVAILABLE",
|
||||
tags=[],
|
||||
type="",
|
||||
region=AWS_REGION_US_EAST_1,
|
||||
)
|
||||
]
|
||||
with mock.patch(
|
||||
"prowler.providers.aws.services.accessanalyzer.accessanalyzer_service.AccessAnalyzer",
|
||||
accessanalyzer_client,
|
||||
):
|
||||
findings = execute(
|
||||
service="accessanalyzer",
|
||||
check_name="accessanalyzer_enabled",
|
||||
global_provider=set_mocked_aws_provider(
|
||||
expected_checks=["accessanalyzer_enabled"]
|
||||
),
|
||||
services_executed={"accessanalyzer"},
|
||||
checks_executed={"accessanalyzer_enabled"},
|
||||
custom_checks_metadata=None,
|
||||
)
|
||||
assert len(findings) == 1
|
||||
|
||||
def test_execute_with_filtering_status(self):
|
||||
accessanalyzer_client = mock.MagicMock
|
||||
accessanalyzer_client.region = AWS_REGION_US_EAST_1
|
||||
accessanalyzer_client.analyzers = [
|
||||
Analyzer(
|
||||
arn=AWS_ACCOUNT_ARN,
|
||||
name=AWS_ACCOUNT_NUMBER,
|
||||
status="NOT_AVAILABLE",
|
||||
tags=[],
|
||||
type="",
|
||||
region=AWS_REGION_US_EAST_1,
|
||||
)
|
||||
]
|
||||
status = ["PASS"]
|
||||
with mock.patch(
|
||||
"prowler.providers.aws.services.accessanalyzer.accessanalyzer_service.AccessAnalyzer",
|
||||
accessanalyzer_client,
|
||||
):
|
||||
findings = execute(
|
||||
service="accessanalyzer",
|
||||
check_name="accessanalyzer_enabled",
|
||||
global_provider=set_mocked_aws_provider(
|
||||
status=status, expected_checks=["accessanalyzer_enabled"]
|
||||
),
|
||||
services_executed={"accessanalyzer"},
|
||||
checks_executed={"accessanalyzer_enabled"},
|
||||
custom_checks_metadata=None,
|
||||
)
|
||||
assert len(findings) == 0
|
||||
|
||||
def test_run_check(self, caplog):
|
||||
caplog.set_level(DEBUG)
|
||||
|
||||
|
||||
@@ -240,7 +240,7 @@ class TestSecurityHub:
|
||||
findings=asff.data,
|
||||
)
|
||||
|
||||
assert security_hub._findings_per_region is None
|
||||
assert security_hub._findings_per_region == {}
|
||||
|
||||
@patch("botocore.client.BaseClient._make_api_call", new=mock_make_api_call)
|
||||
def test_filter_security_hub_findings_per_region_disabled_region(self):
|
||||
@@ -259,24 +259,6 @@ class TestSecurityHub:
|
||||
|
||||
assert security_hub._findings_per_region == {AWS_REGION_EU_WEST_1: []}
|
||||
|
||||
@patch("botocore.client.BaseClient._make_api_call", new=mock_make_api_call)
|
||||
def test_filter_security_hub_findings_per_region_PASS_and_FAIL_statuses(self):
|
||||
findings = [generate_finding_output(status="PASS", region=AWS_REGION_EU_WEST_1)]
|
||||
asff = ASFF(findings=findings)
|
||||
|
||||
security_hub = SecurityHub(
|
||||
aws_session=session.Session(
|
||||
region_name=AWS_REGION_EU_WEST_1,
|
||||
),
|
||||
aws_account_id=AWS_ACCOUNT_NUMBER,
|
||||
aws_partition=AWS_COMMERCIAL_PARTITION,
|
||||
aws_security_hub_available_regions=[AWS_REGION_EU_WEST_1],
|
||||
findings=asff.data,
|
||||
status=["FAIL"],
|
||||
)
|
||||
|
||||
assert security_hub._findings_per_region == {AWS_REGION_EU_WEST_1: []}
|
||||
|
||||
@patch("botocore.client.BaseClient._make_api_call", new=mock_make_api_call)
|
||||
def test_filter_security_hub_findings_per_region_FAIL_and_FAIL_statuses(self):
|
||||
findings = [generate_finding_output(status="FAIL", region=AWS_REGION_EU_WEST_1)]
|
||||
@@ -290,7 +272,6 @@ class TestSecurityHub:
|
||||
aws_partition=AWS_COMMERCIAL_PARTITION,
|
||||
aws_security_hub_available_regions=[AWS_REGION_EU_WEST_1],
|
||||
findings=asff.data,
|
||||
status=["FAIL"],
|
||||
)
|
||||
|
||||
assert security_hub._findings_per_region == {
|
||||
@@ -310,7 +291,6 @@ class TestSecurityHub:
|
||||
aws_partition=AWS_COMMERCIAL_PARTITION,
|
||||
aws_security_hub_available_regions=[AWS_REGION_EU_WEST_1],
|
||||
findings=asff.data,
|
||||
status=[],
|
||||
send_only_fails=True,
|
||||
)
|
||||
|
||||
@@ -329,7 +309,6 @@ class TestSecurityHub:
|
||||
aws_partition=AWS_COMMERCIAL_PARTITION,
|
||||
aws_security_hub_available_regions=[AWS_REGION_EU_WEST_1],
|
||||
findings=asff.data,
|
||||
status=[],
|
||||
send_only_fails=True,
|
||||
)
|
||||
|
||||
@@ -350,11 +329,10 @@ class TestSecurityHub:
|
||||
aws_partition=AWS_COMMERCIAL_PARTITION,
|
||||
aws_security_hub_available_regions=[],
|
||||
findings=asff.data,
|
||||
status=[],
|
||||
send_only_fails=True,
|
||||
)
|
||||
|
||||
assert security_hub._findings_per_region is None
|
||||
assert security_hub._findings_per_region == {}
|
||||
|
||||
@patch("botocore.client.BaseClient._make_api_call", new=mock_make_api_call)
|
||||
def test_filter_security_hub_findings_per_region_muted_fail_with_send_sh_only_fails(
|
||||
@@ -375,7 +353,6 @@ class TestSecurityHub:
|
||||
aws_partition=AWS_COMMERCIAL_PARTITION,
|
||||
aws_security_hub_available_regions=[AWS_REGION_EU_WEST_1],
|
||||
findings=asff.data,
|
||||
status=[],
|
||||
send_only_fails=True,
|
||||
)
|
||||
|
||||
@@ -400,7 +377,6 @@ class TestSecurityHub:
|
||||
aws_partition=AWS_COMMERCIAL_PARTITION,
|
||||
aws_security_hub_available_regions=[AWS_REGION_EU_WEST_1],
|
||||
findings=asff.data,
|
||||
status=["FAIL"],
|
||||
send_only_fails=True,
|
||||
)
|
||||
|
||||
|
||||
@@ -108,6 +108,44 @@ class Test_documentdb_cluster_backup_enabled:
|
||||
assert result[0].resource_id == DOC_DB_CLUSTER_NAME
|
||||
assert result[0].resource_arn == DOC_DB_CLUSTER_ARN
|
||||
|
||||
def test_documentdb_cluster_with_backup_equal_to_recommended(self):
|
||||
documentdb_client = mock.MagicMock
|
||||
documentdb_client.db_clusters = {
|
||||
DOC_DB_CLUSTER_ARN: DBCluster(
|
||||
id=DOC_DB_CLUSTER_NAME,
|
||||
arn=DOC_DB_CLUSTER_ARN,
|
||||
engine="docdb",
|
||||
status="available",
|
||||
backup_retention_period=7,
|
||||
encrypted=True,
|
||||
cloudwatch_logs=[],
|
||||
multi_az=True,
|
||||
parameter_group="default.docdb3.6",
|
||||
deletion_protection=True,
|
||||
region=AWS_REGION,
|
||||
tags=[],
|
||||
)
|
||||
}
|
||||
documentdb_client.audit_config = {"minimum_backup_retention_period": 7}
|
||||
with mock.patch(
|
||||
"prowler.providers.aws.services.documentdb.documentdb_service.DocumentDB",
|
||||
new=documentdb_client,
|
||||
):
|
||||
from prowler.providers.aws.services.documentdb.documentdb_cluster_backup_enabled.documentdb_cluster_backup_enabled import (
|
||||
documentdb_cluster_backup_enabled,
|
||||
)
|
||||
|
||||
check = documentdb_cluster_backup_enabled()
|
||||
result = check.execute()
|
||||
assert result[0].status == "PASS"
|
||||
assert (
|
||||
result[0].status_extended
|
||||
== f"DocumentDB Cluster {DOC_DB_CLUSTER_NAME} has backup enabled with retention period 7 days."
|
||||
)
|
||||
assert result[0].region == AWS_REGION
|
||||
assert result[0].resource_id == DOC_DB_CLUSTER_NAME
|
||||
assert result[0].resource_arn == DOC_DB_CLUSTER_ARN
|
||||
|
||||
def test_documentdb_cluster_with_backup(self):
|
||||
documentdb_client = mock.MagicMock
|
||||
documentdb_client.db_clusters = {
|
||||
|
||||
@@ -18,6 +18,11 @@ repository_arn = (
|
||||
f"arn:aws:ecr:eu-west-1:{AWS_ACCOUNT_NUMBER}:repository/{repository_name}"
|
||||
)
|
||||
latest_tag = "test-tag"
|
||||
latest_digest = "test-digest"
|
||||
docker_container_image_artifact_media_type = (
|
||||
"application/vnd.docker.container.image.v1+json"
|
||||
)
|
||||
oci_media_type = "application/vnd.oci.artifact.v1+json"
|
||||
repo_policy_public = {
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
@@ -32,8 +37,6 @@ repo_policy_public = {
|
||||
|
||||
|
||||
class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
# Mocked Audit Info
|
||||
|
||||
def test_no_registries(self):
|
||||
ecr_client = mock.MagicMock
|
||||
ecr_client.registries = {}
|
||||
@@ -118,7 +121,7 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
result = check.execute()
|
||||
assert len(result) == 0
|
||||
|
||||
def test_image_scaned_without_findings(self):
|
||||
def test_docker_image_scaned_without_findings(self):
|
||||
ecr_client = mock.MagicMock
|
||||
ecr_client.registries = {}
|
||||
ecr_client.registries[AWS_REGION_EU_WEST_1] = Registry(
|
||||
@@ -135,12 +138,14 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
images_details=[
|
||||
ImageDetails(
|
||||
latest_tag=latest_tag,
|
||||
latest_digest="test-digest",
|
||||
latest_digest=latest_digest,
|
||||
image_pushed_at=datetime(2023, 1, 1),
|
||||
scan_findings_status="COMPLETE",
|
||||
scan_findings_severity_count=FindingSeverityCounts(
|
||||
critical=0, high=0, medium=0
|
||||
),
|
||||
artifact_media_type=docker_container_image_artifact_media_type,
|
||||
type="Docker",
|
||||
),
|
||||
],
|
||||
lifecycle_policy=None,
|
||||
@@ -167,10 +172,70 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
assert result[0].status == "PASS"
|
||||
assert (
|
||||
result[0].status_extended
|
||||
== f"ECR repository {repository_name} has imageTag {latest_tag} scanned without findings."
|
||||
== f"ECR repository '{repository_name}' has scanned the Docker container image with digest '{latest_digest}' and tag '{latest_tag}' without findings."
|
||||
)
|
||||
assert result[0].resource_id == repository_name
|
||||
assert result[0].resource_arn == repository_arn
|
||||
assert result[0].region == AWS_REGION_EU_WEST_1
|
||||
assert result[0].resource_tags == []
|
||||
|
||||
def test_oci_image_scaned_without_findings(self):
|
||||
ecr_client = mock.MagicMock
|
||||
ecr_client.registries = {}
|
||||
ecr_client.registries[AWS_REGION_EU_WEST_1] = Registry(
|
||||
id=AWS_ACCOUNT_NUMBER,
|
||||
region=AWS_REGION_EU_WEST_1,
|
||||
scan_type="BASIC",
|
||||
repositories=[
|
||||
Repository(
|
||||
name=repository_name,
|
||||
arn=repository_arn,
|
||||
region=AWS_REGION_EU_WEST_1,
|
||||
scan_on_push=True,
|
||||
policy=repo_policy_public,
|
||||
images_details=[
|
||||
ImageDetails(
|
||||
latest_tag=latest_tag,
|
||||
latest_digest=latest_digest,
|
||||
image_pushed_at=datetime(2023, 1, 1),
|
||||
scan_findings_status="COMPLETE",
|
||||
scan_findings_severity_count=FindingSeverityCounts(
|
||||
critical=0, high=0, medium=0
|
||||
),
|
||||
artifact_media_type=docker_container_image_artifact_media_type,
|
||||
type="OCI",
|
||||
),
|
||||
],
|
||||
lifecycle_policy=None,
|
||||
)
|
||||
],
|
||||
rules=[],
|
||||
)
|
||||
ecr_client.audit_config = {}
|
||||
|
||||
with mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=set_mocked_aws_provider(),
|
||||
), mock.patch(
|
||||
"prowler.providers.aws.services.ecr.ecr_repositories_scan_vulnerabilities_in_latest_image.ecr_repositories_scan_vulnerabilities_in_latest_image.ecr_client",
|
||||
ecr_client,
|
||||
):
|
||||
from prowler.providers.aws.services.ecr.ecr_repositories_scan_vulnerabilities_in_latest_image.ecr_repositories_scan_vulnerabilities_in_latest_image import (
|
||||
ecr_repositories_scan_vulnerabilities_in_latest_image,
|
||||
)
|
||||
|
||||
check = ecr_repositories_scan_vulnerabilities_in_latest_image()
|
||||
result = check.execute()
|
||||
assert len(result) == 1
|
||||
assert result[0].status == "PASS"
|
||||
assert (
|
||||
result[0].status_extended
|
||||
== f"ECR repository '{repository_name}' has scanned the OCI container image with digest '{latest_digest}' and tag '{latest_tag}' without findings."
|
||||
)
|
||||
assert result[0].resource_id == repository_name
|
||||
assert result[0].resource_arn == repository_arn
|
||||
assert result[0].region == AWS_REGION_EU_WEST_1
|
||||
assert result[0].resource_tags == []
|
||||
|
||||
def test_image_scanned_with_findings_default_severity_MEDIUM(self):
|
||||
ecr_client = mock.MagicMock
|
||||
@@ -189,12 +254,14 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
images_details=[
|
||||
ImageDetails(
|
||||
latest_tag=latest_tag,
|
||||
latest_digest="test-digest",
|
||||
latest_digest=latest_digest,
|
||||
image_pushed_at=datetime(2023, 1, 1),
|
||||
scan_findings_status="COMPLETE",
|
||||
scan_findings_severity_count=FindingSeverityCounts(
|
||||
critical=12, high=34, medium=7
|
||||
),
|
||||
artifact_media_type=docker_container_image_artifact_media_type,
|
||||
type="Docker",
|
||||
)
|
||||
],
|
||||
lifecycle_policy=None,
|
||||
@@ -225,10 +292,12 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
assert result[0].status == "FAIL"
|
||||
assert (
|
||||
result[0].status_extended
|
||||
== f"ECR repository {repository_name} has imageTag {latest_tag} scanned with findings: CRITICAL->{12}, HIGH->{34}, MEDIUM->{7}."
|
||||
== f"ECR repository '{repository_name}' has scanned the Docker container image with digest '{latest_digest}' and tag '{latest_tag}' with findings: CRITICAL->{12}, HIGH->{34}, MEDIUM->{7}."
|
||||
)
|
||||
assert result[0].resource_id == repository_name
|
||||
assert result[0].resource_arn == repository_arn
|
||||
assert result[0].region == AWS_REGION_EU_WEST_1
|
||||
assert result[0].resource_tags == []
|
||||
|
||||
def test_image_scanned_with_findings_default_severity_HIGH(self):
|
||||
ecr_client = mock.MagicMock
|
||||
@@ -247,12 +316,14 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
images_details=[
|
||||
ImageDetails(
|
||||
latest_tag=latest_tag,
|
||||
latest_digest="test-digest",
|
||||
latest_digest=latest_digest,
|
||||
image_pushed_at=datetime(2023, 1, 1),
|
||||
scan_findings_status="COMPLETE",
|
||||
scan_findings_severity_count=FindingSeverityCounts(
|
||||
critical=12, high=34, medium=7
|
||||
),
|
||||
artifact_media_type=docker_container_image_artifact_media_type,
|
||||
type="Docker",
|
||||
)
|
||||
],
|
||||
lifecycle_policy=None,
|
||||
@@ -283,10 +354,12 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
assert result[0].status == "FAIL"
|
||||
assert (
|
||||
result[0].status_extended
|
||||
== f"ECR repository {repository_name} has imageTag {latest_tag} scanned with findings: CRITICAL->{12}, HIGH->{34}."
|
||||
== f"ECR repository '{repository_name}' has scanned the Docker container image with digest '{latest_digest}' and tag '{latest_tag}' with findings: CRITICAL->{12}, HIGH->{34}."
|
||||
)
|
||||
assert result[0].resource_id == repository_name
|
||||
assert result[0].resource_arn == repository_arn
|
||||
assert result[0].region == AWS_REGION_EU_WEST_1
|
||||
assert result[0].resource_tags == []
|
||||
|
||||
def test_image_scanned_with_findings_default_severity_CRITICAL(self):
|
||||
ecr_client = mock.MagicMock
|
||||
@@ -305,12 +378,14 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
images_details=[
|
||||
ImageDetails(
|
||||
latest_tag=latest_tag,
|
||||
latest_digest="test-digest",
|
||||
latest_digest=latest_digest,
|
||||
image_pushed_at=datetime(2023, 1, 1),
|
||||
scan_findings_status="COMPLETE",
|
||||
scan_findings_severity_count=FindingSeverityCounts(
|
||||
critical=12, high=34, medium=7
|
||||
),
|
||||
artifact_media_type=docker_container_image_artifact_media_type,
|
||||
type="Docker",
|
||||
)
|
||||
],
|
||||
lifecycle_policy=None,
|
||||
@@ -341,10 +416,12 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
assert result[0].status == "FAIL"
|
||||
assert (
|
||||
result[0].status_extended
|
||||
== f"ECR repository {repository_name} has imageTag {latest_tag} scanned with findings: CRITICAL->{12}."
|
||||
== f"ECR repository '{repository_name}' has scanned the Docker container image with digest '{latest_digest}' and tag '{latest_tag}' with findings: CRITICAL->{12}."
|
||||
)
|
||||
assert result[0].resource_id == repository_name
|
||||
assert result[0].resource_arn == repository_arn
|
||||
assert result[0].region == AWS_REGION_EU_WEST_1
|
||||
assert result[0].resource_tags == []
|
||||
|
||||
def test_image_scanned_without_CRITICAL_findings_default_severity_CRITICAL(self):
|
||||
ecr_client = mock.MagicMock
|
||||
@@ -363,12 +440,14 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
images_details=[
|
||||
ImageDetails(
|
||||
latest_tag=latest_tag,
|
||||
latest_digest="test-digest",
|
||||
latest_digest=latest_digest,
|
||||
image_pushed_at=datetime(2023, 1, 1),
|
||||
scan_findings_status="COMPLETE",
|
||||
scan_findings_severity_count=FindingSeverityCounts(
|
||||
critical=0, high=34, medium=7
|
||||
),
|
||||
artifact_media_type=docker_container_image_artifact_media_type,
|
||||
type="Docker",
|
||||
)
|
||||
],
|
||||
lifecycle_policy=None,
|
||||
@@ -399,7 +478,7 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
assert result[0].status == "PASS"
|
||||
assert (
|
||||
result[0].status_extended
|
||||
== f"ECR repository {repository_name} has imageTag {latest_tag} scanned without findings."
|
||||
== f"ECR repository '{repository_name}' has scanned the Docker container image with digest '{latest_digest}' and tag '{latest_tag}' without findings."
|
||||
)
|
||||
assert result[0].resource_id == repository_name
|
||||
assert result[0].resource_arn == repository_arn
|
||||
@@ -423,12 +502,14 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
images_details=[
|
||||
ImageDetails(
|
||||
latest_tag=latest_tag,
|
||||
latest_digest="test-digest",
|
||||
latest_digest=latest_digest,
|
||||
image_pushed_at=datetime(2023, 1, 1),
|
||||
scan_findings_status="COMPLETE",
|
||||
scan_findings_severity_count=FindingSeverityCounts(
|
||||
critical=0, high=0, medium=7
|
||||
),
|
||||
artifact_media_type=docker_container_image_artifact_media_type,
|
||||
type="Docker",
|
||||
)
|
||||
],
|
||||
lifecycle_policy=None,
|
||||
@@ -459,10 +540,12 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
assert result[0].status == "PASS"
|
||||
assert (
|
||||
result[0].status_extended
|
||||
== f"ECR repository {repository_name} has imageTag {latest_tag} scanned without findings."
|
||||
== f"ECR repository '{repository_name}' has scanned the Docker container image with digest '{latest_digest}' and tag '{latest_tag}' without findings."
|
||||
)
|
||||
assert result[0].resource_id == repository_name
|
||||
assert result[0].resource_arn == repository_arn
|
||||
assert result[0].region == AWS_REGION_EU_WEST_1
|
||||
assert result[0].resource_tags == []
|
||||
|
||||
def test_image_scanned_fail_scan(self):
|
||||
ecr_client = mock.MagicMock
|
||||
@@ -481,12 +564,14 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
images_details=[
|
||||
ImageDetails(
|
||||
latest_tag=latest_tag,
|
||||
latest_digest="test-digest",
|
||||
latest_digest=latest_digest,
|
||||
image_pushed_at=datetime(2023, 1, 1),
|
||||
scan_findings_status="FAILED",
|
||||
scan_findings_severity_count=FindingSeverityCounts(
|
||||
critical=0, high=0, medium=0
|
||||
),
|
||||
artifact_media_type=docker_container_image_artifact_media_type,
|
||||
type="Docker",
|
||||
)
|
||||
],
|
||||
lifecycle_policy=None,
|
||||
@@ -513,10 +598,12 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
assert result[0].status == "FAIL"
|
||||
assert (
|
||||
result[0].status_extended
|
||||
== f"ECR repository {repository_name} with scan status FAILED."
|
||||
== f"ECR repository '{repository_name}' has scanned the Docker container image with digest '{latest_digest}' and tag '{latest_tag}' with scan status FAILED."
|
||||
)
|
||||
assert result[0].resource_id == repository_name
|
||||
assert result[0].resource_arn == repository_arn
|
||||
assert result[0].region == AWS_REGION_EU_WEST_1
|
||||
assert result[0].resource_tags == []
|
||||
|
||||
def test_image_not_scanned(self):
|
||||
ecr_client = mock.MagicMock
|
||||
@@ -535,12 +622,14 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
images_details=[
|
||||
ImageDetails(
|
||||
latest_tag=latest_tag,
|
||||
latest_digest="test-digest",
|
||||
latest_digest=latest_digest,
|
||||
image_pushed_at=datetime(2023, 1, 1),
|
||||
scan_findings_status="",
|
||||
scan_findings_severity_count=FindingSeverityCounts(
|
||||
critical=0, high=0, medium=0
|
||||
),
|
||||
artifact_media_type=docker_container_image_artifact_media_type,
|
||||
type="Docker",
|
||||
)
|
||||
],
|
||||
lifecycle_policy=None,
|
||||
@@ -567,7 +656,9 @@ class Test_ecr_repositories_scan_vulnerabilities_in_latest_image:
|
||||
assert result[0].status == "FAIL"
|
||||
assert (
|
||||
result[0].status_extended
|
||||
== f"ECR repository {repository_name} has imageTag {latest_tag} without a scan."
|
||||
== f"ECR repository '{repository_name}' has scanned the Docker container image with digest '{latest_digest}' and tag '{latest_tag}' without a scan."
|
||||
)
|
||||
assert result[0].resource_id == repository_name
|
||||
assert result[0].resource_arn == repository_arn
|
||||
assert result[0].region == AWS_REGION_EU_WEST_1
|
||||
assert result[0].resource_tags == []
|
||||
|
||||
@@ -23,6 +23,7 @@ def mock_make_api_call(self, operation_name, kwarg):
|
||||
if operation_name == "DescribeImages":
|
||||
return {
|
||||
"imageDetails": [
|
||||
# Scannable image #1
|
||||
{
|
||||
"imageDigest": "sha256:d8868e50ac4c7104d2200d42f432b661b2da8c1e417ccfae217e6a1e04bb9295",
|
||||
"imageTags": [
|
||||
@@ -35,7 +36,9 @@ def mock_make_api_call(self, operation_name, kwarg):
|
||||
"imageScanFindingsSummary": {
|
||||
"findingSeverityCounts": {"CRITICAL": 1, "HIGH": 2, "MEDIUM": 3}
|
||||
},
|
||||
"artifactMediaType": "application/vnd.docker.container.image.v1+json",
|
||||
},
|
||||
# Scannable image #2
|
||||
{
|
||||
"imageDigest": "sha256:83251ac64627fc331584f6c498b3aba5badc01574e2c70b2499af3af16630eed",
|
||||
"imageTags": [
|
||||
@@ -48,6 +51,64 @@ def mock_make_api_call(self, operation_name, kwarg):
|
||||
"imageScanFindingsSummary": {
|
||||
"findingSeverityCounts": {"CRITICAL": 1, "HIGH": 2, "MEDIUM": 3}
|
||||
},
|
||||
"artifactMediaType": "application/vnd.docker.container.image.v1+json",
|
||||
},
|
||||
# Not scannable image
|
||||
{
|
||||
"imageDigest": "sha256:83251ac64627fc331584f6c498b3aba5badc01574e2c70b2499af3af16630eed",
|
||||
"imageTags": [
|
||||
"sha256-abcdefg123456.sig",
|
||||
],
|
||||
"imagePushedAt": datetime(2023, 1, 2),
|
||||
"artifactMediaType": "application/vnd.docker.container.image.v1+json",
|
||||
},
|
||||
# Scannable image #3
|
||||
{
|
||||
"imageDigest": "sha256:33251ac64627fc331584f6c498b3aba5badc01574e2c70b2499af3af16630eed",
|
||||
"imageTags": [
|
||||
"test-tag3",
|
||||
],
|
||||
"imagePushedAt": datetime(2023, 1, 2),
|
||||
"imageScanFindings": {
|
||||
"findingSeverityCounts": {"CRITICAL": 1, "HIGH": 2, "MEDIUM": 3}
|
||||
},
|
||||
"artifactMediaType": "application/vnd.docker.container.image.v1+json",
|
||||
},
|
||||
# Not scannable image
|
||||
{
|
||||
"imageDigest": "sha256:83251ac64627fc331584f6c498b3aba5badc01574e2c70b2499af3af16630eed",
|
||||
"imageTags": [
|
||||
"sha256-83251ac64627fc331584f6c498b3aba5badc01574e2c70b2499af3af16630eed.sig",
|
||||
],
|
||||
"imagePushedAt": datetime(2023, 1, 2),
|
||||
"imageScanStatus": {
|
||||
"status": "FAILED",
|
||||
},
|
||||
"artifactMediaType": "application/vnd.oci.image.config.v1+json",
|
||||
},
|
||||
# Not scannable image
|
||||
{
|
||||
"imageDigest": "sha256:83251ac64627fc331584f6c498b3aba5badc01574e2c70b2499af3af16630eed",
|
||||
"imageTags": [
|
||||
"test-tag2",
|
||||
],
|
||||
"imagePushedAt": datetime(2023, 1, 2),
|
||||
"imageScanStatus": {
|
||||
"status": "FAILED",
|
||||
},
|
||||
"artifactMediaType": "application/vnd.cncf.notary.v2.signature",
|
||||
},
|
||||
# Scannable image #4
|
||||
{
|
||||
"imageDigest": "sha256:43251ac64627fc331584f6c498b3aba5badc01574e2c70b2499af3af16630eed",
|
||||
"imageTags": [
|
||||
"test-tag4",
|
||||
],
|
||||
"imagePushedAt": datetime(2023, 1, 2),
|
||||
"imageScanStatus": {
|
||||
"status": "FAILED",
|
||||
},
|
||||
"artifactMediaType": "application/vnd.docker.container.image.v1+json",
|
||||
},
|
||||
],
|
||||
}
|
||||
@@ -79,6 +140,16 @@ def mock_make_api_call(self, operation_name, kwarg):
|
||||
},
|
||||
}
|
||||
|
||||
if operation_name == "DescribeImageScanFindings":
|
||||
return {
|
||||
"imageScanStatus": {
|
||||
"status": "COMPLETE",
|
||||
},
|
||||
"imageScanFindings": {
|
||||
"findingSeverityCounts": {"CRITICAL": 3, "HIGH": 4, "MEDIUM": 5}
|
||||
},
|
||||
}
|
||||
|
||||
return make_api_call(self, operation_name, kwarg)
|
||||
|
||||
|
||||
@@ -111,14 +182,14 @@ class Test_ECR_Service:
|
||||
assert regional_client.__class__.__name__ == "ECR"
|
||||
|
||||
# Test ECR session
|
||||
def test__get_session__(self):
|
||||
def test_get_session(self):
|
||||
aws_provider = set_mocked_aws_provider()
|
||||
ecr = ECR(aws_provider)
|
||||
assert ecr.session.__class__.__name__ == "Session"
|
||||
|
||||
# Test describe ECR repositories
|
||||
@mock_aws
|
||||
def test__describe_registries_and_repositories__(self):
|
||||
def test_describe_registries_and_repositories(self):
|
||||
ecr_client = client("ecr", region_name=AWS_REGION_EU_WEST_1)
|
||||
ecr_client.create_repository(
|
||||
repositoryName=repo_name,
|
||||
@@ -144,7 +215,7 @@ class Test_ECR_Service:
|
||||
|
||||
# Test describe ECR repository policies
|
||||
@mock_aws
|
||||
def test__describe_repository_policies__(self):
|
||||
def test_describe_repository_policies(self):
|
||||
ecr_client = client("ecr", region_name=AWS_REGION_EU_WEST_1)
|
||||
ecr_client.create_repository(
|
||||
repositoryName=repo_name,
|
||||
@@ -154,43 +225,25 @@ class Test_ECR_Service:
|
||||
ecr = ECR(aws_provider)
|
||||
assert len(ecr.registries) == 1
|
||||
assert len(ecr.registries[AWS_REGION_EU_WEST_1].repositories) == 1
|
||||
assert ecr.registries[AWS_REGION_EU_WEST_1].repositories[0].name == repo_name
|
||||
assert ecr.registries[AWS_REGION_EU_WEST_1].repositories[0].arn == repo_arn
|
||||
assert ecr.registries[AWS_REGION_EU_WEST_1].repositories[0].scan_on_push
|
||||
|
||||
repository = ecr.registries[AWS_REGION_EU_WEST_1].repositories[0]
|
||||
assert repository.name == repo_name
|
||||
assert repository.arn == repo_arn
|
||||
assert repository.scan_on_push
|
||||
assert repository.policy["Statement"][0]["Sid"] == "Allow Describe Images"
|
||||
assert repository.policy["Statement"][0]["Effect"] == "Allow"
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.policy["Statement"][0]["Sid"]
|
||||
== "Allow Describe Images"
|
||||
)
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.policy["Statement"][0]["Effect"]
|
||||
== "Allow"
|
||||
)
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.policy["Statement"][0]["Principal"]["AWS"][0]
|
||||
repository.policy["Statement"][0]["Principal"]["AWS"][0]
|
||||
== f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:root"
|
||||
)
|
||||
assert repository.policy["Statement"][0]["Action"][0] == "ecr:DescribeImages"
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.policy["Statement"][0]["Action"][0]
|
||||
== "ecr:DescribeImages"
|
||||
)
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.policy["Statement"][0]["Action"][1]
|
||||
== "ecr:DescribeRepositories"
|
||||
repository.policy["Statement"][0]["Action"][1] == "ecr:DescribeRepositories"
|
||||
)
|
||||
|
||||
# Test describe ECR repository lifecycle policies
|
||||
@mock_aws
|
||||
def test__get_lifecycle_policies__(self):
|
||||
def test_get_lifecycle_policies(self):
|
||||
ecr_client = client("ecr", region_name=AWS_REGION_EU_WEST_1)
|
||||
ecr_client.create_repository(
|
||||
repositoryName=repo_name,
|
||||
@@ -207,7 +260,7 @@ class Test_ECR_Service:
|
||||
|
||||
# Test get image details
|
||||
@mock_aws
|
||||
def test__get_image_details__(self):
|
||||
def test_get_image_details(self):
|
||||
ecr_client = client("ecr", region_name=AWS_REGION_EU_WEST_1)
|
||||
ecr_client.create_repository(
|
||||
repositoryName=repo_name,
|
||||
@@ -215,6 +268,7 @@ class Test_ECR_Service:
|
||||
)
|
||||
aws_provider = set_mocked_aws_provider()
|
||||
ecr = ECR(aws_provider)
|
||||
|
||||
assert len(ecr.registries) == 1
|
||||
assert len(ecr.registries[AWS_REGION_EU_WEST_1].repositories) == 1
|
||||
assert ecr.registries[AWS_REGION_EU_WEST_1].repositories[0].name == repo_name
|
||||
@@ -222,19 +276,14 @@ class Test_ECR_Service:
|
||||
assert ecr.registries[AWS_REGION_EU_WEST_1].repositories[0].scan_on_push
|
||||
assert (
|
||||
len(ecr.registries[AWS_REGION_EU_WEST_1].repositories[0].images_details)
|
||||
== 2
|
||||
== 4
|
||||
)
|
||||
# First image pushed
|
||||
assert ecr.registries[AWS_REGION_EU_WEST_1].repositories[0].images_details[
|
||||
0
|
||||
].image_pushed_at == datetime(2023, 1, 1)
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.images_details[0]
|
||||
.latest_tag
|
||||
== "test-tag1"
|
||||
first_image = (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1].repositories[0].images_details[0]
|
||||
)
|
||||
assert first_image.image_pushed_at == datetime(2023, 1, 1)
|
||||
assert first_image.latest_tag == "test-tag1"
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
@@ -242,85 +291,74 @@ class Test_ECR_Service:
|
||||
.latest_digest
|
||||
== "sha256:d8868e50ac4c7104d2200d42f432b661b2da8c1e417ccfae217e6a1e04bb9295"
|
||||
)
|
||||
assert first_image.scan_findings_status == "COMPLETE"
|
||||
assert first_image.scan_findings_severity_count.critical == 1
|
||||
assert first_image.scan_findings_severity_count.high == 2
|
||||
assert first_image.scan_findings_severity_count.medium == 3
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.images_details[0]
|
||||
.scan_findings_status
|
||||
== "COMPLETE"
|
||||
)
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.images_details[0]
|
||||
.scan_findings_severity_count.critical
|
||||
== 1
|
||||
)
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.images_details[0]
|
||||
.scan_findings_severity_count.high
|
||||
== 2
|
||||
)
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.images_details[0]
|
||||
.scan_findings_severity_count.medium
|
||||
== 3
|
||||
first_image.artifact_media_type
|
||||
== "application/vnd.docker.container.image.v1+json"
|
||||
)
|
||||
|
||||
# Second image pushed
|
||||
assert ecr.registries[AWS_REGION_EU_WEST_1].repositories[0].images_details[
|
||||
1
|
||||
].image_pushed_at == datetime(2023, 1, 2)
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.images_details[1]
|
||||
.latest_tag
|
||||
== "test-tag2"
|
||||
second_image = (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1].repositories[0].images_details[1]
|
||||
)
|
||||
assert second_image.image_pushed_at == datetime(2023, 1, 2)
|
||||
assert second_image.latest_tag == "test-tag2"
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.images_details[1]
|
||||
.latest_digest
|
||||
second_image.latest_digest
|
||||
== "sha256:83251ac64627fc331584f6c498b3aba5badc01574e2c70b2499af3af16630eed"
|
||||
)
|
||||
assert second_image.scan_findings_status == "COMPLETE"
|
||||
assert second_image.scan_findings_severity_count.critical == 1
|
||||
assert second_image.scan_findings_severity_count.high == 2
|
||||
assert second_image.scan_findings_severity_count.medium == 3
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.images_details[1]
|
||||
.scan_findings_status
|
||||
== "COMPLETE"
|
||||
second_image.artifact_media_type
|
||||
== "application/vnd.docker.container.image.v1+json"
|
||||
)
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.images_details[1]
|
||||
.scan_findings_severity_count.critical
|
||||
== 1
|
||||
|
||||
# Third image pushed
|
||||
third_image = (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1].repositories[0].images_details[2]
|
||||
)
|
||||
assert third_image.image_pushed_at == datetime(2023, 1, 2)
|
||||
assert third_image.latest_tag == "test-tag3"
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.images_details[1]
|
||||
.scan_findings_severity_count.high
|
||||
== 2
|
||||
third_image.latest_digest
|
||||
== "sha256:33251ac64627fc331584f6c498b3aba5badc01574e2c70b2499af3af16630eed"
|
||||
)
|
||||
assert third_image.scan_findings_status == "COMPLETE"
|
||||
assert third_image.scan_findings_severity_count.critical == 3
|
||||
assert third_image.scan_findings_severity_count.high == 4
|
||||
assert third_image.scan_findings_severity_count.medium == 5
|
||||
assert (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1]
|
||||
.repositories[0]
|
||||
.images_details[1]
|
||||
.scan_findings_severity_count.medium
|
||||
== 3
|
||||
third_image.artifact_media_type
|
||||
== "application/vnd.docker.container.image.v1+json"
|
||||
)
|
||||
|
||||
# Fourth image pushed
|
||||
fourth_image = (
|
||||
ecr.registries[AWS_REGION_EU_WEST_1].repositories[0].images_details[3]
|
||||
)
|
||||
assert fourth_image.image_pushed_at == datetime(2023, 1, 2)
|
||||
assert fourth_image.latest_tag == "test-tag4"
|
||||
assert (
|
||||
fourth_image.latest_digest
|
||||
== "sha256:43251ac64627fc331584f6c498b3aba5badc01574e2c70b2499af3af16630eed"
|
||||
)
|
||||
|
||||
assert fourth_image.scan_findings_status == "FAILED"
|
||||
assert fourth_image.scan_findings_severity_count is None
|
||||
assert (
|
||||
fourth_image.artifact_media_type
|
||||
== "application/vnd.docker.container.image.v1+json"
|
||||
)
|
||||
|
||||
# Test get ECR Registries Scanning Configuration
|
||||
@mock_aws
|
||||
def test__get_registry_scanning_configuration__(self):
|
||||
def test_get_registry_scanning_configuration(self):
|
||||
aws_provider = set_mocked_aws_provider()
|
||||
ecr = ECR(aws_provider)
|
||||
assert len(ecr.registries) == 1
|
||||
@@ -332,3 +370,41 @@ class Test_ECR_Service:
|
||||
scan_filters=[{"filter": "*", "filterType": "WILDCARD"}],
|
||||
)
|
||||
]
|
||||
|
||||
def test_is_artifact_scannable_docker(self):
|
||||
assert ECR._is_artifact_scannable(
|
||||
"application/vnd.docker.container.image.v1+json"
|
||||
)
|
||||
|
||||
def test_is_artifact_scannable_layer_tar(self):
|
||||
assert ECR._is_artifact_scannable(
|
||||
"application/vnd.docker.image.rootfs.diff.tar"
|
||||
)
|
||||
|
||||
def test_is_artifact_scannable_layer_gzip(self):
|
||||
assert ECR._is_artifact_scannable(
|
||||
"application/vnd.docker.image.rootfs.diff.tar.gzip"
|
||||
)
|
||||
|
||||
def test_is_artifact_scannable_oci(self):
|
||||
assert ECR._is_artifact_scannable("application/vnd.oci.image.config.v1+json")
|
||||
|
||||
def test_is_artifact_scannable_oci_tar(self):
|
||||
assert ECR._is_artifact_scannable("application/vnd.oci.image.layer.v1.tar")
|
||||
|
||||
def test_is_artifact_scannable_oci_compressed(self):
|
||||
assert ECR._is_artifact_scannable("application/vnd.oci.image.layer.v1.tar+gzip")
|
||||
|
||||
def test_is_artifact_scannable_none(self):
|
||||
assert not ECR._is_artifact_scannable(None)
|
||||
|
||||
def test_is_artifact_scannable_empty(self):
|
||||
assert not ECR._is_artifact_scannable("")
|
||||
|
||||
def test_is_artifact_scannable_non_scannable_tags(self):
|
||||
assert not ECR._is_artifact_scannable("", ["sha256-abcdefg123456.sig"])
|
||||
|
||||
def test_is_artifact_scannable_scannable_tags(self):
|
||||
assert ECR._is_artifact_scannable(
|
||||
"application/vnd.docker.container.image.v1+json", ["abcdefg123456"]
|
||||
)
|
||||
|
||||
@@ -169,6 +169,61 @@ class Test_neptune_cluster_backup_enabled:
|
||||
)
|
||||
assert result[0].resource_tags == []
|
||||
|
||||
@mock_aws
|
||||
def test_neptune_cluster_with_backup_equal_to_recommended(self):
|
||||
conn = client("neptune", region_name=AWS_REGION_US_EAST_1)
|
||||
conn.create_db_parameter_group(
|
||||
DBParameterGroupName="test",
|
||||
DBParameterGroupFamily="default.neptune",
|
||||
Description="test parameter group",
|
||||
)
|
||||
conn.create_db_cluster(
|
||||
DBClusterIdentifier="db-cluster-1",
|
||||
Engine="neptune",
|
||||
DatabaseName="test-1",
|
||||
DeletionProtection=True,
|
||||
DBClusterParameterGroupName="test",
|
||||
MasterUsername="test",
|
||||
MasterUserPassword="password",
|
||||
EnableIAMDatabaseAuthentication=True,
|
||||
BackupRetentionPeriod=7,
|
||||
StorageEncrypted=True,
|
||||
Tags=[],
|
||||
)
|
||||
from prowler.providers.aws.services.neptune.neptune_service import Neptune
|
||||
|
||||
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
|
||||
|
||||
with mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=aws_provider,
|
||||
):
|
||||
with mock.patch(
|
||||
"prowler.providers.aws.services.neptune.neptune_cluster_backup_enabled.neptune_cluster_backup_enabled.neptune_client",
|
||||
new=Neptune(aws_provider),
|
||||
):
|
||||
# Test Check
|
||||
from prowler.providers.aws.services.neptune.neptune_cluster_backup_enabled.neptune_cluster_backup_enabled import (
|
||||
neptune_cluster_backup_enabled,
|
||||
)
|
||||
|
||||
check = neptune_cluster_backup_enabled()
|
||||
result = check.execute()
|
||||
|
||||
assert len(result) == 1
|
||||
assert result[0].status == "PASS"
|
||||
assert (
|
||||
result[0].status_extended
|
||||
== "Neptune Cluster db-cluster-1 has backup enabled with retention period 7 days."
|
||||
)
|
||||
assert result[0].resource_id == "db-cluster-1"
|
||||
assert result[0].region == AWS_REGION_US_EAST_1
|
||||
assert (
|
||||
result[0].resource_arn
|
||||
== f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:cluster:db-cluster-1"
|
||||
)
|
||||
assert result[0].resource_tags == []
|
||||
|
||||
@mock_aws
|
||||
def test_neptune_cluster_with_backup(self):
|
||||
conn = client("neptune", region_name=AWS_REGION_US_EAST_1)
|
||||
|
||||
@@ -107,6 +107,7 @@ def set_mocked_aws_provider(
|
||||
original_session: session.Session = None,
|
||||
enabled_regions: set = None,
|
||||
arguments: Namespace = Namespace(),
|
||||
status: list[str] = [],
|
||||
create_default_organization: bool = True,
|
||||
) -> AwsProvider:
|
||||
if create_default_organization:
|
||||
@@ -114,12 +115,13 @@ def set_mocked_aws_provider(
|
||||
create_default_aws_organization()
|
||||
|
||||
# Default arguments
|
||||
arguments = set_default_provider_arguments(arguments)
|
||||
arguments = set_default_provider_arguments(arguments, status)
|
||||
|
||||
# AWS Provider
|
||||
provider = AwsProvider(arguments)
|
||||
|
||||
# Output options
|
||||
|
||||
provider.output_options = arguments, {}
|
||||
|
||||
# Mock Session
|
||||
@@ -156,8 +158,10 @@ def set_mocked_aws_provider(
|
||||
return provider
|
||||
|
||||
|
||||
def set_default_provider_arguments(arguments: Namespace) -> Namespace:
|
||||
arguments.status = []
|
||||
def set_default_provider_arguments(
|
||||
arguments: Namespace, status: list = []
|
||||
) -> Namespace:
|
||||
arguments.status = status
|
||||
arguments.output_formats = []
|
||||
arguments.output_directory = ""
|
||||
arguments.verbose = False
|
||||
|
||||