Mirror of https://github.com/prowler-cloud/prowler.git (synced 2026-01-25 02:08:11 +00:00)

Compare commits (1 commit): f1f0609697
@@ -153,7 +153,7 @@ jobs:
        run: |
          curl https://api.github.com/repos/${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}/dispatches \
          -H "Accept: application/vnd.github+json" \
-         -H "Authorization: Bearer ${{ secrets.ACCESS_TOKEN }}" \
+         -H "Authorization: Bearer ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}" \
          -H "X-GitHub-Api-Version: 2022-11-28" \
          --data '{"event_type":"dispatch","client_payload":{"version":"v3-latest", "tag": "${{ env.LATEST_COMMIT_HASH }}"}}'

@@ -162,6 +162,6 @@ jobs:
        run: |
          curl https://api.github.com/repos/${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}/dispatches \
          -H "Accept: application/vnd.github+json" \
-         -H "Authorization: Bearer ${{ secrets.ACCESS_TOKEN }}" \
+         -H "Authorization: Bearer ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}" \
          -H "X-GitHub-Api-Version: 2022-11-28" \
          --data '{"event_type":"dispatch","client_payload":{"version":"release", "tag":"${{ needs.container-build-push.outputs.prowler_version }}"}}'
.github/workflows/find-secrets.yml (vendored, 2 changed lines)

@@ -11,7 +11,7 @@ jobs:
        with:
          fetch-depth: 0
      - name: TruffleHog OSS
-       uses: trufflesecurity/trufflehog@v3.81.10
+       uses: trufflesecurity/trufflehog@v3.82.6
        with:
          path: ./
          base: ${{ github.event.repository.default_branch }}
@@ -63,9 +63,9 @@ It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, Fe

  | Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/misc/#categories) |
  |---|---|---|---|---|
- | AWS | 415 | 67 -> `prowler aws --list-services` | 28 -> `prowler aws --list-compliance` | 9 -> `prowler aws --list-categories` |
- | GCP | 77 | 13 -> `prowler gcp --list-services` | 1 -> `prowler gcp --list-compliance` | 2 -> `prowler gcp --list-categories` |
- | Azure | 135 | 16 -> `prowler azure --list-services` | 2 -> `prowler azure --list-compliance` | 2 -> `prowler azure --list-categories` |
+ | AWS | 457 | 67 -> `prowler aws --list-services` | 30 -> `prowler aws --list-compliance` | 9 -> `prowler aws --list-categories` |
+ | GCP | 77 | 13 -> `prowler gcp --list-services` | 2 -> `prowler gcp --list-compliance` | 2 -> `prowler gcp --list-categories` |
+ | Azure | 136 | 17 -> `prowler azure --list-services` | 3 -> `prowler azure --list-compliance` | 2 -> `prowler azure --list-categories` |
  | Kubernetes | 83 | 7 -> `prowler kubernetes --list-services` | 1 -> `prowler kubernetes --list-compliance` | 7 -> `prowler kubernetes --list-categories` |

  # 💻 Installation
@@ -2223,3 +2223,232 @@ def get_section_containers_ens(data, section_1, section_2, section_3, section_4)
        section_containers.append(section_container)

    return html.Div(section_containers, className="compliance-data-layout")


# This function extracts and compares up to two numeric values, ensuring correct sorting for version-like strings.
def extract_numeric_values(value):
    numbers = re.findall(r"\d+", str(value))
    if len(numbers) >= 2:
        return int(numbers[0]), int(numbers[1])
    elif len(numbers) == 1:
        return int(numbers[0]), 0
    return 0, 0
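Before the hunk continues with the accordion builder below, a quick illustration of the sort key this helper returns; the sample values are invented for this note and are not part of the diff:

```python
# extract_numeric_values yields a numeric (major, minor) key, so version-like
# strings order numerically instead of lexicographically.
assert extract_numeric_values("2.10") == (2, 10)
assert extract_numeric_values("2.9") == (2, 9)
assert extract_numeric_values("Section 3") == (3, 0)
assert extract_numeric_values("no digits here") == (0, 0)

# A plain string sort would give ['2.10', '2.2', '2.9']; the key fixes that:
print(sorted(["2.10", "2.2", "2.9"], key=extract_numeric_values))
# -> ['2.2', '2.9', '2.10']
```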

def get_section_containers_kisa_ismsp(data, section_1, section_2):
    data["STATUS"] = data["STATUS"].apply(map_status_to_icon)
    data[section_1] = data[section_1].astype(str)
    data[section_2] = data[section_2].astype(str)
    data.sort_values(
        by=section_1,
        key=lambda x: x.map(extract_numeric_values),
        ascending=True,
        inplace=True,
    )

    findings_counts_section = (
        data.groupby([section_2, "STATUS"]).size().unstack(fill_value=0)
    )
    findings_counts_name = (
        data.groupby([section_1, "STATUS"]).size().unstack(fill_value=0)
    )

    section_containers = []

    for name in data[section_1].unique():
        success_name = (
            findings_counts_name.loc[name, pass_emoji]
            if pass_emoji in findings_counts_name.columns
            else 0
        )
        failed_name = (
            findings_counts_name.loc[name, fail_emoji]
            if fail_emoji in findings_counts_name.columns
            else 0
        )

        fig_name = go.Figure(
            data=[
                go.Bar(
                    name="Failed",
                    x=[failed_name],
                    y=[""],
                    orientation="h",
                    marker=dict(color="#e77676"),
                    width=[0.8],
                ),
                go.Bar(
                    name="Success",
                    x=[success_name],
                    y=[""],
                    orientation="h",
                    marker=dict(color="#45cc6e"),
                    width=[0.8],
                ),
            ]
        )

        fig_name.update_layout(
            barmode="stack",
            margin=dict(l=10, r=10, t=10, b=10),
            paper_bgcolor="rgba(0,0,0,0)",
            plot_bgcolor="rgba(0,0,0,0)",
            showlegend=False,
            width=350,
            height=30,
            xaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
            yaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
            annotations=[
                dict(
                    x=success_name + failed_name,
                    y=0,
                    xref="x",
                    yref="y",
                    text=str(success_name),
                    showarrow=False,
                    font=dict(color="#45cc6e", size=14),
                    xanchor="left",
                    yanchor="middle",
                ),
                dict(
                    x=0,
                    y=0,
                    xref="x",
                    yref="y",
                    text=str(failed_name),
                    showarrow=False,
                    font=dict(color="#e77676", size=14),
                    xanchor="right",
                    yanchor="middle",
                ),
            ],
        )

        graph_name = dcc.Graph(
            figure=fig_name, config={"staticPlot": True}, className="info-bar"
        )

        graph_div = html.Div(graph_name, className="graph-section")

        direct_internal_items = []

        for section in data[data[section_1] == name][section_2].unique():
            specific_data = data[
                (data[section_1] == name) & (data[section_2] == section)
            ]
            success_section = (
                findings_counts_section.loc[section, pass_emoji]
                if pass_emoji in findings_counts_section.columns
                else 0
            )
            failed_section = (
                findings_counts_section.loc[section, fail_emoji]
                if fail_emoji in findings_counts_section.columns
                else 0
            )

            data_table = dash_table.DataTable(
                data=specific_data.to_dict("records"),
                columns=[
                    {"name": i, "id": i}
                    for i in ["CHECKID", "STATUS", "REGION", "ACCOUNTID", "RESOURCEID"]
                ],
                style_table={"overflowX": "auto"},
                style_as_list_view=True,
                style_cell={"textAlign": "left", "padding": "5px"},
            )

            fig_section = go.Figure(
                data=[
                    go.Bar(
                        name="Failed",
                        x=[failed_section],
                        y=[""],
                        orientation="h",
                        marker=dict(color="#e77676"),
                    ),
                    go.Bar(
                        name="Success",
                        x=[success_section],
                        y=[""],
                        orientation="h",
                        marker=dict(color="#45cc6e"),
                    ),
                ]
            )

            fig_section.update_layout(
                barmode="stack",
                margin=dict(l=10, r=10, t=10, b=10),
                paper_bgcolor="rgba(0,0,0,0)",
                plot_bgcolor="rgba(0,0,0,0)",
                showlegend=False,
                width=350,
                height=30,
                xaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
                yaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
                annotations=[
                    dict(
                        x=success_section + failed_section,
                        y=0,
                        xref="x",
                        yref="y",
                        text=str(success_section),
                        showarrow=False,
                        font=dict(color="#45cc6e", size=14),
                        xanchor="left",
                        yanchor="middle",
                    ),
                    dict(
                        x=0,
                        y=0,
                        xref="x",
                        yref="y",
                        text=str(failed_section),
                        showarrow=False,
                        font=dict(color="#e77676", size=14),
                        xanchor="right",
                        yanchor="middle",
                    ),
                ],
            )

            graph_section = dcc.Graph(
                figure=fig_section,
                config={"staticPlot": True},
                className="info-bar-child",
            )

            graph_div_section = html.Div(graph_section, className="graph-section-req")

            internal_accordion_item = dbc.AccordionItem(
                title=section,
                children=[html.Div([data_table], className="inner-accordion-content")],
            )

            internal_section_container = html.Div(
                [
                    graph_div_section,
                    dbc.Accordion(
                        [internal_accordion_item], start_collapsed=True, flush=True
                    ),
                ],
                className="accordion-inner--child",
            )

            direct_internal_items.append(internal_section_container)

        accordion_item = dbc.AccordionItem(
            title=f"{name}", children=direct_internal_items
        )
        section_container = html.Div(
            [
                graph_div,
                dbc.Accordion([accordion_item], start_collapsed=True, flush=True),
            ],
            className="accordion-inner",
        )

        section_containers.append(section_container)

    return html.Div(section_containers, className="compliance-data-layout")
dashboard/compliance/kisa_isms_p_2023_aws.py (new file, 25 lines)

@@ -0,0 +1,25 @@
import warnings

from dashboard.common_methods import get_section_containers_kisa_ismsp

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_ATTRIBUTES_SUBDOMAIN",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            # "REQUIREMENTS_DESCRIPTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_kisa_ismsp(
        aux, "REQUIREMENTS_ATTRIBUTES_SUBDOMAIN", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )
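For context, a hypothetical invocation of the module added above; the column names are the ones the module selects, the row values are invented for this note, and it assumes the repo's dashboard dependencies (pandas, dash) are installed:

```python
import pandas as pd

from dashboard.compliance.kisa_isms_p_2023_aws import get_table

# One made-up finding row; the eight columns match the selection in get_table.
sample = pd.DataFrame(
    [
        {
            "REQUIREMENTS_ID": "2.5.3",
            "REQUIREMENTS_ATTRIBUTES_SUBDOMAIN": "2.5. Authentication",
            "REQUIREMENTS_ATTRIBUTES_SECTION": "2.5.3. User Authentication",
            "CHECKID": "iam_root_mfa_enabled",
            "STATUS": "PASS",
            "REGION": "us-east-1",
            "ACCOUNTID": "123456789012",
            "RESOURCEID": "root",
        }
    ]
)

layout = get_table(sample)  # a Dash html.Div ready to mount in the dashboard layout
```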
dashboard/compliance/kisa_isms_p_2023_korean_aws.py (new file, 25 lines)

@@ -0,0 +1,25 @@
import warnings

from dashboard.common_methods import get_section_containers_kisa_ismsp

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_ATTRIBUTES_SUBDOMAIN",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            # "REQUIREMENTS_DESCRIPTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_kisa_ismsp(
        aux, "REQUIREMENTS_ATTRIBUTES_SUBDOMAIN", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )
@@ -272,7 +272,7 @@ Each Prowler check has metadata associated which is stored at the same level of
    # Severity holds the check's severity, always in lowercase (critical, high, medium, low or informational)
    "Severity": "critical",
    # ResourceType only for AWS, holds the type from here
-   # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html
+   # https://docs.aws.amazon.com/securityhub/latest/userguide/asff-resources.html
    "ResourceType": "Other",
    # Description holds the title of the check, for now is the same as CheckTitle
    "Description": "Ensure there are no EC2 AMIs set as Public.",
@@ -14,10 +14,8 @@ Once that is satisfied go ahead and clone your forked repo:
  git clone https://github.com/<your-github-user>/prowler
  cd prowler
  ```
- For isolation and avoid conflicts with other environments, we recommend usage of `poetry`:
- ```
- pip install poetry
- ```
+ For isolation and to avoid conflicts with other environments, we recommend using `poetry`, a Python dependency management tool. You can install it by following the instructions [here](https://python-poetry.org/docs/#installation).

  Then install all dependencies including the ones for developers:
  ```
  poetry install --with dev
docs/index.md (109 changed lines)

@@ -19,14 +19,40 @@ It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, Fe
  ## Quick Start
  ### Installation

- Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/), thus can be installed using pip with `Python >= 3.9`:
+ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/), thus can be installed as Python package with `Python >= 3.9`:

- === "Generic"
+ === "pipx"
+
+     [pipx](https://pipx.pypa.io/stable/) is a tool to install Python applications in isolated environments. It is recommended to use `pipx` for a global installation.

      _Requirements_:

      * `Python >= 3.9`
-     * `Python pip >= 3.9`
+     * `pipx` installed: [pipx installation](https://pipx.pypa.io/stable/installation/).
      * AWS, GCP, Azure and/or Kubernetes credentials

      _Commands_:

+     ``` bash
+     pipx install prowler
+     prowler -v
+     ```
+
+     To upgrade Prowler to the latest version, run:
+
+     ``` bash
+     pipx upgrade prowler
+     ```
+
+ === "pip"
+
+     ???+ warning
+         This method is not recommended because it will modify the environment which you choose to install. Consider using [pipx](https://docs.prowler.com/projects/prowler-open-source/en/latest/#__tabbed_1_1) for a global installation.
+
+     _Requirements_:
+
+     * `Python >= 3.9`
+     * `Python pip >= 21.0.0`
+     * AWS, GCP, Azure and/or Kubernetes credentials
+
+     _Commands_:
@@ -36,13 +62,19 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/),
      prowler -v
      ```

+     To upgrade Prowler to the latest version, run:
+
+     ``` bash
+     pip install --upgrade prowler
+     ```
+
  === "Docker"

      _Requirements_:

      * Have `docker` installed: https://docs.docker.com/get-docker/.
-     * AWS, GCP, Azure and/or Kubernetes credentials
      * In the command below, change `-v` to your local directory path in order to access the reports.
+     * AWS, GCP, Azure and/or Kubernetes credentials

      _Commands_:
@@ -54,41 +86,21 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/),
      --env AWS_SESSION_TOKEN toniblyx/prowler:latest
      ```

- === "Ubuntu"
-
-     _Requirements for Ubuntu 20.04.3 LTS_:
-
-     * AWS, GCP, Azure and/or Kubernetes credentials
-     * Install python 3.9 with: `sudo apt-get install python3.9`
-     * Remove python 3.8 to avoid conflicts if you can: `sudo apt-get remove python3.8`
-     * Make sure you have the python3 distutils package installed: `sudo apt-get install python3-distutils`
-     * To make sure you use pip for 3.9 get the get-pip script with: `curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py`
-     * Execute it with the proper python version: `sudo python3.9 get-pip.py`
-     * Now you should have pip for 3.9 ready: `pip3.9 --version`
-
-     _Commands_:
-
-     ```
-     pip3.9 install prowler
-     export PATH=$PATH:/home/$HOME/.local/bin/
-     prowler -v
-     ```
-
  === "GitHub"

      _Requirements for Developers_:

-     * `git`
-     * `poetry` installed: [poetry installation](https://python-poetry.org/docs/#installation).
      * AWS, GCP, Azure and/or Kubernetes credentials
+     * `git`, `Python >= 3.9`, `pip` and `poetry` installed (`pip install poetry`)

      _Commands_:

      ```
      git clone https://github.com/prowler-cloud/prowler
      cd prowler
-     poetry shell
      poetry install
-     python prowler.py -v
+     poetry run python prowler.py -v
      ```
      ???+ note
          If you want to clone Prowler from Windows, use `git config core.longpaths true` to allow long file paths.
@@ -97,15 +109,33 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/),

      _Requirements_:

+     * `Python >= 3.9`
      * AWS, GCP, Azure and/or Kubernetes credentials
-     * Latest Amazon Linux 2 should come with Python 3.9 already installed however it may need pip. Install Python pip 3.9 with: `sudo yum install -y python3-pip`.
-     * Make sure setuptools for python is already installed with: `pip3 install setuptools`

      _Commands_:

      ```
-     pip3.9 install prowler
-     export PATH=$PATH:/home/$HOME/.local/bin/
+     python3 -m pip install --user pipx
+     python3 -m pipx ensurepath
+     pipx install prowler
      prowler -v
      ```

+ === "Ubuntu"
+
+     _Requirements_:
+
+     * `Ubuntu 23.04` or above, if you are using an older version of Ubuntu check [pipx installation](https://docs.prowler.com/projects/prowler-open-source/en/latest/#__tabbed_1_1) and ensure you have `Python >= 3.9`.
+     * `Python >= 3.9`
+     * AWS, GCP, Azure and/or Kubernetes credentials
+
+     _Commands_:
+
+     ``` bash
+     sudo apt update
+     sudo apt install pipx
+     pipx ensurepath
+     pipx install prowler
+     prowler -v
+     ```
@@ -125,7 +155,7 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/),

  === "AWS CloudShell"

-     After the migration of AWS CloudShell from Amazon Linux 2 to Amazon Linux 2023 [[1]](https://aws.amazon.com/about-aws/whats-new/2023/12/aws-cloudshell-migrated-al2023/) [2](https://docs.aws.amazon.com/cloudshell/latest/userguide/cloudshell-AL2023-migration.html), there is no longer a need to manually compile Python 3.9 as it's already included in AL2023. Prowler can thus be easily installed following the Generic method of installation via pip. Follow the steps below to successfully execute Prowler v4 in AWS CloudShell:
+     After the migration of AWS CloudShell from Amazon Linux 2 to Amazon Linux 2023 [[1]](https://aws.amazon.com/about-aws/whats-new/2023/12/aws-cloudshell-migrated-al2023/) [[2]](https://docs.aws.amazon.com/cloudshell/latest/userguide/cloudshell-AL2023-migration.html), there is no longer a need to manually compile Python 3.9 as it's already included in AL2023. Prowler can thus be easily installed following the Generic method of installation via pip. Follow the steps below to successfully execute Prowler v4 in AWS CloudShell:

      _Requirements_:
@@ -133,11 +163,13 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/),

      _Commands_:

-     ```
+     ```bash
      sudo bash
      adduser prowler
      su prowler
-     pip install prowler
+     python3 -m pip install --user pipx
+     python3 -m pipx ensurepath
+     pipx install prowler
      cd /tmp
      prowler aws
      ```
@@ -153,9 +185,12 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/),

      _Commands_:

-     ```
-     pip install prowler
-     prowler -v
+     ```bash
+     python3 -m pip install --user pipx
+     python3 -m pipx ensurepath
+     pipx install prowler
+     cd /tmp
+     prowler azure --az-cli-auth
      ```

  ## Prowler container versions
@@ -13,52 +13,53 @@ The following list includes all the AWS checks with configurable variables that

  | Check Name | Value | Type |
  |---|---|---|
- | `iam_user_accesskey_unused` | `max_unused_access_keys_days` | Integer |
- | `iam_user_console_access_unused` | `max_console_access_days` | Integer |
- | `ec2_elastic_ip_shodan` | `shodan_api_key` | String |
- | `ec2_securitygroup_with_many_ingress_egress_rules` | `max_security_group_rules` | Integer |
- | `ec2_instance_older_than_specific_days` | `max_ec2_instance_age_in_days` | Integer |
- | `ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports` | `ec2_sg_high_risk_ports` | List of Integer |
- | `vpc_endpoint_connections_trust_boundaries` | `trusted_account_ids` | List of Strings |
- | `vpc_endpoint_services_allowed_principals_trust_boundaries` | `trusted_account_ids` | List of Strings |
- | `cloudwatch_log_group_retention_policy_specific_days_enabled` | `log_group_retention_days` | Integer |
- | `appstream_fleet_session_idle_disconnect_timeout` | `max_idle_disconnect_timeout_in_seconds` | Integer |
- | `appstream_fleet_session_disconnect_timeout` | `max_disconnect_timeout_in_seconds` | Integer |
- | `appstream_fleet_maximum_session_duration` | `max_session_duration_seconds` | Integer |
- | `awslambda_function_using_supported_runtimes` | `obsolete_lambda_runtimes` | Integer |
- | `organizations_scp_check_deny_regions` | `organizations_enabled_regions` | List of Strings |
- | `organizations_delegated_administrators` | `organizations_trusted_delegated_administrators` | List of Strings |
- | `ecr_repositories_scan_vulnerabilities_in_latest_image` | `ecr_repository_vulnerability_minimum_severity` | String |
- | `trustedadvisor_premium_support_plan_subscribed` | `verify_premium_support_plans` | Boolean |
- | `config_recorder_all_regions_enabled` | `mute_non_default_regions` | Boolean |
- | `drs_job_exist` | `mute_non_default_regions` | Boolean |
- | `guardduty_is_enabled` | `mute_non_default_regions` | Boolean |
- | `securityhub_enabled` | `mute_non_default_regions` | Boolean |
- | `cloudtrail_threat_detection_privilege_escalation` | `threat_detection_privilege_escalation_entropy` | Integer |
- | `cloudtrail_threat_detection_privilege_escalation` | `threat_detection_privilege_escalation_minutes` | Integer |
- | `cloudtrail_threat_detection_privilege_escalation` | `threat_detection_privilege_escalation_actions` | List of Strings |
- | `cloudtrail_threat_detection_enumeration` | `threat_detection_enumeration_entropy` | Integer |
- | `cloudtrail_threat_detection_enumeration` | `threat_detection_enumeration_minutes` | Integer |
- | `cloudtrail_threat_detection_enumeration` | `threat_detection_enumeration_actions` | List of Strings |
- | `codebuild_project_no_secrets_in_variables` | `excluded_sensitive_environment_variables` | List of Strings |
- | `rds_instance_backup_enabled` | `check_rds_instance_replicas` | Boolean |
- | `ec2_securitygroup_allow_ingress_from_internet_to_any_port` | `ec2_allowed_interface_types` | List of Strings |
- | `ec2_securitygroup_allow_ingress_from_internet_to_any_port` | `ec2_allowed_instance_owners` | List of Strings |
- | `acm_certificates_expiration_check` | `days_to_expire_threshold` | Integer |
- | `eks_control_plane_logging_all_types_enabled` | `eks_required_log_types` | List of Strings |
- | `eks_cluster_uses_a_supported_version` | `eks_cluster_oldest_version_supported` | String |
- | `elbv2_is_in_multiple_az` | `elbv2_min_azs` | Integer |
- | `elb_is_in_multiple_az` | `elb_min_azs` | Integer |
+ | `appstream_fleet_maximum_session_duration` | `max_session_duration_seconds` | Integer |
+ | `appstream_fleet_session_disconnect_timeout` | `max_disconnect_timeout_in_seconds` | Integer |
+ | `appstream_fleet_session_idle_disconnect_timeout` | `max_idle_disconnect_timeout_in_seconds` | Integer |
+ | `autoscaling_find_secrets_ec2_launch_configuration` | `secrets_ignore_patterns` | List of Strings |
+ | `awslambda_function_no_secrets_in_code` | `secrets_ignore_patterns` | List of Strings |
+ | `awslambda_function_no_secrets_in_variables` | `secrets_ignore_patterns` | List of Strings |
+ | `awslambda_function_using_supported_runtimes` | `obsolete_lambda_runtimes` | Integer |
+ | `awslambda_function_vpc_is_in_multi_azs` | `lambda_min_azs` | Integer |
+ | `cloudformation_stack_outputs_find_secrets` | `secrets_ignore_patterns` | List of Strings |
+ | `cloudtrail_threat_detection_enumeration` | `threat_detection_enumeration_actions` | List of Strings |
+ | `cloudtrail_threat_detection_enumeration` | `threat_detection_enumeration_entropy` | Integer |
+ | `cloudtrail_threat_detection_enumeration` | `threat_detection_enumeration_minutes` | Integer |
+ | `cloudtrail_threat_detection_privilege_escalation` | `threat_detection_privilege_escalation_actions` | List of Strings |
+ | `cloudtrail_threat_detection_privilege_escalation` | `threat_detection_privilege_escalation_entropy` | Integer |
+ | `cloudtrail_threat_detection_privilege_escalation` | `threat_detection_privilege_escalation_minutes` | Integer |
+ | `cloudwatch_log_group_no_secrets_in_logs` | `secrets_ignore_patterns` | List of Strings |
+ | `cloudwatch_log_group_retention_policy_specific_days_enabled` | `log_group_retention_days` | Integer |
+ | `codebuild_project_no_secrets_in_variables` | `excluded_sensitive_environment_variables` | List of Strings |
+ | `codebuild_project_no_secrets_in_variables` | `secrets_ignore_patterns` | List of Strings |
+ | `config_recorder_all_regions_enabled` | `mute_non_default_regions` | Boolean |
+ | `drs_job_exist` | `mute_non_default_regions` | Boolean |
+ | `ec2_elastic_ip_shodan` | `shodan_api_key` | String |
+ | `ec2_instance_older_than_specific_days` | `max_ec2_instance_age_in_days` | Integer |
+ | `ec2_instance_secrets_user_data` | `secrets_ignore_patterns` | List of Strings |
+ | `ec2_launch_template_no_secrets` | `secrets_ignore_patterns` | List of Strings |
+ | `ec2_securitygroup_allow_ingress_from_internet_to_any_port` | `ec2_allowed_instance_owners` | List of Strings |
+ | `ec2_securitygroup_allow_ingress_from_internet_to_any_port` | `ec2_allowed_interface_types` | List of Strings |
+ | `ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports` | `ec2_sg_high_risk_ports` | List of Integer |
+ | `ec2_securitygroup_with_many_ingress_egress_rules` | `max_security_group_rules` | Integer |
+ | `ecs_task_definitions_no_environment_secrets` | `secrets_ignore_patterns` | List of Strings |
+ | `ecr_repositories_scan_vulnerabilities_in_latest_image` | `ecr_repository_vulnerability_minimum_severity` | String |
+ | `eks_cluster_uses_a_supported_version` | `eks_cluster_oldest_version_supported` | String |
+ | `eks_control_plane_logging_all_types_enabled` | `eks_required_log_types` | List of Strings |
+ | `elb_is_in_multiple_az` | `elb_min_azs` | Integer |
+ | `elbv2_is_in_multiple_az` | `elbv2_min_azs` | Integer |
+ | `guardduty_is_enabled` | `mute_non_default_regions` | Boolean |
+ | `iam_user_accesskey_unused` | `max_unused_access_keys_days` | Integer |
+ | `iam_user_console_access_unused` | `max_console_access_days` | Integer |
+ | `organizations_delegated_administrators` | `organizations_trusted_delegated_administrators` | List of Strings |
+ | `organizations_scp_check_deny_regions` | `organizations_enabled_regions` | List of Strings |
+ | `rds_instance_backup_enabled` | `check_rds_instance_replicas` | Boolean |
+ | `securityhub_enabled` | `mute_non_default_regions` | Boolean |
+ | `ssm_document_secrets` | `secrets_ignore_patterns` | List of Strings |
+ | `trustedadvisor_premium_support_plan_subscribed` | `verify_premium_support_plans` | Boolean |
+ | `vpc_endpoint_connections_trust_boundaries` | `trusted_account_ids` | List of Strings |
+ | `vpc_endpoint_services_allowed_principals_trust_boundaries` | `trusted_account_ids` | List of Strings |

  ## Azure
@@ -157,6 +158,7 @@ aws:
    ]

    # AWS VPC Configuration (vpc_endpoint_connections_trust_boundaries, vpc_endpoint_services_allowed_principals_trust_boundaries)
+   # AWS SSM Configuration (aws.ssm_documents_set_as_public)
    # Single account environment: No action required. The AWS account number will be automatically added by the checks.
    # Multi account environment: Any additional trusted account number should be added as a space separated list, e.g.
    # trusted_account_ids : ["123456789012", "098765432109", "678901234567"]
poetry.lock (generated, 2233 changed lines): diff suppressed because it is too large.
@@ -54,6 +54,7 @@ from prowler.lib.outputs.compliance.compliance import display_compliance_table
  from prowler.lib.outputs.compliance.ens.ens_aws import AWSENS
  from prowler.lib.outputs.compliance.generic.generic import GenericCompliance
  from prowler.lib.outputs.compliance.iso27001.iso27001_aws import AWSISO27001
+ from prowler.lib.outputs.compliance.kisa_ismsp.kisa_ismsp_aws import AWSKISAISMSP
  from prowler.lib.outputs.compliance.mitre_attack.mitre_attack_aws import AWSMitreAttack
  from prowler.lib.outputs.compliance.mitre_attack.mitre_attack_azure import (
      AzureMitreAttack,
@@ -68,8 +69,12 @@ from prowler.lib.outputs.slack.slack import Slack
  from prowler.lib.outputs.summary_table import display_summary_table
  from prowler.providers.aws.lib.s3.s3 import S3
  from prowler.providers.aws.lib.security_hub.security_hub import SecurityHub
+ from prowler.providers.aws.models import AWSOutputOptions
+ from prowler.providers.azure.models import AzureOutputOptions
  from prowler.providers.common.provider import Provider
  from prowler.providers.common.quick_inventory import run_provider_quick_inventory
+ from prowler.providers.gcp.models import GCPOutputOptions
+ from prowler.providers.kubernetes.models import KubernetesOutputOptions


  def prowler():
@@ -190,7 +195,7 @@ def prowler():
          sys.exit()

      # Provider to scan
-     Provider.set_global_provider(args)
+     Provider.init_global_provider(args)
      global_provider = Provider.get_global_provider()

      # Print Provider Credentials
@@ -235,7 +240,22 @@ def prowler():
          global_provider.mutelist = args.mutelist_file

      # Setup Output Options
-     global_provider.output_options = (args, bulk_checks_metadata)
+     if provider == "aws":
+         output_options = AWSOutputOptions(
+             args, bulk_checks_metadata, global_provider.identity
+         )
+     elif provider == "azure":
+         output_options = AzureOutputOptions(
+             args, bulk_checks_metadata, global_provider.identity
+         )
+     elif provider == "gcp":
+         output_options = GCPOutputOptions(
+             args, bulk_checks_metadata, global_provider.identity
+         )
+     elif provider == "kubernetes":
+         output_options = KubernetesOutputOptions(
+             args, bulk_checks_metadata, global_provider.identity
+         )

      # Run the quick inventory for the provider if available
      if hasattr(args, "quick_inventory") and args.quick_inventory:
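As an aside on the hunk above: the per-provider branch could equally be written as a lookup table. A sketch, not what the commit ships; it reuses only the four classes imported earlier in this diff:

```python
# Equivalent provider dispatch as a lookup table; the class names come from the
# imports added in this diff, nothing new is assumed.
OUTPUT_OPTIONS_BY_PROVIDER = {
    "aws": AWSOutputOptions,
    "azure": AzureOutputOptions,
    "gcp": GCPOutputOptions,
    "kubernetes": KubernetesOutputOptions,
}

output_options = OUTPUT_OPTIONS_BY_PROVIDER[provider](
    args, bulk_checks_metadata, global_provider.identity
)
```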
@@ -251,6 +271,7 @@ def prowler():
              global_provider,
              custom_checks_metadata,
              args.config_file,
+             output_options,
          )
      else:
          logger.error(
@@ -258,7 +279,7 @@ def prowler():
      )

      # Prowler Fixer
-     if global_provider.output_options.fixer:
+     if output_options.fixer:
          print(f"{Style.BRIGHT}\nRunning Prowler Fixer, please wait...{Style.RESET_ALL}")
          # Check if there are any FAIL findings
          if any("FAIL" in finding.status for finding in findings):
@@ -304,7 +325,8 @@ def prowler():
      # TODO: this part is needed since the checks generates a Check_Report_XXX and the output uses Finding
      # This will be refactored for the outputs generate directly the Finding
      finding_outputs = [
-         Finding.generate_output(global_provider, finding) for finding in findings
+         Finding.generate_output(global_provider, finding, output_options)
+         for finding in findings
      ]

      generated_outputs = {"regular": [], "compliance": []}
@@ -312,8 +334,8 @@ def prowler():
      if args.output_formats:
          for mode in args.output_formats:
              filename = (
-                 f"{global_provider.output_options.output_directory}/"
-                 f"{global_provider.output_options.output_filename}"
+                 f"{output_options.output_directory}/"
+                 f"{output_options.output_filename}"
              )
              if mode == "csv":
                  csv_output = CSV(
@@ -355,16 +377,16 @@ def prowler():
          )

      # Compliance Frameworks
-     input_compliance_frameworks = set(
-         global_provider.output_options.output_modes
-     ).intersection(get_available_compliance_frameworks(provider))
+     input_compliance_frameworks = set(output_options.output_modes).intersection(
+         get_available_compliance_frameworks(provider)
+     )
      if provider == "aws":
          for compliance_name in input_compliance_frameworks:
              if compliance_name.startswith("cis_"):
                  # Generate CIS Finding Object
                  filename = (
-                     f"{global_provider.output_options.output_directory}/compliance/"
-                     f"{global_provider.output_options.output_filename}_{compliance_name}.csv"
+                     f"{output_options.output_directory}/compliance/"
+                     f"{output_options.output_filename}_{compliance_name}.csv"
                  )
                  cis = AWSCIS(
                      findings=finding_outputs,
@@ -377,8 +399,8 @@ def prowler():
              elif compliance_name == "mitre_attack_aws":
                  # Generate MITRE ATT&CK Finding Object
                  filename = (
-                     f"{global_provider.output_options.output_directory}/compliance/"
-                     f"{global_provider.output_options.output_filename}_{compliance_name}.csv"
+                     f"{output_options.output_directory}/compliance/"
+                     f"{output_options.output_filename}_{compliance_name}.csv"
                  )
                  mitre_attack = AWSMitreAttack(
                      findings=finding_outputs,

@@ -391,8 +413,8 @@ def prowler():
              elif compliance_name.startswith("ens_"):
                  # Generate ENS Finding Object
                  filename = (
-                     f"{global_provider.output_options.output_directory}/compliance/"
-                     f"{global_provider.output_options.output_filename}_{compliance_name}.csv"
+                     f"{output_options.output_directory}/compliance/"
+                     f"{output_options.output_filename}_{compliance_name}.csv"
                  )
                  ens = AWSENS(
                      findings=finding_outputs,

@@ -405,8 +427,8 @@ def prowler():
              elif compliance_name.startswith("aws_well_architected_framework"):
                  # Generate AWS Well-Architected Finding Object
                  filename = (
-                     f"{global_provider.output_options.output_directory}/compliance/"
-                     f"{global_provider.output_options.output_filename}_{compliance_name}.csv"
+                     f"{output_options.output_directory}/compliance/"
+                     f"{output_options.output_filename}_{compliance_name}.csv"
                  )
                  aws_well_architected = AWSWellArchitected(
                      findings=finding_outputs,

@@ -419,8 +441,8 @@ def prowler():
              elif compliance_name.startswith("iso27001_"):
                  # Generate ISO27001 Finding Object
                  filename = (
-                     f"{global_provider.output_options.output_directory}/compliance/"
-                     f"{global_provider.output_options.output_filename}_{compliance_name}.csv"
+                     f"{output_options.output_directory}/compliance/"
+                     f"{output_options.output_filename}_{compliance_name}.csv"
                  )
                  iso27001 = AWSISO27001(
                      findings=finding_outputs,

@@ -430,10 +452,24 @@ def prowler():
                  )
                  generated_outputs["compliance"].append(iso27001)
                  iso27001.batch_write_data_to_file()
+             elif compliance_name.startswith("kisa"):
+                 # Generate KISA-ISMS-P Finding Object
+                 filename = (
+                     f"{output_options.output_directory}/compliance/"
+                     f"{output_options.output_filename}_{compliance_name}.csv"
+                 )
+                 kisa_ismsp = AWSKISAISMSP(
+                     findings=finding_outputs,
+                     compliance=bulk_compliance_frameworks[compliance_name],
+                     create_file_descriptor=True,
+                     file_path=filename,
+                 )
+                 generated_outputs["compliance"].append(kisa_ismsp)
+                 kisa_ismsp.batch_write_data_to_file()
              else:
                  filename = (
-                     f"{global_provider.output_options.output_directory}/compliance/"
-                     f"{global_provider.output_options.output_filename}_{compliance_name}.csv"
+                     f"{output_options.output_directory}/compliance/"
+                     f"{output_options.output_filename}_{compliance_name}.csv"
                  )
                  generic_compliance = GenericCompliance(
                      findings=finding_outputs,

@@ -449,8 +485,8 @@ def prowler():
              if compliance_name.startswith("cis_"):
                  # Generate CIS Finding Object
                  filename = (
-                     f"{global_provider.output_options.output_directory}/compliance/"
-                     f"{global_provider.output_options.output_filename}_{compliance_name}.csv"
+                     f"{output_options.output_directory}/compliance/"
+                     f"{output_options.output_filename}_{compliance_name}.csv"
                  )
                  cis = AzureCIS(
                      findings=finding_outputs,

@@ -463,8 +499,8 @@ def prowler():
              elif compliance_name == "mitre_attack_azure":
                  # Generate MITRE ATT&CK Finding Object
                  filename = (
-                     f"{global_provider.output_options.output_directory}/compliance/"
-                     f"{global_provider.output_options.output_filename}_{compliance_name}.csv"
+                     f"{output_options.output_directory}/compliance/"
+                     f"{output_options.output_filename}_{compliance_name}.csv"
                  )
                  mitre_attack = AzureMitreAttack(
                      findings=finding_outputs,

@@ -476,8 +512,8 @@ def prowler():
                  mitre_attack.batch_write_data_to_file()
              else:
                  filename = (
-                     f"{global_provider.output_options.output_directory}/compliance/"
-                     f"{global_provider.output_options.output_filename}_{compliance_name}.csv"
+                     f"{output_options.output_directory}/compliance/"
+                     f"{output_options.output_filename}_{compliance_name}.csv"
                  )
                  generic_compliance = GenericCompliance(
                      findings=finding_outputs,

@@ -493,8 +529,8 @@ def prowler():
              if compliance_name.startswith("cis_"):
                  # Generate CIS Finding Object
                  filename = (
-                     f"{global_provider.output_options.output_directory}/compliance/"
-                     f"{global_provider.output_options.output_filename}_{compliance_name}.csv"
+                     f"{output_options.output_directory}/compliance/"
+                     f"{output_options.output_filename}_{compliance_name}.csv"
                  )
                  cis = GCPCIS(
                      findings=finding_outputs,

@@ -507,8 +543,8 @@ def prowler():
              elif compliance_name == "mitre_attack_gcp":
                  # Generate MITRE ATT&CK Finding Object
                  filename = (
-                     f"{global_provider.output_options.output_directory}/compliance/"
-                     f"{global_provider.output_options.output_filename}_{compliance_name}.csv"
+                     f"{output_options.output_directory}/compliance/"
+                     f"{output_options.output_filename}_{compliance_name}.csv"
                  )
                  mitre_attack = GCPMitreAttack(
                      findings=finding_outputs,

@@ -520,8 +556,8 @@ def prowler():
                  mitre_attack.batch_write_data_to_file()
              else:
                  filename = (
-                     f"{global_provider.output_options.output_directory}/compliance/"
-                     f"{global_provider.output_options.output_filename}_{compliance_name}.csv"
+                     f"{output_options.output_directory}/compliance/"
+                     f"{output_options.output_filename}_{compliance_name}.csv"
                  )
                  generic_compliance = GenericCompliance(
                      findings=finding_outputs,

@@ -537,8 +573,8 @@ def prowler():
              if compliance_name.startswith("cis_"):
                  # Generate CIS Finding Object
                  filename = (
-                     f"{global_provider.output_options.output_directory}/compliance/"
-                     f"{global_provider.output_options.output_filename}_{compliance_name}.csv"
+                     f"{output_options.output_directory}/compliance/"
+                     f"{output_options.output_filename}_{compliance_name}.csv"
                  )
                  cis = KubernetesCIS(
                      findings=finding_outputs,

@@ -550,8 +586,8 @@ def prowler():
                  cis.batch_write_data_to_file()
              else:
                  filename = (
-                     f"{global_provider.output_options.output_directory}/compliance/"
-                     f"{global_provider.output_options.output_filename}_{compliance_name}.csv"
+                     f"{output_options.output_directory}/compliance/"
+                     f"{output_options.output_filename}_{compliance_name}.csv"
                  )
                  generic_compliance = GenericCompliance(
                      findings=finding_outputs,

@@ -594,7 +630,7 @@ def prowler():
                  aws_partition=global_provider.identity.partition,
                  aws_session=global_provider.session.current_session,
                  findings=asff_output.data,
-                 send_only_fails=global_provider.output_options.send_sh_only_fails,
+                 send_only_fails=output_options.send_sh_only_fails,
                  aws_security_hub_available_regions=security_hub_regions,
              )
              # Send the findings to Security Hub

@@ -620,7 +656,7 @@ def prowler():
          display_summary_table(
              findings,
              global_provider,
-             global_provider.output_options,
+             output_options,
          )
          # Only display compliance table if there are findings (not all MANUAL) and it is a default execution
          if (

@@ -639,13 +675,13 @@ def prowler():
                  findings,
                  bulk_checks_metadata,
                  compliance,
-                 global_provider.output_options.output_filename,
-                 global_provider.output_options.output_directory,
+                 output_options.output_filename,
+                 output_options.output_directory,
                  compliance_overview,
              )
          if compliance_overview:
              print(
-                 f"\nDetailed compliance results are in {Fore.YELLOW}{global_provider.output_options.output_directory}/compliance/{Style.RESET_ALL}\n"
+                 f"\nDetailed compliance results are in {Fore.YELLOW}{output_options.output_directory}/compliance/{Style.RESET_ALL}\n"
              )

      # If custom checks were passed, remove the modules
prowler/compliance/aws/kisa_isms_p_2023_aws.json (new file, 4335 lines): diff suppressed because it is too large.
prowler/compliance/aws/kisa_isms_p_2023_korean_aws.json (new file, 4335 lines): diff suppressed because it is too large.
@@ -58,6 +58,7 @@ aws:
    ]

    # AWS VPC Configuration (vpc_endpoint_connections_trust_boundaries, vpc_endpoint_services_allowed_principals_trust_boundaries)
+   # AWS SSM Configuration (aws.ssm_documents_set_as_public)
    # Single account environment: No action required. The AWS account number will be automatically added by the checks.
    # Multi account environment: Any additional trusted account number should be added as a space separated list, e.g.
    # trusted_account_ids : ["123456789012", "098765432109", "678901234567"]

@@ -101,6 +102,8 @@ aws:
        "ruby2.5",
        "ruby2.7",
      ]
+   # aws.awslambda_function_vpc_is_in_multi_azs
+   lambda_min_azs: 2

    # AWS Organizations
    # aws.organizations_scp_check_deny_regions
@@ -325,35 +325,6 @@ def import_check(check_path: str) -> ModuleType:
      return lib


- def run_check(check: Check, verbose: bool = False, only_logs: bool = False) -> list:
-     """
-     Run the check and return the findings
-     Args:
-         check (Check): check class
-         output_options (Any): output options
-     Returns:
-         list: list of findings
-     """
-     findings = []
-     if verbose:
-         print(
-             f"\nCheck ID: {check.CheckID} - {Fore.MAGENTA}{check.ServiceName}{Fore.YELLOW} [{check.Severity}]{Style.RESET_ALL}"
-         )
-     logger.debug(f"Executing check: {check.CheckID}")
-     try:
-         findings = check.execute()
-     except Exception as error:
-         if not only_logs:
-             print(
-                 f"Something went wrong in {check.CheckID}, please use --log-level ERROR"
-             )
-         logger.error(
-             f"{check.CheckID} -- {error.__class__.__name__}[{traceback.extract_tb(error.__traceback__)[-1].lineno}]: {error}"
-         )
-     finally:
-         return findings
-
-
  def run_fixer(check_findings: list) -> int:
      """
      Run the fixer for the check if it exists and there are any FAIL findings
@@ -435,6 +406,7 @@ def execute_checks(
      global_provider: Any,
      custom_checks_metadata: Any,
      config_file: str,
+     output_options: Any,
  ) -> list:
      # List to store all the check's findings
      all_findings = []
@@ -470,18 +442,42 @@ def execute_checks(
                  f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
              )

+     # Set verbose flag
+     verbose = False
+     if hasattr(output_options, "verbose"):
+         verbose = output_options.verbose
+     elif hasattr(output_options, "fixer"):
+         verbose = output_options.fixer
+
      # Execution with the --only-logs flag
-     if global_provider.output_options.only_logs:
+     if output_options.only_logs:
          for check_name in checks_to_execute:
              # Recover service from check name
              service = check_name.split("_")[0]
-             try:
+             try:
+                 # Import check module
+                 check_module_path = f"prowler.providers.{global_provider.type}.services.{service}.{check_name}.{check_name}"
+                 lib = import_check(check_module_path)
+                 # Recover functions from check
+                 check_to_execute = getattr(lib, check_name)
+                 check = check_to_execute()
+             except ModuleNotFoundError:
+                 logger.error(
+                     f"Check '{check_name}' was not found for the {global_provider.type.upper()} provider"
+                 )
+                 continue
+             if verbose:
+                 print(
+                     f"\nCheck ID: {check.CheckID} - {Fore.MAGENTA}{check.ServiceName}{Fore.YELLOW} [{check.Severity}]{Style.RESET_ALL}"
+                 )
              check_findings = execute(
                  service,
                  check_name,
+                 check,
                  global_provider,
                  custom_checks_metadata,
+                 output_options,
              )
+             report(check_findings, global_provider, output_options)
              all_findings.extend(check_findings)

      # Update Audit Status
@@ -539,12 +535,31 @@ def execute_checks(
                          f"-> Scanning {orange_color}{service}{Style.RESET_ALL} service"
                      )
-                     try:
+                     try:
+                         # Import check module
+                         check_module_path = f"prowler.providers.{global_provider.type}.services.{service}.{check_name}.{check_name}"
+                         lib = import_check(check_module_path)
+                         # Recover functions from check
+                         check_to_execute = getattr(lib, check_name)
+                         check = check_to_execute()
+                     except ModuleNotFoundError:
+                         logger.error(
+                             f"Check '{check_name}' was not found for the {global_provider.type.upper()} provider"
+                         )
+                         continue
+                     if verbose:
+                         print(
+                             f"\nCheck ID: {check.CheckID} - {Fore.MAGENTA}{check.ServiceName}{Fore.YELLOW} [{check.Severity}]{Style.RESET_ALL}"
+                         )
                      check_findings = execute(
                          service,
                          check_name,
+                         check,
                          global_provider,
                          custom_checks_metadata,
+                         output_options,
                      )

+                     report(check_findings, global_provider, output_options)

                      all_findings.extend(check_findings)
                      services_executed.add(service)
                      checks_executed.add(check_name)
@@ -567,51 +582,79 @@ def execute_checks(
              )
              bar()
          bar.title = f"-> {Fore.GREEN}Scan completed!{Style.RESET_ALL}"

+     # Custom report interface
+     if os.environ.get("PROWLER_REPORT_LIB_PATH"):
+         try:
+             logger.info("Using custom report interface ...")
+             lib = os.environ["PROWLER_REPORT_LIB_PATH"]
+             outputs_module = importlib.import_module(lib)
+             custom_report_interface = getattr(outputs_module, "report")
+
+             # TODO: review this call and see if we can remove the global_provider.output_options since it is contained in the global_provider
+             custom_report_interface(check_findings, output_options, global_provider)
+         except Exception:
+             sys.exit(1)
+
      return all_findings


  def execute(
      service: str,
      check_name: str,
+     check: Check,
      global_provider: Any,
      custom_checks_metadata: Any,
+     output_options: Any = None,
  ):
-     try:
-         # Import check module
-         check_module_path = f"prowler.providers.{global_provider.type}.services.{service}.{check_name}.{check_name}"
-         lib = import_check(check_module_path)
-         # Recover functions from check
-         check_to_execute = getattr(lib, check_name)
-         check_class = check_to_execute()
+     """
+     Execute the check and report the findings
+
+     Args:
+         service (str): service name
+         check_name (str): check name
+         global_provider (Any): provider object
+         custom_checks_metadata (Any): custom checks metadata
+         output_options (Any): output options, depending on the provider
+
+     Returns:
+         list: list of findings
+     """
+     try:
          # Update check metadata to reflect that in the outputs
          if custom_checks_metadata and custom_checks_metadata["Checks"].get(
-             check_class.CheckID
+             check.CheckID
          ):
-             check_class = update_check_metadata(
-                 check_class, custom_checks_metadata["Checks"][check_class.CheckID]
+             check = update_check_metadata(
+                 check, custom_checks_metadata["Checks"][check.CheckID]
              )

-         # Run check
-         verbose = (
-             global_provider.output_options.verbose
-             or global_provider.output_options.fixer
-         )
-         check_findings = run_check(
-             check_class, verbose, global_provider.output_options.only_logs
-         )
+         only_logs = False
+         if hasattr(output_options, "only_logs"):
+             only_logs = output_options.only_logs
+
+         # Execute the check
+         check_findings = []
+         logger.debug(f"Executing check: {check.CheckID}")
+         try:
+             check_findings = check.execute()
+         except Exception as error:
+             if not only_logs:
+                 print(
+                     f"Something went wrong in {check.CheckID}, please use --log-level ERROR"
+                 )
+             logger.error(
+                 f"{check.CheckID} -- {error.__class__.__name__}[{traceback.extract_tb(error.__traceback__)[-1].lineno}]: {error}"
+             )

          # Exclude findings per status
-         if global_provider.output_options.status:
+         if hasattr(output_options, "status") and output_options.status:
              check_findings = [
                  finding
                  for finding in check_findings
-                 if finding.status in global_provider.output_options.status
+                 if finding.status in output_options.status
              ]

          # Mutelist findings
          # Before returning the findings, we need to apply the mute list logic
          if hasattr(global_provider, "mutelist") and global_provider.mutelist.mutelist:
              # TODO: make this prettier
              is_finding_muted_args = {}
              if global_provider.type == "aws":
                  is_finding_muted_args["aws_account_id"] = (
@@ -626,27 +669,9 @@ def execute(
                  **is_finding_muted_args
              )

-         # Refactor(Outputs)
-         # Report the check's findings
-         report(check_findings, global_provider)
-
-         # Refactor(Outputs)
-         if os.environ.get("PROWLER_REPORT_LIB_PATH"):
-             try:
-                 logger.info("Using custom report interface ...")
-                 lib = os.environ["PROWLER_REPORT_LIB_PATH"]
-                 outputs_module = importlib.import_module(lib)
-                 custom_report_interface = getattr(outputs_module, "report")
-
-                 # TODO: review this call and see if we can remove the global_provider.output_options since it is contained in the global_provider
-                 custom_report_interface(
-                     check_findings, global_provider.output_options, global_provider
-                 )
-             except Exception:
-                 sys.exit(1)
      except ModuleNotFoundError:
          logger.error(
-             f"Check '{check_name}' was not found for the {global_provider.type.upper()} provider"
+             f"Check '{check.CheckID}' was not found for the {global_provider.type.upper()} provider"
          )
          check_findings = []
      except Exception as error:
@@ -169,6 +169,19 @@ class Mitre_Requirement(BaseModel):
      Checks: list[str]


+ # KISA-ISMS-P Requirement Attribute
+ class KISA_ISMSP_Requirement_Attribute(BaseModel):
+     """KISA ISMS-P Requirement Attribute"""
+
+     Domain: str
+     Subdomain: str
+     Section: str
+     AuditChecklist: Optional[list[str]]
+     RelatedRegulations: Optional[list[str]]
+     AuditEvidence: Optional[list[str]]
+     NonComplianceCases: Optional[list[str]]
+
+
  # Base Compliance Model
  # TODO: move this to compliance folder
  class Compliance_Requirement(BaseModel):

@@ -183,6 +196,7 @@ class Compliance_Requirement(BaseModel):
          ENS_Requirement_Attribute,
          ISO27001_2013_Requirement_Attribute,
          AWS_Well_Architected_Requirement_Attribute,
+         KISA_ISMSP_Requirement_Attribute,
          # Generic_Compliance_Requirement_Attribute must be the last one since it is the fallback for generic compliance framework
          Generic_Compliance_Requirement_Attribute,
      ]
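A hypothetical instantiation of the new attribute model; the field values below are invented for illustration, the real ones live in the bundled kisa_isms_p_2023_aws.json:

```python
# Illustrative only: these strings are not taken from the shipped KISA JSON.
attribute = KISA_ISMSP_Requirement_Attribute(
    Domain="2. Protection Measure Requirements",
    Subdomain="2.5. Authentication and Authorization Management",
    Section="2.5.3. User Authentication",
    AuditChecklist=["Is access to information systems controlled with secure authentication?"],
    RelatedRegulations=None,
    AuditEvidence=["IAM console screenshot"],
    NonComplianceCases=None,
)
```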
@@ -89,7 +89,11 @@ class ASFF(Output):
                  CreatedAt=timestamp,
                  Severity=Severity(Label=finding.severity.value),
                  Title=finding.check_title,
-                 Description=finding.description,
+                 Description=(
+                     (finding.status_extended[:1000] + "...")
+                     if len(finding.status_extended) > 1000
+                     else finding.status_extended
+                 ),
                  Resources=[
                      Resource(
                          Id=finding.resource_uid,
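The new Description expression caps the ASFF Description at 1,000 characters plus an ellipsis (likely to respect the field's size limit). A standalone sketch of the same rule:

```python
def truncate_description(status_extended: str, limit: int = 1000) -> str:
    # Mirrors the conditional above: cap long text and append "...".
    if len(status_extended) > limit:
        return status_extended[:limit] + "..."
    return status_extended


assert len(truncate_description("A" * 1500)) == 1003  # 1000 chars + "..."
assert truncate_description("short") == "short"
```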
@@ -7,6 +7,7 @@ from prowler.lib.outputs.compliance.ens.ens import get_ens_table
  from prowler.lib.outputs.compliance.generic.generic_table import (
      get_generic_compliance_table,
  )
+ from prowler.lib.outputs.compliance.kisa_ismsp.kisa_ismsp import get_kisa_ismsp_table
  from prowler.lib.outputs.compliance.mitre_attack.mitre_attack import (
      get_mitre_attack_table,
  )

@@ -62,6 +63,15 @@ def display_compliance_table(
              output_directory,
              compliance_overview,
          )
+     elif "kisa_isms_" in compliance_framework:
+         get_kisa_ismsp_table(
+             findings,
+             bulk_checks_metadata,
+             compliance_framework,
+             output_filename,
+             output_directory,
+             compliance_overview,
+         )
      else:
          get_generic_compliance_table(
              findings,
89
prowler/lib/outputs/compliance/kisa_ismsp/kisa_ismsp.py
Normal file
89
prowler/lib/outputs/compliance/kisa_ismsp/kisa_ismsp.py
Normal file
@@ -0,0 +1,89 @@
|
||||
from colorama import Fore, Style
from tabulate import tabulate

from prowler.config.config import orange_color


def get_kisa_ismsp_table(
    findings: list,
    bulk_checks_metadata: dict,
    compliance_framework: str,
    output_filename: str,
    output_directory: str,
    compliance_overview: bool,
):
    sections = {}
    kisa_ismsp_compliance_table = {
        "Provider": [],
        "Section": [],
        "Status": [],
        "Muted": [],
    }
    pass_count = []
    fail_count = []
    muted_count = []
    for index, finding in enumerate(findings):
        check = bulk_checks_metadata[finding.check_metadata.CheckID]
        check_compliances = check.Compliance
        for compliance in check_compliances:
            if (
                compliance.Framework.startswith("KISA")
                and compliance.Version in compliance_framework
            ):
                for requirement in compliance.Requirements:
                    for attribute in requirement.Attributes:
                        section = attribute.Section
                        # Check if Section exists
                        if section not in sections:
                            sections[section] = {
                                "Status": f"{Fore.GREEN}PASS{Style.RESET_ALL}",
                                "Muted": 0,
                            }
                        if finding.muted:
                            if index not in muted_count:
                                muted_count.append(index)
                            sections[section]["Muted"] += 1
                        else:
                            if finding.status == "FAIL" and index not in fail_count:
                                fail_count.append(index)
                            elif finding.status == "PASS" and index not in pass_count:
                                pass_count.append(index)

    # Add results to table
    sections = dict(sorted(sections.items()))
    for section in sections:
        kisa_ismsp_compliance_table["Provider"].append(compliance.Provider)
        kisa_ismsp_compliance_table["Section"].append(section)
        kisa_ismsp_compliance_table["Muted"].append(
            f"{orange_color}{sections[section]['Muted']}{Style.RESET_ALL}"
        )
    if len(fail_count) + len(pass_count) + len(muted_count) > 1:
        print(
            f"\nCompliance Status of {Fore.YELLOW}{compliance_framework.upper()}{Style.RESET_ALL} Framework:"
        )
        overview_table = [
            [
                f"{Fore.RED}{round(len(fail_count) / len(findings) * 100, 2)}% ({len(fail_count)}) FAIL{Style.RESET_ALL}",
                f"{Fore.GREEN}{round(len(pass_count) / len(findings) * 100, 2)}% ({len(pass_count)}) PASS{Style.RESET_ALL}",
                f"{orange_color}{round(len(muted_count) / len(findings) * 100, 2)}% ({len(muted_count)}) MUTED{Style.RESET_ALL}",
            ]
        ]
        print(tabulate(overview_table, tablefmt="rounded_grid"))
        if not compliance_overview:
            print(
                f"\nFramework {Fore.YELLOW}{compliance_framework.upper()}{Style.RESET_ALL} Results:"
            )
            print(
                tabulate(
                    kisa_ismsp_compliance_table,
                    headers="keys",
                    tablefmt="rounded_grid",
                )
            )
            print(
                f"{Style.BRIGHT}* Only sections containing results appear.{Style.RESET_ALL}"
            )
            print(f"\nDetailed results of {compliance_framework.upper()} are in:")
            print(
                f" - CSV: {output_directory}/compliance/{output_filename}_{compliance_framework}.csv\n"
            )
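A quick worked example (not part of the commit) of the overview arithmetic printed above, with illustrative counts:

# Ten findings in total: 4 FAIL, 5 PASS, 1 MUTED (the indices are arbitrary).
findings_total = 10
fail_count = [0, 1, 2, 3]
pass_count = [4, 5, 6, 7, 8]
muted_count = [9]
print(round(len(fail_count) / findings_total * 100, 2))   # 40.0 -> "40.0% (4) FAIL"
print(round(len(pass_count) / findings_total * 100, 2))   # 50.0 -> "50.0% (5) PASS"
print(round(len(muted_count) / findings_total * 100, 2))  # 10.0 -> "10.0% (1) MUTED"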
93
prowler/lib/outputs/compliance/kisa_ismsp/kisa_ismsp_aws.py
Normal file
@@ -0,0 +1,93 @@
from prowler.lib.check.compliance_models import Compliance
from prowler.lib.outputs.compliance.compliance_output import ComplianceOutput
from prowler.lib.outputs.compliance.kisa_ismsp.models import AWSKISAISMSPModel
from prowler.lib.outputs.finding import Finding


class AWSKISAISMSP(ComplianceOutput):
    """
    This class represents the AWS KISA-ISMS-P compliance output.

    Attributes:
        - _data (list): A list to store transformed data from findings.
        - _file_descriptor (TextIOWrapper): A file descriptor to write data to a file.

    Methods:
        - transform: Transforms findings into AWS KISA-ISMS-P compliance format.
    """

    def transform(
        self,
        findings: list[Finding],
        compliance: Compliance,
        compliance_name: str,
    ) -> None:
        """
        Transforms a list of findings into AWS KISA-ISMS-P compliance format.

        Parameters:
            - findings (list): A list of findings.
            - compliance (Compliance): A compliance model.
            - compliance_name (str): The name of the compliance model.

        Returns:
            - None
        """
        for finding in findings:
            # Get the compliance requirements for the finding
            finding_requirements = finding.compliance.get(compliance_name, [])
            for requirement in compliance.Requirements:
                if requirement.Id in finding_requirements:
                    for attribute in requirement.Attributes:
                        compliance_row = AWSKISAISMSPModel(
                            Provider=finding.provider,
                            Description=compliance.Description,
                            AccountId=finding.account_uid,
                            Region=finding.region,
                            AssessmentDate=str(finding.timestamp),
                            Requirements_Id=requirement.Id,
                            Requirements_Name=requirement.Name,
                            Requirements_Description=requirement.Description,
                            Requirements_Attributes_Domain=attribute.Domain,
                            Requirements_Attributes_Subdomain=attribute.Subdomain,
                            Requirements_Attributes_Section=attribute.Section,
                            Requirements_Attributes_AuditChecklist=attribute.AuditChecklist,
                            Requirements_Attributes_RelatedRegulations=attribute.RelatedRegulations,
                            Requirements_Attributes_AuditEvidence=attribute.AuditEvidence,
                            Requirements_Attributes_NonComplianceCases=attribute.NonComplianceCases,
                            Status=finding.status,
                            StatusExtended=finding.status_extended,
                            ResourceId=finding.resource_uid,
                            ResourceName=finding.resource_name,
                            CheckId=finding.check_id,
                            Muted=finding.muted,
                        )
                        self._data.append(compliance_row)
        # Add manual requirements to the compliance output
        for requirement in compliance.Requirements:
            if not requirement.Checks:
                for attribute in requirement.Attributes:
                    compliance_row = AWSKISAISMSPModel(
                        Provider=compliance.Provider.lower(),
                        Description=compliance.Description,
                        AccountId="",
                        Region="",
                        AssessmentDate=str(finding.timestamp),
                        Requirements_Id=requirement.Id,
                        Requirements_Name=requirement.Name,
                        Requirements_Description=requirement.Description,
                        Requirements_Attributes_Domain=attribute.Domain,
                        Requirements_Attributes_Subdomain=attribute.Subdomain,
                        Requirements_Attributes_Section=attribute.Section,
                        Requirements_Attributes_AuditChecklist=attribute.AuditChecklist,
                        Requirements_Attributes_RelatedRegulations=attribute.RelatedRegulations,
                        Requirements_Attributes_AuditEvidence=attribute.AuditEvidence,
                        Requirements_Attributes_NonComplianceCases=attribute.NonComplianceCases,
                        Status="MANUAL",
                        StatusExtended="Manual check",
                        ResourceId="manual_check",
                        ResourceName="Manual check",
                        CheckId="manual",
                        Muted=False,
                    )
                    self._data.append(compliance_row)
31
prowler/lib/outputs/compliance/kisa_ismsp/models.py
Normal file
@@ -0,0 +1,31 @@
from typing import Optional

from pydantic import BaseModel


class AWSKISAISMSPModel(BaseModel):
    """
    The AWS KISA-ISMS-P Model outputs findings in a format compliant with the AWS KISA-ISMS-P standard.
    """

    Provider: str
    Description: str
    AccountId: str
    Region: str
    AssessmentDate: str
    Requirements_Id: str
    Requirements_Name: str
    Requirements_Description: str
    Requirements_Attributes_Domain: str
    Requirements_Attributes_Subdomain: str
    Requirements_Attributes_Section: str
    Requirements_Attributes_AuditChecklist: Optional[list[str]]
    Requirements_Attributes_RelatedRegulations: Optional[list[str]]
    Requirements_Attributes_AuditEvidence: Optional[list[str]]
    Requirements_Attributes_NonComplianceCases: Optional[list[str]]
    Status: str
    StatusExtended: str
    ResourceId: str
    ResourceName: str
    CheckId: str
    Muted: bool
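A minimal sketch (not part of the commit) of one output row built by hand; every value below is illustrative, only the field names come from the model above:

from prowler.lib.outputs.compliance.kisa_ismsp.models import AWSKISAISMSPModel

row = AWSKISAISMSPModel(
    Provider="aws",
    Description="KISA ISMS-P compliance framework",  # illustrative
    AccountId="123456789012",
    Region="us-east-1",
    AssessmentDate="2024-01-01 00:00:00",
    Requirements_Id="2.5.3",  # hypothetical requirement id
    Requirements_Name="User Authentication",
    Requirements_Description="...",
    Requirements_Attributes_Domain="2. Protection Measures",
    Requirements_Attributes_Subdomain="2.5 Authentication",
    Requirements_Attributes_Section="2.5.3",
    Requirements_Attributes_AuditChecklist=["Review MFA settings"],
    Requirements_Attributes_RelatedRegulations=None,
    Requirements_Attributes_AuditEvidence=None,
    Requirements_Attributes_NonComplianceCases=None,
    Status="PASS",
    StatusExtended="MFA is enabled for the root account.",
    ResourceId="arn:aws:iam::123456789012:root",
    ResourceName="root",
    CheckId="iam_root_mfa_enabled",
    Muted=False,
)
print(row.dict())  # one compliance-CSV row as a plain dict (pydantic v1-style serialization)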
@@ -88,29 +88,37 @@ class Finding(BaseModel):

    @classmethod
    def generate_output(
        cls, provider: Provider, check_output: Check_Report
        cls, provider: Provider, check_output: Check_Report, output_options
    ) -> "Finding":
        """Generates the output for a finding based on the provider and output options

        Args:
            provider (Provider): the provider object
            check_output (Check_Report): the check output object
            output_options: the output options object, depending on the provider
        Returns:
            finding_output (Finding): the finding output object

        """
        output_options = provider.output_options
        # TODO: think about get_provider_data_mapping
        provider_data_mapping = get_provider_data_mapping(provider)

        # TODO: move fill_common_finding_data
        common_finding_data = fill_common_finding_data(
            check_output, output_options.unix_timestamp
        )
        unix_timestamp = False
        if hasattr(output_options, "unix_timestamp"):
            unix_timestamp = output_options.unix_timestamp

        common_finding_data = fill_common_finding_data(check_output, unix_timestamp)
        output_data = {}
        output_data.update(provider_data_mapping)
        output_data.update(common_finding_data)

        bulk_checks_metadata = {}
        if hasattr(output_options, "bulk_checks_metadata"):
            bulk_checks_metadata = output_options.bulk_checks_metadata

        output_data["compliance"] = get_check_compliance(
            check_output, provider.type, output_options.bulk_checks_metadata
            check_output, provider.type, bulk_checks_metadata
        )
        try:
            if provider.type == "aws":

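The hunk above guards each attribute read with hasattr plus a default because output_options may be None. An equivalent one-liner (a sketch, not what the commit uses) is getattr with a fallback:

# Defensive reads; output_options may be None or lack the attribute.
unix_timestamp = getattr(output_options, "unix_timestamp", False)
bulk_checks_metadata = getattr(output_options, "bulk_checks_metadata", {})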
@@ -25,10 +25,12 @@ def stdout_report(finding, color, verbose, status, fix):
    )


# TODO: Only pass check_findings, provider.output_options and provider.type
def report(check_findings, provider):
# TODO: Only pass check_findings, output_options and provider.type
def report(check_findings, provider, output_options):
    try:
        output_options = provider.output_options
        verbose = False
        if hasattr(output_options, "verbose"):
            verbose = output_options.verbose
        if check_findings:
            # TO-DO Generic Function
            if provider.type == "aws":
@@ -39,21 +41,27 @@ def report(check_findings, provider):

            for finding in check_findings:
                # Print findings by stdout
                status = []
                if hasattr(output_options, "status"):
                    status = output_options.status
                fixer = False
                if hasattr(output_options, "fixer"):
                    fixer = output_options.fixer
                color = set_report_color(finding.status, finding.muted)
                stdout_report(
                    finding,
                    color,
                    output_options.verbose,
                    output_options.status,
                    output_options.fixer,
                    verbose,
                    status,
                    fixer,
                )

        else:  # No service resources in the whole account
            color = set_report_color("MANUAL")
            if output_options.verbose:
            if verbose:
                print(f"\t{color}INFO{Style.RESET_ALL} There are no resources")
        # Separator between findings and bar
        if output_options.verbose:
        if verbose:
            print()
    except Exception as error:
        logger.error(
@@ -82,9 +90,14 @@ def extract_findings_statistics(findings: list) -> dict:
    extract_findings_statistics takes a list of findings and returns the following dict with the aggregated statistics
    {
        "total_pass": 0,
        "total_muted_pass": 0,
        "total_fail": 0,
        "total_muted_fail": 0,
        "resources_count": 0,
        "findings_count": 0,
        "critical_failed_findings": [],
        "critical_passed_findings": [],
        "all_fails_are_muted": False
    }
    """
    logger.info("Extracting audit statistics...")
@@ -96,18 +109,42 @@ def extract_findings_statistics(findings: list) -> dict:
    resources = set()
    findings_count = 0
    all_fails_are_muted = True
    critical_severity_pass = 0
    critical_severity_fail = 0
    high_severity_pass = 0
    high_severity_fail = 0
    medium_severity_pass = 0
    medium_severity_fail = 0
    low_severity_pass = 0
    low_severity_fail = 0

    for finding in findings:
        # Save the resource_id
        resources.add(finding.resource_id)

        if finding.status == "PASS":
            if finding.check_metadata.Severity == "critical":
                critical_severity_pass += 1
            if finding.check_metadata.Severity == "high":
                high_severity_pass += 1
            if finding.check_metadata.Severity == "medium":
                medium_severity_pass += 1
            if finding.check_metadata.Severity == "low":
                low_severity_pass += 1
            total_pass += 1
            findings_count += 1
            if finding.muted is True:
                muted_pass += 1

        if finding.status == "FAIL":
            if finding.check_metadata.Severity == "critical":
                critical_severity_fail += 1
            if finding.check_metadata.Severity == "high":
                high_severity_fail += 1
            if finding.check_metadata.Severity == "medium":
                medium_severity_fail += 1
            if finding.check_metadata.Severity == "low":
                low_severity_fail += 1
            total_fail += 1
            findings_count += 1
            if finding.muted is True:
@@ -121,6 +158,14 @@ def extract_findings_statistics(findings: list) -> dict:
    stats["total_muted_fail"] = muted_fail
    stats["resources_count"] = len(resources)
    stats["findings_count"] = findings_count
    stats["total_critical_severity_fail"] = critical_severity_fail
    stats["total_critical_severity_pass"] = critical_severity_pass
    stats["total_high_severity_fail"] = high_severity_fail
    stats["total_high_severity_pass"] = high_severity_pass
    stats["total_medium_severity_fail"] = medium_severity_fail
    stats["total_medium_severity_pass"] = medium_severity_pass
    stats["total_low_severity_fail"] = low_severity_fail
    stats["total_low_severity_pass"] = low_severity_pass
    stats["all_fails_are_muted"] = all_fails_are_muted

    return stats

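A hedged sketch (not in the commit) of the aggregation above; SimpleNamespace stands in for Prowler's real finding objects, and the rest of the function is assumed to behave as in the repository:

from types import SimpleNamespace

findings = [
    SimpleNamespace(status="PASS", muted=False, resource_id="r1",
                    check_metadata=SimpleNamespace(Severity="critical")),
    SimpleNamespace(status="FAIL", muted=True, resource_id="r2",
                    check_metadata=SimpleNamespace(Severity="low")),
]
stats = extract_findings_statistics(findings)
print(stats["total_critical_severity_pass"])  # 1
print(stats["total_low_severity_fail"])       # 1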
@@ -121,6 +121,19 @@ class Slack:
                    "text": f"\n:white_check_mark: *{stats['total_pass']} Passed findings* ({round(stats['total_pass'] / stats['findings_count'] * 100 , 2)}%)\n",
                },
            },
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": (
                        "*Severities:*\n"
                        f"• *Critical:* {stats['total_critical_severity_pass']} "
                        f"• *High:* {stats['total_high_severity_pass']} "
                        f"• *Medium:* {stats['total_medium_severity_pass']} "
                        f"• *Low:* {stats['total_low_severity_pass']}"
                    ),
                },
            },
            {
                "type": "section",
                "text": {
@@ -128,6 +141,19 @@ class Slack:
                    "text": f"\n:x: *{stats['total_fail']} Failed findings* ({round(stats['total_fail'] / stats['findings_count'] * 100 , 2)}%)\n ",
                },
            },
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": (
                        "*Severities:*\n"
                        f"• *Critical:* {stats['total_critical_severity_fail']} "
                        f"• *High:* {stats['total_high_severity_fail']} "
                        f"• *Medium:* {stats['total_medium_severity_fail']} "
                        f"• *Low:* {stats['total_low_severity_fail']}"
                    ),
                },
            },
            {
                "type": "section",
                "text": {

@@ -1,6 +1,6 @@
from typing import Generator

from prowler.lib.check.check import execute, update_audit_metadata
from prowler.lib.check.check import execute, import_check, update_audit_metadata
from prowler.lib.logger import logger
from prowler.lib.outputs.finding import Finding
from prowler.providers.common.models import Audit_Metadata
@@ -99,13 +99,24 @@ class Scan:
            try:
                # Recover service from check name
                service = get_service_name_from_check_name(check_name)

                try:
                    # Import check module
                    check_module_path = f"prowler.providers.{self._provider.type}.services.{service}.{check_name}.{check_name}"
                    lib = import_check(check_module_path)
                    # Recover functions from check
                    check_to_execute = getattr(lib, check_name)
                    check = check_to_execute()
                except ModuleNotFoundError:
                    logger.error(
                        f"Check '{check_name}' was not found for the {self._provider.type.upper()} provider"
                    )
                    continue
                # Execute the check
                check_findings = execute(
                    service,
                    check_name,
                    check,
                    self._provider,
                    custom_checks_metadata,
                    output_options=None,
                )

                # Store findings
@@ -131,7 +142,9 @@ class Scan:
                )

                findings = [
                    Finding.generate_output(self._provider, finding)
                    Finding.generate_output(
                        self._provider, finding, output_options=None
                    )
                    for finding in check_findings
                ]


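The dynamic import above, shown in isolation. This is a sketch: import_check is assumed to be a thin wrapper over importlib, and the module path is illustrative:

import importlib

module_path = (
    "prowler.providers.aws.services.iam.iam_root_mfa_enabled.iam_root_mfa_enabled"
)
lib = importlib.import_module(module_path)        # raises ModuleNotFoundError if absent
check_cls = getattr(lib, "iam_root_mfa_enabled")  # the check class shares the module name
findings = check_cls().execute()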
@@ -52,7 +52,6 @@ from prowler.providers.aws.models import (
    AWSIdentityInfo,
    AWSMFAInfo,
    AWSOrganizationsInfo,
    AWSOutputOptions,
    AWSSession,
)
from prowler.providers.common.models import Audit_Metadata, Connection
@@ -68,7 +67,6 @@ class AwsProvider(Provider):
    _audit_config: dict
    _scan_unused_services: bool = False
    _enabled_regions: set = set()
    _output_options: AWSOutputOptions
    # TODO: this is not optional, enforce for all providers
    audit_metadata: Audit_Metadata

@@ -270,6 +268,8 @@ class AwsProvider(Provider):
        # Fixer Config
        self._fixer_config = fixer_config

        Provider.set_global_provider(self)

    @property
    def identity(self):
        return self._identity
@@ -302,17 +302,6 @@ class AwsProvider(Provider):
    def fixer_config(self):
        return self._fixer_config

    @property
    def output_options(self):
        return self._output_options

    @output_options.setter
    def output_options(self, options: tuple):
        arguments, bulk_checks_metadata = options
        self._output_options = AWSOutputOptions(
            arguments, bulk_checks_metadata, self._identity
        )

    @property
    def mutelist(self) -> AWSMutelist:
        """

@@ -174,6 +174,8 @@
    "regions": {
      "aws": [
        "ap-south-1",
        "ap-southeast-2",
        "eu-west-2",
        "us-east-1",
        "us-east-2",
        "us-west-2"
@@ -617,6 +619,7 @@
        "ap-southeast-3",
        "ap-southeast-4",
        "ca-central-1",
        "ca-west-1",
        "eu-central-1",
        "eu-central-2",
        "eu-north-1",
@@ -1873,6 +1876,7 @@
        "eu-central-1",
        "eu-north-1",
        "eu-south-1",
        "eu-south-2",
        "eu-west-1",
        "eu-west-2",
        "eu-west-3",
@@ -2221,27 +2225,6 @@
      ]
    }
  },
  "codestar": {
    "regions": {
      "aws": [
        "ap-northeast-1",
        "ap-northeast-2",
        "ap-southeast-1",
        "ap-southeast-2",
        "ca-central-1",
        "eu-central-1",
        "eu-north-1",
        "eu-west-1",
        "eu-west-2",
        "us-east-1",
        "us-east-2",
        "us-west-1",
        "us-west-2"
      ],
      "aws-cn": [],
      "aws-us-gov": []
    }
  },
  "codestar-connections": {
    "regions": {
      "aws": [
@@ -3189,6 +3172,7 @@
  "docdb": {
    "regions": {
      "aws": [
        "af-south-1",
        "ap-east-1",
        "ap-northeast-1",
        "ap-northeast-2",
@@ -3199,6 +3183,7 @@
        "ca-central-1",
        "eu-central-1",
        "eu-south-1",
        "eu-south-2",
        "eu-west-1",
        "eu-west-2",
        "eu-west-3",
@@ -3588,6 +3573,7 @@
        "ap-southeast-2",
        "ap-southeast-3",
        "ap-southeast-4",
        "ap-southeast-5",
        "ca-central-1",
        "ca-west-1",
        "eu-central-1",
@@ -4963,15 +4949,6 @@
      ]
    }
  },
  "honeycode": {
    "regions": {
      "aws": [
        "us-west-2"
      ],
      "aws-cn": [],
      "aws-us-gov": []
    }
  },
  "iam": {
    "regions": {
      "aws": [
@@ -7226,9 +7203,11 @@
        "ap-south-1",
        "ap-southeast-1",
        "ap-southeast-2",
        "ap-southeast-3",
        "ca-central-1",
        "eu-central-1",
        "eu-north-1",
        "eu-south-2",
        "eu-west-1",
        "eu-west-2",
        "eu-west-3",
@@ -8454,12 +8433,22 @@
  "redshift-serverless": {
    "regions": {
      "aws": [
        "ap-east-1",
        "ap-northeast-1",
        "ap-northeast-2",
        "ap-south-1",
        "ap-southeast-1",
        "ap-southeast-2",
        "ap-southeast-3",
        "ca-central-1",
        "eu-central-1",
        "eu-central-2",
        "eu-north-1",
        "eu-south-2",
        "eu-west-1",
        "eu-west-2",
        "eu-west-3",
        "il-central-1",
        "me-central-1",
        "sa-east-1",
        "us-east-1",
@@ -9470,6 +9459,7 @@
        "ap-southeast-2",
        "ap-southeast-3",
        "ap-southeast-4",
        "ap-southeast-5",
        "ca-central-1",
        "ca-west-1",
        "eu-central-1",
@@ -10153,16 +10143,22 @@
        "ap-northeast-2",
        "ap-northeast-3",
        "ap-south-1",
        "ap-south-2",
        "ap-southeast-1",
        "ap-southeast-2",
        "ap-southeast-3",
        "ap-southeast-4",
        "ca-central-1",
        "eu-central-1",
        "eu-central-2",
        "eu-north-1",
        "eu-south-1",
        "eu-south-2",
        "eu-west-1",
        "eu-west-2",
        "eu-west-3",
        "il-central-1",
        "me-central-1",
        "me-south-1",
        "sa-east-1",
        "us-east-1",
@@ -10562,12 +10558,16 @@
        "ap-south-1",
        "ap-southeast-1",
        "ap-southeast-2",
        "ap-southeast-3",
        "ca-central-1",
        "eu-central-1",
        "eu-north-1",
        "eu-south-1",
        "eu-south-2",
        "eu-west-1",
        "eu-west-2",
        "eu-west-3",
        "me-central-1",
        "us-east-1",
        "us-east-2",
        "us-west-2"
@@ -10969,6 +10969,7 @@
    "regions": {
      "aws": [
        "af-south-1",
        "ap-east-1",
        "ap-northeast-1",
        "ap-northeast-2",
        "ap-south-1",
@@ -10981,6 +10982,7 @@
        "eu-west-1",
        "eu-west-2",
        "eu-west-3",
        "me-south-1",
        "sa-east-1",
        "us-east-1",
        "us-east-2",

@@ -1,157 +0,0 @@
def is_condition_block_restrictive(
    condition_statement: dict,
    source_account: str,
    is_cross_account_allowed=False,
):
    """
    is_condition_block_restrictive parses the IAM Condition policy block and, by default, returns True if the source_account passed as argument is within it, False if not.

    If the argument is_cross_account_allowed is True, it tests whether the Condition block includes any of the allowlisted operators, returning True if it does, False if not.


    @param condition_statement: dict with an IAM Condition block, e.g.:
        {
            "StringLike": {
                "AWS:SourceAccount": 111122223333
            }
        }

    @param source_account: str with a 12-digit AWS Account number, e.g.: 111122223333

    @param is_cross_account_allowed: bool to allow cross-account access, e.g.: True

    """
    is_condition_valid = False

    # The conditions must be defined in lowercase since the context key names are not case-sensitive.
    # For example, including the aws:SourceAccount context key is equivalent to testing for AWS:SourceAccount
    # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition.html
    valid_condition_options = {
        "StringEquals": [
            "aws:sourceaccount",
            "aws:sourceowner",
            "s3:resourceaccount",
            "aws:principalaccount",
            "aws:resourceaccount",
            "aws:sourcearn",
            "aws:sourcevpc",
            "aws:sourcevpce",
        ],
        "StringLike": [
            "aws:sourceaccount",
            "aws:sourceowner",
            "aws:sourcearn",
            "aws:principalarn",
            "aws:resourceaccount",
            "aws:principalaccount",
            "aws:sourcevpc",
            "aws:sourcevpce",
        ],
        "ArnLike": ["aws:sourcearn", "aws:principalarn"],
        "ArnEquals": ["aws:sourcearn", "aws:principalarn"],
    }

    for condition_operator, condition_operator_key in valid_condition_options.items():
        if condition_operator in condition_statement:
            for value in condition_operator_key:
                # We need to transform the condition_statement into lowercase
                condition_statement[condition_operator] = {
                    k.lower(): v
                    for k, v in condition_statement[condition_operator].items()
                }

                if value in condition_statement[condition_operator]:
                    # values are a list
                    if isinstance(
                        condition_statement[condition_operator][value],
                        list,
                    ):
                        is_condition_key_restrictive = True
                        # if cross-account is not allowed, check each condition block looking for accounts
                        # different from the default
                        if not is_cross_account_allowed:
                            # if there is an arn/account without the source account -> we do not consider it safe
                            # by default we assume it is true here and look for false entries
                            for item in condition_statement[condition_operator][value]:
                                if source_account not in item:
                                    is_condition_key_restrictive = False
                                    break

                        if is_condition_key_restrictive:
                            is_condition_valid = True

                    # value is a string
                    elif isinstance(
                        condition_statement[condition_operator][value],
                        str,
                    ):
                        if is_cross_account_allowed:
                            is_condition_valid = True
                        else:
                            if (
                                source_account
                                in condition_statement[condition_operator][value]
                            ):
                                is_condition_valid = True

    return is_condition_valid


def is_condition_block_restrictive_organization(
    condition_statement: dict,
):
    """
    is_condition_block_restrictive_organization parses the IAM Condition policy block and returns True if the condition_statement is restrictive for the organization, False if not.

    @param condition_statement: dict with an IAM Condition block, e.g.:
        {
            "StringLike": {
                "AWS:PrincipalOrgID": "o-111122223333"
            }
        }

    """
    is_condition_valid = False

    # The conditions must be defined in lowercase since the context key names are not case-sensitive.
    # For example, including the aws:PrincipalOrgID context key is equivalent to testing for AWS:PrincipalOrgID
    # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition.html
    valid_condition_options = {
        "StringEquals": [
            "aws:principalorgid",
        ],
        "StringLike": [
            "aws:principalorgid",
        ],
    }

    for condition_operator, condition_operator_key in valid_condition_options.items():
        if condition_operator in condition_statement:
            for value in condition_operator_key:
                # We need to transform the condition_statement into lowercase
                condition_statement[condition_operator] = {
                    k.lower(): v
                    for k, v in condition_statement[condition_operator].items()
                }

                if value in condition_statement[condition_operator]:
                    # values are a list
                    if isinstance(
                        condition_statement[condition_operator][value],
                        list,
                    ):
                        is_condition_valid = True
                        for item in condition_statement[condition_operator][value]:
                            if item == "*":
                                is_condition_valid = False
                                break

                    # value is a string
                    elif isinstance(
                        condition_statement[condition_operator][value],
                        str,
                    ):
                        if "*" not in condition_statement[condition_operator][value]:
                            is_condition_valid = True

    return is_condition_valid
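A short illustration (not part of the commit) of how the removed helper classified Condition blocks; the account numbers are the documentation placeholders used above:

# Restrictive: every listed value contains the scanned account.
same_account = {"StringEquals": {"aws:SourceAccount": ["111122223333"]}}
print(is_condition_block_restrictive(same_account, "111122223333"))  # True

# Not restrictive: a foreign account appears in the list.
other_account = {"StringEquals": {"aws:SourceAccount": ["999988887777"]}}
print(is_condition_block_restrictive(other_account, "111122223333"))  # False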
@@ -12,7 +12,7 @@
  "SubServiceName": "rest_api",
  "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
  "Severity": "medium",
  "ResourceType": "AwsApiGatewayStage",
  "ResourceType": "AwsApiGatewayRestApi",
  "Description": "Check if API Gateway Stage has client certificate enabled to access your backend endpoint.",
  "Risk": "Possible man in the middle attacks and other similar risks.",
  "RelatedUrl": "",

@@ -12,7 +12,7 @@
  "SubServiceName": "rest_api",
  "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
  "Severity": "medium",
  "ResourceType": "AwsApiGatewayStage",
  "ResourceType": "AwsApiGatewayRestApi",
  "Description": "Check if API Gateway Stage has logging enabled.",
  "Risk": "If not enabled, monitoring of service use is not possible. Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms.",
  "RelatedUrl": "",

@@ -15,7 +15,7 @@ class apigateway_restapi_public(Check):
            report.resource_tags = rest_api.tags
            if rest_api.public_endpoint:
                report.status = "FAIL"
                report.status_extended = f"API Gateway {rest_api.name} ID {rest_api.id} is internet accesible."
                report.status_extended = f"API Gateway {rest_api.name} ID {rest_api.id} is internet accessible."
            else:
                report.status = "PASS"
                report.status_extended = (

@@ -12,7 +12,7 @@
  "SubServiceName": "rest_api",
  "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
  "Severity": "medium",
  "ResourceType": "AwsApiGatewayStage",
  "ResourceType": "AwsApiGatewayRestApi",
  "Description": "Check if API Gateway Stage has a WAF ACL attached.",
  "Risk": "Potential attacks and/or abuse of service, even more so for internet-reachable services.",
  "RelatedUrl": "",

@@ -11,7 +11,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:appstream:region:account-id:fleet/resource-id",
  "Severity": "medium",
  "ResourceType": "AppStream",
  "ResourceType": "Other",
  "Description": "Ensure default Internet Access from your Amazon AppStream fleet streaming instances remains unchecked.",
  "Risk": "Default Internet Access from your fleet streaming instances should be controlled using a NAT gateway in the VPC.",
  "RelatedUrl": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:appstream:region:account-id:fleet/resource-id",
  "Severity": "medium",
  "ResourceType": "AppStream",
  "ResourceType": "Other",
  "Description": "Ensure user maximum session duration is no longer than 10 hours.",
  "Risk": "A session duration longer than 10 hours should not be necessary, and a session run for malicious purposes would have more usage time than should be allowed.",
  "RelatedUrl": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html",

@@ -11,7 +11,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:appstream:region:account-id:fleet/resource-id",
  "Severity": "medium",
  "ResourceType": "AppStream",
  "ResourceType": "Other",
  "Description": "Ensure session disconnect timeout is set to 5 minutes or less",
  "Risk": "Disconnect timeout, in minutes, is the amount of time that a streaming session remains active after users disconnect.",
  "RelatedUrl": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html",

@@ -11,7 +11,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:appstream:region:account-id:fleet/resource-id",
  "Severity": "medium",
  "ResourceType": "AppStream",
  "ResourceType": "Other",
  "Description": "Ensure session idle disconnect timeout is set to 10 minutes or less.",
  "Risk": "Idle disconnect timeout, in minutes, is the amount of time that users can be inactive before they are disconnected from their streaming session and the disconnect timeout begins.",
  "RelatedUrl": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:athena:region:account-id:workgroup/resource-id",
  "Severity": "medium",
  "ResourceType": "WorkGroup",
  "ResourceType": "AwsAthenaWorkGroup",
  "Description": "Ensure that encryption at rest is enabled for Amazon Athena query results stored in Amazon S3 in order to secure data and meet compliance requirements for data-at-rest encryption.",
  "Risk": "If not enabled, sensitive information at rest is not protected.",
  "RelatedUrl": "https://docs.aws.amazon.com/athena/latest/ug/encryption.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:athena:region:account-id:workgroup/resource-id",
  "Severity": "medium",
  "ResourceType": "WorkGroup",
  "ResourceType": "AwsAthenaWorkGroup",
  "Description": "Ensure that workgroup configuration is enforced so it cannot be overridden by client-side settings.",
  "Risk": "If workgroup configuration is not enforced, security settings like encryption can be overridden by client-side settings.",
  "RelatedUrl": "https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html",

@@ -1,7 +1,7 @@
{
  "Provider": "aws",
  "CheckID": "autoscaling_find_secrets_ec2_launch_configuration",
  "CheckTitle": "Find secrets in EC2 Auto Scaling Launch Configuration",
  "CheckTitle": "[DEPRECATED] Find secrets in EC2 Auto Scaling Launch Configuration",
  "CheckType": [
    "IAM"
  ],
@@ -9,8 +9,8 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:autoscaling:region:account-id:autoScalingGroupName/resource-name",
  "Severity": "critical",
  "ResourceType": "Other",
  "Description": "Find secrets in EC2 Auto Scaling Launch Configuration",
  "ResourceType": "AwsAutoScalingLaunchConfiguration",
  "Description": "[DEPRECATED] Find secrets in EC2 Auto Scaling Launch Configuration",
  "Risk": "The use of a hard-coded password increases the possibility of password guessing. If hard-coded passwords are used, it is possible that malicious users gain access through the account in question.",
  "RelatedUrl": "",
  "Remediation": {

@@ -7,7 +7,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:autoscaling:region:account-id:autoScalingGroupName/resource-name",
  "Severity": "medium",
  "ResourceType": "Other",
  "ResourceType": "AwsAutoScalingAutoScalingGroup",
  "Description": "EC2 Auto Scaling Group should use multiple Availability Zones",
  "Risk": "In case of a failure in a single Availability Zone, the Auto Scaling Group will not be able to launch new instances to replace the failed ones.",
  "RelatedUrl": "https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-add-availability-zone.html",

@@ -13,14 +13,20 @@ class awslambda_function_inside_vpc(Check):
            report.resource_id = function.name
            report.resource_arn = function_arn
            report.resource_tags = function.tags
            report.status = "FAIL"
            report.status = "PASS"
            report.status_extended = (
                f"Lambda function {function.name} is not inside a VPC"
                f"Lambda function {function.name} is inside of VPC {function.vpc_id}"
            )

            if function.vpc_id:
                report.status = "PASS"
                report.status_extended = f"Lambda function {function.name} is inside of VPC {function.vpc_id}"
            if not function.vpc_id:
                awslambda_client.set_failed_check(
                    self.__class__.__name__,
                    function_arn,
                )
                report.status = "FAIL"
                report.status_extended = (
                    f"Lambda function {function.name} is not inside a VPC"
                )

            findings.append(report)


@@ -28,7 +28,8 @@ class awslambda_function_invoke_api_operations_cloudtrail_logging_enabled(Check)
                    for resource in data_event.event_selector["DataResources"]:
                        if resource["Type"] == "AWS::Lambda::Function" and (
                            function.arn in resource["Values"]
                            or "arn:aws:lambda" in resource["Values"]
                            or f"arn:{awslambda_client.audited_partition}:lambda"
                            in resource["Values"]
                        ):
                            lambda_recorded_cloudtrail = True
                            break

@@ -1,5 +1,6 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.awslambda.awslambda_client import awslambda_client
from prowler.providers.aws.services.iam.lib.policy import is_policy_public


class awslambda_function_not_publicly_accessible(Check):
@@ -14,37 +15,11 @@ class awslambda_function_not_publicly_accessible(Check):

            report.status = "PASS"
            report.status_extended = f"Lambda function {function.name} has a resource-based policy that is not public."

            public_access = False
            if function.policy:
                for statement in function.policy["Statement"]:
                    # Only check allow statements
                    if statement["Effect"] == "Allow" and (
                        "*" in statement["Principal"]
                        or (
                            isinstance(statement["Principal"], dict)
                            and (
                                "*" in statement["Principal"].get("AWS", "")
                                or "*"
                                in statement["Principal"].get("CanonicalUser", "")
                                or (  # Check if function can be invoked by other AWS services
                                    (
                                        ".amazonaws.com"
                                        in statement["Principal"].get("Service", "")
                                    )
                                    and (
                                        "*" in statement.get("Action", "")
                                        or "InvokeFunction"
                                        in statement.get("Action", "")
                                    )
                                )
                            )
                        )
                    ):
                        public_access = True
                        break

            if public_access:
            if is_policy_public(
                function.policy,
                awslambda_client.audited_account,
                is_cross_account_allowed=True,
            ):
                report.status = "FAIL"
                report.status_extended = f"Lambda function {function.name} has a resource-based policy with public access."


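A hedged sketch of the new helper in action; the exact semantics live in prowler.providers.aws.services.iam.lib.policy, but a wildcard principal such as the one below is the canonical public case:

public_policy = {
    "Statement": [
        {"Effect": "Allow", "Principal": "*", "Action": "lambda:InvokeFunction"}
    ]
}
# Expected to flag the policy as public (the account number is illustrative).
print(is_policy_public(public_policy, "123456789012", is_cross_account_allowed=True))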
@@ -0,0 +1,30 @@
{
  "Provider": "aws",
  "CheckID": "awslambda_function_vpc_multi_az",
  "CheckTitle": "Check if AWS Lambda Function VPC is deployed Across Multiple Availability Zones",
  "CheckType": [],
  "ServiceName": "lambda",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:lambda:region:account-id:function/function-name",
  "Severity": "medium",
  "ResourceType": "AwsLambdaFunction",
  "Description": "This control checks whether an AWS Lambda function connected to a VPC operates in at least the specified number of Availability Zones (AZs). A failure occurs if the function does not operate in the required number of AZs, which by default is two.",
  "Risk": "A Lambda function not deployed across multiple AZs increases the risk of a single point of failure, which can result in a complete disruption of the function's operations if an AZ becomes unavailable.",
  "RelatedUrl": "https://docs.aws.amazon.com/lambda/latest/operatorguide/networking-vpc.html",
  "Remediation": {
    "Code": {
      "CLI": "",
      "NativeIaC": "",
      "Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/lambda-controls.html#lambda-5",
      "Terraform": ""
    },
    "Recommendation": {
      "Text": "Ensure that your AWS Lambda functions connected to a VPC are distributed across multiple Availability Zones (AZs) to enhance availability and resilience.",
      "Url": "https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html"
    }
  },
  "Categories": [],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,48 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.awslambda.awslambda_client import awslambda_client
from prowler.providers.aws.services.awslambda.awslambda_function_inside_vpc.awslambda_function_inside_vpc import (
    awslambda_function_inside_vpc,
)
from prowler.providers.aws.services.vpc.vpc_client import vpc_client


class awslambda_function_vpc_multi_az(Check):
    def execute(self) -> list[Check_Report_AWS]:
        findings = []
        LAMBDA_MIN_AZS = awslambda_client.audit_config.get("lambda_min_azs", 2)
        for function_arn, function in awslambda_client.functions.items():
            # Only proceed if check "awslambda_function_inside_vpc" did not run or did not FAIL, to avoid reporting twice that the function is not inside a VPC
            if not awslambda_client.is_failed_check(
                awslambda_function_inside_vpc.__name__,
                function_arn,
            ):
                report = Check_Report_AWS(self.metadata())
                report.region = function.region
                report.resource_id = function.name
                report.resource_arn = function_arn
                report.resource_tags = function.tags
                report.status = "FAIL"
                report.status_extended = (
                    f"Lambda function {function.name} is not inside a VPC."
                )

                if function.vpc_id:
                    function_availability_zones = {
                        getattr(
                            vpc_client.vpc_subnets.get(subnet_id),
                            "availability_zone",
                            None,
                        )
                        for subnet_id in function.subnet_ids
                        if subnet_id in vpc_client.vpc_subnets
                    }

                    if len(function_availability_zones) >= LAMBDA_MIN_AZS:
                        report.status = "PASS"
                        report.status_extended = f"Lambda function {function.name} is inside of VPC {function.vpc_id} that spans at least {LAMBDA_MIN_AZS} AZs: {', '.join(function_availability_zones)}."
                    else:
                        report.status_extended = f"Lambda function {function.name} is inside of VPC {function.vpc_id} that spans only {len(function_availability_zones)} AZs: {', '.join(function_availability_zones)}. It must span at least {LAMBDA_MIN_AZS} AZs."

                findings.append(report)

        return findings
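The AZ counting in the check above, reduced to its core (a sketch with made-up subnet data, not part of the commit):

# Map subnet -> AZ, as vpc_client.vpc_subnets would provide it.
vpc_subnets = {"subnet-a": "us-east-1a", "subnet-b": "us-east-1b", "subnet-c": "us-east-1a"}
function_subnet_ids = {"subnet-a", "subnet-b", "subnet-c"}
azs = {vpc_subnets[s] for s in function_subnet_ids if s in vpc_subnets}
print(len(azs) >= 2)  # True: the function spans us-east-1a and us-east-1b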
@@ -44,6 +44,7 @@ class Lambda(AWSService):
                            arn=lambda_arn,
                            security_groups=vpc_config.get("SecurityGroupIds", []),
                            vpc_id=vpc_config.get("VpcId"),
                            subnet_ids=set(vpc_config.get("SubnetIds", [])),
                            region=regional_client.region,
                        )
                        if "Runtime" in function:
@@ -202,4 +203,5 @@ class Function(BaseModel):
    code: LambdaCode = None
    url_config: URLConfig = None
    vpc_id: Optional[str]
    subnet_ids: Optional[set]
    tags: Optional[list] = []

@@ -12,6 +12,7 @@ class backup_plans_exist(Check):
            report.resource_arn = backup_client.backup_plans[0].arn
            report.resource_id = backup_client.backup_plans[0].name
            report.region = backup_client.backup_plans[0].region
            report.resource_tags = backup_client.backup_plans[0].tags
            findings.append(report)
        elif backup_client.backup_vaults:
            report = Check_Report_AWS(self.metadata())
@@ -20,5 +21,6 @@ class backup_plans_exist(Check):
            report.resource_arn = backup_client.backup_plan_arn_template
            report.resource_id = backup_client.audited_account
            report.region = backup_client.region
            report.resource_tags = []
            findings.append(report)
        return findings

@@ -11,7 +11,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:service:region:account-id:backup-report-plan:backup-report-plan-id",
  "Severity": "low",
  "ResourceType": "Other",
  "ResourceType": "AwsBackupBackupPlan",
  "Description": "This check ensures that there is at least one backup report plan in place.",
  "Risk": "Without a backup report plan, an organization may lack visibility into the success or failure of backup operations.",
  "RelatedUrl": "https://docs.aws.amazon.com/aws-backup/latest/devguide/create-report-plan-console.html",

@@ -9,7 +9,6 @@ from prowler.lib.scan_filters.scan_filters import is_resource_filtered
from prowler.providers.aws.lib.service.service import AWSService


################## Backup
class Backup(AWSService):
    def __init__(self, provider):
        # Call AWSService's __init__
@@ -19,12 +18,14 @@ class Backup(AWSService):
        self.backup_vault_arn_template = f"arn:{self.audited_partition}:backup:{self.region}:{self.audited_account}:backup-vault"
        self.backup_vaults = []
        self.__threading_call__(self._list_backup_vaults)
        self.__threading_call__(self._list_tags, self.backup_vaults)
        self.backup_plans = []
        self.__threading_call__(self._list_backup_plans)
        self.__threading_call__(self._list_tags, self.backup_plans)
        self.backup_report_plans = []
        self.__threading_call__(self._list_backup_report_plans)
        self.protected_resources = {}
        self.__threading_call__(self._list_protected_resources)
        self.protected_resources = []
        self.__threading_call__(self._list_backup_selections)

    def _list_backup_vaults(self, regional_client):
        logger.info("Backup - Listing Backup Vaults...")
@@ -140,33 +141,43 @@ class Backup(AWSService):
            f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
        )

    def _list_protected_resources(self, regional_client):
        logger.info("Backup - Listing Protected Resources...")

    def _list_backup_selections(self, regional_client):
        logger.info("Backup - Listing Backup Selections...")
        try:
            list_protected_resources_paginator = regional_client.get_paginator(
                "list_protected_resources"
            for backup_plan in self.backup_plans:
                paginator = regional_client.get_paginator("list_backup_selections")
                for page in paginator.paginate(BackupPlanId=backup_plan.id):
                    for selection in page.get("BackupSelectionsList", []):
                        selection_id = selection.get("SelectionId")
                        if selection_id:
                            backup_selection = regional_client.get_backup_selection(
                                BackupPlanId=backup_plan.id, SelectionId=selection_id
                            )["BackupSelection"]

                            self.protected_resources.extend(
                                backup_selection.get("Resources", [])
                            )

        except ClientError as error:
            logger.error(
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
            for page in list_protected_resources_paginator.paginate():
                for resource in page.get("Results", []):
                    arn = resource.get("ResourceArn", "")
                    if not self.audit_resources or (
                        is_resource_filtered(
                            arn,
                            self.audit_resources,
                        )
                    ):
                        self.protected_resources[arn] = ProtectedResource(
                            arn=arn,
                            resource_type=resource.get("ResourceType"),
                            region=regional_client.region,
                            last_backup_time=resource.get("LastBackupTime"),
                        )
        except Exception as error:
            logger.error(
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def _list_tags(self, resource):
        try:
            tags = self.regional_clients[resource.region].list_tags(
                ResourceArn=resource.arn
            )["Tags"]
            resource.tags = [tags] if tags else []
        except Exception as error:
            logger.error(
                f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )


class BackupVault(BaseModel):
    arn: str
@@ -177,6 +188,7 @@ class BackupVault(BaseModel):
    locked: bool
    min_retention_days: int = None
    max_retention_days: int = None
    tags: Optional[list]


class BackupPlan(BaseModel):
@@ -187,6 +199,7 @@ class BackupPlan(BaseModel):
    version_id: str
    last_execution_date: Optional[datetime]
    advanced_settings: list
    tags: Optional[list]


class BackupReportPlan(BaseModel):
@@ -195,10 +208,3 @@ class BackupReportPlan(BaseModel):
    name: str
    last_attempted_execution_date: Optional[datetime]
    last_successful_execution_date: Optional[datetime]


class ProtectedResource(BaseModel):
    arn: str
    resource_type: str
    region: str
    last_backup_time: Optional[datetime]

@@ -16,6 +16,7 @@ class backup_vaults_encrypted(Check):
            report.resource_arn = backup_vault.arn
            report.resource_id = backup_vault.name
            report.region = backup_vault.region
            report.resource_tags = backup_vault.tags
            # if it is encrypted we only change the status and the status extended
            if backup_vault.encryption:
                report.status = "PASS"

@@ -12,12 +12,14 @@ class backup_vaults_exist(Check):
        report.resource_arn = backup_client.backup_vault_arn_template
        report.resource_id = backup_client.audited_account
        report.region = backup_client.region
        report.resource_tags = []
        if backup_client.backup_vaults:
            report.status = "PASS"
            report.status_extended = f"At least one backup vault exists: {backup_client.backup_vaults[0].name}."
            report.resource_arn = backup_client.backup_vaults[0].arn
            report.resource_id = backup_client.backup_vaults[0].name
            report.region = backup_client.backup_vaults[0].region
            report.resource_tags = backup_client.backup_vaults[0].tags

        findings.append(report)
        return findings

@@ -0,0 +1,32 @@
{
  "Provider": "aws",
  "CheckID": "cloudfront_distributions_custom_ssl_certificate",
  "CheckTitle": "CloudFront distributions should use custom SSL/TLS certificates.",
  "CheckType": [],
  "ServiceName": "cloudfront",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudFrontDistribution",
  "Description": "Ensure that your Amazon CloudFront distributions are configured to use a custom SSL/TLS certificate instead of the default one.",
  "Risk": "Using the default SSL/TLS certificate provided by CloudFront can limit your ability to use custom domain names and may not align with your organization's security policies or branding requirements.",
  "RelatedUrl": "https://aws.amazon.com/what-is/ssl-certificate/",
  "Remediation": {
    "Code": {
      "CLI": "",
      "NativeIaC": "https://docs.prowler.com/checks/aws/networking-policies/ensure-aws-cloudfront-distribution-uses-custom-ssl-certificate/",
      "Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/cloudfront-controls.html#cloudfront-7",
      "Terraform": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudFront/cloudfront-distro-custom-tls.html"
    },
    "Recommendation": {
      "Text": "Configure your CloudFront distributions to use a custom SSL/TLS certificate to enable secure access via your own domain names and meet specific security and branding needs. This allows for more control over encryption and authentication settings.",
      "Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/CNAMEs.html#CreatingCNAME"
    }
  },
  "Categories": [
    "encryption"
  ],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,25 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.cloudfront.cloudfront_client import (
    cloudfront_client,
)


class cloudfront_distributions_custom_ssl_certificate(Check):
    def execute(self):
        findings = []
        for distribution in cloudfront_client.distributions.values():
            report = Check_Report_AWS(self.metadata())
            report.region = distribution.region
            report.resource_arn = distribution.arn
            report.resource_id = distribution.id
            report.resource_tags = distribution.tags
            report.status = "PASS"
            report.status_extended = f"CloudFront Distribution {distribution.id} is using a custom SSL/TLS certificate."

            if distribution.default_certificate:
                report.status = "FAIL"
                report.status_extended = f"CloudFront Distribution {distribution.id} is using the default SSL/TLS certificate."

            findings.append(report)

        return findings
@@ -0,0 +1,30 @@
{
  "Provider": "aws",
  "CheckID": "cloudfront_distributions_default_root_object",
  "CheckTitle": "Check if CloudFront distributions have a default root object.",
  "CheckType": [],
  "ServiceName": "cloudfront",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudfront:region:account-id:distribution/resource-id",
  "Severity": "high",
  "ResourceType": "AwsCloudFrontDistribution",
  "Description": "Check if CloudFront distributions have a default root object.",
  "Risk": "Without a default root object, requests to the root URL may result in an error or expose unintended content, leading to potential security risks and a poor user experience.",
  "RelatedUrl": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DefaultRootObject.html#DefaultRootObjectHow",
  "Remediation": {
    "Code": {
      "CLI": "aws cloudfront update-distribution --id <distribution-id> --default-root-object <new-root-object>",
      "NativeIaC": "",
      "Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/cloudfront-controls.html#cloudfront-1",
      "Terraform": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudFront/cloudfront-default-object.html"
    },
    "Recommendation": {
      "Text": "Configure a default root object for your CloudFront distribution to ensure that a specific file (such as index.html) is returned when users access the root URL. This improves user experience and ensures that sensitive content isn't accidentally exposed.",
      "Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DefaultRootObject.html#DefaultRootObjectHowToDefine"
    }
  },
  "Categories": [],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,26 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.cloudfront.cloudfront_client import (
    cloudfront_client,
)


class cloudfront_distributions_default_root_object(Check):
    def execute(self):
        findings = []
        for distribution in cloudfront_client.distributions.values():
            report = Check_Report_AWS(self.metadata())
            report.region = distribution.region
            report.resource_arn = distribution.arn
            report.resource_id = distribution.id
            report.resource_tags = distribution.tags

            if distribution.default_root_object:
                report.status = "PASS"
                report.status_extended = f"CloudFront Distribution {distribution.id} does have a default root object ({distribution.default_root_object}) configured."
            else:
                report.status = "FAIL"
                report.status_extended = f"CloudFront Distribution {distribution.id} does not have a default root object configured."

            findings.append(report)

        return findings
@@ -0,0 +1,32 @@
{
  "Provider": "aws",
  "CheckID": "cloudfront_distributions_https_sni_enabled",
  "CheckTitle": "Check if CloudFront distributions are using SNI to serve HTTPS requests.",
  "CheckType": [],
  "ServiceName": "cloudfront",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudfront:region:account-id:distribution/resource-id",
  "Severity": "low",
  "ResourceType": "AwsCloudFrontDistribution",
  "Description": "Check if CloudFront distributions are using SNI to serve HTTPS requests.",
  "Risk": "If SNI is not used, CloudFront will allocate a dedicated IP address for each SSL certificate, leading to higher costs and inefficient IP address utilization. This could also complicate scaling and managing multiple distributions, especially if your domain requires multiple SSL certificates.",
  "RelatedUrl": "https://www.cloudflare.com/es-es/learning/ssl/what-is-sni/",
  "Remediation": {
    "Code": {
      "CLI": "",
      "NativeIaC": "",
      "Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/cloudfront-controls.html#cloudfront-8",
      "Terraform": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudFront/cloudfront-sni.html"
    },
    "Recommendation": {
      "Text": "Ensure that your CloudFront distributions are configured to use Server Name Indication (SNI) when serving HTTPS requests with custom SSL/TLS certificates. This is the recommended approach for reducing costs and optimizing IP address usage.",
      "Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cnames-https-dedicated-ip-or-sni.html#cnames-https-sni"
    }
  },
  "Categories": [
    "encryption"
  ],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,30 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.cloudfront.cloudfront_client import (
    cloudfront_client,
)
from prowler.providers.aws.services.cloudfront.cloudfront_service import (
    SSLSupportMethod,
)


class cloudfront_distributions_https_sni_enabled(Check):
    def execute(self):
        findings = []
        for distribution in cloudfront_client.distributions.values():
            if distribution.certificate:
                report = Check_Report_AWS(self.metadata())
                report.region = distribution.region
                report.resource_arn = distribution.arn
                report.resource_id = distribution.id
                report.resource_tags = distribution.tags

                if distribution.ssl_support_method == SSLSupportMethod.sni_only:
                    report.status = "PASS"
                    report.status_extended = f"CloudFront Distribution {distribution.id} is serving HTTPS requests using SNI."
                else:
                    report.status = "FAIL"
                    report.status_extended = f"CloudFront Distribution {distribution.id} is not serving HTTPS requests using SNI."

                findings.append(report)

        return findings
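For context, the ssl_support_method this check compares comes from the distribution's ViewerCertificate block; per the CloudFront API it is one of sni-only, vip, or static-ip. A hedged sketch of the same test done directly against the API, independent of Prowler's service cache (field names follow the CloudFront ListDistributions response shape):

import boto3

cloudfront = boto3.client("cloudfront")

# Flag any custom-certificate distribution not serving HTTPS via SNI.
paginator = cloudfront.get_paginator("list_distributions")
for page in paginator.paginate():
    for item in page.get("DistributionList", {}).get("Items", []):
        cert = item.get("ViewerCertificate", {})
        if cert.get("Certificate") and cert.get("SSLSupportMethod") != "sni-only":
            print(item["Id"], cert.get("SSLSupportMethod"))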
@@ -0,0 +1,36 @@
{
  "Provider": "aws",
  "CheckID": "cloudfront_distributions_multiple_origin_failover_configured",
  "CheckTitle": "Check if CloudFront distributions have origin failover enabled.",
  "CheckType": [
    "Software and Configuration Checks",
    "Industry and Regulatory Standards",
    "NIST 800-53 Controls"
  ],
  "ServiceName": "cloudfront",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudfront:region:account-id:distribution/resource-id",
  "Severity": "low",
  "ResourceType": "AwsCloudFrontDistribution",
  "Description": "Check if CloudFront distributions have origin failover enabled.",
  "Risk": "Without origin failover, if the primary origin becomes unavailable, your CloudFront distribution may experience downtime, leading to potential service interruptions and a poor user experience.",
  "RelatedUrl": "https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_OriginGroup.html",
  "Remediation": {
    "Code": {
      "CLI": "",
      "NativeIaC": "",
      "Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/cloudfront-controls.html#cloudfront-4",
      "Terraform": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudFront/origin-failover-enabled.html"
    },
    "Recommendation": {
      "Text": "Configure origin failover in your CloudFront distribution by setting up an origin group with at least two origins to enhance availability and ensure traffic is redirected if the primary origin fails.",
      "Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/high_availability_origin_failover.html#concept_origin_groups.creating"
    }
  },
  "Categories": [
    "redundancy"
  ],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,25 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.cloudfront.cloudfront_client import (
    cloudfront_client,
)


class cloudfront_distributions_multiple_origin_failover_configured(Check):
    def execute(self):
        findings = []
        for distribution in cloudfront_client.distributions.values():
            report = Check_Report_AWS(self.metadata())
            report.region = distribution.region
            report.resource_arn = distribution.arn
            report.resource_id = distribution.id
            report.resource_tags = distribution.tags
            report.status = "FAIL"
            report.status_extended = f"CloudFront Distribution {distribution.id} does not have an origin group configured with at least 2 origins."

            if distribution.origin_failover:
                report.status = "PASS"
                report.status_extended = f"CloudFront Distribution {distribution.id} has an origin group with at least 2 origins configured."

            findings.append(report)

        return findings
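The origin_failover flag this check consumes is derived, in the service diff further down, from the distribution's OriginGroups block. A hedged sketch of the minimal shape that satisfies it, applying the same Quantity >= 2 rule the service uses (field names follow the CloudFront API; the member origin IDs are made-up placeholders):

# Minimal OriginGroups fragment for a distribution with one failover pair.
origin_groups = {
    "Quantity": 1,
    "Items": [
        {
            "Id": "failover-group-1",
            "FailoverCriteria": {"StatusCodes": {"Quantity": 1, "Items": [504]}},
            "Members": {
                "Quantity": 2,
                "Items": [{"OriginId": "primary-origin"}, {"OriginId": "backup-origin"}],
            },
        }
    ],
}

# Same rule the service code applies to derive origin_failover:
origin_failover = all(
    group.get("Members", {}).get("Quantity", 0) >= 2
    for group in origin_groups.get("Items", [])
)
assert origin_failover is True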
@@ -0,0 +1,30 @@
{
  "Provider": "aws",
  "CheckID": "cloudfront_distributions_origin_traffic_encrypted",
  "CheckTitle": "Check if CloudFront distributions encrypt traffic to custom origins.",
  "CheckType": [],
  "ServiceName": "cloudfront",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudfront:region:account-id:distribution/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudFrontDistribution",
  "Description": "Check if CloudFront distributions encrypt traffic to custom origins.",
  "Risk": "Allowing unencrypted HTTP traffic between CloudFront and custom origins can expose data to potential eavesdropping and manipulation, compromising data security and integrity.",
  "RelatedUrl": "https://docs.aws.amazon.com/whitepapers/latest/secure-content-delivery-amazon-cloudfront/custom-origin-with-cloudfront.html",
  "Remediation": {
    "Code": {
      "CLI": "",
      "NativeIaC": "",
      "Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/cloudfront-controls.html#cloudfront-9",
      "Terraform": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudFront/cloudfront-traffic-to-origin-unencrypted.html"
    },
    "Recommendation": {
      "Text": "Configure your CloudFront distributions to require HTTPS (TLS) for traffic to custom origins, ensuring all data transmitted between CloudFront and the origin is encrypted and protected from unauthorized access.",
      "Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-https-cloudfront-to-custom-origin.html"
    }
  },
  "Categories": [],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,36 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.cloudfront.cloudfront_client import (
    cloudfront_client,
)


class cloudfront_distributions_origin_traffic_encrypted(Check):
    def execute(self):
        findings = []
        for distribution in cloudfront_client.distributions.values():
            report = Check_Report_AWS(self.metadata())
            report.region = distribution.region
            report.resource_arn = distribution.arn
            report.resource_id = distribution.id
            report.resource_tags = distribution.tags
            report.status = "PASS"
            report.status_extended = f"CloudFront Distribution {distribution.id} does encrypt traffic to custom origins."
            unencrypted_origins = []

            for origin in distribution.origins:
                if (
                    origin.origin_protocol_policy == ""
                    or origin.origin_protocol_policy == "http-only"
                ) or (
                    origin.origin_protocol_policy == "match-viewer"
                    and distribution.viewer_protocol_policy == "allow-all"
                ):
                    unencrypted_origins.append(origin.id)

            if unencrypted_origins:
                report.status = "FAIL"
                report.status_extended = f"CloudFront Distribution {distribution.id} does not encrypt traffic to custom origins {', '.join(unencrypted_origins)}."

            findings.append(report)

        return findings
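The FAIL predicate above reduces to a small truth table over the origin and viewer protocol policies. A hedged standalone restatement with the cases spelled out:

def origin_traffic_unencrypted(origin_policy: str, viewer_policy: str) -> bool:
    # http-only (or an unset policy) always allows plaintext to the origin;
    # match-viewer only does when viewers themselves may use plain HTTP.
    if origin_policy in ("", "http-only"):
        return True
    return origin_policy == "match-viewer" and viewer_policy == "allow-all"

assert origin_traffic_unencrypted("http-only", "redirect-to-https")
assert origin_traffic_unencrypted("match-viewer", "allow-all")
assert not origin_traffic_unencrypted("match-viewer", "redirect-to-https")
assert not origin_traffic_unencrypted("https-only", "allow-all")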
@@ -0,0 +1,32 @@
{
  "Provider": "aws",
  "CheckID": "cloudfront_distributions_s3_origin_access_control",
  "CheckTitle": "Check if CloudFront distributions with S3 origin use OAC.",
  "CheckType": [
    "Data Exposure"
  ],
  "ServiceName": "cloudfront",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudfront:region:account-id:distribution/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudFrontDistribution",
  "Description": "Check if CloudFront distributions use origin access control.",
  "Risk": "Without OAC, your S3 bucket could be accessed directly, bypassing CloudFront, which could expose your content to unauthorized access. Additionally, relying on Origin Access Identity (OAI) may limit functionality and security features, making your distribution less secure and more difficult to manage.",
  "RelatedUrl": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html#migrate-from-oai-to-oac",
  "Remediation": {
    "Code": {
      "CLI": "",
      "NativeIaC": "https://docs.prowler.com/checks/aws/iam-policies/ensure-aws-cloudfromt-distribution-with-s3-have-origin-access-set-to-enabled/",
      "Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/cloudfront-controls.html#cloudfront-13",
      "Terraform": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudFront/s3-origin.html"
    },
    "Recommendation": {
      "Text": "Configure Origin Access Control (OAC) for CloudFront distributions that use an Amazon S3 origin. This will ensure that the content in your S3 bucket is accessible only through the specified CloudFront distribution, enhancing security by preventing direct access to the bucket.",
      "Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html"
    }
  },
  "Categories": [],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,35 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.cloudfront.cloudfront_client import (
    cloudfront_client,
)


class cloudfront_distributions_s3_origin_access_control(Check):
    def execute(self):
        findings = []
        for distribution in cloudfront_client.distributions.values():
            report = Check_Report_AWS(self.metadata())
            report.region = distribution.region
            report.resource_arn = distribution.arn
            report.resource_id = distribution.id
            report.resource_tags = distribution.tags

            if any(origin.s3_origin_config for origin in distribution.origins):
                s3_buckets_with_no_oac = []
                report.status = "PASS"
                report.status_extended = f"CloudFront Distribution {distribution.id} is using origin access control (OAC) for S3 origins."

                for origin in distribution.origins:
                    if (
                        origin.s3_origin_config != {}
                        and origin.origin_access_control == ""
                    ):
                        s3_buckets_with_no_oac.append(origin.id)

                if s3_buckets_with_no_oac:
                    report.status = "FAIL"
                    report.status_extended = f"CloudFront Distribution {distribution.id} is not using origin access control (OAC) in S3 origins {', '.join(s3_buckets_with_no_oac)}."

                findings.append(report)

        return findings
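The origin_access_control attribute is mapped (in the service diff below) from the origin's OriginAccessControlId; a legacy OAI setup instead appears under S3OriginConfig.OriginAccessIdentity. A hedged sketch distinguishing the two on a raw origin dict, with field names following the CloudFront API:

def classify_s3_origin_access(origin: dict) -> str:
    # An empty or missing value means the mechanism is not configured.
    if origin.get("OriginAccessControlId"):
        return "OAC"
    if origin.get("S3OriginConfig", {}).get("OriginAccessIdentity"):
        return "OAI (legacy)"
    return "none"

assert classify_s3_origin_access({"OriginAccessControlId": "E2ABCDEF"}) == "OAC"
assert classify_s3_origin_access(
    {"S3OriginConfig": {"OriginAccessIdentity": "origin-access-identity/cloudfront/E1XYZ"}}
) == "OAI (legacy)"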
@@ -0,0 +1,34 @@
{
  "Provider": "aws",
  "CheckID": "cloudfront_distributions_s3_origin_non_existent_bucket",
  "CheckTitle": "CloudFront distributions should not point to non-existent S3 origins.",
  "CheckType": [
    "Software and Configuration Checks/Industry and Regulatory Standards/NIST 800-53 Controls"
  ],
  "ServiceName": "cloudfront",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudfront:region:account-id:distribution/resource-id",
  "Severity": "high",
  "ResourceType": "AwsCloudFrontDistribution",
  "Description": "This control checks whether Amazon CloudFront distributions are pointing to non-existent Amazon S3 origins. The control fails if the origin is configured to point to a non-existent bucket.",
  "Risk": "Pointing a CloudFront distribution to a non-existent S3 bucket can allow malicious actors to create the bucket and potentially serve unauthorized content through your distribution, leading to security and integrity issues.",
  "RelatedUrl": "https://docs.aws.amazon.com/whitepapers/latest/secure-content-delivery-amazon-cloudfront/s3-origin-with-cloudfront.html",
  "Remediation": {
    "Code": {
      "CLI": "",
      "NativeIaC": "",
      "Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/cloudfront-controls.html#cloudfront-12",
      "Terraform": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudFront/cloudfront-existing-s3-bucket.html"
    },
    "Recommendation": {
      "Text": "Verify that all CloudFront distributions are configured to point to valid, existing S3 buckets. Update the origin settings as needed to ensure that your distributions are linked to appropriate and secure origins.",
      "Url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/HowToUpdateDistribution.html"
    }
  },
  "Categories": [
    "trustboundaries"
  ],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,32 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.cloudfront.cloudfront_client import (
    cloudfront_client,
)
from prowler.providers.aws.services.s3.s3_client import s3_client


class cloudfront_distributions_s3_origin_non_existent_bucket(Check):
    def execute(self):
        findings = []
        for distribution in cloudfront_client.distributions.values():
            report = Check_Report_AWS(self.metadata())
            report.region = distribution.region
            report.resource_arn = distribution.arn
            report.resource_id = distribution.id
            report.resource_tags = distribution.tags
            report.status = "PASS"
            report.status_extended = f"CloudFront Distribution {distribution.id} does not have non-existent S3 buckets as origins."
            non_existent_buckets = []

            for origin in distribution.origins:
                bucket_name = origin.domain_name.split(".")[0]
                if not s3_client._head_bucket(bucket_name):
                    non_existent_buckets.append(bucket_name)

            if non_existent_buckets:
                report.status = "FAIL"
                report.status_extended = f"CloudFront Distribution {distribution.id} has non-existent S3 buckets as origins: {','.join(non_existent_buckets)}."

            findings.append(report)

        return findings
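_head_bucket is a Prowler-internal helper whose implementation is not shown in this diff. A hedged sketch of the kind of existence probe it presumably wraps, built on the public S3 HeadBucket API:

import boto3
from botocore.exceptions import ClientError

s3 = boto3.client("s3")

def bucket_exists(bucket_name: str) -> bool:
    # HeadBucket returns 200 for reachable buckets and raises a
    # ClientError with a "404" code when the bucket does not exist.
    try:
        s3.head_bucket(Bucket=bucket_name)
        return True
    except ClientError as error:
        if error.response["Error"]["Code"] == "404":
            return False
        # 403 and similar mean the bucket exists but is not accessible.
        return True

Note also that bucket_name = origin.domain_name.split(".")[0] assumes bucket names without dots; a bucket such as logs.example.com would be truncated to "logs" before the probe.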
@@ -8,7 +8,6 @@ from prowler.lib.scan_filters.scan_filters import is_resource_filtered
from prowler.providers.aws.lib.service.service import AWSService


################## CloudFront
class CloudFront(AWSService):
    def __init__(self, provider):
        # Call AWSService's __init__
@@ -30,6 +29,24 @@ class CloudFront(AWSService):
                    ):
                        distribution_id = item["Id"]
                        distribution_arn = item["ARN"]
                        origin_groups = item.get("OriginGroups", {}).get(
                            "Items", []
                        )
                        origin_failover = all(
                            origin_group.get("Members", {}).get("Quantity", 0) >= 2
                            for origin_group in origin_groups
                        )
                        default_certificate = item["ViewerCertificate"][
                            "CloudFrontDefaultCertificate"
                        ]
                        certificate = item["ViewerCertificate"].get(
                            "Certificate", ""
                        )
                        ssl_support_method = SSLSupportMethod(
                            item["ViewerCertificate"].get(
                                "SSLSupportMethod", "static-ip"
                            )
                        )
                        origins = []
                        for origin in item.get("Origins", {}).get("Items", []):
                            origins.append(
@@ -44,6 +61,12 @@ class CloudFront(AWSService):
                                )
                                .get("OriginSslProtocols", {})
                                .get("Items", []),
                                origin_access_control=origin.get(
                                    "OriginAccessControlId", ""
                                ),
                                s3_origin_config=origin.get(
                                    "S3OriginConfig", {}
                                ),
                            )
                        )
                        distribution = Distribution(
@@ -51,6 +74,10 @@ class CloudFront(AWSService):
                            id=distribution_id,
                            origins=origins,
                            region=region,
                            origin_failover=origin_failover,
                            ssl_support_method=ssl_support_method,
                            default_certificate=default_certificate,
                            certificate=certificate,
                        )
                        self.distributions[distribution_id] = distribution
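One subtlety worth flagging in the origin_failover derivation above: Python's all() over an empty iterable is vacuously True, so a distribution with no origin groups at all comes out as origin_failover=True, which the failover check would then report as PASS. Whether that is intended or guarded elsewhere is not visible in this diff; a small demonstration plus a hedged guarded variant:

origin_groups = []  # a distribution with no OriginGroups at all

# all() over an empty iterable is True:
assert all(g.get("Members", {}).get("Quantity", 0) >= 2 for g in origin_groups)

# A variant that only passes when at least one qualifying group exists:
origin_failover = bool(origin_groups) and all(
    g.get("Members", {}).get("Quantity", 0) >= 2 for g in origin_groups
)
assert origin_failover is False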
@@ -64,6 +91,7 @@ class CloudFront(AWSService):
        try:
            for distribution_id in distributions.keys():
                distribution_config = client.get_distribution_config(Id=distribution_id)

                # Global Config
                distributions[distribution_id].logging_enabled = distribution_config[
                    "DistributionConfig"
@@ -78,6 +106,16 @@ class CloudFront(AWSService):
                distributions[distribution_id].web_acl_id = distribution_config[
                    "DistributionConfig"
                ]["WebACLId"]
                distributions[distribution_id].default_root_object = (
                    distribution_config["DistributionConfig"].get(
                        "DefaultRootObject", ""
                    )
                )
                distributions[distribution_id].viewer_protocol_policy = (
                    distribution_config["DistributionConfig"][
                        "DefaultCacheBehavior"
                    ].get("ViewerProtocolPolicy", "")
                )

                # Default Cache Config
                default_cache_config = DefaultCacheConfigBehaviour(
@@ -139,6 +177,14 @@ class GeoRestrictionType(Enum):
    whitelist = "whitelist"


class SSLSupportMethod(Enum):
    """Methods CloudFront can use to serve HTTPS requests to viewers"""

    static_ip = "static-ip"
    sni_only = "sni-only"
    vip = "vip"


class DefaultCacheConfigBehaviour(BaseModel):
    realtime_log_config_arn: Optional[str]
    viewer_protocol_policy: ViewerProtocolPolicy
@@ -150,6 +196,8 @@ class Origin(BaseModel):
    domain_name: str
    origin_protocol_policy: str
    origin_ssl_protocols: list[str]
    origin_access_control: Optional[str]
    s3_origin_config: Optional[dict]


class Distribution(BaseModel):
@@ -163,4 +211,10 @@ class Distribution(BaseModel):
    geo_restriction_type: Optional[GeoRestrictionType]
    origins: list[Origin]
    web_acl_id: str = ""
    default_certificate: Optional[bool]
    default_root_object: Optional[str]
    viewer_protocol_policy: Optional[str]
    tags: Optional[list] = []
    origin_failover: Optional[bool]
    ssl_support_method: Optional[SSLSupportMethod]
    certificate: Optional[str]
@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
  "Severity": "low",
  "ResourceType": "AwsS3Bucket",
  "ResourceType": "AwsCloudTrailTrail",
  "Description": "Ensure that all your AWS CloudTrail trails are configured to log Data events in order to record S3 object-level API operations, such as GetObject, DeleteObject and PutObject, for individual S3 buckets or for all current and future S3 buckets provisioned in your AWS account.",
  "Risk": "If logs are not enabled, monitoring of service use and threat analysis is not possible.",
  "RelatedUrl": "",
@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
  "Severity": "low",
  "ResourceType": "AwsS3Bucket",
  "ResourceType": "AwsCloudTrailTrail",
  "Description": "Ensure that all your AWS CloudTrail trails are configured to log Data events in order to record S3 object-level API operations, such as GetObject, DeleteObject and PutObject, for individual S3 buckets or for all current and future S3 buckets provisioned in your AWS account.",
  "Risk": "If logs are not enabled, monitoring of service use and threat analysis is not possible.",
  "RelatedUrl": "",
@@ -39,14 +39,27 @@ class cloudtrail_threat_detection_enumeration(Check):
                minutes=threat_detection_minutes,
            ):
                event_log = json.loads(event_log["CloudTrailEvent"])
                if ".amazonaws.com" not in event_log["sourceIPAddress"]:
                    if event_log["sourceIPAddress"] not in potential_enumeration:
                        potential_enumeration[event_log["sourceIPAddress"]] = set()
                    potential_enumeration[event_log["sourceIPAddress"]].add(
                        event_name
                    )
        for source_ip, actions in potential_enumeration.items():
            ip_threshold = round(len(actions) / len(enumeration_actions), 2)
                if (
                    event_log["userIdentity"]["arn"],
                    event_log["userIdentity"]["type"],
                ) not in potential_enumeration:
                    potential_enumeration[
                        (
                            event_log["userIdentity"]["arn"],
                            event_log["userIdentity"]["type"],
                        )
                    ] = set()
                potential_enumeration[
                    (
                        event_log["userIdentity"]["arn"],
                        event_log["userIdentity"]["type"],
                    )
                ].add(event_name)

        for aws_identity, actions in potential_enumeration.items():
            identity_threshold = round(len(actions) / len(enumeration_actions), 2)
            aws_identity_type = aws_identity[1]
            aws_identity_arn = aws_identity[0]
            if len(actions) / len(enumeration_actions) > threshold:
                found_potential_enumeration = True
                report = Check_Report_AWS(self.metadata())
@@ -56,7 +69,7 @@ class cloudtrail_threat_detection_enumeration(Check):
                    cloudtrail_client.region
                )
                report.status = "FAIL"
                report.status_extended = f"Potential enumeration attack detected from source IP {source_ip} with a threshold of {ip_threshold}."
                report.status_extended = f"Potential enumeration attack detected from AWS {aws_identity_type} {aws_identity_arn.split('/')[-1]} with a threshold of {identity_threshold}."
                findings.append(report)
        if not found_potential_enumeration:
            report = Check_Report_AWS(self.metadata())
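The net effect of this change: events are no longer bucketed by source IP but by the calling identity's (arn, type) pair, and an identity is flagged once it has touched more than the configured fraction of the watched enumeration actions. A hedged standalone restatement of that aggregation (the action names and threshold below are illustrative, not Prowler's actual lists):

from collections import defaultdict

enumeration_actions = {"ListBuckets", "ListUsers", "ListRoles", "GetCallerIdentity"}
threshold = 0.5  # illustrative; Prowler reads this from its audit config

events = [
    {"userIdentity": {"arn": "arn:aws:iam::123456789012:user/alice", "type": "IAMUser"},
     "eventName": "ListBuckets"},
    {"userIdentity": {"arn": "arn:aws:iam::123456789012:user/alice", "type": "IAMUser"},
     "eventName": "ListUsers"},
    {"userIdentity": {"arn": "arn:aws:iam::123456789012:user/alice", "type": "IAMUser"},
     "eventName": "ListRoles"},
]

# Bucket the watched actions by (arn, type), as the new code does.
seen = defaultdict(set)
for event in events:
    identity = (event["userIdentity"]["arn"], event["userIdentity"]["type"])
    seen[identity].add(event["eventName"])

for (arn, identity_type), actions in seen.items():
    if len(actions) / len(enumeration_actions) > threshold:
        print(f"Potential enumeration by {identity_type} {arn.split('/')[-1]}")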
@@ -40,19 +40,28 @@ class cloudtrail_threat_detection_privilege_escalation(Check):
                minutes=threat_detection_minutes,
            ):
                event_log = json.loads(event_log["CloudTrailEvent"])
                if ".amazonaws.com" not in event_log["sourceIPAddress"]:
                    if (
                        event_log["sourceIPAddress"]
                        not in potential_privilege_escalation
                    ):
                        potential_privilege_escalation[
                            event_log["sourceIPAddress"]
                        ] = set()
                    potential_privilege_escalation[
                        event_log["sourceIPAddress"]
                    ].add(event_name)
        for source_ip, actions in potential_privilege_escalation.items():
            ip_threshold = round(len(actions) / len(privilege_escalation_actions), 2)
                if (
                    event_log["userIdentity"]["arn"],
                    event_log["userIdentity"]["type"],
                ) not in potential_privilege_escalation:
                    potential_privilege_escalation[
                        (
                            event_log["userIdentity"]["arn"],
                            event_log["userIdentity"]["type"],
                        )
                    ] = set()
                potential_privilege_escalation[
                    (
                        event_log["userIdentity"]["arn"],
                        event_log["userIdentity"]["type"],
                    )
                ].add(event_name)
        for aws_identity, actions in potential_privilege_escalation.items():
            identity_threshold = round(
                len(actions) / len(privilege_escalation_actions), 2
            )
            aws_identity_type = aws_identity[1]
            aws_identity_arn = aws_identity[0]
            if len(actions) / len(privilege_escalation_actions) > threshold:
                found_potential_privilege_escalation = True
                report = Check_Report_AWS(self.metadata())
@@ -62,7 +71,7 @@ class cloudtrail_threat_detection_privilege_escalation(Check):
                    cloudtrail_client.region
                )
                report.status = "FAIL"
                report.status_extended = f"Potential privilege escalation attack detected from source IP {source_ip} with a threshold of {ip_threshold}."
                report.status_extended = f"Potential privilege escalation attack detected from AWS {aws_identity_type} {aws_identity_arn.split('/')[-1]} with a threshold of {identity_threshold}."
                findings.append(report)
        if not found_potential_privilege_escalation:
            report = Check_Report_AWS(self.metadata())
@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailTrail",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for changes to Network Access Control Lists (NACL).",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailTrail",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for changes to network gateways.",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailTrail",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs, or an external Security information and event management (SIEM) environment, and establishing corresponding metric filters and alarms. Routing tables are used to route network traffic between subnets and to network gateways. It is recommended that a metric filter and alarm be established for changes to route tables.",
  "Risk": "CloudWatch is an AWS native service that allows you to observe and monitor resources and applications. CloudTrail Logs can also be sent to an external Security information and event management (SIEM) environment for monitoring and alerting. Monitoring changes to route tables will help ensure that all VPC traffic flows through an expected path and prevent any accidental or intentional modifications that may lead to uncontrolled network traffic. An alarm should be triggered every time an AWS API call is performed to create, replace, delete, or disassociate a Route Table.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",
@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailTrail",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for VPC changes.",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudWatch",
  "ResourceType": "AwsAccount",
  "Description": "Check if CloudWatch has allowed cross-account sharing.",
  "Risk": "Cross-Account access to CloudWatch could increase the risk of compromising information between accounts.",
  "RelatedUrl": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Cross-Account-Cross-Region.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "logs",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsLogsLogGroup",
  "ResourceType": "Other",
  "Description": "Check if CloudWatch log groups are protected by AWS KMS.",
  "Risk": "Using a customer managed KMS key to encrypt a CloudWatch log group provides additional confidentiality and control over the log data.",
  "RelatedUrl": "https://docs.aws.amazon.com/cli/latest/reference/logs/associate-kms-key.html",

@@ -10,7 +10,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:log-group/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailLogGroup",
  "ResourceType": "Other",
  "Description": "Check if secrets exist in CloudWatch logs.",
  "Risk": "Storing sensitive data in CloudWatch logs could allow an attacker with read-only access to escalate their privileges or gain unauthorised access to systems.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailTrail",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for AWS Config configuration changes.",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",
@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailTrail",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for CloudTrail configuration changes.",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailTrail",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for AWS Management Console authentication failures.",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailTrail",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for AWS Organizations changes.",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailTrail",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for disabling or scheduled deletion of customer created KMS CMKs.",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailTrail",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for S3 bucket policy changes.",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",
@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailTrail",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for IAM policy changes.",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailTrail",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for usage of root account.",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailTrail",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for security group changes.",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailTrail",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for Management Console sign-in without MFA.",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",

@@ -9,7 +9,7 @@
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "Severity": "medium",
  "ResourceType": "AwsCloudTrailTrail",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for unauthorized API calls.",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",
@@ -19,6 +19,7 @@ class codebuild_project_no_secrets_in_variables(Check):
            report.region = project.region
            report.resource_id = project.name
            report.resource_arn = project.arn
            report.resource_tags = project.tags
            report.status = "PASS"
            report.status_extended = f"CodeBuild project {project.name} does not have sensitive environment plaintext credentials."
            secrets_found = []

@@ -12,6 +12,7 @@ class codebuild_project_older_90_days(Check):
            report.region = project.region
            report.resource_id = project.name
            report.resource_arn = project.arn
            report.resource_tags = project.tags
            report.status = "PASS"
            report.status_extended = f"CodeBuild project {project.name} has been invoked in the last 90 days."
            if project.last_invoked_time:
Some files were not shown because too many files have changed in this diff.