chore(merge): add new changes from v3 (#3549)

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
Co-authored-by: Nacho Rivera <nachor1992@gmail.com>
Co-authored-by: Rubén De la Torre Vico <rubendltv22@gmail.com>
Co-authored-by: Pedro Martín <pedromarting3@gmail.com>
Co-authored-by: Hugo966 <148140670+Hugo966@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Hugo Gálvez Ureña <hugogalvezu96@gmail.com>
Co-authored-by: github-actions <noreply@github.com>
Sergio Garcia
2024-03-19 15:54:41 +01:00
committed by GitHub
parent 35043c2dd6
commit 3d59c34ec9
111 changed files with 5285 additions and 957 deletions

View File

@@ -11,7 +11,7 @@ jobs:
with:
fetch-depth: 0
- name: TruffleHog OSS
uses: trufflesecurity/trufflehog@v3.68.4
uses: trufflesecurity/trufflehog@v3.69.0
with:
path: ./
base: ${{ github.event.repository.default_branch }}

View File

@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 Netflix, Inc.
Copyright @ 2024 Toni de la Fuente
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -47,9 +47,9 @@ It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, Fe
| Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/misc/#categories) |
|---|---|---|---|---|
| AWS | 302 | 61 -> `prowler aws --list-services` | 27 -> `prowler aws --list-compliance` | 6 -> `prowler aws --list-categories` |
| GCP | 73 | 11 -> `prowler gcp --list-services` | 1 -> `prowler gcp --list-compliance` | 2 -> `prowler gcp --list-categories`|
| Azure | 91 | 14 -> `prowler azure --list-services` | CIS soon | 2 -> `prowler azure --list-categories` |
| AWS | 304 | 61 -> `prowler aws --list-services` | 28 -> `prowler aws --list-compliance` | 6 -> `prowler aws --list-categories` |
| GCP | 75 | 11 -> `prowler gcp --list-services` | 1 -> `prowler gcp --list-compliance` | 2 -> `prowler gcp --list-categories`|
| Azure | 109 | 16 -> `prowler azure --list-services` | CIS soon | 2 -> `prowler azure --list-categories` |
| Kubernetes | 83 | 7 -> `prowler kubernetes --list-services` | CIS soon | 7 -> `prowler kubernetes --list-categories` |
# 📖 Documentation

View File

@@ -36,6 +36,7 @@ Currently, the available frameworks are:
- `fedramp_low_revision_4_aws`
- `fedramp_moderate_revision_4_aws`
- `ffiec_aws`
- `aws_foundational_technical_review_aws`
- `gdpr_aws`
- `gxp_21_cfr_part_11_aws`
- `gxp_eu_annex_11_aws`
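The newly added framework is executed like any other entry in this list; for example, against AWS (a minimal invocation, assuming the usual `--compliance` flag):
```console
prowler aws --compliance aws_foundational_technical_review_aws
```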

View File

@@ -164,5 +164,8 @@ azure:
# GCP Configuration
gcp:
# GCP Compute Configuration
# gcp.compute_public_address_shodan
shodan_api_key: null
```
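Based on the `gcp` block shown above (and the matching addition to Prowler's `config.yaml` later in this change), the Shodan API key for the new GCP check can presumably also be kept in the audit configuration file rather than passed on the command line; a minimal sketch with a placeholder value:
```yaml
# Sketch only: mirrors the gcp block added in this change, with the null
# default replaced by a placeholder API key.
gcp:
  # gcp.compute_public_address_shodan
  shodan_api_key: "<your_shodan_api_key>"
```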

View File

@@ -62,8 +62,9 @@ prowler <provider> --categories internet-exposed
### Shodan
Prowler allows you check if any elastic ip in your AWS Account is exposed in Shodan with `-N`/`--shodan <shodan_api_key>` option:
Prowler allows you to check whether any public IPs in your cloud environments are exposed in Shodan with the `-N`/`--shodan <shodan_api_key>` option:
For example, you can check if any of your AWS EC2 instances has an Elastic IP exposed in Shodan:
```console
prowler aws -N/--shodan <shodan_api_key> -c ec2_elastic_ip_shodan
```
@@ -71,3 +72,7 @@ Also, you can check if any of your Azure Subscription has an public IP exposed i
```console
prowler azure -N/--shodan <shodan_api_key> -c network_public_ip_shodan
```
And finally, you can check if any of your GCP projects has a public IP address exposed in Shodan:
```console
prowler gcp -N/--shodan <shodan_api_key> -c compute_public_address_shodan
```

poetry.lock generated
View File

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
[[package]]
name = "about-time"
@@ -599,13 +599,13 @@ isodate = ">=0.6.1,<1.0.0"
[[package]]
name = "azure-storage-blob"
version = "12.19.0"
version = "12.19.1"
description = "Microsoft Azure Blob Storage Client Library for Python"
optional = false
python-versions = ">=3.7"
files = [
{file = "azure-storage-blob-12.19.0.tar.gz", hash = "sha256:26c0a4320a34a3c2a1b74528ba6812ebcb632a04cd67b1c7377232c4b01a5897"},
{file = "azure_storage_blob-12.19.0-py3-none-any.whl", hash = "sha256:7bbc2c9c16678f7a420367fef6b172ba8730a7e66df7f4d7a55d5b3c8216615b"},
{file = "azure-storage-blob-12.19.1.tar.gz", hash = "sha256:13e16ba42fc54ac2c7e8f976062173a5c82b9ec0594728e134aac372965a11b0"},
{file = "azure_storage_blob-12.19.1-py3-none-any.whl", hash = "sha256:c5530dc51c21c9564e4eb706cd499befca8819b10dd89716d3fc90d747556243"},
]
[package.dependencies]
@@ -633,13 +633,13 @@ dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
[[package]]
name = "bandit"
version = "1.7.7"
version = "1.7.8"
description = "Security oriented static analyser for python code."
optional = false
python-versions = ">=3.8"
files = [
{file = "bandit-1.7.7-py3-none-any.whl", hash = "sha256:17e60786a7ea3c9ec84569fd5aee09936d116cb0cb43151023258340dbffb7ed"},
{file = "bandit-1.7.7.tar.gz", hash = "sha256:527906bec6088cb499aae31bc962864b4e77569e9d529ee51df3a93b4b8ab28a"},
{file = "bandit-1.7.8-py3-none-any.whl", hash = "sha256:509f7af645bc0cd8fd4587abc1a038fc795636671ee8204d502b933aee44f381"},
{file = "bandit-1.7.8.tar.gz", hash = "sha256:36de50f720856ab24a24dbaa5fee2c66050ed97c1477e0a1159deab1775eab6b"},
]
[package.dependencies]
@@ -650,6 +650,7 @@ stevedore = ">=1.20.0"
[package.extras]
baseline = ["GitPython (>=3.1.30)"]
sarif = ["jschema-to-python (>=1.2.3)", "sarif-om (>=1.0.4)"]
test = ["beautifulsoup4 (>=4.8.0)", "coverage (>=4.5.4)", "fixtures (>=3.0.0)", "flake8 (>=4.0.0)", "pylint (==1.9.4)", "stestr (>=2.5.0)", "testscenarios (>=0.5.0)", "testtools (>=2.3.0)"]
toml = ["tomli (>=1.1.0)"]
yaml = ["PyYAML"]
@@ -1480,13 +1481,13 @@ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
[[package]]
name = "google-api-python-client"
version = "2.120.0"
version = "2.122.0"
description = "Google API Client Library for Python"
optional = false
python-versions = ">=3.7"
files = [
{file = "google-api-python-client-2.120.0.tar.gz", hash = "sha256:a0c8769cad9576768bcb3191cb1f550f6ab3290cba042badb0fb17bba03f70cc"},
{file = "google_api_python_client-2.120.0-py2.py3-none-any.whl", hash = "sha256:e2cdf4497bfc758fb44a4b487920cc1ca0571c2428187697a8e43e3b9feba1c9"},
{file = "google-api-python-client-2.122.0.tar.gz", hash = "sha256:77447bf2d6b6ea9e686fd66fc2f12ee7a63e3889b7427676429ebf09fcb5dcf9"},
{file = "google_api_python_client-2.122.0-py2.py3-none-any.whl", hash = "sha256:a5953e60394b77b98bcc7ff7c4971ed784b3b693e9a569c176eaccb1549330f2"},
]
[package.dependencies]
@@ -3182,8 +3183,8 @@ astroid = ">=3.1.0,<=3.2.0-dev0"
colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""}
dill = [
{version = ">=0.2", markers = "python_version < \"3.11\""},
{version = ">=0.3.6", markers = "python_version >= \"3.11\""},
{version = ">=0.3.7", markers = "python_version >= \"3.12\""},
{version = ">=0.3.6", markers = "python_version >= \"3.11\" and python_version < \"3.12\""},
]
isort = ">=4.2.5,<5.13.0 || >5.13.0,<6"
mccabe = ">=0.6,<0.8"
@@ -3230,13 +3231,13 @@ diagrams = ["jinja2", "railroad-diagrams"]
[[package]]
name = "pytest"
version = "8.0.2"
version = "8.1.1"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest-8.0.2-py3-none-any.whl", hash = "sha256:edfaaef32ce5172d5466b5127b42e0d6d35ebbe4453f0e3505d96afd93f6b096"},
{file = "pytest-8.0.2.tar.gz", hash = "sha256:d4051d623a2e0b7e51960ba963193b09ce6daeb9759a451844a21e4ddedfc1bd"},
{file = "pytest-8.1.1-py3-none-any.whl", hash = "sha256:2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7"},
{file = "pytest-8.1.1.tar.gz", hash = "sha256:ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044"},
]
[package.dependencies]
@@ -3244,11 +3245,11 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""}
exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
iniconfig = "*"
packaging = "*"
pluggy = ">=1.3.0,<2.0"
tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
pluggy = ">=1.4,<2.0"
tomli = {version = ">=1", markers = "python_version < \"3.11\""}
[package.extras]
testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
testing = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
[[package]]
name = "pytest-cov"
@@ -4505,4 +4506,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<3.13"
content-hash = "2bbb6bfb3c931d8d98d4cb5c1d5642073de26932c0bc73a188fe77c922707d0e"
content-hash = "3a7cce322dff828722630cff1f3160cee8a24b2426a3fa7ca2bcabca3acbbca6"

View File

@@ -0,0 +1,693 @@
{
"Framework": "AWS-Foundational-Technical-Review",
"Version": "",
"Provider": "AWS",
"Description": "The AWS Foundational Technical Review (FTR) assesses an AWS Partner's solution against a specific set of Amazon Web Services (AWS) best practices around security, performance, and operational processes that are most critical for customer success. Passing the FTR is required to qualify AWS Software Partners for AWS Partner Network (APN) programs such as AWS Competency and AWS Service Ready but any AWS Partner who offers a technology solution may request a FTR review through AWS Partner Central.",
"Requirements": [
{
"Id": "HOST-001",
"Name": "Confirm your hosting model",
"Description": "To use this FTR checklist you must host all critical application components on AWS. You may use external providers for edge services such as content delivery networks (CDNs) or domain name system (DNS), or corporate identity providers. If you are using any edge services outside AWS, please specify them in the self-assessment.",
"Attributes": [
{
"Section": "Partner-hosted FTR requirements",
"Subsection": "Hosting",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "SUP-001",
"Name": "Subscribe to the AWS Business Support tier (or higher) for all production AWS accounts or have an action plan to handle issues which require help from AWS Support",
"Description": "It is recommended that you subscribe to the AWS Business Support tier or higher (including AWS Partner-Led Support) for all of your AWS production accounts. For more information, refer to Compare AWS Support Plans. If you don't have premium support, you must have an action plan to handle issues which require help from AWS Support. AWS Support provides a mix of tools and technology, people, and programs designed to proactively help you optimize performance, lower costs, and innovate faster. AWS Business Support provides additional benefits including access to AWS Trusted Advisor and AWS Personal Health Dashboard and faster response times.",
"Attributes": [
{
"Section": "Partner-hosted FTR requirements",
"Subsection": "Support level",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "WAFR-001",
"Name": "Conduct periodic architecture reviews (minimum once every year)",
"Description": "Conduct periodic architecture reviews of your production workload (at least once per year) using a documented architectural standard that includes AWS-specific best practices. If you have an internally defined standard for your AWS workloads, we recommend you use it for these reviews. If you do not have an internal standard, we recommend you use the AWS Well-Architected Framework.",
"Attributes": [
{
"Section": "Partner-hosted FTR requirements",
"Subsection": "Architecture review",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "WAFR-002",
"Name": "Review the AWS Shared Responsibility Models for Security and Resiliency",
"Description": "Review the AWS Shared Responsibility Model for Security and the AWS Shared Responsibility Model for Resiliency. Ensure that your products architecture and operational processes address the customer responsibilities defined in these models. We recommend you to use AWS Resilience Hub to ensure your workload resiliency posture meets your targets and to provide you with operational procedures you may use to address the customer responsibilities.",
"Attributes": [
{
"Section": "Partner-hosted FTR requirements",
"Subsection": "Architecture review",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "ARC-001",
"Name": "Use root user only by exception",
"Description": "The root user has unlimited access to your account and its resources, and using it only by exception helps protect your AWS resources. The AWS root user must not be used for everyday tasks, even administrative ones. Instead, adhere to the best practice of using the root user only to create your first AWS Identity and Access Management (IAM) user. Then securely lock away the root user credentials and use them to perform only a few accounts and service management tasks. To view the tasks that require you to sign in as the root user, see AWS Tasks That Require Root User. FTR does not require you to actively monitor root usage.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "AWS root account",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "ARC-003",
"Name": "Enable multi-factor authentication (MFA) on the root user for all AWS accounts",
"Description": "Enabling MFA provides an additional layer of protection against unauthorized access to your account. To configure MFA for the root user, follow the instructions for enabling either a virtual MFA or hardware MFA device. If you are using AWS Organizations to create new accounts, the initial password for the root user is set to a random value that is never exposed to you. If you do not recover the password for the root user of these accounts, you do not need to enable MFA on them. For any accounts where you do have access to the root users password, you must enable MFA",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "AWS root account",
"Type": "Automated"
}
],
"Checks": [
"iam_root_mfa_enabled",
"iam_root_hardware_mfa_enabled"
]
},
{
"Id": "ARC-004",
"Name": "Remove access keys for the root user",
"Description": "Programmatic access to AWS APIs should never use the root user. It is best not to generate static an access key for the root user. If one already exists, you should transition any processes using that key to use temporary access keys from an AWS Identity and Access Management (IAM) role, or, if necessary, static access keys from an IAM user.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "AWS root account",
"Type": "Automated"
}
],
"Checks": [
"iam_no_root_access_key"
]
},
{
"Id": "ARC-005",
"Name": "Develop incident management plans",
"Description": "An incident management plan is critical to respond, mitigate, and recover from the potential impact of security incidents. An incident management plan is a structured process for identifying, remediating, and responding in a timely matter to security incidents. An effective incident management plan must be continually iterated upon, remaining current with your cloud operations goal. For more information on developing incident management plan please see Develop incident management plans.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "AWS root account",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "ACOM-001",
"Name": "Configure AWS account contacts",
"Description": "If an account is not managed by AWS Organizations, alternate account contacts help AWS get in contact with the appropriate personnel if needed. Configure the accounts alternate contacts to point to a group rather than an individual. For example, create separate email distribution lists for billing, operations, and security and configure these as Billing, Security, and Operations contacts in each active AWS account. This ensures that multiple people will receive AWS notifications and be able to respond, even if someone is on vacation, changes roles, or leaves the company.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Communications from AWS",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "ACOM-002",
"Name": "Set account contact information including the root user email address to email addresses and phone numbers owned by your company",
"Description": "Using company owned email addresses and phone numbers for contact information enables you to access them even if the individuals whom they belong to are no longer with your organization",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Communications from AWS",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "IAM-001",
"Name": "Enable multi-factor authentication (MFA) for all Human Identities with AWS access",
"Description": "You must require any human identities to authenticate using MFA before accessing your AWS accounts. Typically, this means enabling MFA within your corporate identity provider. If you have existing legacy IAM users you must enable MFA for console access for those principals as well. Enabling MFA for IAM users provides an additional layer of security. With MFA, users have a device that generates a unique authentication code (a one-time password, or OTP). Users must provide both their normal credentials (user name and password) and the OTP. The MFA device can either be a special piece of hardware, or it can be a virtual device (for example, it can run in an app on a smartphone). Please note that machine identities do not require MFA.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Identity and Access Management",
"Type": "Automated"
}
],
"Checks": [
"iam_root_mfa_enabled",
"iam_root_hardware_mfa_enabled",
"iam_user_hardware_mfa_enabled",
"iam_user_mfa_enabled_console_access",
"iam_administrator_access_with_mfa"
]
},
{
"Id": "IAM-002",
"Name": "Monitor and secure static AWS Identity and Access Management (IAM) credentials",
"Description": "Use temporary IAM credentials retrieved by assuming a role whenever possible. In cases where it is infeasible to use IAM roles, implement the following controls to reduce the risk these credentials are misused: Rotate IAM access keys regularly (recommended at least every 90 days). Maintain an inventory of all static keys and where they are used and remove unused access keys. Implement monitoring of AWS CloudTrail logs to detect anomalous activity or other potential misuse (e.g. using AWS GuardDuty.) Define a runbook or SOP for revoking credentials in the event you detect misuse.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Identity and Access Management",
"Type": "Automated"
}
],
"Checks": [
"iam_rotate_access_key_90_days",
"iam_user_accesskey_unused",
"iam_user_with_temporary_credentials",
"guardduty_is_enabled",
"guardduty_no_high_severity_findings"
]
},
{
"Id": "IAM-003",
"Name": "Use strong password policy",
"Description": "Enforce a strong password policy, and educate users to avoid common or re-used passwords. For IAM users, you can create a password policy for your account on the Account Settings page of the IAM console. You can use the password policy to define password requirements, such as minimum length and whether it requires non-alphabetic characters, and so on. For more information, see Setting an Account Password Policy for IAM users.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Identity and Access Management",
"Type": "Automated"
}
],
"Checks": [
"iam_password_policy_expires_passwords_within_90_days_or_less",
"iam_password_policy_lowercase",
"iam_password_policy_minimum_length_14",
"iam_password_policy_number",
"iam_password_policy_reuse_24",
"iam_password_policy_symbol",
"iam_password_policy_uppercase"
]
},
{
"Id": "IAM-004",
"Name": "Create individual identities (no shared credentials) for anyone who needs AWS access",
"Description": "Create individual entities and give unique security credentials and permissions to each user accessing your account. With individual entities and no shared credentials, you can audit the activity of each user.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Identity and Access Management",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "IAM-005",
"Name": "Use IAM roles and its temporary security credentials to provide access to third parties.",
"Description": "Do not provision IAM users and share those credentials with people outside of your organization. Any external services that need to make AWS API calls against your account (for example, a monitoring solution that accesses your account's AWS CloudWatch metrics) must use a cross-account role. For more information, refer to Providing access to AWS accounts owned by third parties.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Identity and Access Management",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "IAM-006",
"Name": "Grant least privilege access",
"Description": "You must follow the standard security advice of granting least privilege. Grant only the access that identities require by allowing access to specific actions on specific AWS resources under specific conditions. Rely on groups and identity attributes to dynamically set permissions at scale, rather than defining permissions for individual users. For example, you can allow a group of developers access to manage only resources for their project. This way, when a developer is removed from the group, access for the developer is revoked everywhere that group was used for access control, without requiring any changes to the access policies.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Identity and Access Management",
"Type": "Automated"
}
],
"Checks": [
"iam_policy_attached_only_to_group_or_roles"
]
},
{
"Id": "IAM-007",
"Name": "Manage access based on life cycle",
"Description": "Integrate access controls with operator and application lifecycle and your centralized federation provider and IAM. For example, remove a users access when they leave the organization or change roles.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Identity and Access Management",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "IAM-008",
"Name": "Audit identities quarterly",
"Description": "Auditing the identities that are configured in your identity provider and IAM helps ensure that only authorized identities have access to your workload. For example, remove people that leave the organization, and remove cross-account roles that are no longer required. Have a process in place to periodically audit permissions to the services accessed by an IAM entity. This helps you identify the policies you needto modify to remove any unused permissions. For more information, see Refining permissions in AWS using last accessed information.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Identity and Access Management",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "IAM-009",
"Name": "Do not embed credentials in application code",
"Description": "Ensure that all credentials used by your applications (for example, IAM access keys and database passwords) are never included in your application's source code or committed to source control in any way.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Identity and Access Management",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "IAM-0010",
"Name": "Store secrets securely.",
"Description": "Encrypt all secrets in transit and at rest, define fine-grained access controls that only allow access to specific identities, and log access to secrets in an audit log. We recommend you use a purpose-built secret management service such as AWS Secrets Manager, AWS Systems Manager Parameter Store, or an AWS Partner solution, but internally developed solutions that meet these requirements are also acceptable.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Identity and Access Management",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "IAM-0011",
"Name": "Encrypt all end user/customer credentials and hash passwords at rest.",
"Description": "If you are storing end user/customer credentials in a database that you manage, encrypt credentials at rest and hash passwords. As an alternative, AWS recommends using a user-identity synchronization service, such as Amazon Cognito or an equivalent AWS Partner solution.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Identity and Access Management",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "IAM-0012",
"Name": "Use temporary credentials",
"Description": "Use temporary security credentials to access AWS resources. For machine identities within AWS (for example, Amazon Elastic Compute Cloud (Amazon EC2) instances or AWS Lambda functions), always use IAM roles to acquire temporary security credentials. For machine identities running outside of AWS, use IAM Roles Anywhere or securely store static AWS access keys that are only used to assume an IAM role.For human identities, use AWS IAM Identity Center or other identity federation solutions where possible. If you must use static AWS access keys for human users, require MFA for all access, including the AWS Management Console, and AWS Command Line Interface (AWS CLI).",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Identity and Access Management",
"Type": "Automated"
}
],
"Checks": [
"iam_rotate_access_key_90_days",
"iam_user_accesskey_unused",
"iam_user_with_temporary_credentials",
"iam_policy_attached_only_to_group_or_roles",
"iam_role_administratoraccess_policy",
"iam_role_cross_account_readonlyaccess_policy",
"iam_role_cross_service_confused_deputy_prevention",
"iam_root_hardware_mfa_enabled",
"iam_root_mfa_enabled",
"iam_root_hardware_mfa_enabled",
"iam_user_hardware_mfa_enabled",
"iam_user_mfa_enabled_console_access",
"iam_administrator_access_with_mfa"
]
},
{
"Id": "SECOPS-001",
"Name": "Perform vulnerability management",
"Description": "Define a mechanism and frequency to scan and patch for vulnerabilities in your dependencies, and in your operating systems to help protect against new threats. Scan and patch your dependencies, and your operating systems on a defined schedule. Software vulnerability management is essential to keeping your system secure from threat actors. Embedding vulnerability assessments early into your continuous integration/continuous delivery (CI/CD) pipeline allows you to prioritize remediation of any security vulnerabilities detected. The solution you need to achieve this varies according to the AWS services that you are consuming. To check for vulnerabilities in software running in Amazon EC2 instances, you can add Amazon Inspector to your pipeline to cause your build to fail if Inspector detects vulnerabilities. You can also use open source products such as OWASP Dependency-Check, Snyk, OpenVAS, package managers and AWS Partner tools for vulnerability management.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Operational security",
"Type": "Automated"
}
],
"Checks": [
"inspector2_is_enabled",
"inspector2_active_findings_exist",
"accessanalyzer_enabled_without_findings",
"guardduty_no_high_severity_findings"
]
},
{
"Id": "NETSEC-001",
"Name": "Implement the least permissive rules for all Amazon EC2 security groups",
"Description": "All Amazon EC2 security groups should restrict access to the greatest degree possible. At a minimum, do the following: Ensure that no security groups allow ingress from 0.0.0.0/0 to port 22 or 3389 (CIS 5.2) Ensure that the default security group of every VPC restricts all traffic (CIS 5.3/Security Control EC2.2)",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Network Security",
"Type": "Automated"
}
],
"Checks": [
"ec2_ami_public",
"ec2_instance_public_ip",
"ec2_securitygroup_allow_ingress_from_internet_to_any_port",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23",
"ec2_securitygroup_allow_wide_open_public_ipv4",
"ec2_securitygroup_default_restrict_traffic",
"ec2_securitygroup_not_used",
"ec2_securitygroup_with_many_ingress_egress_rules"
]
},
{
"Id": "NETSEC-002",
"Name": "Restrict resources in public subnets",
"Description": "Do not place resources in public subnets of your VPC unless they must receive network traffic from public sources. Public subnets are subnets associated with a route table that has a route to an internet gateway.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Network Security",
"Type": "Automated"
}
],
"Checks": [
"vpc_subnet_no_public_ip_by_default",
"vpc_subnet_separate_private_public",
"vpc_endpoint_connections_trust_boundaries",
"vpc_endpoint_services_allowed_principals_trust_boundaries",
"workspaces_vpc_2private_1public_subnets_nat"
]
},
{
"Id": "BAR-001",
"Name": "Configure automatic data backups",
"Description": "You must perform regular backups to a durable storage service. Backups ensure that you have the ability to recover from administrative, logical, or physical error scenarios. Configure backups to be taken automatically based on a periodic schedule, or by changes in the dataset. RDS instances, EBS volumes, DynamoDB tables, and S3 objects can all be configured for automatic backup. AWS Backup, AWS Marketplace solutions or third-party solutions can also be used. If objects in S3 bucket are write-once-read-many (WORM), compensating controls such as object lock can be used meet this requirement. If it is customers responsibility to backup their data, it must be clearly stated in the documentation and the Partner must provide clear instructions on how to backup the data.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Backups and recovery",
"Type": "Automated"
}
],
"Checks": [
"backup_plans_exist",
"backup_reportplans_exist",
"backup_vaults_encrypted",
"backup_vaults_exist",
"efs_have_backup_enabled",
"rds_instance_backup_enabled"
]
},
{
"Id": "BAR-002",
"Name": "Periodically recover data to verify the integrity of your backup process",
"Description": "To confirm that your backup process meets your recovery time objectives (RTO) and recovery point objectives (RPO), run a recovery test on a regular schedule and after making significant changes to your cloud environment. For more information, refer to Getting Started - Backup and Restore with AWS.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Backups and recovery",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "RES-001",
"Name": "Define a Recovery Point Objective (RPO)",
"Description": "To confirm that your backup process meets your recovery time objectives (RTO) and recovery point objectives (RPO), run a recovery test on a regular schedule and after making significant changes to your cloud environment. For more information, refer to Getting Started - Backup and Restore with AWS.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Resiliency",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "RES-002",
"Name": "Establish a Recovery Time Objective (RTO)",
"Description": "Define an RTO that meets your organizations needs and expectations. RTO is the maximum acceptable delay your organization will accept between the interruption and restoration of service.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Resiliency",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "RES-004",
"Name": "Resiliency Testing",
"Description": "Test resiliency to ensure that RTO and RPO are met, both periodically (minimum every 12 months) and after major updates. The resiliency test must include accidental data loss, instance failures, and Availability Zone (AZ) failures. At least one resilience test that meets RTO and RPO requirements must be completed prior to FTR approval. You can use AWS Resilience Hub to test and verify your workloads to see if it meets its resilience target. AWS Resilience Hub works with AWS Fault Injection Service (AWS FIS) , a chaos engineering service, to provide fault-injection simulations of real-world failures to validate the application recovers within the resilience targets you defined. AWS Resilience Hub also provides API operations for you to integrate its resilience assessment and testing into your CI/CD pipelines for ongoing resilience validation. Including resilience validation in CI/CD pipelines helps make sure that changes to the workloads underlying infrastructure don't compromise resilience.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Resiliency",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "RES-005",
"Name": "Communicate customer responsibilities for resilience",
"Description": "Clearly define your customers responsibility for backup, recovery, and availability. At a minimum, your product documentation or customer agreements should cover the following: Responsibility the customer has for backing up the data stored in your solution. Instructions for backing up data or configuring optional features in your product for data protection, if applicable. Options customers have for configuring the availability of your product.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Resiliency",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "RES-006",
"Name": "Architect your product to meet availability targets and uptime service level agreements (SLAs)",
"Description": "If you publish or privately agree to availability targets or uptime SLAs, ensure that your architecture and operational processes are designed to support them. Additionally, provide clear guidance to customers on any configuration required to achieve the targets or SLAs.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Resiliency",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "RES-007",
"Name": "Define a customer communication plan for outages",
"Description": "Establish a plan for communicating information about system outages to your customers both during and after incidents. Your communication should not include any data that was provided by AWS under a non-disclosure agreement (NDA).",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Resiliency",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "S3-001",
"Name": "Review all Amazon S3 buckets to determine appropriate access levels",
"Description": "You must ensure that buckets that require public access have been reviewed to determine if public read or write access is needed and if appropriate controls are in place to control public access. When assigning access permissions, follow the principle of least privilege, an AWS best practice. For more information, refer to overview of managing access.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Amazon S3 bucket access",
"Type": "Automated"
}
],
"Checks": [
"s3_bucket_acl_prohibited",
"s3_bucket_default_encryption",
"s3_bucket_kms_encryption",
"s3_bucket_level_public_access_block",
"s3_bucket_object_lock",
"s3_bucket_policy_public_write_access",
"s3_bucket_public_access",
"s3_bucket_public_list_acl",
"s3_bucket_public_write_acl",
"s3_bucket_secure_transport_policy",
"s3_bucket_server_access_logging_enabled"
]
},
{
"Id": "CAA-001",
"Name": "Use cross-account roles to access customer AWS accounts",
"Description": "Cross-account roles reduce the amount of sensitive information AWS Partners need to store for their customers.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Cross-account access",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "CAA-007",
"Name": "Provide guidance or an automated setup mechanism (for example, an AWS CloudFormation template) for creating cross-account roles with the minimum required privileges",
"Description": "The policy created for cross-account access in customer accounts must follow the principle of least privilege. The AWS Partner must provide a role-policy document or an automated setup mechanism (for example, an AWS CloudFormation template) for the customers to use to ensure that the roles are created with minimum required privileges. For more information, refer to the AWS Partner Network (APN) blog posts.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Cross-account access",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "CAA-002",
"Name": "Use an external ID with cross-account roles to access customer accounts",
"Description": "An external ID allows the user that is assuming the role to assert the circumstances in which they are operating. It also provides a way for the account owner to permit the role to be assumed only under specific circumstances. The primary function of the external ID is to address and prevent the confused deputy problem.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Cross-account access",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "CAA-004",
"Name": "Use a value you generate (not something provided by the customer) for the external ID",
"Description": "When configuring cross-account access using IAM roles, you must use a value you generate for the external ID, instead of one provided by the customer, to ensure the integrity of the cross-account role configuration. A partner-generated external ID ensures that malicious parties cannot impersonate a customer's configuration and enforces uniqueness and format consistency across all customers. If you are not generating an external ID today we recommend implementing a process that generates a random unique value (such as a Universally Unique Identifier) for the external ID that a customer uses to set up a cross-account role.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Cross-account access",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "CAA-005",
"Name": "Ensure that all external IDs are unique.",
"Description": "The external IDs used must be unique across all customers. Re-using external IDs for different customers does not solve the confused deputy problem and runs the risk of customer A being able to view data of customer B by using the role ARN and the external ID of customer B. To resolve this, we recommend implementing a process that ensures a random unique value, such as a Universally Unique Identifier, is generated for the external ID that a customer would use to setup a cross account role.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Cross-account access",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "CAA-006",
"Name": "Provide read-only access to external ID to customers",
"Description": "Customers must not be able to set or influence external IDs. When the external ID is editable, it is possible for one customer to impersonate the configuration of another. For example, when the external ID is editable, customer A can create a cross account role setup using customer Bs role ARN and external ID, granting customer A access to customer Bs data. Remediation of this item involves making the external ID a view-only field, ensuring that the external ID cannot be changed to impersonate the setup of another customer.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Cross-account access",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "CAA-003",
"Name": "Deprecate any historical use of customer-provided IAM credentials",
"Description": "If your application provides legacy support for the use of static IAM credentials for cross-account access, the application's user interface and customer documentation must make it clear that this method is deprecated. Existing customers should be encouraged to switch to cross-account role based-access, and collection of credentials should be disabled for new customers.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Cross-account access",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "SDAT-001",
"Name": "Identify sensitive data (for example, Personally Identifiable Information (PII) and Protected Health Information (PHI))",
"Description": "Data classification enables you to determine which data needs to be protected and how. Based on the workload and the data it processes, identify the data that is not common public knowledge.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Sensitive data",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "SDAT-002",
"Name": "Encrypt all sensitive data at rest",
"Description": "Encryption maintains the confidentiality of sensitive data even when it gets stolen or the network through which it is transmitted becomes compromised.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Sensitive data",
"Type": "Automated"
}
],
"Checks": [
"sns_topics_kms_encryption_at_rest_enabled",
"athena_workgroup_encryption",
"cloudtrail_kms_encryption_enabled",
"dynamodb_accelerator_cluster_encryption_enabled",
"dynamodb_tables_kms_cmk_encryption_enabled",
"efs_encryption_at_rest_enabled",
"opensearch_service_domains_encryption_at_rest_enabled"
]
},
{
"Id": "SDAT-003",
"Name": "Only use protocols with encryption when transmitting sensitive data outside of your VPC",
"Description": "Encryption maintains data confidentiality even when the network through which it is transmitted becomes compromised.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Sensitive data",
"Type": "Manual"
}
],
"Checks": []
},
{
"Id": "RCVP-001",
"Name": "Establish a process to ensure that all required compliance standards are met",
"Description": "If you advertise that your product meets specific compliance standards, you must have an internal process for ensuring compliance. Examples of compliance standards include Payment Card Industry Data Security Standard (PCI DSS) PCI DSS, Federal Risk and Authorization Management Program (FedRAMP)FedRAMP, and U.S. Health Insurance Portability and Accountability Act (HIPAA)HIPAA. Applicable compliance standards are determined by various factors, such as what types of data the solution stores or transmits and which geographic regions the solution supports.",
"Attributes": [
{
"Section": "Architectural and Operational Controls",
"Subsection": "Regulatory compliance validation process",
"Type": "Manual"
}
],
"Checks": []
}
]
}
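As a quick illustration of the structure added above, the sketch below loads the framework file and prints the automated requirements together with the Prowler checks that back them. The filename is a guess; only the JSON shape (Requirements, Attributes, Checks) comes from the diff.
```python
import json

# Hypothetical filename; the diff adds the framework under Prowler's compliance
# directory, but the exact path is not visible here.
FRAMEWORK_FILE = "aws_foundational_technical_review_aws.json"

with open(FRAMEWORK_FILE) as f:
    framework = json.load(f)

# Print the automated requirements and the checks that implement them.
for requirement in framework["Requirements"]:
    if any(attr.get("Type") == "Automated" for attr in requirement["Attributes"]):
        print(f"{requirement['Id']}: {requirement['Name']}")
        for check in requirement["Checks"]:
            print(f"  - {check}")
```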

View File

@@ -12,7 +12,7 @@ from prowler.providers.common.common import get_global_provider
timestamp = datetime.today()
timestamp_utc = datetime.now(timezone.utc).replace(tzinfo=timezone.utc)
prowler_version = "3.14.0"
prowler_version = "4.0.0"
square_logo_img = "https://user-images.githubusercontent.com/38561120/235905862-9ece5bd7-9aa3-4e48-807a-3a9035eb8bfb.png"
aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"
azure_logo = "https://user-images.githubusercontent.com/38561120/235927375-b23e2e0f-8932-49ec-b59c-d89f61c8041d.png"

View File

@@ -104,6 +104,9 @@ azure:
# GCP Configuration
gcp:
# GCP Compute Configuration
# gcp.compute_public_address_shodan
shodan_api_key: null
# Kubernetes Configuration
kubernetes:

View File

@@ -40,15 +40,16 @@ def generate_provider_output(provider, finding, csv_data) -> FindingOutput:
csv_data["auth_method"] = (
f"{provider.identity.identity_type}: {provider.identity.identity_id}"
)
csv_data["account_uid"] = provider.identity.subscriptions[
finding.subscription
]
csv_data["account_name"] = finding.subscription
# Get the first tenant domain ID, just in case
csv_data["account_organization_uid"] = csv_data["account_organization_uid"][
0
]
csv_data["account_uid"] = (
csv_data["account_organization_uid"]
if "Tenant:" in finding.subscription
else provider.identity.subscriptions[finding.subscription]
)
csv_data["account_name"] = finding.subscription
csv_data["resource_name"] = finding.resource_name
csv_data["resource_uid"] = finding.resource_id
# TODO: pending to get location from Azure resources (finding.location)
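A standalone sketch of the account selection logic introduced above, with plain values standing in for the provider identity and finding objects (the names here are illustrative, not the real classes):
```python
# Sketch: tenant-scoped findings carry "Tenant: <domain>" instead of a
# subscription display name, so the tenant/organization UID is used as the
# account UID; otherwise the subscription ID is looked up as before.
def resolve_account_uid(subscriptions, organization_uid, finding_subscription):
    if "Tenant:" in finding_subscription:
        return organization_uid
    return subscriptions[finding_subscription]

subscriptions = {"My Subscription": "00000000-0000-0000-0000-000000000000"}
print(resolve_account_uid(subscriptions, "tenant-uid", "Tenant: contoso.onmicrosoft.com"))  # tenant-uid
print(resolve_account_uid(subscriptions, "tenant-uid", "My Subscription"))  # subscription ID
```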

View File

@@ -868,6 +868,7 @@
"ap-southeast-3",
"ap-southeast-4",
"ca-central-1",
"ca-west-1",
"eu-central-1",
"eu-central-2",
"eu-north-1",
@@ -1101,6 +1102,7 @@
"ap-southeast-3",
"ap-southeast-4",
"ca-central-1",
"ca-west-1",
"eu-central-1",
"eu-central-2",
"eu-north-1",
@@ -2262,12 +2264,14 @@
"ap-southeast-3",
"ca-central-1",
"eu-central-1",
"eu-central-2",
"eu-north-1",
"eu-south-1",
"eu-west-1",
"eu-west-2",
"eu-west-3",
"il-central-1",
"me-central-1",
"me-south-1",
"sa-east-1",
"us-east-1",
@@ -2294,12 +2298,14 @@
"ap-southeast-3",
"ca-central-1",
"eu-central-1",
"eu-central-2",
"eu-north-1",
"eu-south-1",
"eu-west-1",
"eu-west-2",
"eu-west-3",
"il-central-1",
"me-central-1",
"me-south-1",
"sa-east-1",
"us-east-1",
@@ -2328,12 +2334,14 @@
"ap-southeast-3",
"ca-central-1",
"eu-central-1",
"eu-central-2",
"eu-north-1",
"eu-south-1",
"eu-west-1",
"eu-west-2",
"eu-west-3",
"il-central-1",
"me-central-1",
"me-south-1",
"sa-east-1",
"us-east-1",
@@ -3803,6 +3811,7 @@
"eu-central-1",
"eu-north-1",
"eu-south-1",
"eu-south-2",
"eu-west-1",
"eu-west-2",
"eu-west-3",
@@ -4454,8 +4463,10 @@
"ap-southeast-3",
"ca-central-1",
"eu-central-1",
"eu-central-2",
"eu-north-1",
"eu-south-1",
"eu-south-2",
"eu-west-1",
"eu-west-2",
"eu-west-3",
@@ -4468,8 +4479,14 @@
"us-west-1",
"us-west-2"
],
"aws-cn": [],
"aws-us-gov": []
"aws-cn": [
"cn-north-1",
"cn-northwest-1"
],
"aws-us-gov": [
"us-gov-east-1",
"us-gov-west-1"
]
}
},
"fsx-windows": {
@@ -4739,6 +4756,7 @@
"ap-southeast-3",
"ap-southeast-4",
"ca-central-1",
"ca-west-1",
"eu-central-1",
"eu-central-2",
"eu-north-1",
@@ -4845,6 +4863,7 @@
"ap-southeast-1",
"ap-southeast-2",
"ap-southeast-3",
"ap-southeast-4",
"ca-central-1",
"eu-central-1",
"eu-central-2",
@@ -4881,6 +4900,7 @@
"ap-northeast-2",
"ap-northeast-3",
"ap-south-1",
"ap-south-2",
"ap-southeast-1",
"ap-southeast-2",
"ap-southeast-3",
@@ -4890,6 +4910,7 @@
"eu-central-2",
"eu-north-1",
"eu-south-1",
"eu-south-2",
"eu-west-1",
"eu-west-2",
"eu-west-3",
@@ -5672,6 +5693,7 @@
"ap-southeast-3",
"ap-southeast-4",
"ca-central-1",
"ca-west-1",
"eu-central-1",
"eu-central-2",
"eu-north-1",
@@ -7059,6 +7081,7 @@
"ap-southeast-1",
"eu-central-1",
"eu-west-1",
"eu-west-2",
"us-east-1",
"us-east-2",
"us-west-2"
@@ -9417,7 +9440,10 @@
"cn-north-1",
"cn-northwest-1"
],
"aws-us-gov": []
"aws-us-gov": [
"us-gov-east-1",
"us-gov-west-1"
]
}
},
"simspaceweaver": {
@@ -9784,6 +9810,7 @@
"ap-southeast-1",
"ap-southeast-2",
"ap-southeast-3",
"ap-southeast-4",
"ca-central-1",
"eu-central-1",
"eu-central-2",
@@ -9939,17 +9966,6 @@
]
}
},
"supplychain": {
"regions": {
"aws": [
"eu-central-1",
"us-east-1",
"us-west-2"
],
"aws-cn": [],
"aws-us-gov": []
}
},
"support": {
"regions": {
"aws": [
@@ -10711,19 +10727,6 @@
]
}
},
"wam": {
"regions": {
"aws": [
"ap-southeast-1",
"ap-southeast-2",
"eu-west-1",
"us-east-1",
"us-west-2"
],
"aws-cn": [],
"aws-us-gov": []
}
},
"wellarchitected": {
"regions": {
"aws": [

View File

@@ -21,7 +21,7 @@
"Terraform": "https://docs.bridgecrew.io/docs/monitoring_13#fix---buildtime"
},
"Recommendation": {
"Text": "If you are using CloudTrails and CloudWatch, perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for route table changes and the <cloudtrail_log_group_name> taken from audit step 1. aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> -- filter-name `<route_table_changes_metric>` --metric-transformations metricName= `<route_table_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }' Note: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify aws sns create-topic --name <sns_topic_name> Note: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> - -notification-endpoint <sns_subscription_endpoints> Note: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 aws cloudwatch put-metric-alarm --alarm-name `<route_table_changes_alarm>` --metric-name `<route_table_changes_metric>` --statistic Sum --period 300 - -threshold 1 --comparison-operator GreaterThanOrEqualToThreshold -- evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>",
"Text": "If you are using CloudTrails and CloudWatch, perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for route table changes and the <cloudtrail_log_group_name> taken from audit step 1. aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> -- filter-name `<route_table_changes_metric>` --metric-transformations metricName= `<route_table_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventSource = ec2.amazonaws.com) && (($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable)) }' Note: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify aws sns create-topic --name <sns_topic_name> Note: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> - -notification-endpoint <sns_subscription_endpoints> Note: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 aws cloudwatch put-metric-alarm --alarm-name `<route_table_changes_alarm>` --metric-name `<route_table_changes_metric>` --statistic Sum --period 300 - -threshold 1 --comparison-operator GreaterThanOrEqualToThreshold -- evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>",
"Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html"
}
},
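For readability, the remediation commands packed into the updated string above can be laid out as individual CLI steps (angle-bracket placeholders are taken verbatim from the original text):
```console
# 1. Metric filter for route table changes (note the added $.eventSource condition)
aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> \
  --filter-name <route_table_changes_metric> \
  --metric-transformations metricName=<route_table_changes_metric>,metricNamespace='CISBenchmark',metricValue=1 \
  --filter-pattern '{($.eventSource = ec2.amazonaws.com) && (($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable)) }'

# 2. SNS topic that the alarm will notify
aws sns create-topic --name <sns_topic_name>

# 3. Subscription to the topic created in step 2
aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> \
  --notification-endpoint <sns_subscription_endpoints>

# 4. Alarm wired to the metric filter and the SNS topic
aws cloudwatch put-metric-alarm --alarm-name <route_table_changes_alarm> \
  --metric-name <route_table_changes_metric> --statistic Sum --period 300 \
  --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold \
  --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>
```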

View File

@@ -796,9 +796,9 @@ class IAM(AWSService):
response = self.client.get_service_last_accessed_details(
JobId=details["JobId"]
)
self.last_accessed_services[(user.name, user.arn)] = response[
"ServicesLastAccessed"
]
self.last_accessed_services[(user.name, user.arn)] = response.get(
"ServicesLastAccessed", {}
)
except ClientError as error:
if error.response["Error"]["Code"] == "NoSuchEntity":

View File

@@ -7,12 +7,12 @@ class iam_user_mfa_enabled_console_access(Check):
findings = []
response = iam_client.credential_report
for user in response:
report = Check_Report_AWS(self.metadata())
report.resource_id = user["user"]
report.resource_arn = user["arn"]
report.region = iam_client.region
# all the users but root (which by default does not support console password)
if user["password_enabled"] != "not_supported":
if user["user"] != "<root_account>":
report = Check_Report_AWS(self.metadata())
report.resource_id = user["user"]
report.resource_arn = user["arn"]
report.region = iam_client.region
# check if the user has password enabled
if user["password_enabled"] == "true":
if user["mfa_active"] == "false":
@@ -26,12 +26,6 @@ class iam_user_mfa_enabled_console_access(Check):
report.status_extended = (
f"User {user['user']} does not have Console Password enabled."
)
# root user
else:
report.status = "PASS"
report.status_extended = (
f"User {user['user']} does not have Console Password enabled."
)
findings.append(report)
findings.append(report)
return findings

View File

@@ -298,7 +298,8 @@ Azure Identity Type: {Fore.YELLOW}[{self._identity.identity_type}]{Style.RESET_A
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
)
asyncio.run(get_azure_identity())
asyncio.get_event_loop().run_until_complete(get_azure_identity())
# Managed identities can only be assigned resource, resource group and subscription scope permissions
elif managed_entity_auth:
identity.identity_id = "Default Managed Identity ID"

View File

@@ -9,7 +9,7 @@ class AzureService:
provider: AzureProvider,
):
self.clients = self.__set_clients__(
provider.identity.subscriptions,
provider.identity,
provider.session,
service,
provider.region_config,
@@ -19,20 +19,23 @@ class AzureService:
self.locations = provider.locations
self.audit_config = provider.audit_config
def __set_clients__(self, subscriptions, session, service, region_config):
def __set_clients__(self, identity, session, service, region_config):
clients = {}
try:
for display_name, id in subscriptions.items():
clients.update(
{
display_name: service(
credential=session,
subscription_id=id,
base_url=region_config.base_url,
credential_scopes=region_config.credential_scopes,
)
}
)
if "GraphServiceClient" in str(service):
clients.update({identity.tenant_domain: service(credentials=session)})
else:
for display_name, id in identity.subscriptions.items():
clients.update(
{
display_name: service(
credential=session,
subscription_id=id,
base_url=region_config.base_url,
credential_scopes=region_config.credential_scopes,
)
}
)
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"

View File

@@ -1,7 +1,11 @@
from datetime import timedelta
from typing import Dict
from azure.core.exceptions import HttpResponseError
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceNotFoundError,
)
from azure.mgmt.security import SecurityCenter
from pydantic import BaseModel
@@ -49,6 +53,11 @@ class Defender(AzureService):
)
}
)
except ResourceNotFoundError as error:
if "Subscription Not Registered" in error.message:
logger.error(
f"Subscription name: {subscription_name} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: Subscription Not Registered - Please register to Microsoft.Security in order to view your security status"
)
except Exception as error:
logger.error(
f"Subscription name: {subscription_name} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -73,10 +82,14 @@ class Defender(AzureService):
)
}
)
except ClientAuthenticationError as error:
if "Subscription Not Registered" in error.message:
logger.error(
f"Subscription name: {subscription_name} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: Subscription Not Registered - Please register to Microsoft.Security in order to view your security status"
)
except Exception as error:
logger.error(f"Subscription name: {subscription_name}")
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
f"Subscription name: {subscription_name} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
return auto_provisioning
@@ -123,6 +136,11 @@ class Defender(AzureService):
)
}
)
except ClientAuthenticationError as error:
if "Subscription Not Registered" in error.message:
logger.error(
f"Subscription name: {subscription_name} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: Subscription Not Registered - Please register to Microsoft.Security in order to view your security status"
)
except Exception as error:
logger.error(
f"Subscription name: {subscription_name} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"

View File

@@ -0,0 +1,4 @@
from prowler.providers.azure.services.entra.entra_service import Entra
from prowler.providers.common.common import get_global_provider
entra_client = Entra(get_global_provider())

View File

@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "entra_policy_ensure_default_user_cannot_create_apps",
"CheckTitle": "Ensure That 'Users Can Register Applications' Is Set to 'No'",
"CheckType": [],
"ServiceName": "entra",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "#microsoft.graph.authorizationPolicy",
"Description": "Require administrators or appropriately delegated users to register third-party applications.",
"Risk": "It is recommended to only allow an administrator to register custom-developed applications. This ensures that the application undergoes a formal security review and approval process prior to exposing Azure Active Directory data. Certain users like developers or other high-request users may also be delegated permissions to prevent them from waiting on an administrative user. Your organization should review your policies and decide your needs.",
"RelatedUrl": "https://learn.microsoft.com/en-us/entra/identity-platform/how-applications-are-added#who-has-permission-to-add-applications-to-my-azure-ad-instance",
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "",
"Other": "",
"Terraform": ""
},
"Recommendation": {
"Text": "1. From Azure Home select the Portal Menu 2. Select Azure Active Directory 3. Select Users 4. Select User settings 5. Ensure that Users can register applications is set to No",
"Url": "https://learn.microsoft.com/en-us/entra/identity/role-based-access-control/delegate-app-roles#restrict-who-can-create-applications"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "Enforcing this setting will create additional requests for approval that will need to be addressed by an administrator. If permissions are delegated, a user may approve a malevolent third party application, potentially giving it access to your data."
}

View File

@@ -0,0 +1,28 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.entra.entra_client import entra_client
class entra_policy_ensure_default_user_cannot_create_apps(Check):
def execute(self) -> Check_Report_Azure:
findings = []
for tenant_domain, auth_policy in entra_client.authorization_policy.items():
report = Check_Report_Azure(self.metadata())
report.status = "FAIL"
report.subscription = f"Tenant: '{tenant_domain}'"
report.resource_name = auth_policy.name
report.resource_id = auth_policy.id
report.status_extended = "App creation is not disabled for non-admin users."
if auth_policy.default_user_role_permissions and not getattr(
auth_policy.default_user_role_permissions,
"allowed_to_create_apps",
True,
):
report.status = "PASS"
report.status_extended = "App creation is disabled for non-admin users."
findings.append(report)
return findings

View File

@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "entra_policy_ensure_default_user_cannot_create_tenants",
"CheckTitle": "Ensure that 'Restrict non-admin users from creating tenants' is set to 'Yes'",
"CheckType": [],
"ServiceName": "entra",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "#microsoft.graph.authorizationPolicy",
"Description": "Require administrators or appropriately delegated users to create new tenants.",
"Risk": "It is recommended to only allow an administrator to create new tenants. This prevent users from creating new Azure AD or Azure AD B2C tenants and ensures that only authorized users are able to do so.",
"RelatedUrl": "https://learn.microsoft.com/en-us/entra/fundamentals/users-default-permissions",
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "",
"Other": "",
"Terraform": ""
},
"Recommendation": {
"Text": "1. From Azure Home select the Portal Menu 2. Select Azure Active Directory 3. Select Users 4. Select User settings 5. Set 'Restrict non-admin users from creating' tenants to 'Yes'",
"Url": "https://learn.microsoft.com/en-us/entra/identity/role-based-access-control/permissions-reference#tenant-creator"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "Enforcing this setting will ensure that only authorized users are able to create new tenants."
}

View File

@@ -0,0 +1,32 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.entra.entra_client import entra_client
class entra_policy_ensure_default_user_cannot_create_tenants(Check):
def execute(self) -> Check_Report_Azure:
findings = []
for tenant_domain, auth_policy in entra_client.authorization_policy.items():
report = Check_Report_Azure(self.metadata())
report.status = "FAIL"
report.subscription = f"Tenant: '{tenant_domain}'"
report.resource_name = auth_policy.name
report.resource_id = auth_policy.id
report.status_extended = (
"Tenants creation is not disabled for non-admin users."
)
if auth_policy.default_user_role_permissions and not getattr(
auth_policy.default_user_role_permissions,
"allowed_to_create_tenants",
True,
):
report.status = "PASS"
report.status_extended = (
"Tenants creation is disabled for non-admin users."
)
findings.append(report)
return findings

View File

@@ -0,0 +1,81 @@
import asyncio
from dataclasses import dataclass
from typing import Optional
from msgraph import GraphServiceClient
from msgraph.generated.models.default_user_role_permissions import (
DefaultUserRolePermissions,
)
from pydantic import BaseModel
from prowler.lib.logger import logger
from prowler.providers.azure.azure_provider import AzureProvider
from prowler.providers.azure.lib.service.service import AzureService
########################## Entra
class Entra(AzureService):
def __init__(self, provider: AzureProvider):
super().__init__(GraphServiceClient, provider)
self.users = asyncio.get_event_loop().run_until_complete(self.__get_users__())
self.authorization_policy = asyncio.get_event_loop().run_until_complete(
self.__get_authorization_policy__()
)
async def __get_users__(self):
try:
users = {}
for tenant, client in self.clients.items():
users_list = await client.users.get()
users.update({tenant: {}})
for user in users_list.value:
users[tenant].update(
{
user.user_principal_name: User(
id=user.id, name=user.display_name
)
}
)
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
return users
async def __get_authorization_policy__(self):
try:
authorization_policy = {}
for tenant, client in self.clients.items():
auth_policy = await client.policies.authorization_policy.get()
authorization_policy.update(
{
tenant: AuthorizationPolicy(
id=auth_policy.id,
name=auth_policy.display_name,
description=auth_policy.description,
default_user_role_permissions=getattr(
auth_policy, "default_user_role_permissions", None
),
)
}
)
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
return authorization_policy
class User(BaseModel):
id: str
name: str
@dataclass
class AuthorizationPolicy:
id: str
name: str
description: str
default_user_role_permissions: Optional[DefaultUserRolePermissions]

View File
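
The `entra_policy_*` checks above treat a missing `default_user_role_permissions` object, or a missing flag on it, as a failure, which is why they combine a truthiness test with `getattr(..., flag, True)`. A small self-contained sketch of that evaluation; the permissions class here is a stand-in for msgraph's DefaultUserRolePermissions:

from dataclasses import dataclass


@dataclass
class FakeDefaultUserRolePermissions:
    # Stand-in for msgraph's DefaultUserRolePermissions fields.
    allowed_to_create_apps: bool = True
    allowed_to_create_tenants: bool = True


def non_admins_blocked(default_user_role_permissions, flag: str) -> bool:
    # Same pattern as the checks: anything missing defaults to True,
    # i.e. "non-admin users are NOT blocked", which yields a FAIL.
    return bool(default_user_role_permissions) and not getattr(
        default_user_role_permissions, flag, True
    )


perms = FakeDefaultUserRolePermissions(allowed_to_create_apps=False)
print(non_admins_blocked(perms, "allowed_to_create_apps"))     # True  -> PASS
print(non_admins_blocked(perms, "allowed_to_create_tenants"))  # False -> FAIL
print(non_admins_blocked(None, "allowed_to_create_apps"))      # False -> FAIL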

@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "keyvault_logging_enabled",
"CheckTitle": "Ensure that logging for Azure Key Vault is 'Enabled'",
"CheckType": [],
"ServiceName": "keyvault",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "KeyVault",
"Description": "Enable AuditEvent logging for key vault instances to ensure interactions with key vaults are logged and available.",
"Risk": "Monitoring how and when key vaults are accessed, and by whom, enables an audit trail of interactions with confidential information, keys, and certificates managed by Azure Keyvault. Enabling logging for Key Vault saves information in an Azure storage account which the user provides. This creates a new container named insights-logs-auditevent automatically for the specified storage account. This same storage account can be used for collecting logs for multiple key vaults.",
"RelatedUrl": "https://docs.microsoft.com/en-us/azure/key-vault/key-vault-logging",
"Remediation": {
"Code": {
"CLI": "az monitor diagnostic-settings create --name <diagnostic settings name> --resource <key vault resource ID> --logs'[{category:AuditEvents,enabled:true,retention-policy:{enabled:true,days:180}}]' --metrics'[{category:AllMetrics,enabled:true,retention-policy:{enabled:true,days:180}}]' <[--event-hub <event hub ID> --event-hub-rule <event hub auth rule ID> | --storage-account <storage account ID> |--workspace <log analytics workspace ID> | --marketplace-partner-id <full resource ID of third-party solution>]>",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/KeyVault/enable-audit-event-logging-for-azure-key-vaults.html",
"Terraform": ""
},
"Recommendation": {
"Text": "1. Go to Key vaults 2. For each Key vault 3. Go to Diagnostic settings 4. Click on Edit Settings 5. Ensure that Archive to a storage account is Enabled 6. Ensure that AuditEvent is checked, and the retention days is set to 180 days or as appropriate",
"Url": "https://docs.microsoft.com/en-us/security/benchmark/azure/security-controls-v3-data-protection#dp-8-ensure-security-of-key-and-certificate-repository"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, Diagnostic AuditEvent logging is not enabled for Key Vault instances."
}

View File

@@ -0,0 +1,43 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.keyvault.keyvault_client import keyvault_client
class keyvault_logging_enabled(Check):
def execute(self) -> Check_Report_Azure:
findings = []
for subscription, key_vaults in keyvault_client.key_vaults.items():
for keyvault in key_vaults:
keyvault_name = keyvault.name
subscription_name = subscription
if not keyvault.monitor_diagnostic_settings:
report = Check_Report_Azure(self.metadata())
report.status = "FAIL"
report.subscription = subscription_name
report.resource_name = keyvault.name
report.resource_id = keyvault.id
report.status_extended = f"There are no diagnostic settings capturing audit logs for Key Vault {keyvault_name} in subscription {subscription_name}."
findings.append(report)
else:
for diagnostic_setting in keyvault.monitor_diagnostic_settings:
report = Check_Report_Azure(self.metadata())
report.subscription = subscription_name
report.resource_name = diagnostic_setting.name
report.resource_id = diagnostic_setting.id
report.status = "FAIL"
report.status_extended = f"Diagnostic setting {diagnostic_setting.name} for Key Vault {keyvault_name} in subscription {subscription_name} does not have audit logging."
audit = False
allLogs = False
for log in diagnostic_setting.logs:
if log.category_group == "audit" and log.enabled:
audit = True
if log.category_group == "allLogs" and log.enabled:
allLogs = True
if audit and allLogs:
report.status = "PASS"
report.status_extended = f"Diagnostic setting {diagnostic_setting.name} for Key Vault {keyvault_name} in subscription {subscription_name} has audit logging."
break
findings.append(report)
return findings

View File
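
As written, a diagnostic setting only produces a PASS when both the `audit` and the `allLogs` category groups are enabled. A tiny fixture sketch of log entries that would satisfy that condition, shaped like the `LogSettings` objects the check iterates but entirely made up:

from types import SimpleNamespace

# Hypothetical log entries with the attributes the check reads.
logs = [
    SimpleNamespace(category_group="audit", enabled=True),
    SimpleNamespace(category_group="allLogs", enabled=True),
]

audit = any(log.category_group == "audit" and log.enabled for log in logs)
all_logs = any(log.category_group == "allLogs" and log.enabled for log in logs)
print("PASS" if audit and all_logs else "FAIL")  # PASS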

@@ -12,9 +12,11 @@ from azure.mgmt.keyvault.v2023_07_01.models import (
from prowler.lib.logger import logger
from prowler.providers.azure.azure_provider import AzureProvider
from prowler.providers.azure.lib.service.service import AzureService
from prowler.providers.azure.services.monitor.monitor_client import monitor_client
from prowler.providers.azure.services.monitor.monitor_service import DiagnosticSetting
########################## Storage
########################## KeyVault
class KeyVault(AzureService):
def __init__(self, provider: AzureProvider):
super().__init__(KeyVaultManagementClient, provider)
@@ -49,6 +51,9 @@ class KeyVault(AzureService):
properties=keyvault_properties,
keys=keys,
secrets=secrets,
monitor_diagnostic_settings=self.__get_vault_monitor_settings__(
keyvault_name, resource_group, subscription
),
)
)
except Exception as error:
@@ -120,6 +125,25 @@ class KeyVault(AzureService):
)
return secrets
def __get_vault_monitor_settings__(
self, keyvault_name, resource_group, subscription
):
logger.info(
f"KeyVault - Getting monitor diagnostics settings for {keyvault_name}..."
)
monitor_diagnostics_settings = []
try:
monitor_diagnostics_settings = monitor_client.diagnostic_settings_with_uri(
self.subscriptions[subscription],
f"subscriptions/{self.subscriptions[subscription]}/resourceGroups/{resource_group}/providers/Microsoft.KeyVault/vaults/{keyvault_name}",
monitor_client.clients[subscription],
)
except Exception as error:
logger.error(
f"Subscription name: {self.subscription} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
return monitor_diagnostics_settings
@dataclass
class Key:
@@ -149,3 +173,4 @@ class KeyVaultInfo:
properties: VaultProperties
keys: list[Key] = None
secrets: list[Secret] = None
monitor_diagnostic_settings: list[DiagnosticSetting] = None

View File

@@ -0,0 +1,23 @@
"""
This module contains functions related to monitoring alerts in Azure.
"""
def check_alert_rule(alert_rule, expected_equal) -> bool:
"""
Checks if an alert rule meets the specified condition.
Args:
alert_rule: An object representing the alert rule to be checked.
expected_equal: The expected value for the "operationName" field.
Returns:
A boolean value indicating whether the alert rule meets the condition.
"""
if alert_rule.enabled:
for element in alert_rule.condition.all_of:
if element.field == "operationName" and element.equals == expected_equal:
return True
return False

View File
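
For illustration, `check_alert_rule` can be exercised against stand-in objects shaped like the Monitor SDK's activity log alert rules (an `enabled` flag plus a `condition.all_of` list of field/equals pairs); everything below other than the import is invented for the example:

from types import SimpleNamespace

from prowler.providers.azure.services.monitor.lib.monitor_alerts import (
    check_alert_rule,
)

rule = SimpleNamespace(
    enabled=True,
    condition=SimpleNamespace(
        all_of=[
            SimpleNamespace(field="category", equals="Administrative"),
            SimpleNamespace(
                field="operationName",
                equals="Microsoft.Authorization/policyAssignments/write",
            ),
        ]
    ),
)

# Matches only when the rule is enabled and one condition targets the
# expected operationName.
print(check_alert_rule(rule, "Microsoft.Authorization/policyAssignments/write"))  # True
print(check_alert_rule(rule, "Microsoft.Network/networkSecurityGroups/write"))    # False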

@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_create_policy_assignment",
"CheckTitle": "Ensure that Activity Log Alert exists for Create Policy Assignment",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Description": "Create an activity log alert for the Create Policy Assignment event.",
"Risk": "Monitoring for create policy assignment events gives insight into changes done in 'Azure policy - assignments' and can reduce the time it takes to detect unsolicited changes.",
"RelatedUrl": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement",
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Authorization/policyAssignments/write and level=<verbose | information | warning | error | critical> --scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription ID> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-alert-for-create-policy-assignment-events.html#trendmicro",
"Terraform": ""
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Policy assignment (policyAssignments). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create policy assignment (Microsoft.Authorization/policyAssignments). 12. Select the Actions tab. 13. To use an existing action group, click elect action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
}

View File

@@ -0,0 +1,33 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.monitor.lib.monitor_alerts import check_alert_rule
from prowler.providers.azure.services.monitor.monitor_client import monitor_client
class monitor_alert_create_policy_assignment(Check):
def execute(self) -> Check_Report_Azure:
findings = []
for (
subscription_name,
activity_log_alerts,
) in monitor_client.alert_rules.items():
report = Check_Report_Azure(self.metadata())
report.status = "FAIL"
report.subscription = subscription_name
report.resource_name = "Monitor"
report.resource_id = "Monitor"
report.status_extended = f"There is not an alert for creating Policy Assignments in subscription {subscription_name}."
for alert_rule in activity_log_alerts:
if check_alert_rule(
alert_rule, "Microsoft.Authorization/policyAssignments/write"
):
report.status = "PASS"
report.resource_name = alert_rule.name
report.resource_id = alert_rule.id
report.subscription = subscription_name
report.status_extended = f"There is an alert configured for creating Policy Assignments in subscription {subscription_name}."
break
findings.append(report)
return findings

View File

@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_create_update_policy_assignment",
"CheckTitle": "Ensure that Activity Log Alert exists for Create or Update Network Security Group",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Description": "Create an Activity Log Alert for the Create or Update Network Security Group event.",
"Risk": "Monitoring for Create or Update Network Security Group events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Network/networkSecurityGroups/write and level=verbose --scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' --subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-update-network-security-group-rule-alert-in-use.html#trendmicro",
"Terraform": ""
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Network security groups. 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create or Update Network Security Group (Microsoft.Network/networkSecurityGroups). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
}

View File

@@ -0,0 +1,32 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.monitor.lib.monitor_alerts import check_alert_rule
from prowler.providers.azure.services.monitor.monitor_client import monitor_client
class monitor_alert_create_update_nsg(Check):
def execute(self) -> Check_Report_Azure:
findings = []
for (
subscription_name,
activity_log_alerts,
) in monitor_client.alert_rules.items():
report = Check_Report_Azure(self.metadata())
report.status = "FAIL"
report.subscription = subscription_name
report.resource_name = "Monitor"
report.resource_id = "Monitor"
report.status_extended = f"There is not an alert for creating/updating Network Security Groups in subscription {subscription_name}."
for alert_rule in activity_log_alerts:
if check_alert_rule(
alert_rule, "Microsoft.Network/networkSecurityGroups/write"
):
report.status = "PASS"
report.resource_name = alert_rule.name
report.resource_id = alert_rule.id
report.subscription = subscription_name
report.status_extended = f"There is an alert configured for creating/updating Network Security Groups in subscription {subscription_name}."
break
findings.append(report)
return findings

View File

@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_create_update_public_ip_address_rule",
"CheckTitle": "Ensure that Activity Log Alert exists for Create or Update Public IP Address rule",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Description": "Create an activity log alert for the Create or Update Public IP Addresses rule.",
"Risk": "Monitoring for Create or Update Public IP Address events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Network/publicIPAddresses/write and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-or-update-public-ip-alert.html#trendmicro",
"Terraform": ""
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Public IP addresses. 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create or Update Public Ip Address (Microsoft.Network/publicIPAddresses). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
}

View File

@@ -0,0 +1,33 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.monitor.lib.monitor_alerts import check_alert_rule
from prowler.providers.azure.services.monitor.monitor_client import monitor_client
class monitor_alert_create_update_public_ip_address_rule(Check):
def execute(self) -> Check_Report_Azure:
findings = []
for (
subscription_name,
activity_log_alerts,
) in monitor_client.alert_rules.items():
report = Check_Report_Azure(self.metadata())
report.status = "FAIL"
report.subscription = subscription_name
report.resource_name = "Monitor"
report.resource_id = "Monitor"
report.status_extended = f"There is not an alert for creating/updating Public IP address rule in subscription {subscription_name}."
for alert_rule in activity_log_alerts:
if check_alert_rule(
alert_rule, "Microsoft.Network/publicIPAddresses/write"
):
report.status = "PASS"
report.resource_name = alert_rule.name
report.resource_id = alert_rule.id
report.subscription = subscription_name
report.status_extended = f"There is an alert configured for creating/updating Public IP address rule in subscription {subscription_name}."
break
findings.append(report)
return findings

View File

@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_create_update_security_solution",
"CheckTitle": "Ensure that Activity Log Alert exists for Create or Update Security Solution",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Description": "Create an activity log alert for the Create or Update Security Solution event.",
"Risk": "Monitoring for Create or Update Security Solution events gives insight into changes to the active security solutions and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Security/securitySolutions/write and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-or-update-security-solution-alert.html#trendmicro",
"Terraform": ""
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Security Solutions (securitySolutions). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create or Update Security Solutions (Microsoft.Security/securitySolutions). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
}

View File

@@ -0,0 +1,32 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.monitor.lib.monitor_alerts import check_alert_rule
from prowler.providers.azure.services.monitor.monitor_client import monitor_client
class monitor_alert_create_update_security_solution(Check):
def execute(self) -> Check_Report_Azure:
findings = []
for (
subscription_name,
activity_log_alerts,
) in monitor_client.alert_rules.items():
report = Check_Report_Azure(self.metadata())
report.status = "FAIL"
report.subscription = subscription_name
report.resource_name = "Monitor"
report.resource_id = "Monitor"
report.status_extended = f"There is not an alert for creating/updating Security Solution in subscription {subscription_name}."
for alert_rule in activity_log_alerts:
if check_alert_rule(
alert_rule, "Microsoft.Security/securitySolutions/write"
):
report.status = "PASS"
report.resource_name = alert_rule.name
report.resource_id = alert_rule.id
report.subscription = subscription_name
report.status_extended = f"There is an alert configured for creating/updating Security Solution in subscription {subscription_name}."
break
findings.append(report)
return findings

View File

@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_create_update_sqlserver_fr",
"CheckTitle": "Ensure that Activity Log Alert exists for Create or Update SQL Server Firewall Rule",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Description": "Create an activity log alert for the Create or Update SQL Server Firewall Rule event.",
"Risk": "Monitoring for Create or Update SQL Server Firewall Rule events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Sql/servers/firewallRules/write and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-or-update-or-delete-sql-server-firewall-rule-alert.html#trendmicro",
"Terraform": ""
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Server Firewall Rule (servers/firewallRules). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create/Update server firewall rule (Microsoft.Sql/servers/firewallRules). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
}

View File

@@ -0,0 +1,32 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.monitor.lib.monitor_alerts import check_alert_rule
from prowler.providers.azure.services.monitor.monitor_client import monitor_client
class monitor_alert_create_update_sqlserver_fr(Check):
def execute(self) -> Check_Report_Azure:
findings = []
for (
subscription_name,
activity_log_alerts,
) in monitor_client.alert_rules.items():
report = Check_Report_Azure(self.metadata())
report.status = "FAIL"
report.subscription = subscription_name
report.resource_name = "Monitor"
report.resource_id = "Monitor"
report.status_extended = f"There is not an alert for creating/updating SQL Server firewall rule in subscription {subscription_name}."
for alert_rule in activity_log_alerts:
if check_alert_rule(
alert_rule, "Microsoft.Sql/servers/firewallRules/write"
):
report.status = "PASS"
report.resource_name = alert_rule.name
report.resource_id = alert_rule.id
report.subscription = subscription_name
report.status_extended = f"There is an alert configured for creating/updating SQL Server firewall rule in subscription {subscription_name}."
break
findings.append(report)
return findings

View File

@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_delete_nsg",
"CheckTitle": "Ensure that Activity Log Alert exists for Delete Network Security Group",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Description": "Create an activity log alert for the Delete Network Security Group event.",
"Risk": "Monitoring for 'Delete Network Security Group' events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Network/networkSecurityGroups/delete and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/delete-network-security-group-rule-alert-in-use.html#trendmicro",
"Terraform": ""
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Network security groups. 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Delete Network Security Group (Microsoft.Network/networkSecurityGroups). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
}

View File

@@ -0,0 +1,35 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.monitor.lib.monitor_alerts import check_alert_rule
from prowler.providers.azure.services.monitor.monitor_client import monitor_client
class monitor_alert_delete_nsg(Check):
def execute(self) -> Check_Report_Azure:
findings = []
for (
subscription_name,
activity_log_alerts,
) in monitor_client.alert_rules.items():
report = Check_Report_Azure(self.metadata())
report.status = "FAIL"
report.subscription = subscription_name
report.resource_name = "Monitor"
report.resource_id = "Monitor"
report.status_extended = f"There is not an alert for deleting Network Security Groups in subscription {subscription_name}."
for alert_rule in activity_log_alerts:
if check_alert_rule(
alert_rule, "Microsoft.Network/networkSecurityGroups/delete"
) or check_alert_rule(
alert_rule, "Microsoft.ClassicNetwork/networkSecurityGroups/delete"
):
report.status = "PASS"
report.resource_name = alert_rule.name
report.resource_id = alert_rule.id
report.subscription = subscription_name
report.status_extended = f"There is an alert configured for deleting Network Security Groups in subscription {subscription_name}."
break
findings.append(report)
return findings

View File

@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_delete_policy_assignment",
"CheckTitle": "Ensure that Activity Log Alert exists for Delete Policy Assignment",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Description": "Create an activity log alert for the Delete Policy Assignment event.",
"Risk": "Monitoring for delete policy assignment events gives insight into changes done in 'azure policy - assignments' and can reduce the time it takes to detect unsolicited changes.",
"RelatedUrl": "https://docs.microsoft.com/en-in/rest/api/monitor/activitylogalerts/createorupdate",
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Authorization/policyAssignments/delete and level=<verbose | information | warning | error | critical> --scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/delete-policy-assignment-alert-in-use.html#trendmicro",
"Terraform": ""
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Policy assignment (policyAssignments). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Delete policy assignment (Microsoft.Authorization/policyAssignments). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
}

View File

@@ -0,0 +1,32 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.monitor.lib.monitor_alerts import check_alert_rule
from prowler.providers.azure.services.monitor.monitor_client import monitor_client
class monitor_alert_delete_policy_assignment(Check):
def execute(self) -> Check_Report_Azure:
findings = []
for (
subscription_name,
activity_log_alerts,
) in monitor_client.alert_rules.items():
report = Check_Report_Azure(self.metadata())
report.status = "FAIL"
report.subscription = subscription_name
report.resource_name = "Monitor"
report.resource_id = "Monitor"
report.status_extended = f"There is not an alert for deleting policy assignment in subscription {subscription_name}."
for alert_rule in activity_log_alerts:
if check_alert_rule(
alert_rule, "Microsoft.Authorization/policyAssignments/delete"
):
report.status = "PASS"
report.resource_name = alert_rule.name
report.resource_id = alert_rule.id
report.subscription = subscription_name
report.status_extended = f"There is an alert configured for deleting policy assignment in subscription {subscription_name}."
break
findings.append(report)
return findings

View File

@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_delete_public_ip_address_rule",
"CheckTitle": "Ensure that Activity Log Alert exists for Delete Public IP Address rule",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Description": "Create an activity log alert for the Delete Public IP Address rule.",
"Risk": "Monitoring for Delete Public IP Address events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Network/publicIPAddresses/delete and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/delete-public-ip-alert.html#trendmicro",
"Terraform": ""
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Public IP addresses. 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Delete Public Ip Address (Microsoft.Network/publicIPAddresses). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
}

View File

@@ -0,0 +1,32 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.monitor.lib.monitor_alerts import check_alert_rule
from prowler.providers.azure.services.monitor.monitor_client import monitor_client
class monitor_alert_delete_public_ip_address_rule(Check):
def execute(self) -> Check_Report_Azure:
findings = []
for (
subscription_name,
activity_log_alerts,
) in monitor_client.alert_rules.items():
report = Check_Report_Azure(self.metadata())
report.status = "FAIL"
report.subscription = subscription_name
report.resource_name = "Monitor"
report.resource_id = "Monitor"
report.status_extended = f"There is not an alert for deleting public IP address rule in subscription {subscription_name}."
for alert_rule in activity_log_alerts:
if check_alert_rule(
alert_rule, "Microsoft.Network/publicIPAddresses/delete"
):
report.status = "PASS"
report.resource_name = alert_rule.name
report.resource_id = alert_rule.id
report.subscription = subscription_name
report.status_extended = f"There is an alert configured for deleting public IP address rule in subscription {subscription_name}."
break
findings.append(report)
return findings

View File

@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_delete_security_solution",
"CheckTitle": "Ensure that Activity Log Alert exists for Delete Security Solution",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Description": "Create an activity log alert for the Delete Security Solution event.",
"Risk": "Monitoring for Delete Security Solution events gives insight into changes to the active security solutions and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Security/securitySolutions/delete and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/delete-security-solution-alert.html#trendmicro",
"Terraform": ""
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Security Solutions (securitySolutions). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Delete Security Solutions (Microsoft.Security/securitySolutions). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.curitySolutions). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create or Update Security Solutions (Microsoft.Security/securitySolutions). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
}

View File

@@ -0,0 +1,32 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.monitor.lib.monitor_alerts import check_alert_rule
from prowler.providers.azure.services.monitor.monitor_client import monitor_client
class monitor_alert_delete_security_solution(Check):
def execute(self) -> Check_Report_Azure:
findings = []
for (
subscription_name,
activity_log_alerts,
) in monitor_client.alert_rules.items():
report = Check_Report_Azure(self.metadata())
report.status = "FAIL"
report.subscription = subscription_name
report.resource_name = "Monitor"
report.resource_id = "Monitor"
report.status_extended = f"There is not an alert for deleting Security Solution in subscription {subscription_name}."
for alert_rule in activity_log_alerts:
if check_alert_rule(
alert_rule, "Microsoft.Security/securitySolutions/delete"
):
report.status = "PASS"
report.resource_name = alert_rule.name
report.resource_id = alert_rule.id
report.subscription = subscription_name
report.status_extended = f"There is an alert configured for deleting Security Solution in subscription {subscription_name}."
break
findings.append(report)
return findings

View File

@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_delete_sqlserver_fr",
"CheckTitle": "Ensure that Activity Log Alert exists for Delete SQL Server Firewall Rule",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Description": "Create an activity log alert for the 'Delete SQL Server Firewall Rule.'",
"Risk": "Monitoring for Delete SQL Server Firewall Rule events gives insight into SQL network access changes and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Sql/servers/firewallRules/delete and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-or-update-or-delete-sql-server-firewall-rule-alert.html#trendmicro",
"Terraform": ""
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Server Firewall Rule (servers/firewallRules). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Delete server firewall rule (Microsoft.Sql/servers/firewallRules). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
}

View File

@@ -0,0 +1,32 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.monitor.lib.monitor_alerts import check_alert_rule
from prowler.providers.azure.services.monitor.monitor_client import monitor_client
class monitor_alert_delete_sqlserver_fr(Check):
def execute(self) -> Check_Report_Azure:
findings = []
for (
subscription_name,
activity_log_alerts,
) in monitor_client.alert_rules.items():
report = Check_Report_Azure(self.metadata())
report.status = "FAIL"
report.subscription = subscription_name
report.resource_name = "Monitor"
report.resource_id = "Monitor"
report.status_extended = f"There is not an alert for deleting SQL Server firewall rule in subscription {subscription_name}."
for alert_rule in activity_log_alerts:
if check_alert_rule(
alert_rule, "Microsoft.Sql/servers/firewallRules/delete"
):
report.status = "PASS"
report.resource_name = alert_rule.name
report.resource_id = alert_rule.id
report.subscription = subscription_name
report.status_extended = f"There is an alert configured for deleting SQL Server firewall rule in subscription {subscription_name}."
break
findings.append(report)
return findings

View File

@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "monitor_diagnostic_settings_exists",
"CheckTitle": "Ensure that a 'Diagnostic Setting' exists for Subscription Activity Logs ",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "Monitor",
"Description": "Enable Diagnostic settings for exporting activity logs. Diagnostic settings are available for each individual resource within a subscription. Settings should be configured for all appropriate resources for your environment.",
"Risk": "A diagnostic setting controls how a diagnostic log is exported. By default, logs are retained only for 90 days. Diagnostic settings should be defined so that logs can be exported and stored for a longer duration in order to analyze security activities within an Azure subscription.",
"RelatedUrl": "https://learn.microsoft.com/en-us/cli/azure/monitor/diagnostic-settings?view=azure-cli-latest",
"Remediation": {
"Code": {
"CLI": "az monitor diagnostic-settings subscription create --subscription <subscription id> --name <diagnostic settings name> --location <location> <[- -event-hub <event hub ID> --event-hub-auth-rule <event hub auth rule ID>] [-- storage-account <storage account ID>] [--workspace <log analytics workspace ID>] --logs '<JSON encoded categories>' (e.g. [{category:Security,enabled:true},{category:Administrative,enabled:true},{cat egory:Alert,enabled:true},{category:Policy,enabled:true}])",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/Monitor/subscription-activity-log-diagnostic-settings.html#trendmicro",
"Terraform": ""
},
"Recommendation": {
"Text": "To enable Diagnostic Settings on a Subscription: 1. Go to Monitor 2. Click on Activity Log 3. Click on Export Activity Logs 4. Click + Add diagnostic setting 5. Enter a Diagnostic setting name 6. Select Categories for the diagnostic settings 7. Select the appropriate Destination details (this may be Log Analytics, Storage Account, Event Hub, or Partner solution) 8. Click Save To enable Diagnostic Settings on a specific resource: 1. Go to Monitor 2. Click Diagnostic settings 3. Click on the resource that has a diagnostics status of disabled 4. Select Add Diagnostic Setting 5. Enter a Diagnostic setting name 6. Select the appropriate log, metric, and destination. (this may be Log Analytics, Storage Account, Event Hub, or Partner solution) 7. Click save Repeat these step for all resources as needed.",
"Url": "https://docs.microsoft.com/en-us/azure/monitoring-and-diagnostics/monitoring-overview-activity-logs#export-the-activity-log-with-a-log-profile"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, diagnostic setting is not set."
}

View File

@@ -0,0 +1,27 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.monitor.monitor_client import monitor_client
class monitor_diagnostic_settings_exists(Check):
def execute(self) -> Check_Report_Azure:
findings = []
for (
subscription_name,
diagnostic_settings,
) in monitor_client.diagnostics_settings.items():
report = Check_Report_Azure(self.metadata())
report.subscription = subscription_name
report.status = "FAIL"
report.status_extended = (
f"No diagnostic settings found in subscription {subscription_name}."
)
if diagnostic_settings:
report.status = "PASS"
report.status_extended = (
f"Diagnostic settings found in subscription {subscription_name}."
)
findings.append(report)
return findings

View File

@@ -1,7 +1,7 @@
from dataclasses import dataclass
from azure.mgmt.monitor import MonitorManagementClient
from azure.mgmt.monitor.models import LogSettings
from azure.mgmt.monitor.models import AlertRuleAllOfCondition, LogSettings
from prowler.lib.logger import logger
from prowler.providers.azure.azure_provider import AzureProvider
@@ -14,34 +14,69 @@ class Monitor(AzureService):
super().__init__(MonitorManagementClient, provider)
self.diagnostics_settings = self.__get_diagnostics_settings__()
self.alert_rules = self.get_alert_rules()
def __get_diagnostics_settings__(self):
logger.info("Monitor - Getting diagnostics settings...")
diagnostics_settings_list = []
diagnostics_settings = {}
for subscription, client in self.clients.items():
try:
diagnostics_settings.update({subscription: []})
settings = client.diagnostic_settings.list(
resource_uri=f"subscriptions/{self.subscriptions[subscription]}/"
diagnostics_settings_list = self.diagnostic_settings_with_uri(
subscription,
f"subscriptions/{self.subscriptions[subscription]}/",
client,
)
for setting in settings:
diagnostics_settings[subscription].append(
DiagnosticSetting(
id=setting.id,
storage_account_name=setting.storage_account_id.split("/")[
-1
],
logs=setting.logs,
storage_account_id=setting.storage_account_id,
)
)
diagnostics_settings.update({subscription: diagnostics_settings_list})
except Exception as error:
logger.error(
f"Subscription name: {subscription} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
return diagnostics_settings
def diagnostic_settings_with_uri(self, subscription, uri, client):
diagnostics_settings = []
try:
settings = client.diagnostic_settings.list(resource_uri=uri)
for setting in settings:
diagnostics_settings.append(
DiagnosticSetting(
id=setting.id,
name=setting.id.split("/")[-1],
storage_account_name=setting.storage_account_id.split("/")[-1],
logs=setting.logs,
storage_account_id=setting.storage_account_id,
)
)
except Exception as error:
logger.error(
f"Subscription name: {subscription} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
return diagnostics_settings
def get_alert_rules(self):
logger.info("Monitor - Getting alert rules...")
alert_rules = {}
for subscription, client in self.clients.items():
try:
alert_rules.update({subscription: []})
rules = client.activity_log_alerts.list_by_subscription_id()
for rule in rules:
alert_rules[subscription].append(
AlertRule(
id=rule.id,
name=rule.name,
condition=rule.condition,
enabled=rule.enabled,
description=rule.description,
)
)
except Exception as error:
logger.error(
f"Subscription name: {subscription} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
return alert_rules
@dataclass
class DiagnosticSetting:
@@ -49,3 +84,13 @@ class DiagnosticSetting:
storage_account_id: str
storage_account_name: str
logs: LogSettings
name: str
@dataclass
class AlertRule:
id: str
name: str
condition: AlertRuleAllOfCondition
enabled: bool
description: str

View File

@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "network_flow_log_captured_sent",
"CheckTitle": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
"CheckType": [],
"ServiceName": "network",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Network",
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
"Risk": "Network Flow Logs provide valuable insight into the flow of traffic around your network and feed into both Azure Monitor and Azure Sentinel (if in use), permitting the generation of visual flow diagrams to aid with analyzing for lateral movement, etc.",
"RelatedUrl": "https://learn.microsoft.com/en-us/security/benchmark/azure/mcsb-logging-threat-detection#lt-4-enable-network-logging-for-security-investigation",
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "",
"Other": "",
"Terraform": ""
},
"Recommendation": {
"Text": "1. Navigate to Network Watcher. 2. Select NSG flow logs. 3. Select + Create. 4. Select the desired Subscription. 5. Select + Select NSG. 6. Select a network security group. 7. Click Confirm selection. 8. Select or create a new Storage Account. 9. Input the retention in days to retain the log. 10. Click Next. 11. Under Configuration, select Version 2. 12. If rich analytics are required, select Enable Traffic Analytics, a processing interval, and a Log Analytics Workspace. 13. Select Next. 14. Optionally add Tags. 15. Select Review + create. 16. Select Create. Warning The remediation policy creates remediation deployment and names them by concatenating the subscription name and the resource group name. The MAXIMUM permitted length of a deployment name is 64 characters. Exceeding this will cause the remediation task to fail.",
"Url": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-portal"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "The impact of configuring NSG Flow logs is primarily one of cost and configuration. If deployed, it will create storage accounts that hold minimal amounts of data on a 5-day lifecycle before feeding to Log Analytics Workspace. This will increase the amount of data stored and used by Azure Monitor."
}
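
As with the previous check, the portal walkthrough can be automated. The snippet below is a hedged sketch, not part of this change: it assumes azure-mgmt-network's `flow_logs.begin_create_or_update` and its `FlowLog` model, and all resource names, IDs, the region, and the retention period are placeholders. Feeding logs on to a Log Analytics workspace via Traffic Analytics (step 12 above) would additionally require a `flow_analytics_configuration`, omitted here for brevity.

```python
# Hedged sketch (not part of this change): enable an NSG flow log with the
# SDK instead of the portal; every identifier below is a placeholder.
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.network.models import (
    FlowLog,
    FlowLogFormatParameters,
    RetentionPolicyParameters,
)

subscription_id = "<subscription-id>"                    # placeholder

client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
poller = client.flow_logs.begin_create_or_update(
    resource_group_name="NetworkWatcherRG",              # placeholder
    network_watcher_name="NetworkWatcher_eastus",         # placeholder
    flow_log_name="nsg-flow-log",                         # placeholder
    parameters=FlowLog(
        location="eastus",                                # placeholder region
        target_resource_id="<nsg-resource-id>",           # NSG to capture
        storage_id="<storage-account-resource-id>",       # log destination
        enabled=True,
        retention_policy=RetentionPolicyParameters(days=90, enabled=True),
        format=FlowLogFormatParameters(type="JSON", version=2),
    ),
)
poller.result()  # block until the flow log is provisioned
```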

View File

@@ -0,0 +1,27 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.network.network_client import network_client
class network_flow_log_captured_sent(Check):
def execute(self) -> Check_Report_Azure:
findings = []
for subscription, network_watchers in network_client.network_watchers.items():
for network_watcher in network_watchers:
report = Check_Report_Azure(self.metadata())
report.subscription = subscription
report.resource_name = network_watcher.name
report.resource_id = network_watcher.id
report.status = "FAIL"
report.status_extended = f"Network Watcher {network_watcher.name} from subscription {subscription} has no flow logs"
if network_watcher.flow_logs:
report.status = "FAIL"
report.status_extended = f"Network Watcher {network_watcher.name} from subscription {subscription} has flow logs disabled"
for flow_log in network_watcher.flow_logs:
if flow_log.enabled:
report.status = "PASS"
report.status_extended = f"Network Watcher {network_watcher.name} from subscription {subscription} has flow logs that are captured and sent to Log Analytics workspace"
break
findings.append(report)
return findings

View File

@@ -31,6 +31,7 @@ class GCPService:
)
# Only project ids that have their API enabled will be scanned
self.project_ids = self.__is_api_active__(provider.project_ids)
self.audit_config = provider.audit_config
def __get_client__(self):
return self.client

View File

@@ -0,0 +1,34 @@
{
"Provider": "compute",
"CheckID": "compute_public_address_shodan",
"CheckTitle": "Check if any of the Public Addresses are in Shodan (requires Shodan API KEY).",
"CheckType": [
"Infrastructure Security"
],
"ServiceName": "compute",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "GCPComputeAddress",
"Description": "Check if any of the Public Addresses are in Shodan (requires Shodan API KEY).",
"Risk": "Sites like Shodan index exposed systems and further expose them to wider audiences as a quick way to find exploitable systems.",
"RelatedUrl": "",
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "",
"Other": "",
"Terraform": ""
},
"Recommendation": {
"Text": "Check Identified IPs; consider changing them to private ones and delete them from Shodan.",
"Url": "https://www.shodan.io/"
}
},
"Categories": [
"internet-exposed"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
}
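
For context, the check implementation that follows this metadata wraps a plain Shodan host lookup. The standalone sketch below mirrors that call; the API key and the IP address are placeholders.

```python
# Hedged sketch (not part of this change): the raw Shodan lookup the check
# below wraps; the key and the address are placeholders.
import shodan

api = shodan.Shodan("<shodan-api-key>")  # placeholder API key

try:
    host = api.host("203.0.113.10")      # placeholder public address
    print(
        f"Listed in Shodan: ports {host['ports']}, "
        f"ISP {host['isp']}, country {host['country_name']}"
    )
except shodan.APIError as error:
    if "No information available for that IP" in error.value:
        print("Not listed in Shodan.")
    else:
        raise
```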

View File

@@ -0,0 +1,40 @@
import shodan
from prowler.lib.check.models import Check, Check_Report_GCP
from prowler.lib.logger import logger
from prowler.providers.gcp.services.compute.compute_client import compute_client
class compute_public_address_shodan(Check):
def execute(self):
findings = []
shodan_api_key = compute_client.audit_config.get("shodan_api_key")
if shodan_api_key:
api = shodan.Shodan(shodan_api_key)
for address in compute_client.addresses:
if address.type == "EXTERNAL":
report = Check_Report_GCP(self.metadata())
report.project_id = address.project_id
report.resource_id = address.id
report.location = address.region
try:
shodan_info = api.host(address.ip)
report.status = "FAIL"
report.status_extended = f"Public Address {address.ip} listed in Shodan with open ports {str(shodan_info['ports'])} and ISP {shodan_info['isp']} in {shodan_info['country_name']}. More info at https://www.shodan.io/host/{address.ip}."
findings.append(report)
except shodan.APIError as error:
if "No information available for that IP" in error.value:
report.status = "PASS"
report.status_extended = (
f"Public Address {address.ip} is not listed in Shodan."
)
findings.append(report)
continue
else:
logger.error(f"Unknown Shodan API Error: {error.value}")
else:
logger.error(
"No Shodan API Key -- Please input a Shodan API Key with -N/--shodan or in config.yaml"
)
return findings

View File

@@ -14,6 +14,7 @@ class Compute(GCPService):
self.instances = []
self.networks = []
self.subnets = []
self.addresses = []
self.firewalls = []
self.projects = []
self.load_balancers = []
@@ -26,6 +27,7 @@ class Compute(GCPService):
self.__get_networks__()
self.__threading_call__(self.__get_subnetworks__, self.regions)
self.__get_firewalls__()
self.__threading_call__(self.__get_addresses__, self.regions)
def __get_regions__(self):
for project_id in self.project_ids:
@@ -198,6 +200,36 @@ class Compute(GCPService):
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
def __get_addresses__(self, region):
for project_id in self.project_ids:
try:
request = self.client.addresses().list(
project=project_id, region=region
)
while request is not None:
response = request.execute(
http=self.__get_AuthorizedHttp_client__()
)
for address in response.get("items", []):
self.addresses.append(
Address(
name=address["name"],
id=address["id"],
project_id=project_id,
type=address.get("addressType", "EXTERNAL"),
ip=address["address"],
region=region,
)
)
request = self.client.addresses().list_next(
previous_request=request, previous_response=response
)
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
def __get_firewalls__(self):
for project_id in self.project_ids:
try:
@@ -298,6 +330,15 @@ class Subnet(BaseModel):
region: str
class Address(BaseModel):
name: str
id: str
ip: str
type: str
project_id: str
region: str
class Firewall(BaseModel):
name: str
id: str

View File

@@ -22,7 +22,7 @@ packages = [
{include = "prowler"}
]
readme = "README.md"
version = "3.14.0"
version = "3.15.0"
[tool.poetry.dependencies]
alive-progress = "3.1.5"
@@ -44,12 +44,12 @@ azure-mgmt-sql = "3.0.1"
azure-mgmt-storage = "21.1.0"
azure-mgmt-subscription = "3.1.1"
azure-mgmt-web = "7.2.0"
azure-storage-blob = "12.19.0"
azure-storage-blob = "12.19.1"
boto3 = "1.26.165"
botocore = "1.29.165"
colorama = "0.4.6"
detect-secrets = "1.4.0"
google-api-python-client = "2.120.0"
google-api-python-client = "2.122.0"
google-auth-httplib2 = ">=0.1,<0.3"
jsonschema = "4.21.1"
kubernetes = "28.1.0"
@@ -64,7 +64,7 @@ slack-sdk = "3.27.1"
tabulate = "0.9.0"
[tool.poetry.group.dev.dependencies]
bandit = "1.7.7"
bandit = "1.7.8"
black = "24.2.0"
coverage = "7.4.3"
docker = "7.0.0"
@@ -75,7 +75,7 @@ moto = {extras = ["all"], version = "5.0.3"}
openapi-schema-validator = "0.6.2"
openapi-spec-validator = "0.7.1"
pylint = "3.1.0"
pytest = "8.0.2"
pytest = "8.1.1"
pytest-cov = "4.1.0"
pytest-env = "1.1.3"
pytest-randomly = "3.15.0"

View File

@@ -485,7 +485,7 @@ class Test_cloudwatch_changes_to_network_route_tables_alarm_configured:
logs_client.put_metric_filter(
logGroupName="/log-group/test",
filterName="test-filter",
filterPattern='{($.eventSource = ec2.amazonaws.com) && ($.eventName = CreateRoute) ||\n ($.eventName = "CreateRouteTable") ||\n ($.eventName = "ReplaceRoute") ||\n ($.eventName = "ReplaceRouteTableAssociation")||\n ($.eventName = "DeleteRouteTable") ||\n ($.eventName = "DeleteRoute") ||\n ($.eventName = "DisassociateRouteTable") }',
filterPattern="{($.eventSource = ec2.amazonaws.com) && (($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable)) }",
metricTransformations=[
{
"metricName": "my-metric",

View File

@@ -119,9 +119,9 @@ class Test_glue_data_catalogs_connection_passwords_encryption_enabled:
password_kms_id=None,
)
]
glue_client.audited_account = AWS_ACCOUNT_NUMBER
glue_client.audited_partition = AWS_COMMERCIAL_PARTITION
glue_client.region = AWS_REGION_US_EAST_1
glue_client.audited_account = AWS_ACCOUNT_NUMBER
glue_client.data_catalog_arn_template = f"arn:{glue_client.audited_partition}:glue:{glue_client.region}:{glue_client.audited_account}:data-catalog"
glue_client.__get_data_catalog_arn_template__ = mock.MagicMock(
return_value=glue_client.data_catalog_arn_template
@@ -161,9 +161,9 @@ class Test_glue_data_catalogs_connection_passwords_encryption_enabled:
password_kms_id="kms-key",
)
]
glue_client.audited_account = AWS_ACCOUNT_NUMBER
glue_client.audited_partition = "aws"
glue_client.audited_partition = AWS_COMMERCIAL_PARTITION
glue_client.region = AWS_REGION_US_EAST_1
glue_client.audited_account = AWS_ACCOUNT_NUMBER
glue_client.data_catalog_arn_template = f"arn:{glue_client.audited_partition}:glue:{glue_client.region}:{glue_client.audited_account}:data-catalog"
glue_client.__get_data_catalog_arn_template__ = mock.MagicMock(
return_value=glue_client.data_catalog_arn_template

View File

@@ -43,9 +43,9 @@ class Test_glue_data_catalogs_metadata_encryption_enabled:
password_kms_id=None,
)
]
glue_client.region = AWS_REGION_US_EAST_1
glue_client.audited_account = AWS_ACCOUNT_NUMBER
glue_client.audited_partition = AWS_COMMERCIAL_PARTITION
glue_client.region = AWS_REGION_US_EAST_1
glue_client.data_catalog_arn_template = f"arn:{glue_client.audited_partition}:glue:{glue_client.region}:{glue_client.audited_account}:data-catalog"
glue_client.__get_data_catalog_arn_template__ = mock.MagicMock(
return_value=glue_client.data_catalog_arn_template
@@ -85,10 +85,10 @@ class Test_glue_data_catalogs_metadata_encryption_enabled:
password_kms_id=None,
)
]
glue_client.audited_account = AWS_ACCOUNT_NUMBER
glue_client.provider._scan_unused_services = False
glue_client.audited_partition = AWS_COMMERCIAL_PARTITION
glue_client.region = AWS_REGION_US_EAST_1
glue_client.audited_account = AWS_ACCOUNT_NUMBER
glue_client.audited_partition = AWS_COMMERCIAL_PARTITION
glue_client.data_catalog_arn_template = f"arn:{glue_client.audited_partition}:glue:{glue_client.region}:{glue_client.audited_account}:data-catalog"
glue_client.__get_data_catalog_arn_template__ = mock.MagicMock(
return_value=glue_client.data_catalog_arn_template
@@ -120,10 +120,10 @@ class Test_glue_data_catalogs_metadata_encryption_enabled:
password_kms_id=None,
)
]
glue_client.audited_account = AWS_ACCOUNT_NUMBER
glue_client.provider._scan_unused_services = False
glue_client.audited_partition = AWS_COMMERCIAL_PARTITION
glue_client.region = AWS_REGION_US_EAST_1
glue_client.audited_account = AWS_ACCOUNT_NUMBER
glue_client.audited_partition = AWS_COMMERCIAL_PARTITION
glue_client.data_catalog_arn_template = f"arn:{glue_client.audited_partition}:glue:{glue_client.region}:{glue_client.audited_account}:data-catalog"
glue_client.__get_data_catalog_arn_template__ = mock.MagicMock(
return_value=glue_client.data_catalog_arn_template
@@ -163,9 +163,9 @@ class Test_glue_data_catalogs_metadata_encryption_enabled:
password_kms_id=None,
)
]
glue_client.region = AWS_REGION_US_EAST_1
glue_client.audited_account = AWS_ACCOUNT_NUMBER
glue_client.audited_partition = AWS_COMMERCIAL_PARTITION
glue_client.region = AWS_REGION_US_EAST_1
glue_client.data_catalog_arn_template = f"arn:{glue_client.audited_partition}:glue:{glue_client.region}:{glue_client.audited_account}:data-catalog"
glue_client.__get_data_catalog_arn_template__ = mock.MagicMock(
return_value=glue_client.data_catalog_arn_template

View File

@@ -18,10 +18,6 @@ class Test_iam_user_mfa_enabled_console_access_test:
@mock_aws
def test_root_user_not_password_console_enabled(self):
iam_client = client("iam")
user = "test-user"
arn = iam_client.create_user(UserName=user)["User"]["Arn"]
from prowler.providers.aws.services.iam.iam_service import IAM
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
@@ -36,18 +32,37 @@ class Test_iam_user_mfa_enabled_console_access_test:
iam_user_mfa_enabled_console_access,
)
service_client.credential_report[0]["password_enabled"] = "not_supported"
service_client.credential_report = [
{
"user": "<root_account>",
"arn": f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:root",
"user_creation_time": "2022-02-17T14:59:38+00:00",
"password_enabled": "not_supported",
"password_last_used": "2023-05-22T09:52:24+00:00",
"password_last_changed": "not_supported",
"password_next_rotation": "not_supported",
"mfa_active": "true",
"access_key_1_active": "false",
"access_key_1_last_rotated": "N/A",
"access_key_1_last_used_date": "N/A",
"access_key_1_last_used_region": "N/A",
"access_key_1_last_used_service": "N/A",
"access_key_2_active": "false",
"access_key_2_last_rotated": "N/A",
"access_key_2_last_used_date": "N/A",
"access_key_2_last_used_region": "N/A",
"access_key_2_last_used_service": "N/A",
"cert_1_active": "false",
"cert_1_last_rotated": "N/A",
"cert_2_active": "false",
"cert_2_last_rotated": "N/A",
}
]
check = iam_user_mfa_enabled_console_access()
result = check.execute()
assert result[0].status == "PASS"
assert (
result[0].status_extended
== f"User {user} does not have Console Password enabled."
)
assert result[0].resource_id == user
assert result[0].resource_arn == arn
assert len(result) == 0
@mock_aws
def test_user_not_password_console_enabled(self):

View File

@@ -9,7 +9,7 @@ from tests.providers.azure.azure_fixtures import (
)
def mock_app_get_apps(self):
def mock_app_get_apps(_):
return {
AZURE_SUBSCRIPTION_ID: {
"app_id-1": WebApp(

View File

@@ -0,0 +1,100 @@
from unittest import mock
from uuid import uuid4
from prowler.providers.azure.services.entra.entra_service import AuthorizationPolicy
from tests.providers.azure.azure_fixtures import set_mocked_azure_provider
class Test_entra_policy_ensure_default_user_cannot_create_apps:
def test_entra_no_authorization_policy(self):
entra_client = mock.MagicMock
entra_client.authorization_policy = {}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.entra.entra_policy_ensure_default_user_cannot_create_tenants.entra_policy_ensure_default_user_cannot_create_tenants.entra_client",
new=entra_client,
):
from prowler.providers.azure.services.entra.entra_policy_ensure_default_user_cannot_create_tenants.entra_policy_ensure_default_user_cannot_create_tenants import (
entra_policy_ensure_default_user_cannot_create_tenants,
)
check = entra_policy_ensure_default_user_cannot_create_tenants()
result = check.execute()
assert len(result) == 0
def test_entra_default_user_role_permissions_not_allowed_to_create_apps(self):
id = str(uuid4())
entra_client = mock.MagicMock
entra_client.authorization_policy = {
"test.com": AuthorizationPolicy(
id=id,
name="Test",
description="Test",
default_user_role_permissions=mock.MagicMock(
allowed_to_create_apps=False
),
)
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.entra.entra_policy_ensure_default_user_cannot_create_apps.entra_policy_ensure_default_user_cannot_create_apps.entra_client",
new=entra_client,
):
from prowler.providers.azure.services.entra.entra_policy_ensure_default_user_cannot_create_apps.entra_policy_ensure_default_user_cannot_create_apps import (
entra_policy_ensure_default_user_cannot_create_apps,
)
check = entra_policy_ensure_default_user_cannot_create_apps()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "App creation is disabled for non-admin users."
)
assert result[0].resource_name == "Test"
assert result[0].resource_id == id
assert result[0].subscription == "Tenant: 'test.com'"
def test_entra_default_user_role_permissions_allowed_to_create_apps(self):
id = str(uuid4())
entra_client = mock.MagicMock
entra_client.authorization_policy = {
"test.com": AuthorizationPolicy(
id=id,
name="Test",
description="Test",
default_user_role_permissions=mock.MagicMock(
allowed_to_create_apps=True
),
)
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.entra.entra_policy_ensure_default_user_cannot_create_apps.entra_policy_ensure_default_user_cannot_create_apps.entra_client",
new=entra_client,
):
from prowler.providers.azure.services.entra.entra_policy_ensure_default_user_cannot_create_apps.entra_policy_ensure_default_user_cannot_create_apps import (
entra_policy_ensure_default_user_cannot_create_apps,
)
check = entra_policy_ensure_default_user_cannot_create_apps()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== "App creation is not disabled for non-admin users."
)
assert result[0].resource_name == "Test"
assert result[0].resource_id == id
assert result[0].subscription == "Tenant: 'test.com'"

View File

@@ -0,0 +1,100 @@
from unittest import mock
from uuid import uuid4
from prowler.providers.azure.services.entra.entra_service import AuthorizationPolicy
from tests.providers.azure.azure_fixtures import set_mocked_azure_provider
class Test_entra_policy_ensure_default_user_cannot_create_tenants:
def test_entra_no_authorization_policy(self):
entra_client = mock.MagicMock
entra_client.authorization_policy = {}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.entra.entra_policy_ensure_default_user_cannot_create_tenants.entra_policy_ensure_default_user_cannot_create_tenants.entra_client",
new=entra_client,
):
from prowler.providers.azure.services.entra.entra_policy_ensure_default_user_cannot_create_tenants.entra_policy_ensure_default_user_cannot_create_tenants import (
entra_policy_ensure_default_user_cannot_create_tenants,
)
check = entra_policy_ensure_default_user_cannot_create_tenants()
result = check.execute()
assert len(result) == 0
def test_entra_default_user_role_permissions_not_allowed_to_create_tenants(self):
id = str(uuid4())
entra_client = mock.MagicMock
entra_client.authorization_policy = {
"test.omnimicrosoft.com": AuthorizationPolicy(
id=id,
name="Test",
description="Test",
default_user_role_permissions=mock.MagicMock(
allowed_to_create_tenants=False
),
)
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.entra.entra_policy_ensure_default_user_cannot_create_tenants.entra_policy_ensure_default_user_cannot_create_tenants.entra_client",
new=entra_client,
):
from prowler.providers.azure.services.entra.entra_policy_ensure_default_user_cannot_create_tenants.entra_policy_ensure_default_user_cannot_create_tenants import (
entra_policy_ensure_default_user_cannot_create_tenants,
)
check = entra_policy_ensure_default_user_cannot_create_tenants()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Tenants creation is disabled for non-admin users."
)
assert result[0].resource_name == "Test"
assert result[0].resource_id == id
assert result[0].subscription == "Tenant: 'test.omnimicrosoft.com'"
def test_entra_default_user_role_permissions_allowed_to_create_tenants(self):
id = str(uuid4())
entra_client = mock.MagicMock
entra_client.authorization_policy = {
"test.omnimicrosoft.com": AuthorizationPolicy(
id=id,
name="Test",
description="Test",
default_user_role_permissions=mock.MagicMock(
allowed_to_create_tenants=True
),
)
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.entra.entra_policy_ensure_default_user_cannot_create_tenants.entra_policy_ensure_default_user_cannot_create_tenants.entra_client",
new=entra_client,
):
from prowler.providers.azure.services.entra.entra_policy_ensure_default_user_cannot_create_tenants.entra_policy_ensure_default_user_cannot_create_tenants import (
entra_policy_ensure_default_user_cannot_create_tenants,
)
check = entra_policy_ensure_default_user_cannot_create_tenants()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== "Tenants creation is not disabled for non-admin users."
)
assert result[0].resource_name == "Test"
assert result[0].resource_id == id
assert result[0].subscription == "Tenant: 'test.omnimicrosoft.com'"

View File

@@ -0,0 +1,57 @@
from unittest.mock import patch
from prowler.providers.azure.models import AzureIdentityInfo
from prowler.providers.azure.services.entra.entra_service import (
AuthorizationPolicy,
Entra,
User,
)
from tests.providers.azure.azure_fixtures import DOMAIN, set_mocked_azure_provider
async def mock_entra_get_users(_):
return {
"user-1@tenant1.es": User(id="id-1", name="User 1"),
}
async def mock_entra_get_authorization_policy(_):
return AuthorizationPolicy(
id="id-1",
name="Name 1",
description="Description 1",
default_user_role_permissions=None,
)
@patch(
"prowler.providers.azure.services.entra.entra_service.Entra.__get_users__",
new=mock_entra_get_users,
)
@patch(
"prowler.providers.azure.services.entra.entra_service.Entra.__get_authorization_policy__",
new=mock_entra_get_authorization_policy,
)
class Test_Entra_Service:
def test__get_client__(self):
entra_client = Entra(
set_mocked_azure_provider(identity=AzureIdentityInfo(tenant_domain=DOMAIN))
)
assert entra_client.clients[DOMAIN].__class__.__name__ == "GraphServiceClient"
def test__get_subscriptions__(self):
entra_client = Entra(set_mocked_azure_provider())
assert entra_client.subscriptions.__class__.__name__ == "dict"
def test__get_users__(self):
entra_client = Entra(set_mocked_azure_provider())
assert len(entra_client.users) == 1
assert entra_client.users["user-1@tenant1.es"].id == "id-1"
assert entra_client.users["user-1@tenant1.es"].name == "User 1"
def test__get_authorization_policy__(self):
entra_client = Entra(set_mocked_azure_provider())
assert entra_client.authorization_policy.id == "id-1"
assert entra_client.authorization_policy.name == "Name 1"
assert entra_client.authorization_policy.description == "Description 1"
assert not entra_client.authorization_policy.default_user_role_permissions

View File

@@ -3,7 +3,6 @@ from uuid import uuid4
from azure.mgmt.keyvault.v2023_07_01.models import KeyAttributes, VaultProperties
from prowler.providers.azure.services.keyvault.keyvault_service import Key, KeyVaultInfo
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
@@ -32,21 +31,6 @@ class Test_keyvault_key_expiration_set_in_non_rbac:
def test_no_keys(self):
keyvault_client = mock.MagicMock
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name="name",
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=False
),
keys=[],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -58,7 +42,27 @@ class Test_keyvault_key_expiration_set_in_non_rbac:
from prowler.providers.azure.services.keyvault.keyvault_key_expiration_set_in_non_rbac.keyvault_key_expiration_set_in_non_rbac import (
keyvault_key_expiration_set_in_non_rbac,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name="name",
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
),
keys=[],
secrets=[],
)
]
}
check = keyvault_key_expiration_set_in_non_rbac()
result = check.execute()
assert len(result) == 0
@@ -68,28 +72,6 @@ class Test_keyvault_key_expiration_set_in_non_rbac:
keyvault_name = "Keyvault Name"
keyvault_id = str(uuid4())
key_name = "Key Name"
key = Key(
id="id",
name=key_name,
enabled=True,
location="location",
attributes=KeyAttributes(expires=None, enabled=True),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=False
),
keys=[key],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -101,7 +83,35 @@ class Test_keyvault_key_expiration_set_in_non_rbac:
from prowler.providers.azure.services.keyvault.keyvault_key_expiration_set_in_non_rbac.keyvault_key_expiration_set_in_non_rbac import (
keyvault_key_expiration_set_in_non_rbac,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
Key,
KeyVaultInfo,
)
key = Key(
id="id",
name=key_name,
enabled=True,
location="location",
attributes=KeyAttributes(expires=None, enabled=True),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
),
keys=[key],
secrets=[],
)
]
}
check = keyvault_key_expiration_set_in_non_rbac()
result = check.execute()
assert len(result) == 1
@@ -118,28 +128,6 @@ class Test_keyvault_key_expiration_set_in_non_rbac:
keyvault_client = mock.MagicMock
keyvault_name = "Keyvault Name"
keyvault_id = str(uuid4())
key = Key(
id="id",
name="name",
enabled=True,
location="location",
attributes=KeyAttributes(expires=49394, enabled=True),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=False
),
keys=[key],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -151,7 +139,35 @@ class Test_keyvault_key_expiration_set_in_non_rbac:
from prowler.providers.azure.services.keyvault.keyvault_key_expiration_set_in_non_rbac.keyvault_key_expiration_set_in_non_rbac import (
keyvault_key_expiration_set_in_non_rbac,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
Key,
KeyVaultInfo,
)
key = Key(
id="id",
name="name",
enabled=True,
location="location",
attributes=KeyAttributes(expires=49394, enabled=True),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
),
keys=[key],
secrets=[],
)
]
}
check = keyvault_key_expiration_set_in_non_rbac()
result = check.execute()
assert len(result) == 1

View File

@@ -3,7 +3,6 @@ from unittest import mock
from azure.keyvault.keys import KeyRotationLifetimeAction, KeyRotationPolicy
from azure.mgmt.keyvault.v2023_07_01.models import KeyAttributes, VaultProperties
from prowler.providers.azure.services.keyvault.keyvault_service import Key, KeyVaultInfo
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
@@ -32,21 +31,6 @@ class Test_keyvault_key_rotation_enabled:
def test_no_keys(self):
keyvault_client = mock.MagicMock
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name="name",
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=False
),
keys=[],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -58,7 +42,27 @@ class Test_keyvault_key_rotation_enabled:
from prowler.providers.azure.services.keyvault.keyvault_key_rotation_enabled.keyvault_key_rotation_enabled import (
keyvault_key_rotation_enabled,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name="name",
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
),
keys=[],
secrets=[],
)
]
}
check = keyvault_key_rotation_enabled()
result = check.execute()
assert len(result) == 0
@@ -67,30 +71,6 @@ class Test_keyvault_key_rotation_enabled:
keyvault_client = mock.MagicMock
keyvault_name = "keyvault_name"
key_name = "key_name"
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=False
),
keys=[
Key(
id="id",
name=key_name,
enabled=True,
location="location",
attributes=KeyAttributes(expires=None, enabled=True),
rotation_policy=None,
)
],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -102,7 +82,37 @@ class Test_keyvault_key_rotation_enabled:
from prowler.providers.azure.services.keyvault.keyvault_key_rotation_enabled.keyvault_key_rotation_enabled import (
keyvault_key_rotation_enabled,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
Key,
KeyVaultInfo,
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
),
keys=[
Key(
id="id",
name=key_name,
enabled=True,
location="location",
attributes=KeyAttributes(expires=None, enabled=True),
rotation_policy=None,
)
],
secrets=[],
)
]
}
check = keyvault_key_rotation_enabled()
result = check.execute()
assert len(result) == 1
@@ -119,38 +129,6 @@ class Test_keyvault_key_rotation_enabled:
keyvault_client = mock.MagicMock
keyvault_name = "keyvault_name"
key_name = "key_name"
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=False
),
keys=[
Key(
id="id",
name=key_name,
enabled=True,
location="location",
attributes=KeyAttributes(expires=None, enabled=True),
rotation_policy=KeyRotationPolicy(
lifetime_actions=[
KeyRotationLifetimeAction(
action="Rotate",
lifetime_action_type="Rotate",
lifetime_percentage=80,
)
]
),
)
],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -162,7 +140,45 @@ class Test_keyvault_key_rotation_enabled:
from prowler.providers.azure.services.keyvault.keyvault_key_rotation_enabled.keyvault_key_rotation_enabled import (
keyvault_key_rotation_enabled,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
Key,
KeyVaultInfo,
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
),
keys=[
Key(
id="id",
name=key_name,
enabled=True,
location="location",
attributes=KeyAttributes(expires=None, enabled=True),
rotation_policy=KeyRotationPolicy(
lifetime_actions=[
KeyRotationLifetimeAction(
action="Rotate",
lifetime_action_type="Rotate",
lifetime_percentage=80,
)
]
),
)
],
secrets=[],
)
]
}
check = keyvault_key_rotation_enabled()
result = check.execute()
assert len(result) == 1

View File

@@ -0,0 +1,167 @@
from unittest import mock
from azure.mgmt.keyvault.v2023_07_01.models import VaultProperties
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
)
class Test_keyvault_logging_enabled:
def test_keyvault_logging_enabled(self):
keyvault_client = mock.MagicMock
keyvault_client.key_vaults = {}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_service.Monitor",
new=mock.MagicMock(),
), mock.patch(
"prowler.providers.azure.services.keyvault.keyvault_logging_enabled.keyvault_logging_enabled.keyvault_client",
new=keyvault_client,
):
from prowler.providers.azure.services.keyvault.keyvault_logging_enabled.keyvault_logging_enabled import (
keyvault_logging_enabled,
)
check = keyvault_logging_enabled()
result = check.execute()
assert len(result) == 0
def test_no_diagnostic_settings(self):
keyvault_client = mock.MagicMock
keyvault_client.key_vaults = {AZURE_SUBSCRIPTION_ID: []}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_service.Monitor",
new=mock.MagicMock(),
), mock.patch(
"prowler.providers.azure.services.keyvault.keyvault_logging_enabled.keyvault_logging_enabled.keyvault_client",
new=keyvault_client,
):
from prowler.providers.azure.services.keyvault.keyvault_logging_enabled.keyvault_logging_enabled import (
keyvault_logging_enabled,
)
check = keyvault_logging_enabled()
result = check.execute()
assert len(result) == 0
def test_diagnostic_settings_configured(self):
keyvault_client = mock.MagicMock
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_service.Monitor",
new=mock.MagicMock(),
), mock.patch(
"prowler.providers.azure.services.keyvault.keyvault_logging_enabled.keyvault_logging_enabled.keyvault_client",
new=keyvault_client,
):
from prowler.providers.azure.services.keyvault.keyvault_logging_enabled.keyvault_logging_enabled import (
keyvault_logging_enabled,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
)
from prowler.providers.azure.services.monitor.monitor_service import (
DiagnosticSetting,
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name="name_keyvault",
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
),
keys=[],
secrets=[],
monitor_diagnostic_settings=[
DiagnosticSetting(
id="id/id",
logs=[
mock.MagicMock(
category_group="audit",
category="None",
enabled=True,
),
mock.MagicMock(
category_group="allLogs",
category="None",
enabled=False,
),
],
storage_account_name="storage_account_name",
storage_account_id="storage_account_id",
name="name_diagnostic_setting",
),
],
),
KeyVaultInfo(
id="id2",
name="name_keyvault2",
location="location2",
resource_group="resource_group2",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
),
keys=[],
secrets=[],
monitor_diagnostic_settings=[
DiagnosticSetting(
id="id2/id2",
logs=[
mock.MagicMock(
category_group="audit",
category="None",
enabled=True,
),
mock.MagicMock(
category_group="allLogs",
category="None",
enabled=True,
),
],
storage_account_name="storage_account_name2",
storage_account_id="storage_account_id2",
name="name_diagnostic_setting2",
),
],
),
]
}
check = keyvault_logging_enabled()
result = check.execute()
assert len(result) == 2
assert result[0].status == "FAIL"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "name_diagnostic_setting"
assert result[0].resource_id == "id/id"
assert (
result[0].status_extended
== f"Diagnostic setting name_diagnostic_setting for Key Vault name_keyvault in subscription {AZURE_SUBSCRIPTION_ID} does not have audit logging."
)
assert result[1].status == "PASS"
assert result[1].subscription == AZURE_SUBSCRIPTION_ID
assert result[1].resource_name == "name_diagnostic_setting2"
assert result[1].resource_id == "id2/id2"
assert (
result[1].status_extended
== f"Diagnostic setting name_diagnostic_setting2 for Key Vault name_keyvault2 in subscription {AZURE_SUBSCRIPTION_ID} has audit logging."
)

View File

@@ -3,10 +3,6 @@ from uuid import uuid4
from azure.mgmt.keyvault.v2023_07_01.models import SecretAttributes, VaultProperties
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
Secret,
)
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
@@ -35,21 +31,6 @@ class Test_keyvault_non_rbac_secret_expiration_set:
def test_no_secrets(self):
keyvault_client = mock.MagicMock
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name="name",
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=False
),
keys=[],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -61,6 +42,27 @@ class Test_keyvault_non_rbac_secret_expiration_set:
from prowler.providers.azure.services.keyvault.keyvault_non_rbac_secret_expiration_set.keyvault_non_rbac_secret_expiration_set import (
keyvault_non_rbac_secret_expiration_set,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name="name",
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
),
keys=[],
secrets=[],
)
]
}
check = keyvault_non_rbac_secret_expiration_set()
result = check.execute()
@@ -71,28 +73,6 @@ class Test_keyvault_non_rbac_secret_expiration_set:
keyvault_name = "Keyvault Name"
keyvault_id = str(uuid4())
secret_name = "Secret"
secret = Secret(
id="id",
name=secret_name,
enabled=True,
location="location",
attributes=SecretAttributes(expires=None, enabled=True),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=False
),
keys=[],
secrets=[secret],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -104,7 +84,35 @@ class Test_keyvault_non_rbac_secret_expiration_set:
from prowler.providers.azure.services.keyvault.keyvault_non_rbac_secret_expiration_set.keyvault_non_rbac_secret_expiration_set import (
keyvault_non_rbac_secret_expiration_set,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
Secret,
)
secret = Secret(
id="id",
name=secret_name,
enabled=True,
location="location",
attributes=SecretAttributes(expires=None, enabled=True),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
),
keys=[],
secrets=[secret],
)
]
}
check = keyvault_non_rbac_secret_expiration_set()
result = check.execute()
assert len(result) == 1
@@ -123,35 +131,6 @@ class Test_keyvault_non_rbac_secret_expiration_set:
keyvault_id = str(uuid4())
secret1_name = "Secret1"
secret2_name = "Secret2"
secret1 = Secret(
id="id",
name=secret1_name,
enabled=True,
location="location",
attributes=SecretAttributes(expires=None),
)
secret2 = Secret(
id="id",
name=secret2_name,
enabled=True,
location="location",
attributes=SecretAttributes(expires=84934),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=False
),
keys=[],
secrets=[secret1, secret2],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -163,7 +142,42 @@ class Test_keyvault_non_rbac_secret_expiration_set:
from prowler.providers.azure.services.keyvault.keyvault_non_rbac_secret_expiration_set.keyvault_non_rbac_secret_expiration_set import (
keyvault_non_rbac_secret_expiration_set,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
Secret,
)
secret1 = Secret(
id="id",
name=secret1_name,
enabled=True,
location="location",
attributes=SecretAttributes(expires=None),
)
secret2 = Secret(
id="id",
name=secret2_name,
enabled=True,
location="location",
attributes=SecretAttributes(expires=84934),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
),
keys=[],
secrets=[secret1, secret2],
)
]
}
check = keyvault_non_rbac_secret_expiration_set()
result = check.execute()
assert len(result) == 1
@@ -180,28 +194,6 @@ class Test_keyvault_non_rbac_secret_expiration_set:
keyvault_client = mock.MagicMock
keyvault_name = "Keyvault Name"
keyvault_id = str(uuid4())
secret = Secret(
id="id",
name="name",
enabled=False,
location="location",
attributes=SecretAttributes(expires=None),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=False
),
keys=[],
secrets=[secret],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -213,7 +205,35 @@ class Test_keyvault_non_rbac_secret_expiration_set:
from prowler.providers.azure.services.keyvault.keyvault_non_rbac_secret_expiration_set.keyvault_non_rbac_secret_expiration_set import (
keyvault_non_rbac_secret_expiration_set,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
Secret,
)
secret = Secret(
id="id",
name="name",
enabled=False,
location="location",
attributes=SecretAttributes(expires=None),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
),
keys=[],
secrets=[secret],
)
]
}
check = keyvault_non_rbac_secret_expiration_set()
result = check.execute()
assert len(result) == 1

View File

@@ -6,7 +6,6 @@ from azure.mgmt.keyvault.v2023_07_01.models import (
VaultProperties,
)
from prowler.providers.azure.services.keyvault.keyvault_service import KeyVaultInfo
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
@@ -37,24 +36,6 @@ class Test_keyvault_private_endpoints:
keyvault_client = mock.MagicMock
keyvault_name = "Keyvault Name"
keyvault_id = str(uuid4())
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
private_endpoint_connections=None,
),
keys=[],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -66,6 +47,28 @@ class Test_keyvault_private_endpoints:
from prowler.providers.azure.services.keyvault.keyvault_private_endpoints.keyvault_private_endpoints import (
keyvault_private_endpoints,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
private_endpoint_connections=None,
),
keys=[],
secrets=[],
)
]
}
check = keyvault_private_endpoints()
result = check.execute()
@@ -86,23 +89,6 @@ class Test_keyvault_private_endpoints:
private_endpoint = PrivateEndpointConnectionItem(
id="id",
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
),
keys=[],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -114,7 +100,27 @@ class Test_keyvault_private_endpoints:
from prowler.providers.azure.services.keyvault.keyvault_private_endpoints.keyvault_private_endpoints import (
keyvault_private_endpoints,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
),
keys=[],
secrets=[],
)
]
}
keyvault_client.key_vaults[AZURE_SUBSCRIPTION_ID][
0
].properties.private_endpoint_connections = [private_endpoint]

View File

@@ -3,7 +3,6 @@ from uuid import uuid4
from azure.mgmt.keyvault.v2023_07_01.models import VaultProperties
from prowler.providers.azure.services.keyvault.keyvault_service import KeyVaultInfo
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
@@ -34,23 +33,6 @@ class Test_keyvault_rbac_enabled:
keyvault_client = mock.MagicMock
keyvault_name = "Keyvault Name"
keyvault_id = str(uuid4())
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
),
keys=[],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -62,7 +44,27 @@ class Test_keyvault_rbac_enabled:
from prowler.providers.azure.services.keyvault.keyvault_rbac_enabled.keyvault_rbac_enabled import (
keyvault_rbac_enabled,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=False,
),
keys=[],
secrets=[],
)
]
}
check = keyvault_rbac_enabled()
result = check.execute()
assert len(result) == 1
@@ -79,23 +81,6 @@ class Test_keyvault_rbac_enabled:
keyvault_client = mock.MagicMock
keyvault_name = "Keyvault Name"
keyvault_id = str(uuid4())
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
),
keys=[],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -107,7 +92,27 @@ class Test_keyvault_rbac_enabled:
from prowler.providers.azure.services.keyvault.keyvault_rbac_enabled.keyvault_rbac_enabled import (
keyvault_rbac_enabled,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
),
keys=[],
secrets=[],
)
]
}
check = keyvault_rbac_enabled()
result = check.execute()
assert len(result) == 1

View File

@@ -3,7 +3,6 @@ from uuid import uuid4
from azure.mgmt.keyvault.v2023_07_01.models import KeyAttributes, VaultProperties
from prowler.providers.azure.services.keyvault.keyvault_service import Key, KeyVaultInfo
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
@@ -32,21 +31,6 @@ class Test_keyvault_rbac_key_expiration_set:
def test_no_keys(self):
keyvault_client = mock.MagicMock
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name="name",
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=True
),
keys=[],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -58,7 +42,27 @@ class Test_keyvault_rbac_key_expiration_set:
from prowler.providers.azure.services.keyvault.keyvault_rbac_key_expiration_set.keyvault_rbac_key_expiration_set import (
keyvault_rbac_key_expiration_set,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name="name",
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
),
keys=[],
secrets=[],
)
]
}
check = keyvault_rbac_key_expiration_set()
result = check.execute()
assert len(result) == 0
@@ -68,28 +72,6 @@ class Test_keyvault_rbac_key_expiration_set:
keyvault_name = "Keyvault Name"
keyvault_id = str(uuid4())
key_name = "Key Name"
key = Key(
id="id",
name=key_name,
enabled=True,
location="location",
attributes=KeyAttributes(expires=None, enabled=True),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=True
),
keys=[key],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -101,7 +83,35 @@ class Test_keyvault_rbac_key_expiration_set:
from prowler.providers.azure.services.keyvault.keyvault_rbac_key_expiration_set.keyvault_rbac_key_expiration_set import (
keyvault_rbac_key_expiration_set,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
Key,
KeyVaultInfo,
)
key = Key(
id="id",
name=key_name,
enabled=True,
location="location",
attributes=KeyAttributes(expires=None, enabled=True),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
),
keys=[key],
secrets=[],
)
]
}
check = keyvault_rbac_key_expiration_set()
result = check.execute()
assert len(result) == 1
@@ -118,28 +128,6 @@ class Test_keyvault_rbac_key_expiration_set:
keyvault_client = mock.MagicMock
keyvault_name = "Keyvault Name"
keyvault_id = str(uuid4())
key = Key(
id="id",
name="name",
enabled=True,
location="location",
attributes=KeyAttributes(expires=49394, enabled=True),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=True
),
keys=[key],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -151,7 +139,35 @@ class Test_keyvault_rbac_key_expiration_set:
from prowler.providers.azure.services.keyvault.keyvault_rbac_key_expiration_set.keyvault_rbac_key_expiration_set import (
keyvault_rbac_key_expiration_set,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
Key,
KeyVaultInfo,
)
key = Key(
id="id",
name="name",
enabled=True,
location="location",
attributes=KeyAttributes(expires=49394, enabled=True),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
),
keys=[key],
secrets=[],
)
]
}
check = keyvault_rbac_key_expiration_set()
result = check.execute()
assert len(result) == 1

View File

@@ -3,10 +3,6 @@ from uuid import uuid4
from azure.mgmt.keyvault.v2023_07_01.models import SecretAttributes, VaultProperties
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
Secret,
)
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
@@ -35,21 +31,6 @@ class Test_keyvault_rbac_secret_expiration_set:
def test_no_secrets(self):
keyvault_client = mock.MagicMock
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name="name",
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=True
),
keys=[],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -61,7 +42,27 @@ class Test_keyvault_rbac_secret_expiration_set:
from prowler.providers.azure.services.keyvault.keyvault_rbac_secret_expiration_set.keyvault_rbac_secret_expiration_set import (
keyvault_rbac_secret_expiration_set,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name="name",
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
),
keys=[],
secrets=[],
)
]
}
check = keyvault_rbac_secret_expiration_set()
result = check.execute()
assert len(result) == 0
@@ -71,28 +72,6 @@ class Test_keyvault_rbac_secret_expiration_set:
keyvault_name = "Keyvault Name"
keyvault_id = str(uuid4())
secret_name = "Secret"
secret = Secret(
id="id",
name=secret_name,
enabled=True,
location="location",
attributes=SecretAttributes(expires=None),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=True
),
keys=[],
secrets=[secret],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -104,7 +83,35 @@ class Test_keyvault_rbac_secret_expiration_set:
from prowler.providers.azure.services.keyvault.keyvault_rbac_secret_expiration_set.keyvault_rbac_secret_expiration_set import (
keyvault_rbac_secret_expiration_set,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
Secret,
)
secret = Secret(
id="id",
name=secret_name,
enabled=True,
location="location",
attributes=SecretAttributes(expires=None),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
),
keys=[],
secrets=[secret],
)
]
}
check = keyvault_rbac_secret_expiration_set()
result = check.execute()
assert len(result) == 1
@@ -123,35 +130,6 @@ class Test_keyvault_rbac_secret_expiration_set:
keyvault_id = str(uuid4())
secret1_name = "Secret1"
secret2_name = "Secret2"
secret1 = Secret(
id="id",
name=secret1_name,
enabled=True,
location="location",
attributes=SecretAttributes(expires=None),
)
secret2 = Secret(
id="id",
name=secret2_name,
enabled=True,
location="location",
attributes=SecretAttributes(expires=84934),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=True
),
keys=[],
secrets=[secret1, secret2],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -163,7 +141,42 @@ class Test_keyvault_rbac_secret_expiration_set:
from prowler.providers.azure.services.keyvault.keyvault_rbac_secret_expiration_set.keyvault_rbac_secret_expiration_set import (
keyvault_rbac_secret_expiration_set,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
Secret,
)
secret1 = Secret(
id="id",
name=secret1_name,
enabled=True,
location="location",
attributes=SecretAttributes(expires=None),
)
secret2 = Secret(
id="id",
name=secret2_name,
enabled=True,
location="location",
attributes=SecretAttributes(expires=84934),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
),
keys=[],
secrets=[secret1, secret2],
)
]
}
check = keyvault_rbac_secret_expiration_set()
result = check.execute()
assert len(result) == 1
@@ -180,28 +193,6 @@ class Test_keyvault_rbac_secret_expiration_set:
keyvault_client = mock.MagicMock
keyvault_name = "Keyvault Name"
keyvault_id = str(uuid4())
secret = Secret(
id="id",
name="name",
enabled=False,
location="location",
attributes=SecretAttributes(expires=None),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid", sku="sku", enable_rbac_authorization=True
),
keys=[],
secrets=[secret],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -213,7 +204,35 @@ class Test_keyvault_rbac_secret_expiration_set:
from prowler.providers.azure.services.keyvault.keyvault_rbac_secret_expiration_set.keyvault_rbac_secret_expiration_set import (
keyvault_rbac_secret_expiration_set,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
Secret,
)
secret = Secret(
id="id",
name="name",
enabled=False,
location="location",
attributes=SecretAttributes(expires=None),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
),
keys=[],
secrets=[secret],
)
]
}
check = keyvault_rbac_secret_expiration_set()
result = check.execute()
assert len(result) == 1

View File

@@ -3,10 +3,6 @@ from uuid import uuid4
from azure.mgmt.keyvault.v2023_07_01.models import SecretAttributes, VaultProperties
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
Secret,
)
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
@@ -38,25 +34,6 @@ class Test_keyvault_recoverable:
keyvault_client = mock.MagicMock
keyvault_name = "Keyvault Name"
keyvault_id = str(uuid4())
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
enable_soft_delete=True,
enable_purge_protection=False,
),
keys=[],
secrets=[],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -68,7 +45,29 @@ class Test_keyvault_recoverable:
from prowler.providers.azure.services.keyvault.keyvault_recoverable.keyvault_recoverable import (
keyvault_recoverable,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
enable_soft_delete=True,
enable_purge_protection=False,
),
keys=[],
secrets=[],
)
]
}
check = keyvault_recoverable()
result = check.execute()
assert len(result) == 1
@@ -85,39 +84,6 @@ class Test_keyvault_recoverable:
keyvault_client = mock.MagicMock
keyvault_name = "Keyvault Name"
keyvault_id = str(uuid4())
secret1 = Secret(
id="id",
name="name",
enabled=True,
location="location",
attributes=SecretAttributes(expires=None, enabled=True),
)
secret2 = Secret(
id="id",
name="name",
enabled=True,
location="location",
attributes=SecretAttributes(expires=84934, enabled=True),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
enable_soft_delete=True,
enable_purge_protection=False,
),
keys=[],
secrets=[secret1, secret2],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -129,7 +95,44 @@ class Test_keyvault_recoverable:
from prowler.providers.azure.services.keyvault.keyvault_recoverable.keyvault_recoverable import (
keyvault_recoverable,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
Secret,
)
secret1 = Secret(
id="id",
name="name",
enabled=True,
location="location",
attributes=SecretAttributes(expires=None, enabled=True),
)
secret2 = Secret(
id="id",
name="name",
enabled=True,
location="location",
attributes=SecretAttributes(expires=84934, enabled=True),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
enable_soft_delete=True,
enable_purge_protection=False,
),
keys=[],
secrets=[secret1, secret2],
)
]
}
check = keyvault_recoverable()
result = check.execute()
assert len(result) == 1
@@ -146,32 +149,6 @@ class Test_keyvault_recoverable:
keyvault_client = mock.MagicMock
keyvault_name = "Keyvault Name"
keyvault_id = str(uuid4())
secret = Secret(
id="id",
name="name",
enabled=True,
location="location",
attributes=SecretAttributes(expires=None, enabled=False),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
enable_soft_delete=True,
enable_purge_protection=True,
),
keys=[],
secrets=[secret],
)
]
}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
@@ -183,7 +160,37 @@ class Test_keyvault_recoverable:
from prowler.providers.azure.services.keyvault.keyvault_recoverable.keyvault_recoverable import (
keyvault_recoverable,
)
from prowler.providers.azure.services.keyvault.keyvault_service import (
KeyVaultInfo,
Secret,
)
secret = Secret(
id="id",
name="name",
enabled=True,
location="location",
attributes=SecretAttributes(expires=None, enabled=False),
)
keyvault_client.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id=keyvault_id,
name=keyvault_name,
location="location",
resource_group="resource_group",
properties=VaultProperties(
tenant_id="tenantid",
sku="sku",
enable_rbac_authorization=True,
enable_soft_delete=True,
enable_purge_protection=True,
),
keys=[],
secrets=[secret],
)
]
}
check = keyvault_recoverable()
result = check.execute()
assert len(result) == 1

View File

@@ -1,105 +1,262 @@
from unittest.mock import patch
from unittest import mock
from unittest.mock import MagicMock, patch
from prowler.providers.azure.services.keyvault.keyvault_service import (
Key,
KeyVault,
KeyVaultInfo,
Secret,
)
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
)
def mock_keyvault_get_key_vaults(_, __):
keyvault_info = KeyVaultInfo(
id="id",
name="name",
location="location",
resource_group="resource_group",
properties=None,
keys=[
Key(
id="id",
name="name",
enabled=True,
location="location",
attributes=None,
rotation_policy=None,
)
],
secrets=[
Secret(
id="id",
name="name",
enabled=True,
location="location",
attributes=None,
)
],
)
return {AZURE_SUBSCRIPTION_ID: [keyvault_info]}
# TODO: we have to fix this test not to use MagicMock but to set up the KeyVault service while mocking the import of the Monitor client (a possible shape is sketched below)
# from prowler.providers.azure.services.keyvault.keyvault_service import (
# DiagnosticSetting,
# Key,
# KeyVault,
# KeyVaultInfo,
# Secret,
# )
# def mock_keyvault_get_key_vaults(_, __):
# keyvault_info = KeyVaultInfo(
# id="id",
# name="name",
# location="location",
# resource_group="resource_group",
# properties=None,
# keys=[
# Key(
# id="id",
# name="name",
# enabled=True,
# location="location",
# attributes=None,
# rotation_policy=None,
# )
# ],
# secrets=[
# Secret(
# id="id",
# name="name",
# enabled=True,
# location="location",
# attributes=None,
# )
# ],
# monitor_diagnostic_settings=[
# DiagnosticSetting(
# id="id",
# storage_account_id="storage_account_id",
# logs=[
# mock.MagicMock(
# category_group="audit", category="None", enabled=True
# ),
# mock.MagicMock(
# category_group="allLogs", category="None", enabled=False
# ),
# ],
# name="name",
# storage_account_name="storage_account_name",
# )
# ],
# )
# return {AZURE_SUBSCRIPTION_ID: [keyvault_info]}
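# A possible shape for that fix (hypothetical sketch, assuming the module-level
# KeyVault import and the mock_keyvault_get_key_vaults helper above are restored;
# the Monitor patch target may need to point at keyvault_service instead of
# monitor_service, depending on how that import is done there):
#
# with patch(
#     "prowler.providers.azure.services.keyvault.keyvault_service.KeyVault.__get_key_vaults__",
#     new=mock_keyvault_get_key_vaults,
# ), patch(
#     "prowler.providers.azure.services.monitor.monitor_service.Monitor",
#     new=MagicMock(),
# ):
#     keyvault = KeyVault(set_mocked_azure_provider())
#     assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].name == "name"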
@patch(
"prowler.providers.azure.services.keyvault.keyvault_service.KeyVault.__get_key_vaults__",
new=mock_keyvault_get_key_vaults,
)
# @patch(
# "prowler.providers.azure.services.keyvault.keyvault_service.KeyVault.__get_key_vaults__",
# new=mock_keyvault_get_key_vaults,
# )
class Test_keyvault_service:
def test__get_client__(self):
keyvault = KeyVault(set_mocked_azure_provider())
assert (
keyvault.clients[AZURE_SUBSCRIPTION_ID].__class__.__name__
== "KeyVaultManagementClient"
)
def test_keyvault_service_(self):
with patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), patch(
"prowler.providers.azure.services.monitor.monitor_service.Monitor",
new=MagicMock(),
):
from prowler.providers.azure.services.keyvault.keyvault_service import ( # KeyVault,
DiagnosticSetting,
Key,
KeyVaultInfo,
Secret,
)
def test__get_key_vaults__(self):
keyvault = KeyVault(set_mocked_azure_provider())
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].__class__.__name__
== "KeyVaultInfo"
)
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].id == "id"
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].name == "name"
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].location == "location"
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].resource_group
== "resource_group"
)
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].properties is None
# keyvault = KeyVault(set_mocked_azure_provider())
keyvault = MagicMock()
def test__get_keys__(self):
keyvault = KeyVault(set_mocked_azure_provider())
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].keys[0].__class__.__name__
== "Key"
)
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].keys[0].id == "id"
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].keys[0].name == "name"
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].keys[0].enabled is True
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].keys[0].location == "location"
)
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].keys[0].attributes is None
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].keys[0].rotation_policy
is None
)
keyvault.key_vaults = {
AZURE_SUBSCRIPTION_ID: [
KeyVaultInfo(
id="id",
name="name",
location="location",
resource_group="resource_group",
properties=None,
keys=[
Key(
id="id",
name="name",
enabled=True,
location="location",
attributes=None,
rotation_policy=None,
)
],
secrets=[
Secret(
id="id",
name="name",
enabled=True,
location="location",
attributes=None,
)
],
monitor_diagnostic_settings=[
DiagnosticSetting(
id="id",
storage_account_id="storage_account_id",
logs=[
mock.MagicMock(
category_group="audit",
category="None",
enabled=True,
),
mock.MagicMock(
category_group="allLogs",
category="None",
enabled=False,
),
],
name="name",
storage_account_name="storage_account_name",
)
],
)
]
}
def test__get_secrets__(self):
keyvault = KeyVault(set_mocked_azure_provider())
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].secrets[0].__class__.__name__
== "Secret"
)
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].secrets[0].id == "id"
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].secrets[0].name == "name"
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].secrets[0].enabled is True
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].secrets[0].location
== "location"
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].secrets[0].attributes is None
)
# assert (
# keyvault.clients[AZURE_SUBSCRIPTION_ID].__class__.__name__
# == "KeyVaultManagementClient"
# )
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].__class__.__name__
== "KeyVaultInfo"
)
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].id == "id"
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].name == "name"
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].location == "location"
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].resource_group
== "resource_group"
)
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].properties is None
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].keys[0].__class__.__name__
== "Key"
)
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].keys[0].id == "id"
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].keys[0].name == "name"
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].keys[0].enabled is True
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].keys[0].location
== "location"
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].keys[0].attributes is None
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].keys[0].rotation_policy
is None
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0]
.secrets[0]
.__class__.__name__
== "Secret"
)
assert keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].secrets[0].id == "id"
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].secrets[0].name == "name"
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].secrets[0].enabled is True
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].secrets[0].location
== "location"
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0].secrets[0].attributes
is None
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0]
.monitor_diagnostic_settings[0]
.id
== "id"
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0]
.monitor_diagnostic_settings[0]
.storage_account_id
== "storage_account_id"
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0]
.monitor_diagnostic_settings[0]
.logs[0]
.category_group
== "audit"
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0]
.monitor_diagnostic_settings[0]
.logs[0]
.category
== "None"
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0]
.monitor_diagnostic_settings[0]
.logs[0]
.enabled
is True
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0]
.monitor_diagnostic_settings[0]
.logs[1]
.category_group
== "allLogs"
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0]
.monitor_diagnostic_settings[0]
.logs[1]
.category
== "None"
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0]
.monitor_diagnostic_settings[0]
.logs[1]
.enabled
is False
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0]
.monitor_diagnostic_settings[0]
.name
== "name"
)
assert (
keyvault.key_vaults[AZURE_SUBSCRIPTION_ID][0]
.monitor_diagnostic_settings[0]
.storage_account_name
== "storage_account_name"
)

View File

@@ -0,0 +1,119 @@
from unittest import mock
from azure.mgmt.monitor.models import AlertRuleAnyOfOrLeafCondition
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
)
class Test_monitor_alert_create_policy_assignment:
def test_monitor_alert_create_policy_assignment_no_subscriptions(self):
monitor_client = mock.MagicMock
monitor_client.alert_rules = {}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_create_policy_assignment.monitor_alert_create_policy_assignment.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_create_policy_assignment.monitor_alert_create_policy_assignment import (
monitor_alert_create_policy_assignment,
)
check = monitor_alert_create_policy_assignment()
result = check.execute()
assert len(result) == 0
def test_no_alert_rules(self):
monitor_client = mock.MagicMock
monitor_client.alert_rules = {AZURE_SUBSCRIPTION_ID: []}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_create_policy_assignment.monitor_alert_create_policy_assignment.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_create_policy_assignment.monitor_alert_create_policy_assignment import (
monitor_alert_create_policy_assignment,
)
check = monitor_alert_create_policy_assignment()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "Monitor"
assert result[0].resource_id == "Monitor"
assert (
result[0].status_extended
== f"There is not an alert for creating Policy Assignments in subscription {AZURE_SUBSCRIPTION_ID}."
)
def test_alert_rules_configured(self):
monitor_client = mock.MagicMock
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_create_policy_assignment.monitor_alert_create_policy_assignment.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_create_policy_assignment.monitor_alert_create_policy_assignment import (
monitor_alert_create_policy_assignment,
)
from prowler.providers.azure.services.monitor.monitor_service import (
AlertRule,
AlertRuleAllOfCondition,
)
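# Two rules share the Microsoft.Authorization/policyAssignments/write condition; only
# the second one is enabled, so the check is expected to PASS pointing at "name2"/"id2".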
monitor_client.alert_rules = {
AZURE_SUBSCRIPTION_ID: [
AlertRule(
id="id",
name="name",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Authorization/policyAssignments/write",
field="operationName",
),
]
),
enabled=False,
description="description",
),
AlertRule(
id="id2",
name="name2",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Authorization/policyAssignments/write",
field="operationName",
),
]
),
enabled=True,
description="description2",
),
]
}
check = monitor_alert_create_policy_assignment()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "name2"
assert result[0].resource_id == "id2"
assert (
result[0].status_extended
== f"There is an alert configured for creating Policy Assignments in subscription {AZURE_SUBSCRIPTION_ID}."
)

View File

@@ -0,0 +1,117 @@
from unittest import mock
from azure.mgmt.monitor.models import AlertRuleAnyOfOrLeafCondition
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
)
class Test_monitor_alert_create_update_nsg:
def test_monitor_alert_create_update_nsg_no_subscriptions(self):
monitor_client = mock.MagicMock
monitor_client.alert_rules = {}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_create_update_nsg.monitor_alert_create_update_nsg.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_create_update_nsg.monitor_alert_create_update_nsg import (
monitor_alert_create_update_nsg,
)
check = monitor_alert_create_update_nsg()
result = check.execute()
assert len(result) == 0
def test_no_alert_rules(self):
monitor_client = mock.MagicMock()
monitor_client.alert_rules = {AZURE_SUBSCRIPTION_ID: []}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_create_update_nsg.monitor_alert_create_update_nsg.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_create_update_nsg.monitor_alert_create_update_nsg import (
monitor_alert_create_update_nsg,
)
check = monitor_alert_create_update_nsg()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "Monitor"
assert result[0].resource_id == "Monitor"
assert (
result[0].status_extended
== f"There is not an alert for creating/updating Network Security Groups in subscription {AZURE_SUBSCRIPTION_ID}."
)
def test_alert_rules_configured(self):
monitor_client = mock.MagicMock
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_create_update_nsg.monitor_alert_create_update_nsg.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_create_update_nsg.monitor_alert_create_update_nsg import (
monitor_alert_create_update_nsg,
)
from prowler.providers.azure.services.monitor.monitor_service import (
AlertRule,
AlertRuleAllOfCondition,
)
monitor_client.alert_rules = {
AZURE_SUBSCRIPTION_ID: [
AlertRule(
id="id",
name="name",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Network/networkSecurityGroups/write",
field="operationName",
),
]
),
enabled=False,
description="description",
),
AlertRule(
id="id2",
name="name2",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Network/networkSecurityGroups/write",
field="operationName",
),
]
),
enabled=True,
description="description2",
),
]
}
check = monitor_alert_create_update_nsg()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "name2"
assert result[0].resource_id == "id2"
assert (
result[0].status_extended
== f"There is an alert configured for creating/updating Network Security Groups in subscription {AZURE_SUBSCRIPTION_ID}."
)

View File

@@ -0,0 +1,118 @@
from unittest import mock
from azure.mgmt.monitor.models import AlertRuleAnyOfOrLeafCondition
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
)
class Test_monitor_alert_create_update_public_ip_address_rule:
def test_monitor_alert_create_update_public_ip_address_rule_no_subscriptions(self):
monitor_client = mock.MagicMock()
monitor_client.alert_rules = {}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_create_update_public_ip_address_rule.monitor_alert_create_update_public_ip_address_rule.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_create_update_public_ip_address_rule.monitor_alert_create_update_public_ip_address_rule import (
monitor_alert_create_update_public_ip_address_rule,
)
check = monitor_alert_create_update_public_ip_address_rule()
result = check.execute()
assert len(result) == 0
def test_no_alert_rules(self):
monitor_client = mock.MagicMock()
monitor_client.alert_rules = {AZURE_SUBSCRIPTION_ID: []}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_create_update_public_ip_address_rule.monitor_alert_create_update_public_ip_address_rule.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_create_update_public_ip_address_rule.monitor_alert_create_update_public_ip_address_rule import (
monitor_alert_create_update_public_ip_address_rule,
)
check = monitor_alert_create_update_public_ip_address_rule()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "Monitor"
assert result[0].resource_id == "Monitor"
assert (
result[0].status_extended
== f"There is not an alert for creating/updating Public IP address rule in subscription {AZURE_SUBSCRIPTION_ID}."
)
def test_alert_rules_configured(self):
monitor_client = mock.MagicMock
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_create_update_public_ip_address_rule.monitor_alert_create_update_public_ip_address_rule.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_create_update_public_ip_address_rule.monitor_alert_create_update_public_ip_address_rule import (
monitor_alert_create_update_public_ip_address_rule,
)
from prowler.providers.azure.services.monitor.monitor_service import (
AlertRule,
AlertRuleAllOfCondition,
)
monitor_client.alert_rules = {
AZURE_SUBSCRIPTION_ID: [
AlertRule(
id="id",
name="name",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Network/publicIPAddresses/write",
field="operationName",
),
]
),
enabled=False,
description="description",
),
AlertRule(
id="id2",
name="name2",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Network/publicIPAddresses/write",
field="operationName",
),
]
),
enabled=True,
description="description2",
),
]
}
check = monitor_alert_create_update_public_ip_address_rule()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "name2"
assert result[0].resource_id == "id2"
assert (
result[0].status_extended
== f"There is an alert configured for creating/updating Public IP address rule in subscription {AZURE_SUBSCRIPTION_ID}."
)

View File

@@ -0,0 +1,118 @@
from unittest import mock
from azure.mgmt.monitor.models import AlertRuleAnyOfOrLeafCondition
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
)
class Test_monitor_alert_create_update_security_solution:
def test_monitor_alert_create_update_security_solution_no_subscriptions(self):
monitor_client = mock.MagicMock()
monitor_client.alert_rules = {}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_create_update_security_solution.monitor_alert_create_update_security_solution.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_create_update_security_solution.monitor_alert_create_update_security_solution import (
monitor_alert_create_update_security_solution,
)
check = monitor_alert_create_update_security_solution()
result = check.execute()
assert len(result) == 0
def test_no_alert_rules(self):
monitor_client = mock.MagicMock()
monitor_client.alert_rules = {AZURE_SUBSCRIPTION_ID: []}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_create_update_security_solution.monitor_alert_create_update_security_solution.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_create_update_security_solution.monitor_alert_create_update_security_solution import (
monitor_alert_create_update_security_solution,
)
check = monitor_alert_create_update_security_solution()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "Monitor"
assert result[0].resource_id == "Monitor"
assert (
result[0].status_extended
== f"There is not an alert for creating/updating Security Solution in subscription {AZURE_SUBSCRIPTION_ID}."
)
def test_alert_rules_configured(self):
monitor_client = mock.MagicMock
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_create_update_security_solution.monitor_alert_create_update_security_solution.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_create_update_security_solution.monitor_alert_create_update_security_solution import (
monitor_alert_create_update_security_solution,
)
from prowler.providers.azure.services.monitor.monitor_service import (
AlertRule,
AlertRuleAllOfCondition,
)
monitor_client.alert_rules = {
AZURE_SUBSCRIPTION_ID: [
AlertRule(
id="id",
name="name",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Security/securitySolutions/write",
field="operationName",
),
]
),
enabled=False,
description="description",
),
AlertRule(
id="id2",
name="name2",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Security/securitySolutions/write",
field="operationName",
),
]
),
enabled=True,
description="description2",
),
]
}
check = monitor_alert_create_update_security_solution()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "name2"
assert result[0].resource_id == "id2"
assert (
result[0].status_extended
== f"There is an alert configured for creating/updating Security Solution in subscription {AZURE_SUBSCRIPTION_ID}."
)

View File

@@ -0,0 +1,118 @@
from unittest import mock
from azure.mgmt.monitor.models import AlertRuleAnyOfOrLeafCondition
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
)
class Test_monitor_alert_create_update_sqlserver_fr:
def test_monitor_alert_create_update_sqlserver_fr_no_subscriptions(self):
monitor_client = mock.MagicMock()
monitor_client.alert_rules = {}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_create_update_sqlserver_fr.monitor_alert_create_update_sqlserver_fr.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_create_update_sqlserver_fr.monitor_alert_create_update_sqlserver_fr import (
monitor_alert_create_update_sqlserver_fr,
)
check = monitor_alert_create_update_sqlserver_fr()
result = check.execute()
assert len(result) == 0
def test_no_alert_rules(self):
monitor_client = mock.MagicMock()
monitor_client.alert_rules = {AZURE_SUBSCRIPTION_ID: []}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_create_update_sqlserver_fr.monitor_alert_create_update_sqlserver_fr.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_create_update_sqlserver_fr.monitor_alert_create_update_sqlserver_fr import (
monitor_alert_create_update_sqlserver_fr,
)
check = monitor_alert_create_update_sqlserver_fr()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "Monitor"
assert result[0].resource_id == "Monitor"
assert (
result[0].status_extended
== f"There is not an alert for creating/updating SQL Server firewall rule in subscription {AZURE_SUBSCRIPTION_ID}."
)
def test_alert_rules_configured(self):
monitor_client = mock.MagicMock
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_create_update_sqlserver_fr.monitor_alert_create_update_sqlserver_fr.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_create_update_sqlserver_fr.monitor_alert_create_update_sqlserver_fr import (
monitor_alert_create_update_sqlserver_fr,
)
from prowler.providers.azure.services.monitor.monitor_service import (
AlertRule,
AlertRuleAllOfCondition,
)
monitor_client.alert_rules = {
AZURE_SUBSCRIPTION_ID: [
AlertRule(
id="id",
name="name",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Sql/servers/firewallRules/write",
field="operationName",
),
]
),
enabled=False,
description="description",
),
AlertRule(
id="id2",
name="name2",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Sql/servers/firewallRules/write",
field="operationName",
),
]
),
enabled=True,
description="description2",
),
]
}
check = monitor_alert_create_update_sqlserver_fr()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "name2"
assert result[0].resource_id == "id2"
assert (
result[0].status_extended
== f"There is an alert configured for creating/updating SQL Server firewall rule in subscription {AZURE_SUBSCRIPTION_ID}."
)

View File

@@ -0,0 +1,118 @@
from unittest import mock
from azure.mgmt.monitor.models import AlertRuleAnyOfOrLeafCondition
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
)
class Test_monitor_alert_delete_nsg:
def test_monitor_alert_delete_nsg_no_subscriptions(self):
monitor_client = mock.MagicMock()
monitor_client.alert_rules = {}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_delete_nsg.monitor_alert_delete_nsg.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_delete_nsg.monitor_alert_delete_nsg import (
monitor_alert_delete_nsg,
)
check = monitor_alert_delete_nsg()
result = check.execute()
assert len(result) == 0
def test_no_alert_rules(self):
monitor_client = mock.MagicMock()
monitor_client.alert_rules = {AZURE_SUBSCRIPTION_ID: []}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_delete_nsg.monitor_alert_delete_nsg.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_delete_nsg.monitor_alert_delete_nsg import (
monitor_alert_delete_nsg,
)
check = monitor_alert_delete_nsg()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "Monitor"
assert result[0].resource_id == "Monitor"
assert (
result[0].status_extended
== f"There is not an alert for deleting Network Security Groups in subscription {AZURE_SUBSCRIPTION_ID}."
)
def test_alert_rules_configured(self):
monitor_client = mock.MagicMock
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_delete_nsg.monitor_alert_delete_nsg.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_delete_nsg.monitor_alert_delete_nsg import (
monitor_alert_delete_nsg,
)
from prowler.providers.azure.services.monitor.monitor_service import (
AlertRule,
AlertRuleAllOfCondition,
)
monitor_client.alert_rules = {
AZURE_SUBSCRIPTION_ID: [
AlertRule(
id="id",
name="name",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Network/networkSecurityGroups/delete",
field="operationName",
),
]
),
enabled=False,
description="description",
),
AlertRule(
id="id2",
name="name2",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Network/networkSecurityGroups/delete",
field="operationName",
),
]
),
enabled=True,
description="description2",
),
]
}
check = monitor_alert_delete_nsg()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "name2"
assert result[0].resource_id == "id2"
assert (
result[0].status_extended
== f"There is an alert configured for deleting Network Security Groups in subscription {AZURE_SUBSCRIPTION_ID}."
)

View File

@@ -0,0 +1,119 @@
from unittest import mock
from azure.mgmt.monitor.models import AlertRuleAnyOfOrLeafCondition
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
)
class Test_monitor_alert_delete_policy_assignment:
def test_monitor_alert_delete_policy_assignment_no_subscriptions(self):
monitor_client = mock.MagicMock
monitor_client.alert_rules = {}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_delete_policy_assignment.monitor_alert_delete_policy_assignment.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_delete_policy_assignment.monitor_alert_delete_policy_assignment import (
monitor_alert_delete_policy_assignment,
)
check = monitor_alert_delete_policy_assignment()
result = check.execute()
assert len(result) == 0
def test_no_alert_rules(self):
monitor_client = mock.MagicMock
monitor_client.alert_rules = {AZURE_SUBSCRIPTION_ID: []}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_delete_policy_assignment.monitor_alert_delete_policy_assignment.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_delete_policy_assignment.monitor_alert_delete_policy_assignment import (
monitor_alert_delete_policy_assignment,
)
check = monitor_alert_delete_policy_assignment()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "Monitor"
assert result[0].resource_id == "Monitor"
assert (
result[0].status_extended
== f"There is not an alert for deleting policy assignment in subscription {AZURE_SUBSCRIPTION_ID}."
)
def test_alert_rules_configured(self):
monitor_client = mock.MagicMock
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_delete_policy_assignment.monitor_alert_delete_policy_assignment.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_delete_policy_assignment.monitor_alert_delete_policy_assignment import (
monitor_alert_delete_policy_assignment,
)
from prowler.providers.azure.services.monitor.monitor_service import (
AlertRule,
AlertRuleAllOfCondition,
)
monitor_client.alert_rules = {
AZURE_SUBSCRIPTION_ID: [
AlertRule(
id="id",
name="name",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Authorization/policyAssignments/delete",
field="operationName",
),
]
),
enabled=False,
description="description",
),
AlertRule(
id="id2",
name="name2",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Authorization/policyAssignments/delete",
field="operationName",
),
]
),
enabled=True,
description="description2",
),
]
}
check = monitor_alert_delete_policy_assignment()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "name2"
assert result[0].resource_id == "id2"
assert (
result[0].status_extended
== f"There is an alert configured for deleting policy assignment in subscription {AZURE_SUBSCRIPTION_ID}."
)

View File

@@ -0,0 +1,118 @@
from unittest import mock
from azure.mgmt.monitor.models import AlertRuleAnyOfOrLeafCondition
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
)
class Test_monitor_alert_delete_public_ip_address_rule:
def test_monitor_alert_delete_public_ip_address_rule_no_subscriptions(self):
monitor_client = mock.MagicMock()
monitor_client.alert_rules = {}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_delete_public_ip_address_rule.monitor_alert_delete_public_ip_address_rule.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_delete_public_ip_address_rule.monitor_alert_delete_public_ip_address_rule import (
monitor_alert_delete_public_ip_address_rule,
)
check = monitor_alert_delete_public_ip_address_rule()
result = check.execute()
assert len(result) == 0
def test_no_alert_rules(self):
monitor_client = mock.MagicMock()
monitor_client.alert_rules = {AZURE_SUBSCRIPTION_ID: []}
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_delete_public_ip_address_rule.monitor_alert_delete_public_ip_address_rule.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_delete_public_ip_address_rule.monitor_alert_delete_public_ip_address_rule import (
monitor_alert_delete_public_ip_address_rule,
)
check = monitor_alert_delete_public_ip_address_rule()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "Monitor"
assert result[0].resource_id == "Monitor"
assert (
result[0].status_extended
== f"There is not an alert for deleting public IP address rule in subscription {AZURE_SUBSCRIPTION_ID}."
)
def test_alert_rules_configured(self):
monitor_client = mock.MagicMock
with mock.patch(
"prowler.providers.common.common.get_global_provider",
return_value=set_mocked_azure_provider(),
), mock.patch(
"prowler.providers.azure.services.monitor.monitor_alert_delete_public_ip_address_rule.monitor_alert_delete_public_ip_address_rule.monitor_client",
new=monitor_client,
):
from prowler.providers.azure.services.monitor.monitor_alert_delete_public_ip_address_rule.monitor_alert_delete_public_ip_address_rule import (
monitor_alert_delete_public_ip_address_rule,
)
from prowler.providers.azure.services.monitor.monitor_service import (
AlertRule,
AlertRuleAllOfCondition,
)
monitor_client.alert_rules = {
AZURE_SUBSCRIPTION_ID: [
AlertRule(
id="id",
name="name",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Network/publicIPAddresses/delete",
field="operationName",
),
]
),
enabled=False,
description="description",
),
AlertRule(
id="id2",
name="name2",
condition=AlertRuleAllOfCondition(
all_of=[
AlertRuleAnyOfOrLeafCondition(),
AlertRuleAnyOfOrLeafCondition(
equals="Microsoft.Network/publicIPAddresses/delete",
field="operationName",
),
]
),
enabled=True,
description="description2",
),
]
}
check = monitor_alert_delete_public_ip_address_rule()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "name2"
assert result[0].resource_id == "id2"
assert (
result[0].status_extended
== f"There is an alert configured for deleting public IP address rule in subscription {AZURE_SUBSCRIPTION_ID}."
)

Some files were not shown because too many files have changed in this diff