Compare commits

..

25 Commits

Author SHA1 Message Date
OlmeNav
b384718bb0 Merge branch 'master' into todo-check-class 2025-09-12 12:48:58 +02:00
Rubén De la Torre Vico
5b0365947f feat: add first Prowler MCP server version (#8695) 2025-09-12 09:56:36 +02:00
Daniel Barranquero
b512f6c421 fix(firehose): false positive in firehose_stream_encrypted_at_rest (#8599)
Co-authored-by: Sergio Garcia <hello@mistercloudsec.com>
2025-09-11 09:55:16 -04:00
Alejandro Bailo
c4a8771647 chore(dependencies): update package versions and track them (#8696) 2025-09-11 15:36:06 +02:00
Alejandro Bailo
6f967c6da7 fix(auth): validate email field (#8698) 2025-09-11 15:29:49 +02:00
Alejandro Bailo
82cd29d595 fix(auth): add method attribute to form for proper submission handling (#8699) 2025-09-11 15:02:36 +02:00
Daniel Barranquero
14c2334e1b fix(defender): change policies rules key (#8702) 2025-09-11 13:46:21 +02:00
OlmeNav
120f788095 docs(changelog): add entry for CheckID, filename and classname validation. #8690 2025-09-11 12:59:35 +02:00
OlmeNav
e9f528ce27 fix: replace logger.error with ValidationError and update tests 2025-09-11 12:50:42 +02:00
OlmeNav
8286531a9a Merge remote-tracking branch 'upstream/master' into todo-check-class 2025-09-11 12:09:28 +02:00
angelolmn
350739ae71 test: add test for CheckID, filename and classname verification. 2025-09-11 11:59:50 +02:00
Rubén De la Torre Vico
3598514cb4 chore(aws/config): adapt metadata to new standarized format (#8641)
Co-authored-by: HugoPBrito <hugopbrit@gmail.com>
2025-09-10 17:46:11 +02:00
Hugo Pereira Brito
c4ba061f30 chore(outputs): adapt to new metadata specification (#8651) 2025-09-10 17:21:19 +02:00
Chandrapal Badshah
f4530b21d2 fix(lighthouse): make Enter submit text (#8664)
Co-authored-by: Chandrapal Badshah <12944530+Chan9390@users.noreply.github.com>
2025-09-10 16:34:35 +02:00
Chandrapal Badshah
3949ab736d fix(lighthouse): allow scrolling during AI response streaming (#8669)
Co-authored-by: Chandrapal Badshah <12944530+Chan9390@users.noreply.github.com>
2025-09-10 16:34:24 +02:00
sumit-tft
9da5066b18 feat(ui): add copy link icon to finding detail page (#8685)
Co-authored-by: alejandrobailo <alejandrobailo94@gmail.com>
2025-09-10 16:30:16 +02:00
Rubén De la Torre Vico
941539616c chore(aws/neptune): adapt some metadata fields to new format (#8494)
Co-authored-by: HugoPBrito <hugopbrit@gmail.com>
Co-authored-by: Hugo Pereira Brito <101209179+HugoPBrito@users.noreply.github.com>
2025-09-10 16:21:30 +02:00
sumit-tft
135fa044b7 feat(ui): Add Prowler Hub menu item with tooltip (#8692)
Co-authored-by: alejandrobailo <alejandrobailo94@gmail.com>
2025-09-10 16:09:09 +02:00
Andoni Alonso
48913c1886 docs(aws): refactor getting started and auth (#8683) 2025-09-10 13:45:36 +02:00
Pedro Martín
ea20943f83 feat(actions): support dashboard changes in changelog (#8694) 2025-09-10 11:05:56 +02:00
Hugo Pereira Brito
2738cfd1bd feat(dashboard): add Description and markdown support (#8667) 2025-09-10 10:53:53 +02:00
Rubén De la Torre Vico
265c3d818e docs(developer-guide): enhance check metadata format (#8411)
Co-authored-by: HugoPBrito <hugopbrit@gmail.com>
2025-09-10 09:19:08 +02:00
Alejandro Bailo
c0a9fdf8c8 docs(jira): add comprehensive guide for Jira integration in Prowler App (#8681)
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
Co-authored-by: Adrián Jesús Peña Rodríguez <adrianjpr@gmail.com>
2025-09-09 17:01:12 +02:00
Rubén De la Torre Vico
8b3335f426 chore: add metadata-review label for .metadata.json files (#8689) 2025-09-09 20:32:04 +05:45
angelolmn
258216d651 feat: Verify that the CheckID is the same as the filename and classname in the Check class 2025-09-09 13:58:02 +02:00
95 changed files with 6380 additions and 1372 deletions

4
.github/labeler.yml vendored
View File

@@ -119,3 +119,7 @@ compliance:
review-django-migrations:
- changed-files:
- any-glob-to-any-file: "api/src/backend/api/migrations/**"
metadata-review:
- changed-files:
- any-glob-to-any-file: "**/*.metadata.json"

View File

@@ -13,7 +13,7 @@ jobs:
contents: read
pull-requests: write
env:
MONITORED_FOLDERS: "api ui prowler"
MONITORED_FOLDERS: "api ui prowler dashboard"
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

View File

@@ -0,0 +1,34 @@
/* Override Tailwind CSS reset for markdown content */
.markdown-content ul {
list-style: disc !important;
margin-left: 20px !important;
padding-left: 10px !important;
margin-bottom: 8px !important;
}
.markdown-content ol {
list-style: decimal !important;
margin-left: 20px !important;
padding-left: 10px !important;
margin-bottom: 8px !important;
}
.markdown-content li {
margin-bottom: 4px !important;
display: list-item !important;
}
.markdown-content p {
margin-bottom: 8px !important;
}
/* Ensure nested lists work properly */
.markdown-content ul ul {
margin-top: 4px !important;
margin-bottom: 4px !important;
}
.markdown-content ol ol {
margin-top: 4px !important;
margin-bottom: 4px !important;
}

View File

@@ -1654,6 +1654,39 @@ def generate_table(data, index, color_mapping_severity, color_mapping_status):
[
html.Div(
[
# Description as first details item
html.Div(
[
html.P(
html.Strong(
"Description: ",
style={
"margin-bottom": "8px"
},
)
),
html.Div(
dcc.Markdown(
str(
data.get(
"DESCRIPTION",
"",
)
),
dangerously_allow_html=True,
style={
"margin-left": "0px",
"padding-left": "10px",
},
),
className="markdown-content",
style={
"margin-left": "0px",
"padding-left": "10px",
},
),
],
),
html.Div(
[
html.P(
@@ -1793,19 +1826,27 @@ def generate_table(data, index, color_mapping_severity, color_mapping_status):
html.P(
html.Strong(
"Risk: ",
style={
"margin-right": "5px"
},
style={},
)
),
html.P(
str(data.get("RISK", "")),
html.Div(
dcc.Markdown(
str(
data.get("RISK", "")
),
dangerously_allow_html=True,
style={
"margin-left": "0px",
"padding-left": "10px",
},
),
className="markdown-content",
style={
"margin-left": "5px"
"margin-left": "0px",
"padding-left": "10px",
},
),
],
style={"display": "flex"},
),
html.Div(
[
@@ -1847,23 +1888,32 @@ def generate_table(data, index, color_mapping_severity, color_mapping_status):
html.Strong(
"Recommendation: ",
style={
"margin-right": "5px"
"margin-bottom": "8px"
},
)
),
html.P(
str(
data.get(
"REMEDIATION_RECOMMENDATION_TEXT",
"",
)
html.Div(
dcc.Markdown(
str(
data.get(
"REMEDIATION_RECOMMENDATION_TEXT",
"",
)
),
dangerously_allow_html=True,
style={
"margin-left": "0px",
"padding-left": "10px",
},
),
className="markdown-content",
style={
"margin-left": "5px"
"margin-left": "0px",
"padding-left": "10px",
},
),
],
style={"display": "flex"},
style={"margin-bottom": "15px"},
),
html.Div(
[

View File

@@ -279,4 +279,4 @@ You can filter scans to specific organizations or projects:
prowler mongodbatlas --atlas-project-id <project_id>
```
See more details about MongoDB Atlas Authentication in [Requirements](../getting-started/requirements.md#mongodb-atlas)
See more details about MongoDB Atlas Authentication in [MongoDB Atlas Authentication](../tutorials/mongodbatlas/authentication.md)

View File

@@ -0,0 +1,213 @@
# Check Metadata Guidelines
## Introduction
This guide provides comprehensive guidelines for creating check metadata in Prowler. For basic information on check metadata structure, refer to the [check metadata](./checks.md#metadata-structure-for-prowler-checks) section.
## Check Title Guidelines
### Writing Guidelines
1. **Determine Resource Finding Scope (Singular vs. Plural)**:
When determining whether to use singular or plural in the check title, examine the code for certain patterns. If the code contains a loop that generates an individual report for each resource, use the singular form. If the code produces a single report that covers all resources collectively, use the plural form. For organization- or account-wide checks, select the scope that best matches the breadth of the evaluation. Additionally, review the `status_extended` field messages in the code, as they often provide clues about whether the check is scoped to individual resources or to groups of resources.
Analyze the detection code to determine if the check reports on individual resources or aggregated resources:
- **Singular**: Use when the check creates one report per resource (e.g., "EC2 instance has IMDSv2 enforced", "S3 bucket does not allow public write access").
- **Plural**: Use when the check creates one report for all resources together (e.g., "All EC2 instances have IMDSv2 enforced", "S3 buckets do not allow public write access").
2. **Describe the Compliant (*PASS*) State**:
Always write the title to describe the **desired, compliant state** of the resources. The title should reflect what it looks like when the audited resource is following the check's requirements.
3. **Be Specific and Factual**:
Include the exact secure configuration being verified. Avoid vague or generic terms like "properly configured".
4. **Avoid Redundant or Action Words**:
Do not include verbs like "Check", "Verify", "Ensure", or "Monitor". The title is a declarative statement of the secure condition.
5. **Length Limit**:
Keep the title under 150 characters.
### Common Mistakes to Avoid
- Starting with verbs like "Check", "Verify", "Ensure", "Make sure". Always start with the affected resource instead.
- Being too vague or generic (e.g., "Ensure security groups are properly configured", what does it mean? "properly configured" is not a clear description of the compliant state).
- Focusing on the non-compliant state instead of the compliant state.
- Using unclear scope and resource identification.
## Check Type Guidelines (AWS Only)
### AWS Security Hub Type Format
AWS Security Hub uses a three-part type taxonomy:
- **Namespace**: The top-level security domain.
- **Category**: The security control family or area.
- **Classifier**: The specific security concern (optional).
A partial path may be defined (e.g., `TTPs` or `TTPs/Defense Evasion` are valid).
### Selection Guidelines
1. **Be Specific**: Use the most specific classifier that accurately describes the check.
2. **Standard Compliance**: Consider if the check relates to specific compliance standards.
3. **Multiple Types**: You can specify multiple types if the check addresses multiple concerns.
## Description Guidelines
### Writing Guidelines
1. **Focus on the Finding**: All fields should address how the finding affects the security posture, rather than the control itself.
2. **Use Natural Language**: Write in simple, clear paragraphs with complete, grammatically correct sentences.
3. **Use Markdown Formatting**: Enhance readability with:
- Use **bold** for emphasis on key security concepts.
- Use *italic* for a secondary emphasis. Use it for clarifications, conditions, or optional notes. But don't abuse it.
- Use `code` formatting for specific configuration values, or technical details. Don't use it for service names or common technical terms.
- Use one or two line breaks (`\n` or `\n\n`) to separate distinct ideas.
- Use bullet points (`-`) for listing multiple concepts or actions.
- Use numbers for listing steps or sequential actions.
4. **Be Concise**: Maximum 400 characters (spaces count). Every word should add value.
5. **Explain What the Finding Means**: Focus on what the security control evaluates and what it means when it passes or fails, but without explicitly stating the pass or fail state.
6. **Be Technical but Clear**: Use appropriate technical terminology while remaining understandable.
7. **Avoid Risk Descriptions**: Do not describe potential risks, threats, or consequences.
8. **CheckTitle and Description can be the same**: If the check is very simple and the title is already clear, you can use the same text for the description.
### Common Mistakes to Avoid
- **Technical Implementation Details**: "The control loops through all instances and calls the describe_instances API...".
- **Vague Descriptions**: "This control verifies proper configuration of resources". What does it mean? "proper configuration" is not a clear description of the compliant state.
- **Risk Descriptions**: "This could lead to data breaches" or "This poses a security threat".
- **Starting with Verbs**: "Check if...", "Verify...", "Ensure...". Always start with the affected resource instead.
- **References to Pass/Fail States**: Avoid using words like "pass" or "fail".
## Risk Guidelines
### Writing Guidelines
1. **Explain the Cybersecurity Impact**: Focus on how the finding affects confidentiality, integrity, or availability (CIA triad). If the CIA triad does not apply, explain the risk in terms of the organization's business objectives.
2. **Be Specific About Threats**: Clearly state what could happen if this security control is not in place. What attacks or incidents become possible?
3. **Focus on Risk Context**: Explain the specific security implications of the finding, not just generic security risks.
4. **Use Markdown Formatting**: Enhance readability with markdown formatting:
- Use **bold** for emphasis on key security concepts.
- Use *italic* for a secondary emphasis. Use it for clarifications, conditions, or optional notes. But don't abuse it.
- Use `code` formatting for specific configuration values, or technical details. Don't use it for service names or common technical terms.
- Use one or two line breaks (`\n` or `\n\n`) to separate distinct ideas.
- Use bullet points (`-`) for listing multiple concepts or actions.
- Use numbers for listing steps or sequential actions.
5. **Be Concise**: Maximum 400 characters. Make every word count.
### Common Mistakes to Avoid
- **Generic Risks**: "This could lead to security issues" or "Regulatory compliance violations".
- **Technical Implementation Focus**: "The API call might fail and return incorrect results...".
- **Overly Broad Statements**: "This is a serious security risk that could impact everything".
- **Vague Threats**: "This could be exploited by threat actors" without explaining how.
## Recommendation Guidelines
### Writing Guidelines
1. **Provide Actionable Best Practice Guidance**: Explain what should be done to maintain security posture. Focus on preventive measures and proactive security practices.
2. **Be Principle-Based**: Reference established security principles (least privilege, defense in depth, zero trust, separation of duties) where applicable.
3. **Focus on Prevention**: Explain best practices that prevent the security issue from occurring, not just detection or remediation.
4. **Use Markdown Formatting**: Enhance readability with markdown formatting:
- Use **bold** for emphasis on key security concepts.
- Use *italic* for a secondary emphasis. Use it for clarifications, conditions, or optional notes. But don't abuse it.
- Use `code` formatting for specific configuration values, or technical details. Don't use it for service names or common technical terms.
- Use one or two line breaks (`\n` or `\n\n`) to separate distinct ideas.
- Use bullet points (`-`) for listing multiple concepts or actions.
- Use numbers for listing steps or sequential actions.
5. **Be Concise**: Maximum 400 characters.
### Common Mistakes to Avoid
- **Specific Remediation Steps**: "1. Go to the console\n2. Click on settings..." - Focus on principles, not click-by-click instructions.
- **Implementation Details**: "Configure the JSON policy with the following IAM actions..." - Explain what to achieve, not how.
- **Vague Guidance**: "Follow security best practices..." without explaining what those practices are.
- **Resource-Specific Recommendations**: "Enable MFA on user john.doe@example.com" - Keep it general.
- **Missing Context**: Not explaining why the best practice is important for security.
### Good Examples
- *"Avoid exposing sensitive resources directly to the Internet; configure access controls to limit exposure."*
- *"Apply the principle of least privilege when assigning permissions to users and services."*
- *"Regularly review and update your security configurations to align with current best practices."*
## Remediation Code Guidelines
### Critical Requirement
The **fundamental principle** is to focus on the **specific change** that converts the finding from non-compliant to compliant.
It is also important to keep all code examples as short as possible, including only the essential code needed to fix the issue. Remove any extra configuration, optional parameters, or nice-to-have settings, and add comments to explain the code when possible.
### Common Guidelines for All Code Fields
1. **Be Minimal**: Keep code blocks as short as possible - only include what is absolutely necessary.
2. **Focus on the Fix**: Remove any extra configuration, optional parameters, or nice-to-have settings.
3. **Be Accurate**: Ensure all commands and code are syntactically correct.
4. **Use Markdown Formatting**: Format code properly using code blocks and appropriate syntax highlighting.
5. **Follow Best Practices**: Use the most secure and recommended approaches for each platform.
### CLI Guidelines
- Only provide a single command that directly changes the finding from fail to pass.
- The command must be executable as-is and resolve the security issue completely.
- Use proper command syntax for the provider (AWS CLI, Azure CLI, gcloud, kubectl, etc.).
- Do not use markdown formatting or code blocks - just the raw command.
- Do not include multiple commands, comments, or explanations.
- If the issue cannot be resolved with a single command, leave this field empty.
### Native IaC Guidelines
- **Keep It Minimal**: Only include the specific resource/configuration that fixes the security issue.
- Format as markdown code blocks with proper syntax highlighting.
- Include only the required properties to fix the issue.
- Add comments indicating the critical line(s) that remediate the check.
- Use `example_resource` as the generic name for all resources and IDs.
### Terraform Guidelines
- **Keep It Minimal**: Only include the specific resource/configuration that fixes the security issue.
- Provide valid HCL (HashiCorp Configuration Language) code with an example of a compliant configuration.
- Use the latest Terraform syntax and provider versions.
- Include only the required arguments to fix the issue - skip optional parameters.
- Format as markdown code blocks with `hcl` syntax highlighting.
- Add comments indicating the critical line(s) that remediate the check.
- Use `example_resource` as the generic name for all resources and IDs.
- Skip provider requirements unless critical for the fix.
### Other (Manual Steps) Guidelines
- **Keep It Minimal**: Only include the exact steps needed to fix the security issue.
- Provide step-by-step instructions for manual remediation through web interfaces.
- Use numbered lists for sequential steps.
- Be specific about menu locations, button names, and settings.
- Skip optional configurations or nice-to-have settings.
- Format using markdown for better readability.
## Categories Guidelines
### Selection Guidelines
1. **Be Specific**: Only select categories that directly relate to what the automated control evaluates.
2. **Primary Focus**: Consider the primary security concern the automated control addresses.
3. **Avoid Over-Categorization**: Do not select categories just because they are tangentially related.
### Available Categories
| Category | Definition |
|-------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| encryption | Ensures data is encrypted in transit and/or at rest, including key management practices |
| internet-exposed | Checks that limit or flag public access to services, APIs, or assets from the Internet |
| logging | Ensures appropriate logging of events, activities, and system interactions for traceability |
| secrets | Manages and protects credentials, API keys, tokens, and other sensitive information |
| resilience | Ensures systems can maintain availability and recover from disruptions, failures, or degradation. Includes redundancy, fault-tolerance, auto-scaling, backup, disaster recovery, and failover strategies |
| threat-detection | Identifies suspicious activity or behaviors using IDS, malware scanning, or anomaly detection |
| trust-boundaries | Enforces isolation or segmentation between different trust levels (e.g., VPCs, tenants, network zones) |
| vulnerabilities | Detects or remediates known software, infrastructure, or config vulnerabilities (e.g., CVEs) |
| cluster-security | Secures Kubernetes cluster components such as API server, etcd, and role-based access |
| container-security | Ensures container images and runtimes follow security best practices |
| node-security | Secures nodes running containers or services |
| gen-ai | Checks related to safe and secure use of generative AI services or models |
| ci-cd | Ensures secure configurations in CI/CD pipelines |
| identity-access | Governs user and service identities, including least privilege, MFA, and permission boundaries |
| email-security | Ensures detection and protection against phishing, spam, spoofing, etc. |
| forensics-ready | Ensures systems are instrumented to support post-incident investigations. Any digital trace or evidence (logs, volume snapshots, memory dumps, network captures, etc.) preserved immutably and accompanied by integrity guarantees, which can be used in a forensic analysis |
| software-supply-chain | Detects or prevents tampering, unauthorized packages, or third-party risks in software supply chain |
| e3 | M365-specific controls enabled by or dependent on an E3 license (e.g., baseline security policies, conditional access) |
| e5 | M365-specific controls enabled by or dependent on an E5 license (e.g., advanced threat protection, audit, DLP, and eDiscovery) |

View File

@@ -40,7 +40,7 @@ Each check in Prowler follows a straightforward structure. Within the newly crea
- `__init__.py` (empty file) Ensures Python treats the check folder as a package.
- `<check_name>.py` (code file) Contains the check logic, following the prescribed format. Please refer to the [prowler's check code structure](./checks.md#prowlers-check-code-structure) for more information.
- `<check_name>.metadata.json` (metadata file) Defines the check's metadata for contextual information. Please refer to the [check metadata](./checks.md#) for more information.
- `<check_name>.metadata.json` (metadata file) Defines the check's metadata for contextual information. Please refer to the [check metadata](./checks.md#metadata-structure-for-prowler-checks) for more information.
## Prowler's Check Code Structure
@@ -226,68 +226,148 @@ Below is a generic example of a check metadata file. **Do not include comments i
```json
{
"Provider": "aws",
"CheckID": "example_check_id",
"CheckTitle": "Example Check Title",
"CheckType": ["Infrastructure Security"],
"ServiceName": "ec2",
"SubServiceName": "ami",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"Severity": "critical",
"CheckID": "service_resource_security_setting",
"CheckTitle": "Service resource has security setting enabled",
"CheckType": [],
"ServiceName": "service",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "Other",
"Description": "Example description of the check.",
"Risk": "Example risk if the check fails.",
"RelatedUrl": "https://example.com",
"Description": "This check verifies that the service resource has the required **security setting** enabled to protect against potential vulnerabilities.\n\nIt ensures that the resource follows security best practices and maintains proper access controls. The check evaluates whether the security configuration is properly implemented and active.",
"Risk": "Without proper security settings, the resource may be vulnerable to:\n\n- **Unauthorized access** - Malicious actors could gain entry\n- **Data breaches** - Sensitive information could be compromised\n- **Security threats** - Various attack vectors could be exploited\n\nThis could result in compliance violations and potential financial or reputational damage.",
"RelatedUrl": "",
"AdditionalURLs": ["https://example.com/security-documentation", "https://example.com/best-practices"],
"Remediation": {
"Code": {
"CLI": "example CLI command",
"NativeIaC": "",
"Other": "",
"Terraform": ""
"CLI": "provider-cli service enable-security-setting --resource-id resource-123",
"NativeIaC": "```yaml\nType: Provider::Service::Resource\nProperties:\n SecuritySetting: enabled\n ResourceId: resource-123\n```",
"Other": "1. Open the provider management console\n2. Navigate to the service section\n3. Select the resource\n4. Enable the security setting\n5. Save the configuration",
"Terraform": "```hcl\nresource \"provider_service_resource\" \"example\" {\n resource_id = \"resource-123\"\n security_setting = true\n}\n```"
},
"Recommendation": {
"Text": "Example recommendation text.",
"Url": "https://example.com/remediation"
"Text": "Enable security settings on all service resources to ensure proper protection. Regularly review and update security configurations to align with current best practices.",
"Url": "https://hub.prowler.com/check/service_resource_security_setting"
}
},
"Categories": ["example-category"],
"Categories": ["internet-exposed", "secrets"],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
"RelatedTo": ["service_resource_security_setting", "service_resource_security_setting_2"],
"Notes": "This is a generic example check that should be customized for specific provider and service requirements."
}
```
### Metadata Fields and Their Purpose
- **Provider** — The Prowler provider related to the check. The name **must** be lowercase and match the provider folder name. For supported providers refer to [Prowler Hub](https://hub.prowler.com/check) or directly to [Prowler Code](https://github.com/prowler-cloud/prowler/tree/master/prowler/providers).
- **CheckID** — The unique identifier for the check inside the provider, this field **must** match the check's folder and python file and json metadata file name. For more information about the naming refer to the [Naming Format for Checks](#naming-format-for-checks) section.
- **CheckTitle** — A concise, descriptive title for the check.
- **CheckType** — *For now this field is only standardized for the AWS provider*.
- For AWS this field must follow the [AWS Security Hub Types](https://docs.aws.amazon.com/securityhub/latest/userguide/asff-required-attributes.html#Types) format. So the common pattern to follow is `namespace/category/classifier`, refer to the attached documentation for the valid values for this fields.
- **ServiceName** — The name of the provider service being audited. This field **must** be in lowercase and match with the service folder name. For supported services refer to [Prowler Hub](https://hub.prowler.com/check) or directly to [Prowler Code](https://github.com/prowler-cloud/prowler/tree/master/prowler/providers).
- **SubServiceName** — The subservice or resource within the service, if applicable. For more information refer to the [Naming Format for Checks](#naming-format-for-checks) section.
- **ResourceIdTemplate** — A template for the unique resource identifier. For more information refer to the [Resource Identification in Prowler](#resource-identification-in-prowler) section.
- **Severity** — The severity of the finding if the check fails. Must be one of: `critical`, `high`, `medium`, `low`, or `informational`, this field **must** be in lowercase. To get more information about the severity levels refer to the [Prowler's Check Severity Levels](#prowlers-check-severity-levels) section.
- **ResourceType** — The type of resource being audited. *For now this field is only standardized for the AWS provider*.
- For AWS use the [Security Hub resource types](https://docs.aws.amazon.com/securityhub/latest/userguide/asff-resources.html) or, if not available, the PascalCase version of the [CloudFormation type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) (e.g., `AwsEc2Instance`). Use "Other" if no match exists.
- **Description** — A short description of what the check does.
- **Risk** — The risk or impact if the check fails, explaining why the finding matters.
- **RelatedUrl** — A URL to official documentation or further reading about the check's purpose. If no official documentation is available, use the risk and recommendation text from trusted third-party sources.
- **Remediation** — Guidance for fixing a failed check, including:
- **Code** — Remediation commands or code snippets for CLI, Terraform, native IaC, or other tools like the Web Console.
- **Recommendation** — A textual, human-readable recommendation. It is not necessary to include actual remediation steps here, but rather a general recommendation about what to do to fix the check.
- **Categories** — One or more categories for grouping checks in execution (e.g., `internet-exposed`). For the current list of categories, refer to the [Prowler Hub](https://hub.prowler.com/check).
- **DependsOn** — Currently not used.
- **RelatedTo** — Currently not used.
- **Notes** — Any additional information not covered by other fields.
#### Provider
### Remediation Code Guidelines
The Prowler provider related to the check. The name **must** be lowercase and match the provider folder name. For supported providers refer to [Prowler Hub](https://hub.prowler.com/check) or directly to [Prowler Code](https://github.com/prowler-cloud/prowler/tree/master/prowler/providers).
When providing remediation steps, reference the following sources:
#### CheckID
- Official provider documentation.
- [Prowler Checks Remediation Index](https://docs.prowler.com/checks/checks-index)
- [TrendMicro Cloud One Conformity](https://www.trendmicro.com/cloudoneconformity)
- [CloudMatos Remediation Repository](https://github.com/cloudmatos/matos/tree/master/remediations)
The unique identifier for the check inside the provider. This field **must** match the check's folder, Python file, and JSON metadata file name. For more information about naming, refer to the [Naming Format for Checks](#naming-format-for-checks) section.
#### CheckTitle
The `CheckTitle` field must be plain text and must clearly and succinctly define **the best practice being evaluated and which resource(s) each finding applies to**. The title should be specific, concise (no more than 150 characters), and reference the relevant resource(s) involved.
**Always write the `CheckTitle` to describe the *PASS* case**, the desired secure or compliant state of the resource(s). This helps ensure that findings are easy to interpret and that the title always reflects the best practice being met.
For detailed guidelines on writing effective check titles, including how to determine singular vs. plural scope and common mistakes to avoid, see [CheckTitle Guidelines](./check-metadata-guidelines.md#checktitle-guidelines).
#### CheckType
???+ warning
This field is only applicable to the AWS provider.
It follows the [AWS Security Hub Types](https://docs.aws.amazon.com/securityhub/latest/userguide/asff-required-attributes.html#Types) format using the pattern `namespace/category/classifier`.
For the complete AWS Security Hub selection guidelines, see [CheckType Guidelines](./check-metadata-guidelines.md#checktype-guidelines-aws-only).
#### ServiceName
The name of the provider service being audited. Must be lowercase and match the service folder name. For supported services refer to [Prowler Hub](https://hub.prowler.com/check) or the [Prowler Code](https://github.com/prowler-cloud/prowler/tree/master/prowler/providers).
#### SubServiceName
This field is in the process of being deprecated and should be **left empty**.
#### ResourceIdTemplate
This field is in the process of being deprecated and should be **left empty**.
#### Severity
Severity level if the check fails. Must be one of: `critical`, `high`, `medium`, `low`, or `informational`, and written in lowercase. See [Prowler's Check Severity Levels](#prowlers-check-severity-levels) for details.
#### ResourceType
The type of resource being audited. This field helps categorize and organize findings by resource type for better analysis and reporting. For each provider:
- **AWS**: Use [Security Hub resource types](https://docs.aws.amazon.com/securityhub/latest/userguide/asff-resources.html) or PascalCase CloudFormation types removing the `::` separator used in CloudFormation templates (e.g., in CloudFormation template the type of an EC2 instance is `AWS::EC2::Instance` but in the check it should be `AwsEc2Instance`). Use `Other` if none apply.
- **Azure**: Use types from [Azure Resource Graph](https://learn.microsoft.com/en-us/azure/governance/resource-graph/reference/supported-tables-resources), for example: `Microsoft.Storage/storageAccounts`.
- **Google Cloud**: Use [Cloud Asset Inventory asset types](https://cloud.google.com/asset-inventory/docs/asset-types), for example: `compute.googleapis.com/Instance`.
- **Kubernetes**: Use types shown under `KIND` from `kubectl api-resources`.
- **M365 / GitHub**: Leave empty due to lack of standardized types.
#### Description
A concise, natural language explanation that **clearly describes what the finding means**, focusing on clarity and context rather than technical implementation details. Use simple paragraphs with line breaks if needed, but avoid sections, code blocks, or complex formatting. This field is limited to maximum 400 characters.
For detailed writing guidelines and common mistakes to avoid, see [Description Guidelines](./check-metadata-guidelines.md#description-guidelines).
#### Risk
A clear, natural language explanation of **why this finding poses a cybersecurity risk**. Focus on how it may impact confidentiality, integrity, or availability. If those do not apply, describe any relevant operational or financial risks. Use simple paragraphs with line breaks if needed, but avoid sections, code blocks, or complex formatting. Limit your explanation to 400 characters.
For detailed writing guidelines and common mistakes to avoid, see [Risk Guidelines](./check-metadata-guidelines.md#risk-guidelines).
#### RelatedUrl
*Deprecated*. Use `AdditionalURLs` to add your URL references.
#### AdditionalURLs
???+ warning
URLs must be valid and not repeated.
A list of official documentation URLs for further reading. These should be authoritative sources that provide additional context, best practices, or detailed information about the security control being checked. Prefer official provider documentation, security standards, or well-established security resources. Avoid third-party blogs or unofficial sources unless they are highly reputable and directly relevant.
#### Remediation
Provides both code examples and best practice recommendations for addressing the security issue.
- **Code**: Contains remediation examples in different formats:
- **CLI**: Command-line interface commands to make the finding compliant in runtime.
- **NativeIaC**: Native Infrastructure as Code templates with an example of a compliant configuration. For now it applies to:
- **AWS**: CloudFormation YAML formatted code (do not use JSON format).
- **Azure**: Bicep formatted code (do not use ARM templates).
- **Terraform**: HashiCorp Configuration Language (HCL) code with an example of a compliant configuration.
- **Other**: Manual steps through web interfaces or other tools to make the finding compliant.
For detailed guidelines on writing remediation code, see [Remediation Code Guidelines](./check-metadata-guidelines.md#remediation-code-guidelines).
- **Recommendation**
- **Text**: Generic best practice guidance in natural language using Markdown format (maximum 400 characters). For writing guidelines, see [Recommendation Guidelines](./check-metadata-guidelines.md#recommendation-guidelines).
- **Url**: [Prowler Hub URL](https://hub.prowler.com/) of the check. This URL is always composed by `https://hub.prowler.com/check/<check_id>`.
#### Categories
One or more functional groupings used for execution filtering (e.g., `internet-exposed`). You can define new categories simply by adding them to this field.
For the complete list of available categories, see [Categories Guidelines](./check-metadata-guidelines.md#categories-guidelines).
#### DependsOn
List of check IDs this check depends on. If those checks are compliant, this check will either also be compliant or will not produce any findings.
#### RelatedTo
List of check IDs of checks that are conceptually related, even if they do not share a technical dependency.
#### Notes
Any additional information not covered in the above fields.
### Python Model Reference

View File

@@ -101,6 +101,7 @@ Prowler supports multiple output formats, allowing users to tailor findings pres
finding_dict["DESCRIPTION"] = finding.metadata.Description
finding_dict["RISK"] = finding.metadata.Risk
finding_dict["RELATED_URL"] = finding.metadata.RelatedUrl
finding_dict["ADDITIONAL_URLS"] = unroll_list(finding.metadata.AdditionalURLs)
finding_dict["REMEDIATION_RECOMMENDATION_TEXT"] = (
finding.metadata.Remediation.Recommendation.Text
)

View File

@@ -2,7 +2,11 @@
Prowler requires AWS credentials to function properly. Authentication is available through the following methods:
- Static Credentials
- Assumed Role
## Required Permissions
To ensure full functionality, attach the following AWS managed policies to the designated user or role:
- `arn:aws:iam::aws:policy/SecurityAudit`
@@ -13,37 +17,114 @@ To ensure full functionality, attach the following AWS managed policies to the d
For certain checks, additional read-only permissions are required. Attach the following custom policy to your role: [prowler-additions-policy.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-additions-policy.json)
## Configure AWS Credentials
## Assume Role (Recommended)
Use one of the following methods to authenticate:
This method grants permanent access and is the recommended setup for production environments.
```console
aws configure
```
=== "CloudFormation"
or
1. Download the [Prowler Scan Role Template](https://raw.githubusercontent.com/prowler-cloud/prowler/refs/heads/master/permissions/templates/cloudformation/prowler-scan-role.yml)
```console
export AWS_ACCESS_KEY_ID="ASXXXXXXX"
export AWS_SECRET_ACCESS_KEY="XXXXXXXXX"
export AWS_SESSION_TOKEN="XXXXXXXXX"
```
![Prowler Scan Role Template](./img/prowler-scan-role-template.png)
These credentials must be associated with a user or role with the necessary permissions to perform security checks.
![Download Role Template](./img/download-role-template.png)
2. Open the [AWS Console](https://console.aws.amazon.com), search for **CloudFormation**
![CloudFormation Search](./img/cloudformation-nav.png)
## AWS Profiles
3. Go to **Stacks** and click "Create stack" > "With new resources (standard)"
Specify a custom AWS profile using the following command:
![Create Stack](./img/create-stack.png)
```console
prowler aws -p/--profile <profile_name>
```
4. In **Specify Template**, choose "Upload a template file" and select the downloaded file
## Multi-Factor Authentication (MFA)
![Upload a template file](./img/upload-template-file.png)
![Upload file from downloads](./img/upload-template-from-downloads.png)
For IAM entities requiring Multi-Factor Authentication (MFA), use the `--mfa` flag. Prowler prompts for the following values to initiate a new session:
5. Click "Next", provide a stack name and the **External ID** shown in the Prowler Cloud setup screen
- **ARN of your MFA device**
- **TOTP (Time-Based One-Time Password)**
![External ID](./img/prowler-cloud-external-id.png)
![Stack Data](./img/fill-stack-data.png)
!!! info
An **External ID** is required when assuming the *ProwlerScan* role to comply with AWS [confused deputy prevention](https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html).
6. Acknowledge the IAM resource creation warning and proceed
![Stack Creation Second Step](./img/stack-creation-second-step.png)
7. Click "Submit" to deploy the stack
![Click on submit](./img/submit-third-page.png)
=== "Terraform"
To provision the scan role using Terraform:
1. Run the following commands:
```bash
terraform init
terraform plan
terraform apply
```
2. During `plan` and `apply`, provide the **External ID** when prompted, which is available in the Prowler Cloud or Prowler App UI:
![Get External ID](./img/get-external-id-prowler-cloud.png)
> 💡 Note: Terraform will use the AWS credentials of the default profile.
---
## Credentials
=== "Long term credentials"
1. Go to the [AWS Console](https://console.aws.amazon.com), open **CloudShell**
![AWS CloudShell](./img/aws-cloudshell.png)
2. Run:
```bash
aws iam create-access-key
```
3. Copy the output containing:
- `AccessKeyId`
- `SecretAccessKey`
![CloudShell Output](./img/cloudshell-output.png)
=== "Short term credentials (Recommended)"
Use the [AWS Access Portal](https://docs.aws.amazon.com/singlesignon/latest/userguide/howtogetcredentials.html) or the CLI:
1. Retrieve short-term credentials for the IAM identity using this command:
```bash
aws sts get-session-token --duration-seconds 900
```
???+ note
Check the AWS documentation [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/sts_example_sts_GetSessionToken_section.html).
2. Copy the output containing:
- `AccessKeyId`
- `SecretAccessKey`
- `SessionToken`
> Sample output:
```json
{
"Credentials": {
"AccessKeyId": "ASIAIOSFODNN7EXAMPLE",
"SecretAccessKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY",
"SessionToken": "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE",
"Expiration": "2020-05-19T18:06:10+00:00"
}
}
```

View File

@@ -1,39 +1,31 @@
# Getting Started with AWS on Prowler Cloud/App
# Getting Started With AWS on Prowler
## Prowler App
<iframe width="560" height="380" src="https://www.youtube-nocookie.com/embed/RPgIWOCERzY" title="Prowler Cloud Onboarding AWS" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen="1"></iframe>
Set up your AWS account to enable security scanning using Prowler Cloud/App.
> Walkthrough video onboarding an AWS Account using Assumed Role.
## Requirements
To configure your AWS account, you'll need:
1. Access to Prowler Cloud/App
2. Properly configured AWS credentials (either static or via an assumed IAM role)
---
## Step 1: Get Your AWS Account ID
### Step 1: Get Your AWS Account ID
1. Log in to the [AWS Console](https://console.aws.amazon.com)
2. Locate your AWS account ID in the top-right dropdown menu
![Account ID detail](./img/aws-account-id.png)
---
## Step 2: Access Prowler Cloud/App
### Step 2: Access Prowler Cloud or Prowler App
1. Navigate to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](../prowler-app.md)
2. Go to `Configuration` > `Cloud Providers`
2. Go to "Configuration" > "Cloud Providers"
![Cloud Providers Page](../img/cloud-providers-page.png)
3. Click `Add Cloud Provider`
3. Click "Add Cloud Provider"
![Add a Cloud Provider](../img/add-cloud-provider.png)
4. Select `Amazon Web Services`
4. Select "Amazon Web Services"
![Select AWS Provider](./img/select-aws.png)
@@ -41,96 +33,39 @@ To configure your AWS account, youll need:
![Add account ID](./img/add-account-id.png)
6. Choose your preferred authentication method (next step)
6. Choose the preferred authentication method (next step)
![Select auth method](./img/select-auth-method.png)
---
## Step 3: Set Up AWS Authentication
### Step 3: Set Up AWS Authentication
Before proceeding, choose your preferred authentication mode:
Before proceeding, choose the preferred authentication mode:
Credentials
**Credentials**
* Quick scan as current user
* No extra setup
* Credentials time out
* Quick scan as current user
* No extra setup
* Credentials time out
Assumed Role
**Assumed Role**
* Preferred Setup
* Permanent Credentials
* Requires access to create role
* Preferred Setup ✅
* Permanent Credentials ✅
* Requires access to create role ❌
---
### 🔐 Assume Role (Recommended)
![Assume Role Overview](./img/assume-role-overview.png)
#### Assume Role (Recommended)
This method grants permanent access and is the recommended setup for production environments.
=== "CloudFormation"
![Assume Role Overview](img/assume-role-overview.png)
1. Download the [Prowler Scan Role Template](https://raw.githubusercontent.com/prowler-cloud/prowler/refs/heads/master/permissions/templates/cloudformation/prowler-scan-role.yml)
For detailed instructions on how to create the role, see [Authentication > Assume Role](./authentication.md#assume-role-recommended).
![Prowler Scan Role Template](./img/prowler-scan-role-template.png)
![Download Role Template](./img/download-role-template.png)
2. Open the [AWS Console](https://console.aws.amazon.com), search for **CloudFormation**
![CloudFormation Search](./img/cloudformation-nav.png)
3. Go to **Stacks** and click `Create stack` > `With new resources (standard)`
![Create Stack](./img/create-stack.png)
4. In **Specify Template**, choose `Upload a template file` and select the downloaded file
![Upload a template file](./img/upload-template-file.png)
![Upload file from downloads](./img/upload-template-from-downloads.png)
5. Click `Next`, provide a stack name and the **External ID** shown in the Prowler Cloud setup screen
![External ID](./img/prowler-cloud-external-id.png)
![Stack Data](./img/fill-stack-data.png)
!!! info
An **External ID** is required when assuming the *ProwlerScan* role to comply with AWS [confused deputy prevention](https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html).
6. Acknowledge the IAM resource creation warning and proceed
![Stack Creation Second Step](./img/stack-creation-second-step.png)
7. Click `Submit` to deploy the stack
![Click on submit](./img/submit-third-page.png)
=== "Terraform"
To provision the scan role using Terraform:
1. Run the following commands:
```bash
terraform init
terraform plan
terraform apply
```
2. During `plan` and `apply`, you will be prompted for the **External ID**, which is available in the Prowler Cloud/App UI:
![Get External ID](./img/get-external-id-prowler-cloud.png)
> 💡 Note: Terraform will use the AWS credentials of your default profile.
---
### Finish Setup with Assume Role
8. Once the role is created, go to the **IAM Console**, click on the `ProwlerScan` role to open its details:
8. Once the role is created, go to the **IAM Console**, click on the "ProwlerScan" role to open its details:
![ProwlerScan role info](./img/prowler-scan-pre-info.png)
@@ -138,80 +73,69 @@ This method grants permanent access and is the recommended setup for production
![New Role Info](./img/get-role-arn.png)
10. Paste the ARN into the corresponding field in Prowler Cloud/App
10. Paste the ARN into the corresponding field in Prowler Cloud or Prowler App
![Input the Role ARN](./img/paste-role-arn-prowler.png)
11. Click `Next`, then `Launch Scan`
11. Click "Next", then "Launch Scan"
![Next button in Prowler Cloud](./img/next-button-prowler-cloud.png)
![Launch Scan](./img/launch-scan-button-prowler-cloud.png)
---
### 🔑 Credentials (Static Access Keys)
#### Credentials (Static Access Keys)
You can also configure your AWS account using static credentials (not recommended for long-term use):
AWS accounts can also be configured using static credentials (not recommended for long-term use):
![Connect via credentials](./img/connect-via-credentials.png)
=== "Long term credentials"
For detailed instructions on how to create the credentials, see [Authentication > Credentials](./authentication.md#credentials).
1. Go to the [AWS Console](https://console.aws.amazon.com), open **CloudShell**
1. Complete the form in Prowler Cloud or Prowler App and click "Next"
![AWS CloudShell](./img/aws-cloudshell.png)
![Filled credentials page](./img/prowler-cloud-credentials-next.png)
2. Run:
2. Click "Launch Scan"
```bash
aws iam create-access-key
```
![Launch Scan](./img/launch-scan-button-prowler-cloud.png)
3. Copy the output containing:
---
- `AccessKeyId`
- `SecretAccessKey`
## Prowler CLI
![CloudShell Output](./img/cloudshell-output.png)
### Configure AWS Credentials
> ⚠️ Save these credentials securely and paste them into the Prowler Cloud/App setup screen.
To authenticate with AWS, use one of the following methods:
=== "Short term credentials (Recommended)"
```console
aws configure
```
You can use your [AWS Access Portal](https://docs.aws.amazon.com/singlesignon/latest/userguide/howtogetcredentials.html) or the CLI:
or
1. Retrieve short-term credentials for the IAM identity using this command:
```console
export AWS_ACCESS_KEY_ID="ASXXXXXXX"
export AWS_SECRET_ACCESS_KEY="XXXXXXXXX"
export AWS_SESSION_TOKEN="XXXXXXXXX"
```
```bash
aws sts get-session-token --duration-seconds 900
```
These credentials must be associated with a user or role with the necessary permissions to perform security checks.
???+ note
Check the AWS documentation [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/sts_example_sts_GetSessionToken_section.html).
More details on Assume Role settings from the CLI in [Assume Role](./role-assumption.md) page.
2. Copy the output containing:
- `AccessKeyId`
- `SecretAccessKey`
### AWS Profiles
> Sample output:
```json
{
"Credentials": {
"AccessKeyId": "ASIAIOSFODNN7EXAMPLE",
"SecretAccessKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY",
"SessionToken": "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE",
"Expiration": "2020-05-19T18:06:10+00:00"
}
}
```
To use a custom AWS profile, specify it with the following command:
> ⚠️ Save these credentials securely and paste them into the Prowler Cloud/App setup screen.
```console
prowler aws -p/--profile <profile_name>
```
Complete the form in Prowler Cloud/App and click `Next`
### Multi-Factor Authentication (MFA)
![Filled credentials page](./img/prowler-cloud-credentials-next.png)
For IAM entities requiring Multi-Factor Authentication (MFA), use the `--mfa` flag. Prowler prompts for the following values to initiate a new session:
Click `Launch Scan`
![Launch Scan](./img/launch-scan-button-prowler-cloud.png)
- **ARN of your MFA device**
- **TOTP (time-based one-time password)**

View File

@@ -1,4 +1,4 @@
# AWS Assume Role in Prowler
# AWS Assume Role in Prowler (CLI)
## Authentication Overview

Binary file not shown.

After

Width:  |  Height:  |  Size: 145 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 422 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 584 KiB

View File

@@ -42,4 +42,4 @@ prowler mongodbatlas
- Note the public key and private key
- Store credentials securely
For more details about MongoDB Atlas, see the [MongoDB Atlas Tutorial](../tutorials/mongodbatlas/getting-started-mongodbatlas.md).
For more details about MongoDB Atlas, see the [MongoDB Atlas Tutorial](./getting-started-mongodbatlas.md).

View File

@@ -0,0 +1,189 @@
# Jira Integration
Prowler App enables automatic export of security findings to Jira, providing seamless integration with Atlassian's work item tracking and project management platform. This comprehensive guide demonstrates how to configure and manage Jira integrations to streamline security incident management and enhance team collaboration across security workflows.
Integrating Prowler App with Jira provides:
* **Streamlined management:** Convert security findings directly into actionable Jira work items
* **Enhanced team collaboration:** Leverage existing project management workflows for security remediation
* **Automated ticket creation:** Reduce manual effort in tracking and assigning security work items
## How It Works
When enabled and configured:
1. Security findings can be manually sent to Jira from the Findings table.
2. Each finding creates a Jira work item with all the check's metadata, including guidance on how to remediate it.
## Configuration
To configure Jira integration in Prowler App:
1. Navigate to **Integrations** in the Prowler App interface
2. Locate the **Jira** card and click **Manage**, then select **Add integration**
![Integrations tab](./img/jira/integrations-tab.png)
3. Complete the integration settings:
* **Jira domain:** Enter the Jira domain (e.g., from `https://your-domain.atlassian.net` -> `your-domain`)
* **Email:** Your Jira account email
* **API Token:** API token with the following scopes: `read:jira-user`, `read:jira-work`, `write:jira-work`
![Connection settings](./img/jira/connection-settings.png)
!!! note "Generate Jira API Token"
To generate a Jira API token, visit: https://id.atlassian.com/manage-profile/security/api-tokens
Once configured successfully, the integration is ready to send findings to Jira.
## Sending Findings to Jira
### Manual Export
To manually send individual findings to Jira:
1. Navigate to the **Findings** section in Prowler App
2. Select one finding you want to export
3. Click the action button on the table row and select **Send to Jira**
4. Select the Jira integration and project
5. Click **Send to Jira**
![Send to Jira modal](./img/jira/send-to-jira-modal.png)
## Integration Status
Monitor and manage your Jira integrations through the management interface:
1. Review configured integrations in the integrations dashboard
2. Each integration displays:
- **Connection Status:** Connected or Disconnected indicator
- **Instance Information:** Jira domain and last checked timestamp
### Actions
Each Jira integration provides management actions through dedicated buttons:
| Button | Purpose | Available Actions | Notes |
|--------|---------|------------------|-------|
| **Test** | Verify integration connectivity | • Test Jira API access<br/>• Validate credentials<br/>• Check project permissions<br/>• Verify work item creation capability | Results displayed in notification message |
| **Credentials** | Update authentication settings | • Change API token<br/>• Update email<br/>• Update Jira domain | Click "Update Credentials" to save changes |
| **Enable/Disable** | Toggle integration status | • Enable or disable integration<br/>| Status change takes effect immediately |
| **Delete** | Remove integration permanently | • Permanently delete integration<br/>• Remove all configuration data | ⚠️ **Cannot be undone** - confirm before deleting |
## Troubleshooting
### Connection test fails
- Verify Jira instance domain is correct and accessible
- Confirm API token or credentials are valid
- Ensure API access is enabled in Jira settings and the needed scopes are granted
### Check task status (API)
If the Jira issue does not appear in your Jira project, follow these steps to verify the export task status via the API.
!!! note
Replace `http://localhost:8080` with the base URL where your Prowler API is accessible (for example, `https://api.yourdomain.com`).
1) Get an access token (replace email and password):
```
curl --location 'http://localhost:8080/api/v1/tokens' \
--header 'Content-Type: application/vnd.api+json' \
--header 'Accept: application/vnd.api+json' \
--data-raw '{
"data": {
"type": "tokens",
"attributes": {
"email": "YOUR_USER_EMAIL",
"password": "YOUR_USER_PASSWORD"
}
}
}'
```
2) List tasks filtered by the Jira task (`integration-jira`) using the access token:
```
curl --location --globoff 'http://localhost:8080/api/v1/tasks?filter[name]=integration-jira' \
--header 'Accept: application/vnd.api+json' \
--header 'Authorization: Bearer ACCESS_TOKEN' | jq
```
!!! note
If you don't have `jq` installed, run the command without `| jq`.
3) Share the output so we can help. A typical result will look like:
```
{
"links": {
"first": "https://api.dev.prowler.com/api/v1/tasks?page%5Bnumber%5D=1",
"last": "https://api.dev.prowler.com/api/v1/tasks?page%5Bnumber%5D=122",
"next": "https://api.dev.prowler.com/api/v1/tasks?page%5Bnumber%5D=2",
"prev": null
},
"data": [
{
"type": "tasks",
"id": "9a79ab21-39ae-4161-9f6e-2844eb0da0fb",
"attributes": {
"inserted_at": "2025-09-09T08:11:38.643620Z",
"completed_at": "2025-09-09T08:11:41.264285Z",
"name": "integration-jira",
"state": "completed",
"result": {
"created_count": 0,
"failed_count": 1
},
"task_args": {
"integration_id": "a476c2c0-0a00-4720-bfb9-286e9eb5c7bd",
"project_key": "PRWLR",
"issue_type": "Task",
"finding_ids": [
"01992d53-3af7-7759-be48-68fc405391e6"
]
},
"metadata": {}
}
},
{
"type": "tasks",
"id": "5f525135-9d37-4b01-9ac8-afeaf8793eac",
"attributes": {
"inserted_at": "2025-09-09T08:07:22.184164Z",
"completed_at": "2025-09-09T08:07:24.909185Z",
"name": "integration-jira",
"state": "completed",
"result": {
"created_count": 1,
"failed_count": 0
},
"task_args": {
"integration_id": "a476c2c0-0a00-4720-bfb9-286e9eb5c7bd",
"project_key": "JIRA",
"issue_type": "Task",
"finding_ids": [
"0198f018-8b7b-7154-a509-1a2b1ffba02d"
]
},
"metadata": {}
}
}
],
"meta": {
"pagination": {
"page": 1,
"pages": 122,
"count": 1214
},
"version": "v1"
}
}
```
How to read it:
- "created_count": number of Jira issues successfully created.
- "failed_count": number of Jira issues that could not be created. If `failed_count > 0` or the issue does not appear in Jira, please contact us so we can assist while detailed logs are not available through the UI.

View File

@@ -1,6 +1,6 @@
# Prowler App
**Prowler App** is a user-friendly interface for Prowler CLI, providing a visual dashboard to monitor your cloud security posture. This tutorial will guide you through setting up and using Prowler App.
**Prowler App** is a web application that simplifies running Prowler. This tutorial will guide you through setting up and using it.
## Accessing Prowler App and API Documentation

View File

@@ -106,6 +106,7 @@ The CSV format follows a standardized structure across all providers. The follow
- RELATED\_TO
- NOTES
- PROWLER\_VERSION
- ADDITIONAL\_URLS
#### CSV Headers Mapping
@@ -163,6 +164,7 @@ The JSON-OCSF output format implements the [Detection Finding](https://schema.oc
"depends_on": [],
"related_to": [],
"notes": "",
"additional_urls": [],
"compliance": {
"MITRE-ATTACK": [
"T1552"
@@ -398,6 +400,7 @@ The following is the mapping between the native JSON and the Detection Finding f
| Categories| unmapped.categories
| DependsOn| unmapped.depends\_on
| RelatedTo| unmapped.related\_to
| AdditionalURLs| unmapped.additional\_urls
| Notes| unmapped.notes
| Profile| _Not mapped yet_
| AccountId| cloud.account.uid

View File

@@ -1,5 +1,5 @@
AUTH_METHOD;TIMESTAMP;ACCOUNT_UID;ACCOUNT_NAME;ACCOUNT_EMAIL;ACCOUNT_ORGANIZATION_UID;ACCOUNT_ORGANIZATION_NAME;ACCOUNT_TAGS;FINDING_UID;PROVIDER;CHECK_ID;CHECK_TITLE;CHECK_TYPE;STATUS;STATUS_EXTENDED;MUTED;SERVICE_NAME;SUBSERVICE_NAME;SEVERITY;RESOURCE_TYPE;RESOURCE_UID;RESOURCE_NAME;RESOURCE_DETAILS;RESOURCE_TAGS;PARTITION;REGION;DESCRIPTION;RISK;RELATED_URL;REMEDIATION_RECOMMENDATION_TEXT;REMEDIATION_RECOMMENDATION_URL;REMEDIATION_CODE_NATIVEIAC;REMEDIATION_CODE_TERRAFORM;REMEDIATION_CODE_CLI;REMEDIATION_CODE_OTHER;COMPLIANCE;CATEGORIES;DEPENDS_ON;RELATED_TO;NOTES;PROWLER_VERSION
<auth_method>;2025-02-14 14:27:03.913874;<account_uid>;;;;;;<finding_uid>;aws;accessanalyzer_enabled;Check if IAM Access Analyzer is enabled;IAM;FAIL;IAM Access Analyzer in account <account_uid> is not enabled.;False;accessanalyzer;;low;Other;<resource_uid>;<resource_name>;;;aws;<region>;Check if IAM Access Analyzer is enabled;AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.;https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html;Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost).;https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html;;;aws accessanalyzer create-analyzer --analyzer-name <NAME> --type <ACCOUNT|ORGANIZATION>;;CIS-1.4: 1.20 | CIS-1.5: 1.20 | KISA-ISMS-P-2023: 2.5.6, 2.6.4, 2.8.1, 2.8.2 | CIS-2.0: 1.20 | KISA-ISMS-P-2023-korean: 2.5.6, 2.6.4, 2.8.1, 2.8.2 | AWS-Account-Security-Onboarding: Enabled security services, Create analyzers in each active regions, Verify that events are present in SecurityHub aggregated view | CIS-3.0: 1.20;;;;;<prowler_version>
<auth_method>;2025-02-14 14:27:03.913874;<account_uid>;;;;;;<finding_uid>;aws;account_maintain_current_contact_details;Maintain current contact details.;IAM;MANUAL;Login to the AWS Console. Choose your account name on the top right of the window -> My Account -> Contact Information.;False;account;;medium;Other;<resource_uid>;<account_uid>;;;aws;<region>;Maintain current contact details.;Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization. An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy. If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior needs urgent mitigation, proactive measures may be taken, including throttling of traffic between the account exhibiting suspicious behavior and the AWS API endpoints and the Internet. This will result in impaired service to and from the account in question.;;Using the Billing and Cost Management console complete contact details.;https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html;;;No command available.;https://docs.prowler.com/checks/aws/iam-policies/iam_18-maintain-contact-details#aws-console;CIS-1.4: 1.1 | CIS-1.5: 1.1 | KISA-ISMS-P-2023: 2.1.3 | CIS-2.0: 1.1 | KISA-ISMS-P-2023-korean: 2.1.3 | AWS-Well-Architected-Framework-Security-Pillar: SEC03-BP03, SEC10-BP01 | AWS-Account-Security-Onboarding: Billing, emergency, security contacts | CIS-3.0: 1.1 | ENS-RD2022: op.ext.7.aws.am.1;;;;;<prowler_version>
<auth_method>;2025-02-14 14:27:03.913874;<account_uid>;;;;;;<finding_uid>;aws;account_maintain_different_contact_details_to_security_billing_and_operations;Maintain different contact details to security, billing and operations.;IAM;FAIL;SECURITY, BILLING and OPERATIONS contacts not found or they are not different between each other and between ROOT contact.;False;account;;medium;Other;<resource_uid>;<account_uid>;;;aws;<region>;Maintain different contact details to security, billing and operations.;Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization. An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy. If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior needs urgent mitigation, proactive measures may be taken, including throttling of traffic between the account exhibiting suspicious behavior and the AWS API endpoints and the Internet. This will result in impaired service to and from the account in question.;https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html;Using the Billing and Cost Management console complete contact details.;https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html;;;;https://docs.prowler.com/checks/aws/iam-policies/iam_18-maintain-contact-details#aws-console;KISA-ISMS-P-2023: 2.1.3 | KISA-ISMS-P-2023-korean: 2.1.3;;;;;<prowler_version>
<auth_method>;2025-02-14 14:27:03.913874;<account_uid>;;;;;;<finding_uid>;aws;account_security_contact_information_is_registered;Ensure security contact information is registered.;IAM;MANUAL;Login to the AWS Console. Choose your account name on the top right of the window -> My Account -> Alternate Contacts -> Security Section.;False;account;;medium;Other;<resource_uid>:root;<account_uid>;;;aws;<region>;Ensure security contact information is registered.;AWS provides customers with the option of specifying the contact information for accounts security team. It is recommended that this information be provided. Specifying security-specific contact information will help ensure that security advisories sent by AWS reach the team in your organization that is best equipped to respond to them.;;Go to the My Account section and complete alternate contacts.;https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html;;;No command available.;https://docs.prowler.com/checks/aws/iam-policies/iam_19#aws-console;CIS-1.4: 1.2 | CIS-1.5: 1.2 | AWS-Foundational-Security-Best-Practices: account, acm | KISA-ISMS-P-2023: 2.1.3, 2.2.1 | CIS-2.0: 1.2 | KISA-ISMS-P-2023-korean: 2.1.3, 2.2.1 | AWS-Well-Architected-Framework-Security-Pillar: SEC03-BP03, SEC10-BP01 | AWS-Account-Security-Onboarding: Billing, emergency, security contacts | CIS-3.0: 1.2 | ENS-RD2022: op.ext.7.aws.am.1;;;;;<prowler_version>
AUTH_METHOD;TIMESTAMP;ACCOUNT_UID;ACCOUNT_NAME;ACCOUNT_EMAIL;ACCOUNT_ORGANIZATION_UID;ACCOUNT_ORGANIZATION_NAME;ACCOUNT_TAGS;FINDING_UID;PROVIDER;CHECK_ID;CHECK_TITLE;CHECK_TYPE;STATUS;STATUS_EXTENDED;MUTED;SERVICE_NAME;SUBSERVICE_NAME;SEVERITY;RESOURCE_TYPE;RESOURCE_UID;RESOURCE_NAME;RESOURCE_DETAILS;RESOURCE_TAGS;PARTITION;REGION;DESCRIPTION;RISK;RELATED_URL;REMEDIATION_RECOMMENDATION_TEXT;REMEDIATION_RECOMMENDATION_URL;REMEDIATION_CODE_NATIVEIAC;REMEDIATION_CODE_TERRAFORM;REMEDIATION_CODE_CLI;REMEDIATION_CODE_OTHER;COMPLIANCE;CATEGORIES;DEPENDS_ON;RELATED_TO;NOTES;PROWLER_VERSION;ADDITIONAL_URLS
<auth_method>;2025-02-14 14:27:03.913874;<account_uid>;;;;;;<finding_uid>;aws;accessanalyzer_enabled;Check if IAM Access Analyzer is enabled;IAM;FAIL;IAM Access Analyzer in account <account_uid> is not enabled.;False;accessanalyzer;;low;Other;<resource_uid>;<resource_name>;;;aws;<region>;Check if IAM Access Analyzer is enabled;AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.;https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html;Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost).;https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html;;;aws accessanalyzer create-analyzer --analyzer-name <NAME> --type <ACCOUNT|ORGANIZATION>;;CIS-1.4: 1.20 | CIS-1.5: 1.20 | KISA-ISMS-P-2023: 2.5.6, 2.6.4, 2.8.1, 2.8.2 | CIS-2.0: 1.20 | KISA-ISMS-P-2023-korean: 2.5.6, 2.6.4, 2.8.1, 2.8.2 | AWS-Account-Security-Onboarding: Enabled security services, Create analyzers in each active regions, Verify that events are present in SecurityHub aggregated view | CIS-3.0: 1.20;;;;;<prowler_version>;https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-getting-started.html | https://aws.amazon.com/iam/features/analyze-access/
<auth_method>;2025-02-14 14:27:03.913874;<account_uid>;;;;;;<finding_uid>;aws;account_maintain_current_contact_details;Maintain current contact details.;IAM;MANUAL;Login to the AWS Console. Choose your account name on the top right of the window -> My Account -> Contact Information.;False;account;;medium;Other;<resource_uid>;<account_uid>;;;aws;<region>;Maintain current contact details.;Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization. An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy. If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior needs urgent mitigation, proactive measures may be taken, including throttling of traffic between the account exhibiting suspicious behavior and the AWS API endpoints and the Internet. This will result in impaired service to and from the account in question.;;Using the Billing and Cost Management console complete contact details.;https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html;;;No command available.;https://docs.prowler.com/checks/aws/iam-policies/iam_18-maintain-contact-details#aws-console;CIS-1.4: 1.1 | CIS-1.5: 1.1 | KISA-ISMS-P-2023: 2.1.3 | CIS-2.0: 1.1 | KISA-ISMS-P-2023-korean: 2.1.3 | AWS-Well-Architected-Framework-Security-Pillar: SEC03-BP03, SEC10-BP01 | AWS-Account-Security-Onboarding: Billing, emergency, security contacts | CIS-3.0: 1.1 | ENS-RD2022: op.ext.7.aws.am.1;;;;;<prowler_version>;https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-getting-started.html | https://aws.amazon.com/iam/features/analyze-access/
<auth_method>;2025-02-14 14:27:03.913874;<account_uid>;;;;;;<finding_uid>;aws;account_maintain_different_contact_details_to_security_billing_and_operations;Maintain different contact details to security, billing and operations.;IAM;FAIL;SECURITY, BILLING and OPERATIONS contacts not found or they are not different between each other and between ROOT contact.;False;account;;medium;Other;<resource_uid>;<account_uid>;;;aws;<region>;Maintain different contact details to security, billing and operations.;Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization. An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy. If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior needs urgent mitigation, proactive measures may be taken, including throttling of traffic between the account exhibiting suspicious behavior and the AWS API endpoints and the Internet. This will result in impaired service to and from the account in question.;https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html;Using the Billing and Cost Management console complete contact details.;https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html;;;;https://docs.prowler.com/checks/aws/iam-policies/iam_18-maintain-contact-details#aws-console;KISA-ISMS-P-2023: 2.1.3 | KISA-ISMS-P-2023-korean: 2.1.3;;;;;<prowler_version>;https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-getting-started.html | https://aws.amazon.com/iam/features/analyze-access/
<auth_method>;2025-02-14 14:27:03.913874;<account_uid>;;;;;;<finding_uid>;aws;account_security_contact_information_is_registered;Ensure security contact information is registered.;IAM;MANUAL;Login to the AWS Console. Choose your account name on the top right of the window -> My Account -> Alternate Contacts -> Security Section.;False;account;;medium;Other;<resource_uid>:root;<account_uid>;;;aws;<region>;Ensure security contact information is registered.;AWS provides customers with the option of specifying the contact information for accounts security team. It is recommended that this information be provided. Specifying security-specific contact information will help ensure that security advisories sent by AWS reach the team in your organization that is best equipped to respond to them.;;Go to the My Account section and complete alternate contacts.;https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html;;;No command available.;https://docs.prowler.com/checks/aws/iam-policies/iam_19#aws-console;CIS-1.4: 1.2 | CIS-1.5: 1.2 | AWS-Foundational-Security-Best-Practices: account, acm | KISA-ISMS-P-2023: 2.1.3, 2.2.1 | CIS-2.0: 1.2 | KISA-ISMS-P-2023-korean: 2.1.3, 2.2.1 | AWS-Well-Architected-Framework-Security-Pillar: SEC03-BP03, SEC10-BP01 | AWS-Account-Security-Onboarding: Billing, emergency, security contacts | CIS-3.0: 1.2 | ENS-RD2022: op.ext.7.aws.am.1;;;;;<prowler_version>;https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-getting-started.html | https://aws.amazon.com/iam/features/analyze-access/
1 AUTH_METHOD TIMESTAMP ACCOUNT_UID ACCOUNT_NAME ACCOUNT_EMAIL ACCOUNT_ORGANIZATION_UID ACCOUNT_ORGANIZATION_NAME ACCOUNT_TAGS FINDING_UID PROVIDER CHECK_ID CHECK_TITLE CHECK_TYPE STATUS STATUS_EXTENDED MUTED SERVICE_NAME SUBSERVICE_NAME SEVERITY RESOURCE_TYPE RESOURCE_UID RESOURCE_NAME RESOURCE_DETAILS RESOURCE_TAGS PARTITION REGION DESCRIPTION RISK RELATED_URL REMEDIATION_RECOMMENDATION_TEXT REMEDIATION_RECOMMENDATION_URL REMEDIATION_CODE_NATIVEIAC REMEDIATION_CODE_TERRAFORM REMEDIATION_CODE_CLI REMEDIATION_CODE_OTHER COMPLIANCE CATEGORIES DEPENDS_ON RELATED_TO NOTES PROWLER_VERSION ADDITIONAL_URLS
2 <auth_method> 2025-02-14 14:27:03.913874 <account_uid> <finding_uid> aws accessanalyzer_enabled Check if IAM Access Analyzer is enabled IAM FAIL IAM Access Analyzer in account <account_uid> is not enabled. False accessanalyzer low Other <resource_uid> <resource_name> aws <region> Check if IAM Access Analyzer is enabled AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy. https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost). https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html aws accessanalyzer create-analyzer --analyzer-name <NAME> --type <ACCOUNT|ORGANIZATION> CIS-1.4: 1.20 | CIS-1.5: 1.20 | KISA-ISMS-P-2023: 2.5.6, 2.6.4, 2.8.1, 2.8.2 | CIS-2.0: 1.20 | KISA-ISMS-P-2023-korean: 2.5.6, 2.6.4, 2.8.1, 2.8.2 | AWS-Account-Security-Onboarding: Enabled security services, Create analyzers in each active regions, Verify that events are present in SecurityHub aggregated view | CIS-3.0: 1.20 <prowler_version> https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-getting-started.html | https://aws.amazon.com/iam/features/analyze-access/
3 <auth_method> 2025-02-14 14:27:03.913874 <account_uid> <finding_uid> aws account_maintain_current_contact_details Maintain current contact details. IAM MANUAL Login to the AWS Console. Choose your account name on the top right of the window -> My Account -> Contact Information. False account medium Other <resource_uid> <account_uid> aws <region> Maintain current contact details. Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization. An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy. If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior needs urgent mitigation, proactive measures may be taken, including throttling of traffic between the account exhibiting suspicious behavior and the AWS API endpoints and the Internet. This will result in impaired service to and from the account in question. Using the Billing and Cost Management console complete contact details. https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html No command available. https://docs.prowler.com/checks/aws/iam-policies/iam_18-maintain-contact-details#aws-console CIS-1.4: 1.1 | CIS-1.5: 1.1 | KISA-ISMS-P-2023: 2.1.3 | CIS-2.0: 1.1 | KISA-ISMS-P-2023-korean: 2.1.3 | AWS-Well-Architected-Framework-Security-Pillar: SEC03-BP03, SEC10-BP01 | AWS-Account-Security-Onboarding: Billing, emergency, security contacts | CIS-3.0: 1.1 | ENS-RD2022: op.ext.7.aws.am.1 <prowler_version> https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-getting-started.html | https://aws.amazon.com/iam/features/analyze-access/
4 <auth_method> 2025-02-14 14:27:03.913874 <account_uid> <finding_uid> aws account_maintain_different_contact_details_to_security_billing_and_operations Maintain different contact details to security, billing and operations. IAM FAIL SECURITY, BILLING and OPERATIONS contacts not found or they are not different between each other and between ROOT contact. False account medium Other <resource_uid> <account_uid> aws <region> Maintain different contact details to security, billing and operations. Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization. An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy. If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior needs urgent mitigation, proactive measures may be taken, including throttling of traffic between the account exhibiting suspicious behavior and the AWS API endpoints and the Internet. This will result in impaired service to and from the account in question. https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html Using the Billing and Cost Management console complete contact details. https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html https://docs.prowler.com/checks/aws/iam-policies/iam_18-maintain-contact-details#aws-console KISA-ISMS-P-2023: 2.1.3 | KISA-ISMS-P-2023-korean: 2.1.3 <prowler_version> https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-getting-started.html | https://aws.amazon.com/iam/features/analyze-access/
5 <auth_method> 2025-02-14 14:27:03.913874 <account_uid> <finding_uid> aws account_security_contact_information_is_registered Ensure security contact information is registered. IAM MANUAL Login to the AWS Console. Choose your account name on the top right of the window -> My Account -> Alternate Contacts -> Security Section. False account medium Other <resource_uid>:root <account_uid> aws <region> Ensure security contact information is registered. AWS provides customers with the option of specifying the contact information for accounts security team. It is recommended that this information be provided. Specifying security-specific contact information will help ensure that security advisories sent by AWS reach the team in your organization that is best equipped to respond to them. Go to the My Account section and complete alternate contacts. https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html No command available. https://docs.prowler.com/checks/aws/iam-policies/iam_19#aws-console CIS-1.4: 1.2 | CIS-1.5: 1.2 | AWS-Foundational-Security-Best-Practices: account, acm | KISA-ISMS-P-2023: 2.1.3, 2.2.1 | CIS-2.0: 1.2 | KISA-ISMS-P-2023-korean: 2.1.3, 2.2.1 | AWS-Well-Architected-Framework-Security-Pillar: SEC03-BP03, SEC10-BP01 | AWS-Account-Security-Onboarding: Billing, emergency, security contacts | CIS-3.0: 1.2 | ENS-RD2022: op.ext.7.aws.am.1 <prowler_version> https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-getting-started.html | https://aws.amazon.com/iam/features/analyze-access/

View File

@@ -27,6 +27,7 @@
"categories": [],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "",
"compliance": {
"CIS-1.4": [
@@ -158,6 +159,7 @@
"categories": [],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "",
"compliance": {
"CIS-1.4": [
@@ -286,6 +288,7 @@
"categories": [],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "",
"compliance": {
"KISA-ISMS-P-2023": [
@@ -391,6 +394,7 @@
"categories": [],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "",
"compliance": {
"CIS-1.4": [
@@ -525,6 +529,7 @@
"categories": [],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "",
"compliance": {
"CIS-1.4": [

View File

@@ -1,5 +1,5 @@
AUTH_METHOD;TIMESTAMP;ACCOUNT_UID;ACCOUNT_NAME;ACCOUNT_EMAIL;ACCOUNT_ORGANIZATION_UID;ACCOUNT_ORGANIZATION_NAME;ACCOUNT_TAGS;FINDING_UID;PROVIDER;CHECK_ID;CHECK_TITLE;CHECK_TYPE;STATUS;STATUS_EXTENDED;MUTED;SERVICE_NAME;SUBSERVICE_NAME;SEVERITY;RESOURCE_TYPE;RESOURCE_UID;RESOURCE_NAME;RESOURCE_DETAILS;RESOURCE_TAGS;PARTITION;REGION;DESCRIPTION;RISK;RELATED_URL;REMEDIATION_RECOMMENDATION_TEXT;REMEDIATION_RECOMMENDATION_URL;REMEDIATION_CODE_NATIVEIAC;REMEDIATION_CODE_TERRAFORM;REMEDIATION_CODE_CLI;REMEDIATION_CODE_OTHER;COMPLIANCE;CATEGORIES;DEPENDS_ON;RELATED_TO;NOTES;PROWLER_VERSION
<auth_method>;2025-02-14 14:27:30.710664;<account_uid>;<account_name>;;<account_organization_uid>;ProwlerPro.onmicrosoft.com;;<finding_uid>;azure;aks_cluster_rbac_enabled;Ensure AKS RBAC is enabled;;PASS;RBAC is enabled for cluster '<resource_name>' in subscription '<account_name>'.;False;aks;;medium;Microsoft.ContainerService/ManagedClusters;/subscriptions/<account_uid>/resourcegroups/<resource_name>_group/providers/Microsoft.ContainerService/managedClusters/<resource_name>;<resource_name>;;;<partition>;<region>;Azure Kubernetes Service (AKS) can be configured to use Azure Active Directory (AD) for user authentication. In this configuration, you sign in to an AKS cluster using an Azure AD authentication token. You can also configure Kubernetes role-based access control (Kubernetes RBAC) to limit access to cluster resources based a user's identity or group membership.;Kubernetes RBAC and AKS help you secure your cluster access and provide only the minimum required permissions to developers and operators.;https://learn.microsoft.com/en-us/azure/aks/azure-ad-rbac?tabs=portal;;https://learn.microsoft.com/en-us/security/benchmark/azure/security-controls-v2-privileged-access#pa-7-follow-just-enough-administration-least-privilege-principle;;https://docs.prowler.com/checks/azure/azure-kubernetes-policies/bc_azr_kubernetes_2#terraform;;https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/AKS/enable-role-based-access-control-for-kubernetes-service.html#;ENS-RD2022: op.acc.2.az.r1.eid.1;;;;;<prowler_version>
<auth_method>;2025-02-14 14:27:30.710664;<account_uid>;<account_name>;;<account_organization_uid>;ProwlerPro.onmicrosoft.com;;<finding_uid>;azure;aks_clusters_created_with_private_nodes;Ensure clusters are created with Private Nodes;;PASS;Cluster '<resource_name>' was created with private nodes in subscription '<account_name>';False;aks;;high;Microsoft.ContainerService/ManagedClusters;/subscriptions/<account_uid>/resourcegroups/<resource_name>_group/providers/Microsoft.ContainerService/managedClusters/<resource_name>;<resource_name>;;;<partition>;<region>;Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.;Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.;https://learn.microsoft.com/en-us/azure/aks/private-clusters;;https://learn.microsoft.com/en-us/azure/aks/access-private-cluster;;;;;ENS-RD2022: mp.com.4.r2.az.aks.1 | MITRE-ATTACK: T1190, T1530;;;;;<prowler_version>
<auth_method>;2025-02-14 14:27:30.710664;<account_uid>;<account_name>;;<account_organization_uid>;ProwlerPro.onmicrosoft.com;;<finding_uid>;azure;aks_clusters_public_access_disabled;Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled;;FAIL;Public access to nodes is enabled for cluster '<resource_name>' in subscription '<account_name>';False;aks;;high;Microsoft.ContainerService/ManagedClusters;/subscriptions/<account_uid>/resourcegroups/<resource_name>_group/providers/Microsoft.ContainerService/managedClusters/<resource_name>;<resource_name>;;;<partition>;<region>;Disable access to the Kubernetes API from outside the node network if it is not required.;In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's wirtual network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's virtual network. Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes publically with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. 
Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's virtual network to perform any attack on the Kubernetes API.;https://learn.microsoft.com/en-us/azure/aks/private-clusters?tabs=azure-portal;To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone;https://learn.microsoft.com/en-us/azure/aks/access-private-cluster?tabs=azure-cli;;;az aks update -n <cluster_name> -g <resource_group> --disable-public-fqdn;;ENS-RD2022: mp.com.4.az.aks.2 | MITRE-ATTACK: T1190, T1530;;;;;<prowler_version>
<auth_method>;2025-02-14 14:27:30.710664;<account_uid>;<account_name>;;<account_organization_uid>;ProwlerPro.onmicrosoft.com;;<finding_uid>;azure;aks_network_policy_enabled;Ensure Network Policy is Enabled and set as appropriate;;PASS;Network policy is enabled for cluster '<resource_name>' in subscription '<account_name>'.;False;aks;;medium;Microsoft.ContainerService/managedClusters;/subscriptions/<account_uid>/resourcegroups/<resource_name>_group/providers/Microsoft.ContainerService/managedClusters/<resource_name>;<resource_name>;;;<partition>;<region>;When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.;All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them. Network Policy is a Kubernetes specification that defines access policies for communication between Pods. Using Network Policies, you define an ordered set of rules to send and receive traffic and apply them to a collection of pods that match one or more label selectors. These network policy rules are defined as YAML manifests. 
Network policies can be included as part of a wider manifest that also creates a deployment or service.;https://learn.microsoft.com/en-us/security/benchmark/azure/security-controls-v2-network-security#ns-2-connect-private-networks-together;;https://learn.microsoft.com/en-us/azure/aks/use-network-policies;;https://docs.prowler.com/checks/azure/azure-kubernetes-policies/bc_azr_kubernetes_4#terraform;;;ENS-RD2022: mp.com.4.r2.az.aks.1;;;;Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy. Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion. If Network Policy is used, a cluster must have at least 2 nodes of type n1-standard-1 or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 n1-standard-1 instances. Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU.;<prowler_version>
AUTH_METHOD;TIMESTAMP;ACCOUNT_UID;ACCOUNT_NAME;ACCOUNT_EMAIL;ACCOUNT_ORGANIZATION_UID;ACCOUNT_ORGANIZATION_NAME;ACCOUNT_TAGS;FINDING_UID;PROVIDER;CHECK_ID;CHECK_TITLE;CHECK_TYPE;STATUS;STATUS_EXTENDED;MUTED;SERVICE_NAME;SUBSERVICE_NAME;SEVERITY;RESOURCE_TYPE;RESOURCE_UID;RESOURCE_NAME;RESOURCE_DETAILS;RESOURCE_TAGS;PARTITION;REGION;DESCRIPTION;RISK;RELATED_URL;REMEDIATION_RECOMMENDATION_TEXT;REMEDIATION_RECOMMENDATION_URL;REMEDIATION_CODE_NATIVEIAC;REMEDIATION_CODE_TERRAFORM;REMEDIATION_CODE_CLI;REMEDIATION_CODE_OTHER;COMPLIANCE;CATEGORIES;DEPENDS_ON;RELATED_TO;NOTES;PROWLER_VERSION;ADDITIONAL_URLS
<auth_method>;2025-02-14 14:27:30.710664;<account_uid>;<account_name>;;<account_organization_uid>;ProwlerPro.onmicrosoft.com;;<finding_uid>;azure;aks_cluster_rbac_enabled;Ensure AKS RBAC is enabled;;PASS;RBAC is enabled for cluster '<resource_name>' in subscription '<account_name>'.;False;aks;;medium;Microsoft.ContainerService/ManagedClusters;/subscriptions/<account_uid>/resourcegroups/<resource_name>_group/providers/Microsoft.ContainerService/managedClusters/<resource_name>;<resource_name>;;;<partition>;<region>;Azure Kubernetes Service (AKS) can be configured to use Azure Active Directory (AD) for user authentication. In this configuration, you sign in to an AKS cluster using an Azure AD authentication token. You can also configure Kubernetes role-based access control (Kubernetes RBAC) to limit access to cluster resources based a user's identity or group membership.;Kubernetes RBAC and AKS help you secure your cluster access and provide only the minimum required permissions to developers and operators.;https://learn.microsoft.com/en-us/azure/aks/azure-ad-rbac?tabs=portal;;https://learn.microsoft.com/en-us/security/benchmark/azure/security-controls-v2-privileged-access#pa-7-follow-just-enough-administration-least-privilege-principle;;https://docs.prowler.com/checks/azure/azure-kubernetes-policies/bc_azr_kubernetes_2#terraform;;https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/AKS/enable-role-based-access-control-for-kubernetes-service.html#;ENS-RD2022: op.acc.2.az.r1.eid.1;;;;;<prowler_version>;https://learn.microsoft.com/azure/aks/azure-ad-rbac | https://learn.microsoft.com/azure/aks/concepts-identity
<auth_method>;2025-02-14 14:27:30.710664;<account_uid>;<account_name>;;<account_organization_uid>;ProwlerPro.onmicrosoft.com;;<finding_uid>;azure;aks_clusters_created_with_private_nodes;Ensure clusters are created with Private Nodes;;PASS;Cluster '<resource_name>' was created with private nodes in subscription '<account_name>';False;aks;;high;Microsoft.ContainerService/ManagedClusters;/subscriptions/<account_uid>/resourcegroups/<resource_name>_group/providers/Microsoft.ContainerService/managedClusters/<resource_name>;<resource_name>;;;<partition>;<region>;Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.;Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts.;https://learn.microsoft.com/en-us/azure/aks/private-clusters;;https://learn.microsoft.com/en-us/azure/aks/access-private-cluster;;;;;ENS-RD2022: mp.com.4.r2.az.aks.1 | MITRE-ATTACK: T1190, T1530;;;;;<prowler_version>;https://learn.microsoft.com/azure/aks/azure-ad-rbac | https://learn.microsoft.com/azure/aks/concepts-identity
<auth_method>;2025-02-14 14:27:30.710664;<account_uid>;<account_name>;;<account_organization_uid>;ProwlerPro.onmicrosoft.com;;<finding_uid>;azure;aks_clusters_public_access_disabled;Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled;;FAIL;Public access to nodes is enabled for cluster '<resource_name>' in subscription '<account_name>';False;aks;;high;Microsoft.ContainerService/ManagedClusters;/subscriptions/<account_uid>/resourcegroups/<resource_name>_group/providers/Microsoft.ContainerService/managedClusters/<resource_name>;<resource_name>;;;<partition>;<region>;Disable access to the Kubernetes API from outside the node network if it is not required.;In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's wirtual network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's virtual network. Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes publically with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. 
Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's virtual network to perform any attack on the Kubernetes API.;https://learn.microsoft.com/en-us/azure/aks/private-clusters?tabs=azure-portal;To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone;https://learn.microsoft.com/en-us/azure/aks/access-private-cluster?tabs=azure-cli;;;az aks update -n <cluster_name> -g <resource_group> --disable-public-fqdn;;ENS-RD2022: mp.com.4.az.aks.2 | MITRE-ATTACK: T1190, T1530;;;;;<prowler_version>;https://learn.microsoft.com/azure/aks/azure-ad-rbac | https://learn.microsoft.com/azure/aks/concepts-identity
<auth_method>;2025-02-14 14:27:30.710664;<account_uid>;<account_name>;;<account_organization_uid>;ProwlerPro.onmicrosoft.com;;<finding_uid>;azure;aks_network_policy_enabled;Ensure Network Policy is Enabled and set as appropriate;;PASS;Network policy is enabled for cluster '<resource_name>' in subscription '<account_name>'.;False;aks;;medium;Microsoft.ContainerService/managedClusters;/subscriptions/<account_uid>/resourcegroups/<resource_name>_group/providers/Microsoft.ContainerService/managedClusters/<resource_name>;<resource_name>;;;<partition>;<region>;When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.;All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them. Network Policy is a Kubernetes specification that defines access policies for communication between Pods. Using Network Policies, you define an ordered set of rules to send and receive traffic and apply them to a collection of pods that match one or more label selectors. These network policy rules are defined as YAML manifests. 
Network policies can be included as part of a wider manifest that also creates a deployment or service.;https://learn.microsoft.com/en-us/security/benchmark/azure/security-controls-v2-network-security#ns-2-connect-private-networks-together;;https://learn.microsoft.com/en-us/azure/aks/use-network-policies;;https://docs.prowler.com/checks/azure/azure-kubernetes-policies/bc_azr_kubernetes_4#terraform;;;ENS-RD2022: mp.com.4.r2.az.aks.1;;;;Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy. Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion. If Network Policy is used, a cluster must have at least 2 nodes of type n1-standard-1 or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 n1-standard-1 instances. Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU.;<prowler_version>;https://learn.microsoft.com/azure/aks/azure-ad-rbac | https://learn.microsoft.com/azure/aks/concepts-identity
1 AUTH_METHOD TIMESTAMP ACCOUNT_UID ACCOUNT_NAME ACCOUNT_EMAIL ACCOUNT_ORGANIZATION_UID ACCOUNT_ORGANIZATION_NAME ACCOUNT_TAGS FINDING_UID PROVIDER CHECK_ID CHECK_TITLE CHECK_TYPE STATUS STATUS_EXTENDED MUTED SERVICE_NAME SUBSERVICE_NAME SEVERITY RESOURCE_TYPE RESOURCE_UID RESOURCE_NAME RESOURCE_DETAILS RESOURCE_TAGS PARTITION REGION DESCRIPTION RISK RELATED_URL REMEDIATION_RECOMMENDATION_TEXT REMEDIATION_RECOMMENDATION_URL REMEDIATION_CODE_NATIVEIAC REMEDIATION_CODE_TERRAFORM REMEDIATION_CODE_CLI REMEDIATION_CODE_OTHER COMPLIANCE CATEGORIES DEPENDS_ON RELATED_TO NOTES PROWLER_VERSION ADDITIONAL_URLS
2 <auth_method> 2025-02-14 14:27:30.710664 <account_uid> <account_name> <account_organization_uid> ProwlerPro.onmicrosoft.com <finding_uid> azure aks_cluster_rbac_enabled Ensure AKS RBAC is enabled PASS RBAC is enabled for cluster '<resource_name>' in subscription '<account_name>'. False aks medium Microsoft.ContainerService/ManagedClusters /subscriptions/<account_uid>/resourcegroups/<resource_name>_group/providers/Microsoft.ContainerService/managedClusters/<resource_name> <resource_name> <partition> <region> Azure Kubernetes Service (AKS) can be configured to use Azure Active Directory (AD) for user authentication. In this configuration, you sign in to an AKS cluster using an Azure AD authentication token. You can also configure Kubernetes role-based access control (Kubernetes RBAC) to limit access to cluster resources based a user's identity or group membership. Kubernetes RBAC and AKS help you secure your cluster access and provide only the minimum required permissions to developers and operators. https://learn.microsoft.com/en-us/azure/aks/azure-ad-rbac?tabs=portal https://learn.microsoft.com/en-us/security/benchmark/azure/security-controls-v2-privileged-access#pa-7-follow-just-enough-administration-least-privilege-principle https://docs.prowler.com/checks/azure/azure-kubernetes-policies/bc_azr_kubernetes_2#terraform https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/AKS/enable-role-based-access-control-for-kubernetes-service.html# ENS-RD2022: op.acc.2.az.r1.eid.1 <prowler_version> https://learn.microsoft.com/azure/aks/azure-ad-rbac | https://learn.microsoft.com/azure/aks/concepts-identity
3 <auth_method> 2025-02-14 14:27:30.710664 <account_uid> <account_name> <account_organization_uid> ProwlerPro.onmicrosoft.com <finding_uid> azure aks_clusters_created_with_private_nodes Ensure clusters are created with Private Nodes PASS Cluster '<resource_name>' was created with private nodes in subscription '<account_name>' False aks high Microsoft.ContainerService/ManagedClusters /subscriptions/<account_uid>/resourcegroups/<resource_name>_group/providers/Microsoft.ContainerService/managedClusters/<resource_name> <resource_name> <partition> <region> Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses. Disabling public IP addresses on cluster nodes restricts access to only internal networks, forcing attackers to obtain local network access before attempting to compromise the underlying Kubernetes hosts. https://learn.microsoft.com/en-us/azure/aks/private-clusters https://learn.microsoft.com/en-us/azure/aks/access-private-cluster ENS-RD2022: mp.com.4.r2.az.aks.1 | MITRE-ATTACK: T1190, T1530 <prowler_version> https://learn.microsoft.com/azure/aks/azure-ad-rbac | https://learn.microsoft.com/azure/aks/concepts-identity
4 <auth_method> 2025-02-14 14:27:30.710664 <account_uid> <account_name> <account_organization_uid> ProwlerPro.onmicrosoft.com <finding_uid> azure aks_clusters_public_access_disabled Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled FAIL Public access to nodes is enabled for cluster '<resource_name>' in subscription '<account_name>' False aks high Microsoft.ContainerService/ManagedClusters /subscriptions/<account_uid>/resourcegroups/<resource_name>_group/providers/Microsoft.ContainerService/managedClusters/<resource_name> <resource_name> <partition> <region> Disable access to the Kubernetes API from outside the node network if it is not required. In a private cluster, the master node has two endpoints, a private and public endpoint. The private endpoint is the internal IP address of the master, behind an internal load balancer in the master's wirtual network. Nodes communicate with the master using the private endpoint. The public endpoint enables the Kubernetes API to be accessed from outside the master's virtual network. Although Kubernetes API requires an authorized token to perform sensitive actions, a vulnerability could potentially expose the Kubernetes publically with unrestricted access. Additionally, an attacker may be able to identify the current cluster and Kubernetes API version and determine whether it is vulnerable to an attack. Unless required, disabling public endpoint will help prevent such threats, and require the attacker to be on the master's virtual network to perform any attack on the Kubernetes API. 
https://learn.microsoft.com/en-us/azure/aks/private-clusters?tabs=azure-portal To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone https://learn.microsoft.com/en-us/azure/aks/access-private-cluster?tabs=azure-cli az aks update -n <cluster_name> -g <resource_group> --disable-public-fqdn ENS-RD2022: mp.com.4.az.aks.2 | MITRE-ATTACK: T1190, T1530 <prowler_version> https://learn.microsoft.com/azure/aks/azure-ad-rbac | https://learn.microsoft.com/azure/aks/concepts-identity
5 <auth_method> 2025-02-14 14:27:30.710664 <account_uid> <account_name> <account_organization_uid> ProwlerPro.onmicrosoft.com <finding_uid> azure aks_network_policy_enabled Ensure Network Policy is Enabled and set as appropriate PASS Network policy is enabled for cluster '<resource_name>' in subscription '<account_name>'. False aks medium Microsoft.ContainerService/managedClusters /subscriptions/<account_uid>/resourcegroups/<resource_name>_group/providers/Microsoft.ContainerService/managedClusters/<resource_name> <resource_name> <partition> <region> When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster. All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. Back-end applications are often only exposed to required front-end services, for example. Or, database components are only accessible to the application tiers that connect to them. Network Policy is a Kubernetes specification that defines access policies for communication between Pods. Using Network Policies, you define an ordered set of rules to send and receive traffic and apply them to a collection of pods that match one or more label selectors. These network policy rules are defined as YAML manifests. Network policies can be included as part of a wider manifest that also creates a deployment or service. 
https://learn.microsoft.com/en-us/security/benchmark/azure/security-controls-v2-network-security#ns-2-connect-private-networks-together https://learn.microsoft.com/en-us/azure/aks/use-network-policies https://docs.prowler.com/checks/azure/azure-kubernetes-policies/bc_azr_kubernetes_4#terraform ENS-RD2022: mp.com.4.r2.az.aks.1 Network Policy requires the Network Policy add-on. This add-on is included automatically when a cluster with Network Policy is created, but for an existing cluster, needs to be added prior to enabling Network Policy. Enabling/Disabling Network Policy causes a rolling update of all cluster nodes, similar to performing a cluster upgrade. This operation is long-running and will block other operations on the cluster (including delete) until it has run to completion. If Network Policy is used, a cluster must have at least 2 nodes of type n1-standard-1 or higher. The recommended minimum size cluster to run Network Policy enforcement is 3 n1-standard-1 instances. Enabling Network Policy enforcement consumes additional resources in nodes. Specifically, it increases the memory footprint of the kube-system process by approximately 128MB, and requires approximately 300 millicores of CPU. <prowler_version> https://learn.microsoft.com/azure/aks/azure-ad-rbac | https://learn.microsoft.com/azure/aks/concepts-identity

View File

@@ -27,6 +27,7 @@
"categories": [],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "Because Application Insights relies on a Log Analytics Workspace, an organization will incur additional expenses when using this service.",
"compliance": {
"CIS-2.1": [
@@ -131,6 +132,7 @@
"categories": [],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "",
"compliance": {
"CIS-2.1": [
@@ -247,6 +249,7 @@
"categories": [],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "",
"compliance": {
"CIS-2.1": [
@@ -360,6 +363,7 @@
"categories": [],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "When using an Azure container registry, you might occasionally encounter problems. For example, you might not be able to pull a container image because of an issue with Docker in your local environment. Or, a network issue might prevent you from connecting to the registry.",
"compliance": {
"MITRE-ATTACK": [
@@ -460,6 +464,7 @@
"categories": [],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "",
"compliance": {
"CIS-2.1": [

View File

@@ -1,5 +1,5 @@
AUTH_METHOD;TIMESTAMP;ACCOUNT_UID;ACCOUNT_NAME;ACCOUNT_EMAIL;ACCOUNT_ORGANIZATION_UID;ACCOUNT_ORGANIZATION_NAME;ACCOUNT_TAGS;FINDING_UID;PROVIDER;CHECK_ID;CHECK_TITLE;CHECK_TYPE;STATUS;STATUS_EXTENDED;MUTED;SERVICE_NAME;SUBSERVICE_NAME;SEVERITY;RESOURCE_TYPE;RESOURCE_UID;RESOURCE_NAME;RESOURCE_DETAILS;RESOURCE_TAGS;PARTITION;REGION;DESCRIPTION;RISK;RELATED_URL;REMEDIATION_RECOMMENDATION_TEXT;REMEDIATION_RECOMMENDATION_URL;REMEDIATION_CODE_NATIVEIAC;REMEDIATION_CODE_TERRAFORM;REMEDIATION_CODE_CLI;REMEDIATION_CODE_OTHER;COMPLIANCE;CATEGORIES;DEPENDS_ON;RELATED_TO;NOTES;PROWLER_VERSION
<auth_method>;2025-02-14 14:27:20.697446;<account_uid>;<account_name>;;<account_organization_uid>;<account_organization_name>;<account_tags>;<finding_uid>;gcp;apikeys_key_exists;Ensure API Keys Only Exist for Active Services;;PASS;Project <account_uid> does not have active API Keys.;False;apikeys;;medium;API Key;<account_uid>;<account_name>;;;;<region>;API Keys should only be used for services in cases where other authentication methods are unavailable. Unused keys with their permissions in tact may still exist within a project. Keys are insecure because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to use standard authentication flow instead.;Security risks involved in using API-Keys appear below: API keys are simple encrypted strings, API keys do not identify the user or the application making the API request, API keys are typically accessible to clients, making it easy to discover and steal an API key.;;To avoid the security risk in using API keys, it is recommended to use standard authentication flow instead.;https://cloud.google.com/docs/authentication/api-keys;;;gcloud alpha services api-keys delete;;MITRE-ATTACK: T1098 | CIS-2.0: 1.12 | ENS-RD2022: op.acc.2.gcp.rbak.1 | CIS-3.0: 1.12;;;;;<prowler_version>
<auth_method>;2025-02-14 14:27:20.697446;<account_uid>;<account_name>;;<account_organization_uid>;<account_organization_name>;<account_tags>;<finding_uid>;gcp;artifacts_container_analysis_enabled;Ensure Image Vulnerability Analysis using AR Container Analysis or a third-party provider;Security | Configuration;FAIL;AR Container Analysis is not enabled in project <account_uid>.;False;artifacts;Container Analysis;medium;Service;<resource_uid>;<resource_name>;;;;<region>;Scan images stored in Google Container Registry (GCR) for vulnerabilities using AR Container Analysis or a third-party provider. This helps identify and mitigate security risks associated with known vulnerabilities in container images.;Without image vulnerability scanning, container images stored in Artifact Registry may contain known vulnerabilities, increasing the risk of exploitation by malicious actors.;https://cloud.google.com/artifact-analysis/docs;Enable vulnerability scanning for images stored in Artifact Registry using AR Container Analysis or a third-party provider.;https://cloud.google.com/artifact-analysis/docs/container-scanning-overview;;;gcloud services enable containeranalysis.googleapis.com;;MITRE-ATTACK: T1525 | ENS-RD2022: op.exp.4.r4.gcp.log.1, op.mon.3.gcp.scc.1;;;;By default, AR Container Analysis is disabled.;<prowler_version>
<auth_method>;2025-02-14 14:27:20.697446;<account_uid>;<account_name>;;<account_organization_uid>;<account_organization_name>;<account_tags>;<finding_uid>;gcp;compute_firewall_rdp_access_from_the_internet_allowed;Ensure That RDP Access Is Restricted From the Internet;;PASS;Firewall <resource_name> does not expose port 3389 (RDP) to the internet.;False;networking;;critical;FirewallRule;<resource_uid>;<resource_name>;;;;<region>;GCP `Firewall Rules` are specific to a `VPC Network`. Each rule either `allows` or `denies` traffic when its conditions are met. Its conditions allow users to specify the type of traffic, such as ports and protocols, and the source or destination of the traffic, including IP addresses, subnets, and instances. Firewall rules are defined at the VPC network level and are specific to the network in which they are defined. The rules themselves cannot be shared among networks. Firewall rules only support IPv4 traffic. When specifying a source for an ingress rule or a destination for an egress rule by address, an `IPv4` address or `IPv4 block in CIDR` notation can be used. Generic `(0.0.0.0/0)` incoming traffic from the Internet to a VPC or VM instance using `RDP` on `Port 3389` can be avoided.;Allowing unrestricted Remote Desktop Protocol (RDP) access can increase opportunities for malicious activities such as hacking, Man-In-The-Middle attacks (MITM) and Pass-The-Hash (PTH) attacks.;;Ensure that Google Cloud Virtual Private Cloud (VPC) firewall rules do not allow unrestricted access (i.e. 0.0.0.0/0) on TCP port 3389 in order to restrict Remote Desktop Protocol (RDP) traffic to trusted IP addresses or IP ranges only and reduce the attack surface. 
TCP port 3389 is used for secure remote GUI login to Windows VM instances by connecting a RDP client application with an RDP server.;https://cloud.google.com/vpc/docs/using-firewalls;;https://docs.<account_organization_name>/checks/gcp/google-cloud-networking-policies/bc_gcp_networking_2#terraform;https://docs.<account_organization_name>/checks/gcp/google-cloud-networking-policies/bc_gcp_networking_2#cli-command;https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudVPC/unrestricted-rdp-access.html;MITRE-ATTACK: T1190, T1199, T1048, T1498, T1046 | CIS-2.0: 3.7 | ENS-RD2022: mp.com.1.gcp.fw.1 | CIS-3.0: 3.7;internet-exposed;;;;<prowler_version>
<auth_method>;2025-02-14 14:27:20.697446;<account_uid>;<account_name>;;<account_organization_uid>;<account_organization_name>;<account_tags>;<finding_uid>;gcp;compute_firewall_rdp_access_from_the_internet_allowed;Ensure That RDP Access Is Restricted From the Internet;;PASS;Firewall <resource_name> does not expose port 3389 (RDP) to the internet.;False;networking;;critical;FirewallRule;<resource_uid>;<resource_name>;;;;<region>;GCP `Firewall Rules` are specific to a `VPC Network`. Each rule either `allows` or `denies` traffic when its conditions are met. Its conditions allow users to specify the type of traffic, such as ports and protocols, and the source or destination of the traffic, including IP addresses, subnets, and instances. Firewall rules are defined at the VPC network level and are specific to the network in which they are defined. The rules themselves cannot be shared among networks. Firewall rules only support IPv4 traffic. When specifying a source for an ingress rule or a destination for an egress rule by address, an `IPv4` address or `IPv4 block in CIDR` notation can be used. Generic `(0.0.0.0/0)` incoming traffic from the Internet to a VPC or VM instance using `RDP` on `Port 3389` can be avoided.;Allowing unrestricted Remote Desktop Protocol (RDP) access can increase opportunities for malicious activities such as hacking, Man-In-The-Middle attacks (MITM) and Pass-The-Hash (PTH) attacks.;;Ensure that Google Cloud Virtual Private Cloud (VPC) firewall rules do not allow unrestricted access (i.e. 0.0.0.0/0) on TCP port 3389 in order to restrict Remote Desktop Protocol (RDP) traffic to trusted IP addresses or IP ranges only and reduce the attack surface. 
TCP port 3389 is used for secure remote GUI login to Windows VM instances by connecting a RDP client application with an RDP server.;https://cloud.google.com/vpc/docs/using-firewalls;;https://docs.<account_organization_name>/checks/gcp/google-cloud-networking-policies/bc_gcp_networking_2#terraform;https://docs.<account_organization_name>/checks/gcp/google-cloud-networking-policies/bc_gcp_networking_2#cli-command;https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudVPC/unrestricted-rdp-access.html;MITRE-ATTACK: T1190, T1199, T1048, T1498, T1046 | CIS-2.0: 3.7 | ENS-RD2022: mp.com.1.gcp.fw.1 | CIS-3.0: 3.7;internet-exposed;;;;<prowler_version>
AUTH_METHOD;TIMESTAMP;ACCOUNT_UID;ACCOUNT_NAME;ACCOUNT_EMAIL;ACCOUNT_ORGANIZATION_UID;ACCOUNT_ORGANIZATION_NAME;ACCOUNT_TAGS;FINDING_UID;PROVIDER;CHECK_ID;CHECK_TITLE;CHECK_TYPE;STATUS;STATUS_EXTENDED;MUTED;SERVICE_NAME;SUBSERVICE_NAME;SEVERITY;RESOURCE_TYPE;RESOURCE_UID;RESOURCE_NAME;RESOURCE_DETAILS;RESOURCE_TAGS;PARTITION;REGION;DESCRIPTION;RISK;RELATED_URL;REMEDIATION_RECOMMENDATION_TEXT;REMEDIATION_RECOMMENDATION_URL;REMEDIATION_CODE_NATIVEIAC;REMEDIATION_CODE_TERRAFORM;REMEDIATION_CODE_CLI;REMEDIATION_CODE_OTHER;COMPLIANCE;CATEGORIES;DEPENDS_ON;RELATED_TO;NOTES;PROWLER_VERSION;ADDITIONAL_URLS
<auth_method>;2025-02-14 14:27:20.697446;<account_uid>;<account_name>;;<account_organization_uid>;<account_organization_name>;<account_tags>;<finding_uid>;gcp;apikeys_key_exists;Ensure API Keys Only Exist for Active Services;;PASS;Project <account_uid> does not have active API Keys.;False;apikeys;;medium;API Key;<account_uid>;<account_name>;;;;<region>;API Keys should only be used for services in cases where other authentication methods are unavailable. Unused keys with their permissions in tact may still exist within a project. Keys are insecure because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to use standard authentication flow instead.;Security risks involved in using API-Keys appear below: API keys are simple encrypted strings, API keys do not identify the user or the application making the API request, API keys are typically accessible to clients, making it easy to discover and steal an API key.;;To avoid the security risk in using API keys, it is recommended to use standard authentication flow instead.;https://cloud.google.com/docs/authentication/api-keys;;;gcloud alpha services api-keys delete;;MITRE-ATTACK: T1098 | CIS-2.0: 1.12 | ENS-RD2022: op.acc.2.gcp.rbak.1 | CIS-3.0: 1.12;;;;;<prowler_version>;https://cloud.google.com/api-keys/docs/best-practices | https://cloud.google.com/docs/authentication
<auth_method>;2025-02-14 14:27:20.697446;<account_uid>;<account_name>;;<account_organization_uid>;<account_organization_name>;<account_tags>;<finding_uid>;gcp;artifacts_container_analysis_enabled;Ensure Image Vulnerability Analysis using AR Container Analysis or a third-party provider;Security | Configuration;FAIL;AR Container Analysis is not enabled in project <account_uid>.;False;artifacts;Container Analysis;medium;Service;<resource_uid>;<resource_name>;;;;<region>;Scan images stored in Google Container Registry (GCR) for vulnerabilities using AR Container Analysis or a third-party provider. This helps identify and mitigate security risks associated with known vulnerabilities in container images.;Without image vulnerability scanning, container images stored in Artifact Registry may contain known vulnerabilities, increasing the risk of exploitation by malicious actors.;https://cloud.google.com/artifact-analysis/docs;Enable vulnerability scanning for images stored in Artifact Registry using AR Container Analysis or a third-party provider.;https://cloud.google.com/artifact-analysis/docs/container-scanning-overview;;;gcloud services enable containeranalysis.googleapis.com;;MITRE-ATTACK: T1525 | ENS-RD2022: op.exp.4.r4.gcp.log.1, op.mon.3.gcp.scc.1;;;;By default, AR Container Analysis is disabled.;<prowler_version>;https://cloud.google.com/api-keys/docs/best-practices | https://cloud.google.com/docs/authentication
<auth_method>;2025-02-14 14:27:20.697446;<account_uid>;<account_name>;;<account_organization_uid>;<account_organization_name>;<account_tags>;<finding_uid>;gcp;compute_firewall_rdp_access_from_the_internet_allowed;Ensure That RDP Access Is Restricted From the Internet;;PASS;Firewall <resource_name> does not expose port 3389 (RDP) to the internet.;False;networking;;critical;FirewallRule;<resource_uid>;<resource_name>;;;;<region>;GCP `Firewall Rules` are specific to a `VPC Network`. Each rule either `allows` or `denies` traffic when its conditions are met. Its conditions allow users to specify the type of traffic, such as ports and protocols, and the source or destination of the traffic, including IP addresses, subnets, and instances. Firewall rules are defined at the VPC network level and are specific to the network in which they are defined. The rules themselves cannot be shared among networks. Firewall rules only support IPv4 traffic. When specifying a source for an ingress rule or a destination for an egress rule by address, an `IPv4` address or `IPv4 block in CIDR` notation can be used. Generic `(0.0.0.0/0)` incoming traffic from the Internet to a VPC or VM instance using `RDP` on `Port 3389` can be avoided.;Allowing unrestricted Remote Desktop Protocol (RDP) access can increase opportunities for malicious activities such as hacking, Man-In-The-Middle attacks (MITM) and Pass-The-Hash (PTH) attacks.;;Ensure that Google Cloud Virtual Private Cloud (VPC) firewall rules do not allow unrestricted access (i.e. 0.0.0.0/0) on TCP port 3389 in order to restrict Remote Desktop Protocol (RDP) traffic to trusted IP addresses or IP ranges only and reduce the attack surface. 
TCP port 3389 is used for secure remote GUI login to Windows VM instances by connecting a RDP client application with an RDP server.;https://cloud.google.com/vpc/docs/using-firewalls;;https://docs.<account_organization_name>/checks/gcp/google-cloud-networking-policies/bc_gcp_networking_2#terraform;https://docs.<account_organization_name>/checks/gcp/google-cloud-networking-policies/bc_gcp_networking_2#cli-command;https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudVPC/unrestricted-rdp-access.html;MITRE-ATTACK: T1190, T1199, T1048, T1498, T1046 | CIS-2.0: 3.7 | ENS-RD2022: mp.com.1.gcp.fw.1 | CIS-3.0: 3.7;internet-exposed;;;;<prowler_version>;https://cloud.google.com/api-keys/docs | https://cloud.google.com/docs/authentication
<auth_method>;2025-02-14 14:27:20.697446;<account_uid>;<account_name>;;<account_organization_uid>;<account_organization_name>;<account_tags>;<finding_uid>;gcp;compute_firewall_rdp_access_from_the_internet_allowed;Ensure That RDP Access Is Restricted From the Internet;;PASS;Firewall <resource_name> does not expose port 3389 (RDP) to the internet.;False;networking;;critical;FirewallRule;<resource_uid>;<resource_name>;;;;<region>;GCP `Firewall Rules` are specific to a `VPC Network`. Each rule either `allows` or `denies` traffic when its conditions are met. Its conditions allow users to specify the type of traffic, such as ports and protocols, and the source or destination of the traffic, including IP addresses, subnets, and instances. Firewall rules are defined at the VPC network level and are specific to the network in which they are defined. The rules themselves cannot be shared among networks. Firewall rules only support IPv4 traffic. When specifying a source for an ingress rule or a destination for an egress rule by address, an `IPv4` address or `IPv4 block in CIDR` notation can be used. Generic `(0.0.0.0/0)` incoming traffic from the Internet to a VPC or VM instance using `RDP` on `Port 3389` can be avoided.;Allowing unrestricted Remote Desktop Protocol (RDP) access can increase opportunities for malicious activities such as hacking, Man-In-The-Middle attacks (MITM) and Pass-The-Hash (PTH) attacks.;;Ensure that Google Cloud Virtual Private Cloud (VPC) firewall rules do not allow unrestricted access (i.e. 0.0.0.0/0) on TCP port 3389 in order to restrict Remote Desktop Protocol (RDP) traffic to trusted IP addresses or IP ranges only and reduce the attack surface. 
TCP port 3389 is used for secure remote GUI login to Windows VM instances by connecting a RDP client application with an RDP server.;https://cloud.google.com/vpc/docs/using-firewalls;;https://docs.<account_organization_name>/checks/gcp/google-cloud-networking-policies/bc_gcp_networking_2#terraform;https://docs.<account_organization_name>/checks/gcp/google-cloud-networking-policies/bc_gcp_networking_2#cli-command;https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudVPC/unrestricted-rdp-access.html;MITRE-ATTACK: T1190, T1199, T1048, T1498, T1046 | CIS-2.0: 3.7 | ENS-RD2022: mp.com.1.gcp.fw.1 | CIS-3.0: 3.7;internet-exposed;;;;<prowler_version>;https://cloud.google.com/api-keys/docs | https://cloud.google.com/docs/authentication
1 AUTH_METHOD TIMESTAMP ACCOUNT_UID ACCOUNT_NAME ACCOUNT_EMAIL ACCOUNT_ORGANIZATION_UID ACCOUNT_ORGANIZATION_NAME ACCOUNT_TAGS FINDING_UID PROVIDER CHECK_ID CHECK_TITLE CHECK_TYPE STATUS STATUS_EXTENDED MUTED SERVICE_NAME SUBSERVICE_NAME SEVERITY RESOURCE_TYPE RESOURCE_UID RESOURCE_NAME RESOURCE_DETAILS RESOURCE_TAGS PARTITION REGION DESCRIPTION RISK RELATED_URL REMEDIATION_RECOMMENDATION_TEXT REMEDIATION_RECOMMENDATION_URL REMEDIATION_CODE_NATIVEIAC REMEDIATION_CODE_TERRAFORM REMEDIATION_CODE_CLI REMEDIATION_CODE_OTHER COMPLIANCE CATEGORIES DEPENDS_ON RELATED_TO NOTES PROWLER_VERSION ADDITIONAL_URLS
2 <auth_method> 2025-02-14 14:27:20.697446 <account_uid> <account_name> <account_organization_uid> <account_organization_name> <account_tags> <finding_uid> gcp apikeys_key_exists Ensure API Keys Only Exist for Active Services PASS Project <account_uid> does not have active API Keys. False apikeys medium API Key <account_uid> <account_name> <region> API Keys should only be used for services in cases where other authentication methods are unavailable. Unused keys with their permissions in tact may still exist within a project. Keys are insecure because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to use standard authentication flow instead. Security risks involved in using API-Keys appear below: API keys are simple encrypted strings, API keys do not identify the user or the application making the API request, API keys are typically accessible to clients, making it easy to discover and steal an API key. To avoid the security risk in using API keys, it is recommended to use standard authentication flow instead. https://cloud.google.com/docs/authentication/api-keys gcloud alpha services api-keys delete MITRE-ATTACK: T1098 | CIS-2.0: 1.12 | ENS-RD2022: op.acc.2.gcp.rbak.1 | CIS-3.0: 1.12 <prowler_version> https://cloud.google.com/api-keys/docs/best-practices | https://cloud.google.com/docs/authentication
3 <auth_method> 2025-02-14 14:27:20.697446 <account_uid> <account_name> <account_organization_uid> <account_organization_name> <account_tags> <finding_uid> gcp artifacts_container_analysis_enabled Ensure Image Vulnerability Analysis using AR Container Analysis or a third-party provider Security | Configuration FAIL AR Container Analysis is not enabled in project <account_uid>. False artifacts Container Analysis medium Service <resource_uid> <resource_name> <region> Scan images stored in Google Container Registry (GCR) for vulnerabilities using AR Container Analysis or a third-party provider. This helps identify and mitigate security risks associated with known vulnerabilities in container images. Without image vulnerability scanning, container images stored in Artifact Registry may contain known vulnerabilities, increasing the risk of exploitation by malicious actors. https://cloud.google.com/artifact-analysis/docs Enable vulnerability scanning for images stored in Artifact Registry using AR Container Analysis or a third-party provider. https://cloud.google.com/artifact-analysis/docs/container-scanning-overview gcloud services enable containeranalysis.googleapis.com MITRE-ATTACK: T1525 | ENS-RD2022: op.exp.4.r4.gcp.log.1, op.mon.3.gcp.scc.1 By default, AR Container Analysis is disabled. <prowler_version> https://cloud.google.com/api-keys/docs/best-practices | https://cloud.google.com/docs/authentication
4 <auth_method> 2025-02-14 14:27:20.697446 <account_uid> <account_name> <account_organization_uid> <account_organization_name> <account_tags> <finding_uid> gcp compute_firewall_rdp_access_from_the_internet_allowed Ensure That RDP Access Is Restricted From the Internet PASS Firewall <resource_name> does not expose port 3389 (RDP) to the internet. False networking critical FirewallRule <resource_uid> <resource_name> <region> GCP `Firewall Rules` are specific to a `VPC Network`. Each rule either `allows` or `denies` traffic when its conditions are met. Its conditions allow users to specify the type of traffic, such as ports and protocols, and the source or destination of the traffic, including IP addresses, subnets, and instances. Firewall rules are defined at the VPC network level and are specific to the network in which they are defined. The rules themselves cannot be shared among networks. Firewall rules only support IPv4 traffic. When specifying a source for an ingress rule or a destination for an egress rule by address, an `IPv4` address or `IPv4 block in CIDR` notation can be used. Generic `(0.0.0.0/0)` incoming traffic from the Internet to a VPC or VM instance using `RDP` on `Port 3389` can be avoided. Allowing unrestricted Remote Desktop Protocol (RDP) access can increase opportunities for malicious activities such as hacking, Man-In-The-Middle attacks (MITM) and Pass-The-Hash (PTH) attacks. Ensure that Google Cloud Virtual Private Cloud (VPC) firewall rules do not allow unrestricted access (i.e. 0.0.0.0/0) on TCP port 3389 in order to restrict Remote Desktop Protocol (RDP) traffic to trusted IP addresses or IP ranges only and reduce the attack surface. TCP port 3389 is used for secure remote GUI login to Windows VM instances by connecting a RDP client application with an RDP server. 
https://cloud.google.com/vpc/docs/using-firewalls https://docs.<account_organization_name>/checks/gcp/google-cloud-networking-policies/bc_gcp_networking_2#terraform https://docs.<account_organization_name>/checks/gcp/google-cloud-networking-policies/bc_gcp_networking_2#cli-command https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudVPC/unrestricted-rdp-access.html MITRE-ATTACK: T1190, T1199, T1048, T1498, T1046 | CIS-2.0: 3.7 | ENS-RD2022: mp.com.1.gcp.fw.1 | CIS-3.0: 3.7 internet-exposed <prowler_version> https://cloud.google.com/api-keys/docs | https://cloud.google.com/docs/authentication
5 <auth_method> 2025-02-14 14:27:20.697446 <account_uid> <account_name> <account_organization_uid> <account_organization_name> <account_tags> <finding_uid> gcp compute_firewall_rdp_access_from_the_internet_allowed Ensure That RDP Access Is Restricted From the Internet PASS Firewall <resource_name> does not expose port 3389 (RDP) to the internet. False networking critical FirewallRule <resource_uid> <resource_name> <region> GCP `Firewall Rules` are specific to a `VPC Network`. Each rule either `allows` or `denies` traffic when its conditions are met. Its conditions allow users to specify the type of traffic, such as ports and protocols, and the source or destination of the traffic, including IP addresses, subnets, and instances. Firewall rules are defined at the VPC network level and are specific to the network in which they are defined. The rules themselves cannot be shared among networks. Firewall rules only support IPv4 traffic. When specifying a source for an ingress rule or a destination for an egress rule by address, an `IPv4` address or `IPv4 block in CIDR` notation can be used. Generic `(0.0.0.0/0)` incoming traffic from the Internet to a VPC or VM instance using `RDP` on `Port 3389` can be avoided. Allowing unrestricted Remote Desktop Protocol (RDP) access can increase opportunities for malicious activities such as hacking, Man-In-The-Middle attacks (MITM) and Pass-The-Hash (PTH) attacks. Ensure that Google Cloud Virtual Private Cloud (VPC) firewall rules do not allow unrestricted access (i.e. 0.0.0.0/0) on TCP port 3389 in order to restrict Remote Desktop Protocol (RDP) traffic to trusted IP addresses or IP ranges only and reduce the attack surface. TCP port 3389 is used for secure remote GUI login to Windows VM instances by connecting a RDP client application with an RDP server. 
https://cloud.google.com/vpc/docs/using-firewalls https://docs.<account_organization_name>/checks/gcp/google-cloud-networking-policies/bc_gcp_networking_2#terraform https://docs.<account_organization_name>/checks/gcp/google-cloud-networking-policies/bc_gcp_networking_2#cli-command https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudVPC/unrestricted-rdp-access.html MITRE-ATTACK: T1190, T1199, T1048, T1498, T1046 | CIS-2.0: 3.7 | ENS-RD2022: mp.com.1.gcp.fw.1 | CIS-3.0: 3.7 internet-exposed <prowler_version> https://cloud.google.com/api-keys/docs | https://cloud.google.com/docs/authentication

View File

@@ -27,6 +27,7 @@
"categories": [],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "",
"compliance": {
"MITRE-ATTACK": [
@@ -147,6 +148,7 @@
"categories": [],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "By default, AR Container Analysis is disabled.",
"compliance": {
"MITRE-ATTACK": [
@@ -267,6 +269,7 @@
],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "",
"compliance": {
"MITRE-ATTACK": [
@@ -394,6 +397,7 @@
],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "",
"compliance": {
"MITRE-ATTACK": [
@@ -533,6 +537,7 @@
],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "",
"compliance": {
"MITRE-ATTACK": [

View File

@@ -1,5 +1,5 @@
AUTH_METHOD;TIMESTAMP;ACCOUNT_UID;ACCOUNT_NAME;ACCOUNT_EMAIL;ACCOUNT_ORGANIZATION_UID;ACCOUNT_ORGANIZATION_NAME;ACCOUNT_TAGS;FINDING_UID;PROVIDER;CHECK_ID;CHECK_TITLE;CHECK_TYPE;STATUS;STATUS_EXTENDED;MUTED;SERVICE_NAME;SUBSERVICE_NAME;SEVERITY;RESOURCE_TYPE;RESOURCE_UID;RESOURCE_NAME;RESOURCE_DETAILS;RESOURCE_TAGS;PARTITION;REGION;DESCRIPTION;RISK;RELATED_URL;REMEDIATION_RECOMMENDATION_TEXT;REMEDIATION_RECOMMENDATION_URL;REMEDIATION_CODE_NATIVEIAC;REMEDIATION_CODE_TERRAFORM;REMEDIATION_CODE_CLI;REMEDIATION_CODE_OTHER;COMPLIANCE;CATEGORIES;DEPENDS_ON;RELATED_TO;NOTES;PROWLER_VERSION
<auth_method>;2025-02-14 14:27:38.533897;<account_uid>;context: <context>;;;;;<finding_uid>;kubernetes;apiserver_always_pull_images_plugin;Ensure that the admission control plugin AlwaysPullImages is set;;FAIL;AlwaysPullImages admission control plugin is not set in pod <resource_uid>;False;apiserver;;medium;KubernetesAPIServer;<resource_id>;<resource_name>;;;;namespace: kube-system;This check verifies that the AlwaysPullImages admission control plugin is enabled in the Kubernetes API server. This plugin ensures that every new pod always pulls the required images, enforcing image access control and preventing the use of possibly outdated or altered images.;Without AlwaysPullImages, once an image is pulled to a node, any pod can use it without any authorization check, potentially leading to security risks.;https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages;Configure the API server to use the AlwaysPullImages admission control plugin to ensure image security and integrity.;https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers;https://docs.prowler.com/checks/kubernetes/kubernetes-policy-index/ensure-that-the-admission-control-plugin-alwayspullimages-is-set#kubernetes;;--enable-admission-plugins=...,AlwaysPullImages,...;;CIS-1.10: 1.2.11 | CIS-1.8: 1.2.11;cluster-security;;;Enabling AlwaysPullImages can increase network and registry load and decrease container startup speed. It may not be suitable for all environments.;<prowler_version>
<auth_method>;2025-02-14 14:27:38.533897;<account_uid>;context: <context>;;;;;<finding_uid>;kubernetes;apiserver_anonymous_requests;Ensure that the --anonymous-auth argument is set to false;;PASS;API Server does not have anonymous-auth enabled in pod <resource_uid>;False;apiserver;;high;KubernetesAPIServer;<resource_id>;<resource_name>;;;;namespace: kube-system;Disable anonymous requests to the API server. When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests, which are then served by the API server. Disallowing anonymous requests strengthens security by ensuring all access is authenticated.;Enabling anonymous access to the API server can expose the cluster to unauthorized access and potential security vulnerabilities.;https://kubernetes.io/docs/admin/authentication/#anonymous-requests;Ensure the --anonymous-auth argument in the API server is set to false. This will reject all anonymous requests, enforcing authenticated access to the server.;https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/;https://docs.prowler.com/checks/kubernetes/kubernetes-policy-index/ensure-that-the-anonymous-auth-argument-is-set-to-false-1#kubernetes;;--anonymous-auth=false;;CIS-1.10: 1.2.1 | CIS-1.8: 1.2.1;trustboundaries;;;While anonymous access can be useful for health checks and discovery, consider the security implications for your specific environment.;<prowler_version>
<auth_method>;2025-02-14 14:27:38.533897;<account_uid>;context: <context>;;;;;<finding_uid>;kubernetes;apiserver_audit_log_maxage_set;Ensure that the --audit-log-maxage argument is set to 30 or as appropriate;;FAIL;Audit log max age is not set to 30 or as appropriate in pod <resource_uid>;False;apiserver;;medium;KubernetesAPIServer;<resource_id>;<resource_name>;;;;namespace: kube-system;This check ensures that the Kubernetes API server is configured with an appropriate audit log retention period. Setting --audit-log-maxage to 30 or as per business requirements helps in maintaining logs for sufficient time to investigate past events.;Without an adequate log retention period, there may be insufficient audit history to investigate and analyze past events or security incidents.;https://kubernetes.io/docs/concepts/cluster-administration/audit/;Configure the API server audit log retention period to retain logs for at least 30 days or as per your organization's requirements.;https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/;https://docs.prowler.com/checks/kubernetes/kubernetes-policy-index/ensure-that-the-audit-log-maxage-argument-is-set-to-30-or-as-appropriate#kubernetes;;--audit-log-maxage=30;;CIS-1.10: 1.2.17 | CIS-1.8: 1.2.18;logging;;;Ensure the audit log retention period is set appropriately to balance between storage constraints and the need for historical data.;<prowler_version>
<auth_method>;2025-02-14 14:27:38.533897;<account_uid>;context: <context>;;;;;<finding_uid>;kubernetes;apiserver_audit_log_maxbackup_set;Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate;;FAIL;Audit log max backup is not set to 10 or as appropriate in pod <resource_uid>;False;apiserver;;medium;KubernetesAPIServer;<resource_id>;<resource_name>;;;;namespace: kube-system;This check ensures that the Kubernetes API server is configured with an appropriate number of audit log backups. Setting --audit-log-maxbackup to 10 or as per business requirements helps maintain a sufficient log backup for investigations or analysis.;Without an adequate number of audit log backups, there may be insufficient log history to investigate past events or security incidents.;https://kubernetes.io/docs/concepts/cluster-administration/audit/;Configure the API server audit log backup retention to 10 or as per your organization's requirements.;https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/;https://docs.prowler.com/checks/kubernetes/kubernetes-policy-index/ensure-that-the-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate#kubernetes;;--audit-log-maxbackup=10;;CIS-1.10: 1.2.18 | CIS-1.8: 1.2.19;logging;;;Ensure the audit log backup retention period is set appropriately to balance between storage constraints and the need for historical data.;<prowler_version>
AUTH_METHOD;TIMESTAMP;ACCOUNT_UID;ACCOUNT_NAME;ACCOUNT_EMAIL;ACCOUNT_ORGANIZATION_UID;ACCOUNT_ORGANIZATION_NAME;ACCOUNT_TAGS;FINDING_UID;PROVIDER;CHECK_ID;CHECK_TITLE;CHECK_TYPE;STATUS;STATUS_EXTENDED;MUTED;SERVICE_NAME;SUBSERVICE_NAME;SEVERITY;RESOURCE_TYPE;RESOURCE_UID;RESOURCE_NAME;RESOURCE_DETAILS;RESOURCE_TAGS;PARTITION;REGION;DESCRIPTION;RISK;RELATED_URL;REMEDIATION_RECOMMENDATION_TEXT;REMEDIATION_RECOMMENDATION_URL;REMEDIATION_CODE_NATIVEIAC;REMEDIATION_CODE_TERRAFORM;REMEDIATION_CODE_CLI;REMEDIATION_CODE_OTHER;COMPLIANCE;CATEGORIES;DEPENDS_ON;RELATED_TO;NOTES;PROWLER_VERSION;ADDITIONAL_URLS
<auth_method>;2025-02-14 14:27:38.533897;<account_uid>;context: <context>;;;;;<finding_uid>;kubernetes;apiserver_always_pull_images_plugin;Ensure that the admission control plugin AlwaysPullImages is set;;FAIL;AlwaysPullImages admission control plugin is not set in pod <resource_uid>;False;apiserver;;medium;KubernetesAPIServer;<resource_id>;<resource_name>;;;;namespace: kube-system;This check verifies that the AlwaysPullImages admission control plugin is enabled in the Kubernetes API server. This plugin ensures that every new pod always pulls the required images, enforcing image access control and preventing the use of possibly outdated or altered images.;Without AlwaysPullImages, once an image is pulled to a node, any pod can use it without any authorization check, potentially leading to security risks.;https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages;Configure the API server to use the AlwaysPullImages admission control plugin to ensure image security and integrity.;https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers;https://docs.prowler.com/checks/kubernetes/kubernetes-policy-index/ensure-that-the-admission-control-plugin-alwayspullimages-is-set#kubernetes;;--enable-admission-plugins=...,AlwaysPullImages,...;;CIS-1.10: 1.2.11 | CIS-1.8: 1.2.11;cluster-security;;;Enabling AlwaysPullImages can increase network and registry load and decrease container startup speed. It may not be suitable for all environments.;<prowler_version>;https://kubernetes.io/docs/concepts/containers/images/ | https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/
<auth_method>;2025-02-14 14:27:38.533897;<account_uid>;context: <context>;;;;;<finding_uid>;kubernetes;apiserver_anonymous_requests;Ensure that the --anonymous-auth argument is set to false;;PASS;API Server does not have anonymous-auth enabled in pod <resource_uid>;False;apiserver;;high;KubernetesAPIServer;<resource_id>;<resource_name>;;;;namespace: kube-system;Disable anonymous requests to the API server. When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests, which are then served by the API server. Disallowing anonymous requests strengthens security by ensuring all access is authenticated.;Enabling anonymous access to the API server can expose the cluster to unauthorized access and potential security vulnerabilities.;https://kubernetes.io/docs/admin/authentication/#anonymous-requests;Ensure the --anonymous-auth argument in the API server is set to false. This will reject all anonymous requests, enforcing authenticated access to the server.;https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/;https://docs.prowler.com/checks/kubernetes/kubernetes-policy-index/ensure-that-the-anonymous-auth-argument-is-set-to-false-1#kubernetes;;--anonymous-auth=false;;CIS-1.10: 1.2.1 | CIS-1.8: 1.2.1;trustboundaries;;;While anonymous access can be useful for health checks and discovery, consider the security implications for your specific environment.;<prowler_version>;https://kubernetes.io/docs/concepts/containers/images/ | https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/
<auth_method>;2025-02-14 14:27:38.533897;<account_uid>;context: <context>;;;;;<finding_uid>;kubernetes;apiserver_audit_log_maxage_set;Ensure that the --audit-log-maxage argument is set to 30 or as appropriate;;FAIL;Audit log max age is not set to 30 or as appropriate in pod <resource_uid>;False;apiserver;;medium;KubernetesAPIServer;<resource_id>;<resource_name>;;;;namespace: kube-system;This check ensures that the Kubernetes API server is configured with an appropriate audit log retention period. Setting --audit-log-maxage to 30 or as per business requirements helps in maintaining logs for sufficient time to investigate past events.;Without an adequate log retention period, there may be insufficient audit history to investigate and analyze past events or security incidents.;https://kubernetes.io/docs/concepts/cluster-administration/audit/;Configure the API server audit log retention period to retain logs for at least 30 days or as per your organization's requirements.;https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/;https://docs.prowler.com/checks/kubernetes/kubernetes-policy-index/ensure-that-the-audit-log-maxage-argument-is-set-to-30-or-as-appropriate#kubernetes;;--audit-log-maxage=30;;CIS-1.10: 1.2.17 | CIS-1.8: 1.2.18;logging;;;Ensure the audit log retention period is set appropriately to balance between storage constraints and the need for historical data.;<prowler_version>;https://kubernetes.io/docs/concepts/containers/images/ | https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/
<auth_method>;2025-02-14 14:27:38.533897;<account_uid>;context: <context>;;;;;<finding_uid>;kubernetes;apiserver_audit_log_maxbackup_set;Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate;;FAIL;Audit log max backup is not set to 10 or as appropriate in pod <resource_uid>;False;apiserver;;medium;KubernetesAPIServer;<resource_id>;<resource_name>;;;;namespace: kube-system;This check ensures that the Kubernetes API server is configured with an appropriate number of audit log backups. Setting --audit-log-maxbackup to 10 or as per business requirements helps maintain a sufficient log backup for investigations or analysis.;Without an adequate number of audit log backups, there may be insufficient log history to investigate past events or security incidents.;https://kubernetes.io/docs/concepts/cluster-administration/audit/;Configure the API server audit log backup retention to 10 or as per your organization's requirements.;https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/;https://docs.prowler.com/checks/kubernetes/kubernetes-policy-index/ensure-that-the-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate#kubernetes;;--audit-log-maxbackup=10;;CIS-1.10: 1.2.18 | CIS-1.8: 1.2.19;logging;;;Ensure the audit log backup retention period is set appropriately to balance between storage constraints and the need for historical data.;<prowler_version>;https://kubernetes.io/docs/concepts/containers/images/ | https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/
1 AUTH_METHOD TIMESTAMP ACCOUNT_UID ACCOUNT_NAME ACCOUNT_EMAIL ACCOUNT_ORGANIZATION_UID ACCOUNT_ORGANIZATION_NAME ACCOUNT_TAGS FINDING_UID PROVIDER CHECK_ID CHECK_TITLE CHECK_TYPE STATUS STATUS_EXTENDED MUTED SERVICE_NAME SUBSERVICE_NAME SEVERITY RESOURCE_TYPE RESOURCE_UID RESOURCE_NAME RESOURCE_DETAILS RESOURCE_TAGS PARTITION REGION DESCRIPTION RISK RELATED_URL REMEDIATION_RECOMMENDATION_TEXT REMEDIATION_RECOMMENDATION_URL REMEDIATION_CODE_NATIVEIAC REMEDIATION_CODE_TERRAFORM REMEDIATION_CODE_CLI REMEDIATION_CODE_OTHER COMPLIANCE CATEGORIES DEPENDS_ON RELATED_TO NOTES PROWLER_VERSION ADDITIONAL_URLS
2 <auth_method> 2025-02-14 14:27:38.533897 <account_uid> context: <context> <finding_uid> kubernetes apiserver_always_pull_images_plugin Ensure that the admission control plugin AlwaysPullImages is set FAIL AlwaysPullImages admission control plugin is not set in pod <resource_uid> False apiserver medium KubernetesAPIServer <resource_id> <resource_name> namespace: kube-system This check verifies that the AlwaysPullImages admission control plugin is enabled in the Kubernetes API server. This plugin ensures that every new pod always pulls the required images, enforcing image access control and preventing the use of possibly outdated or altered images. Without AlwaysPullImages, once an image is pulled to a node, any pod can use it without any authorization check, potentially leading to security risks. https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages Configure the API server to use the AlwaysPullImages admission control plugin to ensure image security and integrity. https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers https://docs.prowler.com/checks/kubernetes/kubernetes-policy-index/ensure-that-the-admission-control-plugin-alwayspullimages-is-set#kubernetes --enable-admission-plugins=...,AlwaysPullImages,... CIS-1.10: 1.2.11 | CIS-1.8: 1.2.11 cluster-security Enabling AlwaysPullImages can increase network and registry load and decrease container startup speed. It may not be suitable for all environments. <prowler_version> https://kubernetes.io/docs/concepts/containers/images/ | https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/
3 <auth_method> 2025-02-14 14:27:38.533897 <account_uid> context: <context> <finding_uid> kubernetes apiserver_anonymous_requests Ensure that the --anonymous-auth argument is set to false PASS API Server does not have anonymous-auth enabled in pod <resource_uid> False apiserver high KubernetesAPIServer <resource_id> <resource_name> namespace: kube-system Disable anonymous requests to the API server. When enabled, requests that are not rejected by other configured authentication methods are treated as anonymous requests, which are then served by the API server. Disallowing anonymous requests strengthens security by ensuring all access is authenticated. Enabling anonymous access to the API server can expose the cluster to unauthorized access and potential security vulnerabilities. https://kubernetes.io/docs/admin/authentication/#anonymous-requests Ensure the --anonymous-auth argument in the API server is set to false. This will reject all anonymous requests, enforcing authenticated access to the server. https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/ https://docs.prowler.com/checks/kubernetes/kubernetes-policy-index/ensure-that-the-anonymous-auth-argument-is-set-to-false-1#kubernetes --anonymous-auth=false CIS-1.10: 1.2.1 | CIS-1.8: 1.2.1 trustboundaries While anonymous access can be useful for health checks and discovery, consider the security implications for your specific environment. <prowler_version> https://kubernetes.io/docs/concepts/containers/images/ | https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/
4 <auth_method> 2025-02-14 14:27:38.533897 <account_uid> context: <context> <finding_uid> kubernetes apiserver_audit_log_maxage_set Ensure that the --audit-log-maxage argument is set to 30 or as appropriate FAIL Audit log max age is not set to 30 or as appropriate in pod <resource_uid> False apiserver medium KubernetesAPIServer <resource_id> <resource_name> namespace: kube-system This check ensures that the Kubernetes API server is configured with an appropriate audit log retention period. Setting --audit-log-maxage to 30 or as per business requirements helps in maintaining logs for sufficient time to investigate past events. Without an adequate log retention period, there may be insufficient audit history to investigate and analyze past events or security incidents. https://kubernetes.io/docs/concepts/cluster-administration/audit/ Configure the API server audit log retention period to retain logs for at least 30 days or as per your organization's requirements. https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/ https://docs.prowler.com/checks/kubernetes/kubernetes-policy-index/ensure-that-the-audit-log-maxage-argument-is-set-to-30-or-as-appropriate#kubernetes --audit-log-maxage=30 CIS-1.10: 1.2.17 | CIS-1.8: 1.2.18 logging Ensure the audit log retention period is set appropriately to balance between storage constraints and the need for historical data. <prowler_version> https://kubernetes.io/docs/concepts/containers/images/ | https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/
5 <auth_method> 2025-02-14 14:27:38.533897 <account_uid> context: <context> <finding_uid> kubernetes apiserver_audit_log_maxbackup_set Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate FAIL Audit log max backup is not set to 10 or as appropriate in pod <resource_uid> False apiserver medium KubernetesAPIServer <resource_id> <resource_name> namespace: kube-system This check ensures that the Kubernetes API server is configured with an appropriate number of audit log backups. Setting --audit-log-maxbackup to 10 or as per business requirements helps maintain a sufficient log backup for investigations or analysis. Without an adequate number of audit log backups, there may be insufficient log history to investigate past events or security incidents. https://kubernetes.io/docs/concepts/cluster-administration/audit/ Configure the API server audit log backup retention to 10 or as per your organization's requirements. https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/ https://docs.prowler.com/checks/kubernetes/kubernetes-policy-index/ensure-that-the-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate#kubernetes --audit-log-maxbackup=10 CIS-1.10: 1.2.18 | CIS-1.8: 1.2.19 logging Ensure the audit log backup retention period is set appropriately to balance between storage constraints and the need for historical data. <prowler_version> https://kubernetes.io/docs/concepts/containers/images/ | https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/

View File

@@ -28,6 +28,7 @@
],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "Enabling AlwaysPullImages can increase network and registry load and decrease container startup speed. It may not be suitable for all environments.",
"compliance": {
"CIS-1.10": [
@@ -161,6 +162,7 @@
],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "While anonymous access can be useful for health checks and discovery, consider the security implications for your specific environment.",
"compliance": {
"CIS-1.10": [
@@ -294,6 +296,7 @@
],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "Ensure the audit log retention period is set appropriately to balance between storage constraints and the need for historical data.",
"compliance": {
"CIS-1.10": [
@@ -427,6 +430,7 @@
],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "Ensure the audit log backup retention period is set appropriately to balance between storage constraints and the need for historical data.",
"compliance": {
"CIS-1.10": [
@@ -560,6 +564,7 @@
],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "Adjust the audit log file size limit based on your organization's storage capabilities and logging requirements.",
"compliance": {
"CIS-1.10": [
@@ -693,6 +698,7 @@
],
"depends_on": [],
"related_to": [],
"additional_urls": [],
"notes": "Audit logs are not enabled by default in Kubernetes. Configuring them is essential for security monitoring and forensic analysis.",
"compliance": {
"CIS-1.10": [

151
mcp_server/.gitignore vendored Normal file
View File

@@ -0,0 +1,151 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
Pipfile.lock
# poetry
poetry.lock
# pdm
.pdm.toml
.pdm-python
pdm.lock
# PEP 582
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
.idea/
# VS Code
.vscode/
.cursor/
# macOS
.DS_Store
# uv
uv.lock

88
mcp_server/README.md Normal file
View File

@@ -0,0 +1,88 @@
# Prowler MCP Server
Access the entire Prowler ecosystem through the Model Context Protocol (MCP), the supported capabilities right now are:
- Prowler Hub, for checking the current coverage of checks, fixers, and compliance frameworks in Prowler.
## Requirements
- Python 3.12+
- Network access to `https://hub.prowler.com`
## Installation
### From Sources
It is needed to have [uv](https://docs.astral.sh/uv/) installed.
```bash
git clone https://github.com/prowler-cloud/prowler.git
```
## Running
After installation, start the MCP server via the console script:
```bash
cd prowler/mcp_server
uv run prowler-mcp
```
Alternatively, you can run from wherever you want using `uvx` command:
```bash
uvx /path/to/prowler/mcp_server/
```
## Available Tools
### Prowler Hub
All tools are exposed under the `prowler_hub` prefix.
- prowler_hub_get_check_filters: Return available filter values for checks (providers, services, severities, categories, compliances). Call this before `prowler_hub_get_checks` to build valid queries.
- prowler_hub_get_checks: List checks with option of advanced filtering.
- prowler_hub_search_checks: Full-text search across check metadata.
- prowler_hub_get_compliance_frameworks: List/filter compliance frameworks.
- prowler_hub_search_compliance_frameworks: Full-text search across frameworks.
- prowler_hub_list_providers: List Prowler official providers and their services.
- prowler_hub_get_artifacts_count: Return total artifact count (checks + frameworks).
## MCP Client Configuration
Configure your MCP client to launch the server with the `uvx` command. Below is a generic snippet; consult your client's documentation for exact locations.
```json
{
"mcpServers": {
"prowler": {
"command": "uvx",
"args": ["/path/to/prowler/mcp_server/"]
}
}
}
```
### Claude Desktop (macOS/Windows)
Add the server to Claude Desktop's config file, then restart the app.
- macOS: `~/Library/Application Support/Claude/claude_desktop_config.json`
- Windows: `%AppData%\Claude\claude_desktop_config.json` (e.g. `C:\\Users\\<you>\\AppData\\Roaming\\Claude\\claude_desktop_config.json`)
Example content to append/merge:
```json
{
"mcpServers": {
"prowler": {
"command": "uvx",
"args": ["/path/to/prowler/mcp_server/"]
}
}
}
```
## License
This project follows the repository's main license. See the [LICENSE](../LICENSE) file at the repository root.

View File

@@ -0,0 +1,12 @@
"""
Prowler MCP - Model Context Protocol server for Prowler ecosystem
This package provides MCP tools for accessing:
- Prowler Hub: All security artifacts (detections, remediations and frameworks) supported by Prowler
"""
__version__ = "0.1.0"
__author__ = "Prowler Team"
__email__ = "engineering@prowler.com"
__all__ = ["__version__", "prowler_mcp_server"]

View File

@@ -0,0 +1,20 @@
import asyncio
import sys
from prowler_mcp_server.server import setup_main_server, prowler_mcp_server
def main():
"""Main entry point for the MCP server."""
try:
asyncio.run(setup_main_server())
prowler_mcp_server.run()
except KeyboardInterrupt:
print("\nShutting down Prowler MCP server...")
sys.exit(0)
except Exception as e:
print(f"Error: {e}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,3 @@
"""Prowler Hub module for MCP server."""
__all__ = ["prowler_hub_mcp"]

View File

@@ -0,0 +1,479 @@
"""
Prowler Hub MCP module
Provides access to Prowler Hub API for security checks and compliance frameworks.
"""
from typing import Optional, Any
import httpx
from fastmcp import FastMCP
# Initialize FastMCP for Prowler Hub
hub_mcp_server = FastMCP("prowler-hub")
# API base URL
BASE_URL = "https://hub.prowler.com/api"
# HTTP client configuration
client = httpx.Client(
base_url=BASE_URL, timeout=30.0, headers={"Accept": "application/json"}
)
# GitHub raw content base URL for Prowler checks
GITHUB_RAW_BASE = (
"https://raw.githubusercontent.com/prowler-cloud/prowler/refs/heads/master/"
"prowler/providers"
)
# Separate HTTP client for GitHub raw content
github_client = httpx.Client(
timeout=30.0,
headers={
"Accept": "*/*",
"User-Agent": "prowler-mcp-server/1.0",
},
)
def github_check_path(provider_id: str, check_id: str, suffix: str) -> str:
"""Build the GitHub raw URL for a given check artifact suffix using provider
and check_id.
Suffix examples: ".metadata.json", ".py", "_fixer.py"
"""
try:
service_id = check_id.split("_", 1)[0]
except IndexError:
service_id = check_id
return f"{GITHUB_RAW_BASE}/{provider_id}/services/{service_id}/{check_id}/{check_id}{suffix}"
@hub_mcp_server.tool()
async def get_check_filters() -> dict[str, Any]:
"""
Get available values for filtering for tool `get_checks`. Recommended to use before calling `get_checks` to get the available values for the filters.
Returns:
Available filter options including providers, types, services, severities,
categories, and compliance frameworks with their respective counts
"""
try:
response = client.get("/check/filters")
response.raise_for_status()
filters = response.json()
return {"filters": filters}
except httpx.HTTPStatusError as e:
return {
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
}
except Exception as e:
return {"error": str(e)}
# Security Check Tools
@hub_mcp_server.tool()
async def get_checks(
providers: Optional[str] = None,
types: Optional[str] = None,
services: Optional[str] = None,
severities: Optional[str] = None,
categories: Optional[str] = None,
compliances: Optional[str] = None,
ids: Optional[str] = None,
fields: Optional[str] = "id,service,severity,title,description,risk",
) -> dict[str, Any]:
"""
List security Prowler Checks. The list can be filtered by the parameters defined for the tool.
It is recommended to use the tool `get_check_filters` to get the available values for the filters.
A not filtered request will return more than 1000 checks, so it is recommended to use the filters.
Args:
providers: Filter by Prowler provider IDs. Example: "aws,azure". Use the tool `list_providers` to get the available providers IDs.
types: Filter by check types.
services: Filter by provider services IDs. Example: "s3,keyvault". Use the tool `list_providers` to get the available services IDs in a provider.
severities: Filter by severity levels. Example: "medium,high". Available values are "low", "medium", "high", "critical".
categories: Filter by categories. Example: "cluster-security,encryption".
compliances: Filter by compliance framework IDs. Example: "cis_4.0_aws,ens_rd2022_azure".
ids: Filter by specific check IDs. Example: "s3_bucket_level_public_access_block".
fields: Specify which fields from checks metadata to return (id is always included). Example: "id,title,description,risk".
Available values are "id", "title", "description", "provider", "type", "service", "subservice", "severity", "risk", "reference", "remediation", "services_required", "aws_arn_template", "notes", "categories", "default_value", "resource_type", "related_url", "depends_on", "related_to", "fixer".
The default parameters are "id,title,description".
If null, all fields will be returned.
Returns:
List of security checks matching the filters. The structure is as follows:
{
"count": N,
"checks": [
{"id": "check_id_1", "title": "check_title_1", "description": "check_description_1", ...},
{"id": "check_id_2", "title": "check_title_2", "description": "check_description_2", ...},
{"id": "check_id_3", "title": "check_title_3", "description": "check_description_3", ...},
...
]
}
"""
params: dict[str, str] = {}
if providers:
params["providers"] = providers
if types:
params["types"] = types
if services:
params["services"] = services
if severities:
params["severities"] = severities
if categories:
params["categories"] = categories
if compliances:
params["compliances"] = compliances
if ids:
params["ids"] = ids
if fields:
params["fields"] = fields
try:
response = client.get("/check", params=params)
response.raise_for_status()
checks = response.json()
checks_dict = {}
for check in checks:
check_data = {}
# Always include the id field as it's mandatory for the response structure
if "id" in check:
check_data["id"] = check["id"]
# Include other requested fields
for field in fields.split(","):
if field != "id" and field in check: # Skip id since it's already added
check_data[field] = check[field]
checks_dict[check["id"]] = check_data
return {"count": len(checks), "checks": checks_dict}
except httpx.HTTPStatusError as e:
return {
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
}
except Exception as e:
return {"error": str(e)}
@hub_mcp_server.tool()
async def get_check_raw_metadata(
    provider_id: str,
    check_id: str,
) -> dict[str, Any]:
    """
    Fetch the raw check metadata JSON, this is a low level version of the tool `get_checks`.
    It is recommended to use the tool `get_checks` filtering about the `ids` parameter instead of using this tool.
    Args:
        provider_id: Prowler provider ID (e.g., "aws", "azure").
        check_id: Prowler check ID (folder and base filename).
    Returns:
        Raw metadata JSON as stored in Prowler.
    """
    # Guard clause: both identifiers are mandatory to build the GitHub path.
    if not (provider_id and check_id):
        return {
            "error": "Provider ID and check ID are required",
        }
    metadata_url = github_check_path(provider_id, check_id, ".metadata.json")
    try:
        result = github_client.get(metadata_url)
        result.raise_for_status()
        return result.json()
    except httpx.HTTPStatusError as http_err:
        # A 404 means the check does not exist upstream; report it distinctly.
        if http_err.response.status_code == 404:
            return {
                "error": f"Check {check_id} not found in Prowler",
            }
        return {
            "error": f"HTTP error {http_err.response.status_code}: {http_err.response.text}",
        }
    except Exception as unexpected:
        return {
            "error": f"Error fetching check {check_id} from Prowler: {str(unexpected)}",
        }
@hub_mcp_server.tool()
async def get_check_code(
    provider_id: str,
    check_id: str,
) -> dict[str, Any]:
    """
    Fetch the check implementation Python code from Prowler.
    Args:
        provider_id: Prowler provider ID (e.g., "aws", "azure").
        check_id: Prowler check ID (e.g., "opensearch_service_domains_not_publicly_accessible").
    Returns:
        Dict with the code content as text.
    """
    # Guard clause: both identifiers are required to locate the source file.
    if not (provider_id and check_id):
        return {
            "error": "Provider ID and check ID are required",
        }
    source_url = github_check_path(provider_id, check_id, ".py")
    try:
        result = github_client.get(source_url)
        result.raise_for_status()
        # The check implementation is returned verbatim as text.
        return {
            "content": result.text,
        }
    except httpx.HTTPStatusError as http_err:
        # Distinguish a missing check (404) from other HTTP failures.
        if http_err.response.status_code == 404:
            return {
                "error": f"Check {check_id} not found in Prowler",
            }
        return {
            "error": f"HTTP error {http_err.response.status_code}: {http_err.response.text}",
        }
    except Exception as unexpected:
        return {
            "error": str(unexpected),
        }
@hub_mcp_server.tool()
async def get_check_fixer(
    provider_id: str,
    check_id: str,
) -> dict[str, Any]:
    """
    Fetch the check fixer Python code from Prowler, if it exists.
    Args:
        provider_id: Prowler provider ID (e.g., "aws", "azure").
        check_id: Prowler check ID (e.g., "opensearch_service_domains_not_publicly_accessible").
    Returns:
        Dict with fixer content as text if present, existence flag.
    """
    # Both identifiers are required to build the GitHub path to the fixer file.
    if not (provider_id and check_id):
        return {
            "error": "Provider ID and check ID are required",
        }
    url = github_check_path(provider_id, check_id, "_fixer.py")
    try:
        resp = github_client.get(url)
        # Not every check ships a fixer; a 404 simply means none exists.
        if resp.status_code == 404:
            return {
                "error": f"Fixer not found for check {check_id}",
            }
        resp.raise_for_status()
        return {
            "content": resp.text,
        }
    except httpx.HTTPStatusError as e:
        # 404 is handled above before raise_for_status(), so only non-404
        # HTTP errors can reach this branch (the previous 404 check here
        # was unreachable dead code).
        return {
            "error": f"HTTP error {e.response.status_code}: {e.response.text}",
        }
    except Exception as e:
        return {
            "error": str(e),
        }
@hub_mcp_server.tool()
async def search_checks(term: str) -> dict[str, Any]:
    """
    Search the term across all text properties of check metadata.
    Args:
        term: Search term to find in check titles, descriptions, and other text fields
    Returns:
        List of checks matching the search term
    """
    try:
        result = client.get("/check/search", params={"term": term})
        result.raise_for_status()
        matching_checks = result.json()
        # Expose both the match count and the raw API payload.
        return {
            "count": len(matching_checks),
            "checks": matching_checks,
        }
    except httpx.HTTPStatusError as http_err:
        return {
            "error": f"HTTP error {http_err.response.status_code}: {http_err.response.text}",
        }
    except Exception as unexpected:
        return {"error": str(unexpected)}
# Compliance Framework Tools
@hub_mcp_server.tool()
async def get_compliance_frameworks(
    provider: Optional[str] = None,
    fields: Optional[
        str
    ] = "id,framework,provider,description,total_checks,total_requirements",
) -> dict[str, Any]:
    """
    List and filter compliance frameworks. The list can be filtered by the parameters defined for the tool.
    Args:
        provider: Filter by one Prowler provider ID. Example: "aws". Use the tool `list_providers` to get the available providers IDs.
        fields: Specify which fields to return (id is always included). Example: "id,provider,description,version".
            It is recommended to run with the default parameters because the full response is too large.
            Available values are "id", "framework", "provider", "description", "total_checks", "total_requirements", "created_at", "updated_at".
            The default parameters are "id,framework,provider,description,total_checks,total_requirements".
            If null, all fields will be returned.
    Returns:
        List of compliance frameworks. The structure is as follows:
        {
            "count": N,
            "frameworks": {
                "framework_id": {
                    "id": "framework_id",
                    "provider": "provider_id",
                    "description": "framework_description",
                    "version": "framework_version"
                }
            }
        }
    """
    params = {}
    if provider:
        params["provider"] = provider
    if fields:
        params["fields"] = fields
    try:
        response = client.get("/compliance", params=params)
        response.raise_for_status()
        frameworks = response.json()
        # Parse the requested field list once. None/empty means "all fields",
        # as documented; previously this path crashed with AttributeError on
        # fields.split(",") and surfaced as a generic error.
        requested_fields = (
            [field.strip() for field in fields.split(",")] if fields else None
        )
        frameworks_dict = {}
        for framework in frameworks:
            framework_data = {}
            # Always include the id field as it's mandatory for the response structure
            if "id" in framework:
                framework_data["id"] = framework["id"]
            if requested_fields is None:
                # No field filter: include every field returned by the API.
                for key, value in framework.items():
                    if key != "id":  # Skip id since it's already added
                        framework_data[key] = value
            else:
                # Include only the requested fields (id already added above).
                for field in requested_fields:
                    if field != "id" and field in framework:
                        framework_data[field] = framework[field]
            frameworks_dict[framework["id"]] = framework_data
        return {"count": len(frameworks), "frameworks": frameworks_dict}
    except httpx.HTTPStatusError as e:
        return {
            "error": f"HTTP error {e.response.status_code}: {e.response.text}",
        }
    except Exception as e:
        return {"error": str(e)}
@hub_mcp_server.tool()
async def search_compliance_frameworks(term: str) -> dict[str, Any]:
    """
    Search compliance frameworks by term.
    Args:
        term: Search term to find in framework names and descriptions
    Returns:
        List of compliance frameworks matching the search term
    """
    try:
        result = client.get("/compliance/search", params={"term": term})
        result.raise_for_status()
        matching_frameworks = result.json()
        # Echo the search term back alongside the count and raw results.
        return {
            "count": len(matching_frameworks),
            "search_term": term,
            "frameworks": matching_frameworks,
        }
    except httpx.HTTPStatusError as http_err:
        return {
            "error": f"HTTP error {http_err.response.status_code}: {http_err.response.text}",
        }
    except Exception as unexpected:
        return {"error": str(unexpected)}
# Provider Tools
@hub_mcp_server.tool()
async def list_providers() -> dict[str, Any]:
    """
    Get all available Prowler providers and their associated services.
    Returns:
        List of Prowler providers with their associated services. The structure is as follows:
        {
            "count": N,
            "providers": {
                "provider_id": {
                    "name": "provider_name",
                    "services": ["service_id_1", "service_id_2", "service_id_3", ...]
                }
            }
        }
    """
    try:
        response = client.get("/providers")
        response.raise_for_status()
        provider_list = response.json()
        # Re-key the API list by provider id, defaulting missing attributes.
        return {
            "count": len(provider_list),
            "providers": {
                entry["id"]: {
                    "name": entry.get("name", ""),
                    "services": entry.get("services", []),
                }
                for entry in provider_list
            },
        }
    except httpx.HTTPStatusError as http_err:
        return {
            "error": f"HTTP error {http_err.response.status_code}: {http_err.response.text}",
        }
    except Exception as unexpected:
        return {"error": str(unexpected)}
# Analytics Tools
@hub_mcp_server.tool()
async def get_artifacts_count() -> dict[str, Any]:
    """
    Get total count of security artifacts (checks + compliance frameworks).
    Returns:
        Total number of artifacts in the Prowler Hub.
    """
    try:
        response = client.get("/n_artifacts")
        response.raise_for_status()
        payload = response.json()
        # The API returns the total under the key "n"; default to 0 if absent.
        return {
            "total_artifacts": payload.get("n", 0),
            "details": "Total count includes both security checks and compliance frameworks",
        }
    except httpx.HTTPStatusError as http_err:
        return {
            "error": f"HTTP error {http_err.response.status_code}: {http_err.response.text}",
        }
    except Exception as unexpected:
        return {"error": str(unexpected)}

View File

@@ -0,0 +1,18 @@
from fastmcp import FastMCP
# Initialize main Prowler MCP server
prowler_mcp_server = FastMCP("prowler-mcp-server")
async def setup_main_server():
"""Set up the main Prowler MCP server with all available integrations."""
# Import Prowler Hub tools with prowler_hub_ prefix
try:
from prowler_mcp_server.prowler_hub.server import hub_mcp_server
await prowler_mcp_server.import_server(hub_mcp_server, prefix="prowler_hub")
except Exception:
# TODO: Add error logging
pass

20
mcp_server/pyproject.toml Normal file
View File

@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "prowler-mcp"
version = "0.1.0"
description = "MCP server for Prowler ecosystem"
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
"fastmcp>=2.11.3",
"httpx>=0.27.0",
]
[project.scripts]
prowler-mcp = "prowler_mcp_server.main:main"
[tool.uv]
package = true

View File

@@ -70,6 +70,7 @@ nav:
- Integrations:
- Amazon S3: tutorials/prowler-app-s3-integration.md
- AWS Security Hub: tutorials/prowler-app-security-hub-integration.md
- Jira: tutorials/prowler-app-jira-integration.md
- Lighthouse AI: tutorials/prowler-app-lighthouse.md
- Tutorials:
- SSO with Entra: tutorials/prowler-app-sso-entra.md
@@ -99,7 +100,7 @@ nav:
- AWS:
- Getting Started: tutorials/aws/getting-started-aws.md
- Authentication: tutorials/aws/authentication.md
- Assume Role: tutorials/aws/role-assumption.md
- Assume Role (CLI): tutorials/aws/role-assumption.md
- AWS Organizations: tutorials/aws/organizations.md
- AWS Regions and Partitions: tutorials/aws/regions-and-partitions.md
- Tag-based Scan: tutorials/aws/tag-based-scan.md
@@ -161,6 +162,7 @@ nav:
- Integration Tests: developer-guide/integration-testing.md
- Debugging: developer-guide/debugging.md
- Configurable Checks: developer-guide/configurable-checks.md
- Check Metadata Writing Guidelines: developer-guide/check-metadata-guidelines.md
- Security: security.md
- Contact Us: contact.md
- Troubleshooting: troubleshooting.md

View File

@@ -5,8 +5,13 @@ All notable changes to the **Prowler SDK** are documented in this file.
## [v5.13.0] (Prowler UNRELEASED)
### Added
- Support for AdditionalURLs in outputs [(#8651)](https://github.com/prowler-cloud/prowler/pull/8651)
- Support for markdown metadata fields in Dashboard [(#8667)](https://github.com/prowler-cloud/prowler/pull/8667)
- Equality validation for CheckID, filename and classname [(#8690)](https://github.com/prowler-cloud/prowler/pull/8690)
### Changed
- Update AWS Neptune service metadata to new format [(#8494)](https://github.com/prowler-cloud/prowler/pull/8494)
- Update AWS Config service metadata to new format [(#8641)](https://github.com/prowler-cloud/prowler/pull/8641)
### Fixed
@@ -14,6 +19,8 @@ All notable changes to the **Prowler SDK** are documented in this file.
### Fixed
- Replaced old check id with new ones for compliance files [(#8682)](https://github.com/prowler-cloud/prowler/pull/8682)
- `firehose_stream_encrypted_at_rest` check false positives and new api call in kafka service [(#8599)](https://github.com/prowler-cloud/prowler/pull/8599)
- Replace defender rules policies key to use old name [(#8702)](https://github.com/prowler-cloud/prowler/pull/8702)
## [v5.12.0] (Prowler v5.12.0)

View File

@@ -8,6 +8,7 @@ from enum import Enum
from typing import Any, Dict, Optional, Set
from pydantic.v1 import BaseModel, Field, ValidationError, validator
from pydantic.v1.error_wrappers import ErrorWrapper
from prowler.config.config import Provider
from prowler.lib.check.compliance_models import Compliance
@@ -436,17 +437,31 @@ class Check(ABC, CheckMetadata):
def __init__(self, **data):
"""Check's init function. Calls the CheckMetadataModel init."""
file_path = os.path.abspath(sys.modules[self.__module__].__file__)[:-3]
# Parse the Check's metadata file
metadata_file = (
os.path.abspath(sys.modules[self.__module__].__file__)[:-3]
+ ".metadata.json"
)
metadata_file = file_path + ".metadata.json"
# Store it to validate them with Pydantic
data = CheckMetadata.parse_file(metadata_file).dict()
# Calls parents init function
super().__init__(**data)
# TODO: verify that the CheckID is the same as the filename and classname
# to mimic the test done at test_<provider>_checks_metadata_is_valid
# Verify names consistency
check_id = self.CheckID
class_name = self.__class__.__name__
file_name = file_path.split(sep="/")[-1]
errors = []
if check_id != class_name:
errors.append(f"CheckID '{check_id}' != class name '{class_name}'")
if check_id != file_name:
errors.append(f"CheckID '{check_id}' != file name '{file_name}'")
if errors:
formatted_errors = [
ErrorWrapper(ValueError(err), loc=("CheckID",)) for err in errors
]
raise ValidationError(formatted_errors, model=CheckMetadata)
def metadata(self) -> dict:
"""Return the JSON representation of the check's metadata"""

View File

@@ -79,6 +79,9 @@ class CSV(Output):
finding_dict["RELATED_TO"] = unroll_list(finding.metadata.RelatedTo)
finding_dict["NOTES"] = finding.metadata.Notes
finding_dict["PROWLER_VERSION"] = finding.prowler_version
finding_dict["ADDITIONAL_URLS"] = unroll_list(
finding.metadata.AdditionalURLs
)
self._data.append(finding_dict)
except Exception as error:
logger.error(

View File

@@ -92,10 +92,6 @@ class OCSF(Output):
filter(
None,
[
finding.metadata.Remediation.Code.NativeIaC,
finding.metadata.Remediation.Code.Terraform,
finding.metadata.Remediation.Code.CLI,
finding.metadata.Remediation.Code.Other,
finding.metadata.Remediation.Recommendation.Url,
],
)
@@ -163,6 +159,7 @@ class OCSF(Output):
"categories": finding.metadata.Categories,
"depends_on": finding.metadata.DependsOn,
"related_to": finding.metadata.RelatedTo,
"additional_urls": finding.metadata.AdditionalURLs,
"notes": finding.metadata.Notes,
"compliance": finding.compliance,
},

View File

@@ -1,31 +1,39 @@
{
"Provider": "aws",
"CheckID": "config_recorder_all_regions_enabled",
"CheckTitle": "Ensure AWS Config is enabled in all regions.",
"CheckTitle": "AWS Config recorder is enabled and not in failure state or disabled",
"CheckType": [
"Logging and Monitoring"
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"
],
"ServiceName": "config",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:access-recorder:region:account-id:recorder/resource-id",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "Other",
"Description": "Ensure AWS Config is enabled in all regions.",
"Risk": "The AWS configuration item history captured by AWS Config enables security analysis, resource change tracking and compliance auditing.",
"RelatedUrl": "https://aws.amazon.com/blogs/mt/aws-config-best-practices/",
"Description": "**AWS accounts** have **AWS Config recorders** active and healthy in each Region. It identifies Regions with no recorder, a disabled recorder, or a recorder in a failure state.",
"Risk": "**Gaps in Config recording** create **blind spots**. Changes in unmonitored Regions aren't captured, weakening **integrity** and **auditability**. Adversaries can alter resources or stage assets unnoticed, enabling misconfigurations and delaying **incident response**.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://repost.aws/es/questions/QUGcgeerhcTamRkwgdwh_tLQ/enable-aws-config",
"https://www.tenable.com/audits/items/CIS_Amazon_Web_Services_Foundations_v1.5.0_L2.audit:6a5136528bd329139e5969f8f1e5ffbc",
"https://aws.amazon.com/blogs/mt/aws-config-best-practices/"
],
"Remediation": {
"Code": {
"CLI": "aws configservice subscribe --s3-bucket <S3_BUCKET> --sns-topic <TOPIC_ARN>--iam-role <ROLE_ARN> ",
"NativeIaC": "",
"Other": "https://docs.prowler.com/checks/aws/logging-policies/logging_5-enable-aws-config-regions",
"Terraform": "https://docs.prowler.com/checks/aws/logging-policies/logging_5-enable-aws-config-regions#terraform"
"CLI": "",
"NativeIaC": "```yaml\nResources:\n example_resource_recorder:\n Type: AWS::Config::ConfigurationRecorder\n Properties:\n Name: example_resource\n RoleARN: !Sub arn:aws:iam::${AWS::AccountId}:role/aws-service-role/config.amazonaws.com/AWSServiceRoleForConfig\n\n example_resource_channel:\n Type: AWS::Config::DeliveryChannel\n Properties:\n S3BucketName: example_resource\n\n example_resource_status:\n Type: AWS::Config::ConfigurationRecorderStatus\n Properties:\n Name: example_resource\n Recording: true # This line fixes the security issue\n DependsOn:\n - example_resource_channel\n```",
"Other": "1. In the AWS Console, go to Config\n2. Click Set up AWS Config (or Settings)\n3. Select a resource recording option (any) and choose an existing S3 bucket for delivery\n4. Keep the default AWSServiceRoleForConfig role\n5. Click Confirm/Turn on to start recording\n6. Verify on the Settings page that Status shows Recording and not Failure",
"Terraform": "```hcl\nresource \"aws_iam_service_linked_role\" \"example_resource\" {\n aws_service_name = \"config.amazonaws.com\"\n}\n\nresource \"aws_config_configuration_recorder\" \"example_resource\" {\n name = \"example_resource\"\n role_arn = aws_iam_service_linked_role.example_resource.arn\n}\n\nresource \"aws_config_delivery_channel\" \"example_resource\" {\n s3_bucket_name = \"example_resource\"\n}\n\nresource \"aws_config_configuration_recorder_status\" \"example_resource\" {\n name = aws_config_configuration_recorder.example_resource.name\n is_recording = true # This line fixes the security issue\n depends_on = [aws_config_delivery_channel.example_resource]\n}\n```"
},
"Recommendation": {
"Text": "It is recommended to enable AWS Config in all regions.",
"Url": "https://aws.amazon.com/blogs/mt/aws-config-best-practices/"
"Text": "Enable **AWS Config** in every Region with continuous recording and maintain healthy recorder status.",
"Url": "https://hub.prowler.com/check/config_recorder_all_regions_enabled"
}
},
"Categories": [
"logging",
"forensics-ready"
],
"DependsOn": [],

View File

@@ -1,31 +1,38 @@
{
"Provider": "aws",
"CheckID": "config_recorder_using_aws_service_role",
"CheckTitle": "Ensure Config Recorder is using service-linked AWS Config role",
"CheckTitle": "AWS Config recorder uses the AWSServiceRoleForConfig service-linked role",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices/AWS Foundational Security Best Practices"
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices"
],
"ServiceName": "config",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:access-recorder:region:account-id:recorder/resource-id",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "Other",
"Description": "AWS Config uses an IAM role to access other AWS services. This role should be AWSServiceRoleForConfig, not a custom role. Using AWSServiceRoleForConfig ensures that the Config recorder has the necessary permissions to record configuration changes and that the role is managed by AWS, reducing the risk of misconfiguration.",
"Risk": "If the Config recorder is not using AWSServiceRoleForConfig, it may not have the necessary permissions to record configuration changes, which could lead in not following the principle of least privilege, which could lead to misconfiguration and potential security vulnerabilities.",
"RelatedUrl": "https://docs.aws.amazon.com/config/latest/developerguide/using-service-linked-roles.html",
"Description": "**AWS Config recorders** are evaluated for use of the service-linked IAM role `AWSServiceRoleForConfig` linked to `config.amazonaws.com` rather than a custom role.\n\nThe evaluation inspects active recorders and their role ARN to confirm the AWS-managed service-linked role is in use.",
"Risk": "Using a custom or incorrect role can break recording or create blind spots, undermining the **integrity** and **availability** of configuration history. Overprivileged roles weaken **least privilege**, increasing risk of unauthorized access, stealthy changes, and delayed incident response.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/securityhub/latest/userguide/config-controls.html#config-1",
"https://docs.aws.amazon.com/config/latest/developerguide/using-service-linked-roles.html"
],
"Remediation": {
"Code": {
"CLI": "aws configservice put-configuration-recorder --configuration-recorder- name=<recorder-name>,roleARN=arn:<audited_partition>:iam::<account_number>:role/aws-service-role/config.amazonaws.com/AWSServiceRoleForConfig",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/config-controls.html#config-1",
"Terraform": ""
"CLI": "aws configservice put-configuration-recorder --configuration-recorder name=<RECORDER_NAME>,roleARN=arn:<PARTITION>:iam::<ACCOUNT_ID>:role/aws-service-role/config.amazonaws.com/AWSServiceRoleForConfig",
"NativeIaC": "```yaml\nResources:\n example_resource:\n Type: AWS::Config::ConfigurationRecorder\n Properties:\n Name: example_resource\n RoleARN: arn:<PARTITION>:iam::<ACCOUNT_ID>:role/aws-service-role/config.amazonaws.com/AWSServiceRoleForConfig # This line fixes the security issue\n```",
"Other": "1. Open the AWS Console and go to AWS Config\n2. Choose Settings (or Recording) and click Edit\n3. For IAM role, select Use service-linked role (AWSServiceRoleForConfig)\n4. Save changes",
"Terraform": "```hcl\nresource \"aws_config_configuration_recorder\" \"example_resource\" {\n name = \"example_resource\"\n role_arn = \"arn:<PARTITION>:iam::<ACCOUNT_ID>:role/aws-service-role/config.amazonaws.com/AWSServiceRoleForConfig\" # This line fixes the security issue\n}\n```"
},
"Recommendation": {
"Text": "Use service-linked role AWSServiceRoleForConfig for AWS Config recorders.",
"Url": "https://docs.aws.amazon.com/config/latest/developerguide/using-service-linked-roles.html"
"Text": "Use the AWS-managed service-linked role `AWSServiceRoleForConfig` for all recorders to enforce **least privilege** and consistent trust.\n\nAvoid custom roles; restrict who can modify the recorder or role; monitor for drift and ensure recording remains enabled as part of **defense in depth**.",
"Url": "https://hub.prowler.com/check/config_recorder_using_aws_service_role"
}
},
"Categories": [],
"Categories": [
"identity-access"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -3,6 +3,7 @@ from typing import List
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.firehose.firehose_client import firehose_client
from prowler.providers.aws.services.firehose.firehose_service import EncryptionStatus
from prowler.providers.aws.services.kafka.kafka_client import kafka_client
from prowler.providers.aws.services.kinesis.kinesis_client import kinesis_client
from prowler.providers.aws.services.kinesis.kinesis_service import EncryptionType
@@ -37,7 +38,28 @@ class firehose_stream_encrypted_at_rest(Check):
report.status = "PASS"
report.status_extended = f"Firehose Stream {stream.name} does not have at rest encryption enabled but the source stream {source_stream.name} has at rest encryption enabled."
# Check if the stream has encryption enabled directly
# MSK source - check if the MSK cluster has encryption at rest with CMK
elif stream.delivery_stream_type == "MSKAsSource":
msk_cluster_arn = stream.source.msk.msk_cluster_arn
if msk_cluster_arn:
msk_cluster = None
for cluster in kafka_client.clusters.values():
if cluster.arn == msk_cluster_arn:
msk_cluster = cluster
break
if msk_cluster:
# All MSK clusters (both provisioned and serverless) always have encryption at rest enabled by AWS
# AWS MSK always encrypts data at rest - either with AWS managed keys or CMK
report.status = "PASS"
if msk_cluster.kafka_version == "SERVERLESS":
report.status_extended = f"Firehose Stream {stream.name} uses MSK serverless source which always has encryption at rest enabled by default."
else:
report.status_extended = f"Firehose Stream {stream.name} uses MSK provisioned source which always has encryption at rest enabled by AWS (either with AWS managed keys or CMK)."
else:
report.status_extended = f"Firehose Stream {stream.name} uses MSK source which always has encryption at rest enabled by AWS."
# Check if the stream has encryption enabled directly (DirectPut or DatabaseAsSource cases)
elif stream.kms_encryption == EncryptionStatus.ENABLED:
report.status = "PASS"
report.status_extended = f"Firehose Stream {stream.name} does have at rest encryption enabled."

View File

@@ -12,7 +12,12 @@ class kafka_cluster_encryption_at_rest_uses_cmk(Check):
report.status = "FAIL"
report.status_extended = f"Kafka cluster '{cluster.name}' does not have encryption at rest enabled with a CMK."
if any(
# Serverless clusters always have encryption at rest enabled by default
if cluster.kafka_version == "SERVERLESS":
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' is serverless and always has encryption at rest enabled by default."
# For provisioned clusters, check if they use a customer managed KMS key
elif any(
(
cluster.data_volume_kms_key_id == key.arn
and getattr(key, "manager", "") == "CUSTOMER"

View File

@@ -13,7 +13,12 @@ class kafka_cluster_enhanced_monitoring_enabled(Check):
f"Kafka cluster '{cluster.name}' has enhanced monitoring enabled."
)
if cluster.enhanced_monitoring == "DEFAULT":
# Serverless clusters always have enhanced monitoring enabled by default
if cluster.kafka_version == "SERVERLESS":
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' is serverless and always has enhanced monitoring enabled by default."
# For provisioned clusters, check the enhanced monitoring configuration
elif cluster.enhanced_monitoring == "DEFAULT":
report.status = "FAIL"
report.status_extended = f"Kafka cluster '{cluster.name}' does not have enhanced monitoring enabled."

View File

@@ -11,7 +11,12 @@ class kafka_cluster_in_transit_encryption_enabled(Check):
report.status = "FAIL"
report.status_extended = f"Kafka cluster '{cluster.name}' does not have encryption in transit enabled."
if (
# Serverless clusters always have encryption in transit enabled by default
if cluster.kafka_version == "SERVERLESS":
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' is serverless and always has encryption in transit enabled by default."
# For provisioned clusters, check the encryption configuration
elif (
cluster.encryption_in_transit.client_broker == "TLS"
and cluster.encryption_in_transit.in_cluster
):

View File

@@ -13,7 +13,12 @@ class kafka_cluster_is_public(Check):
f"Kafka cluster {cluster.name} is publicly accessible."
)
if not cluster.public_access:
# Serverless clusters are always private by default
if cluster.kafka_version == "SERVERLESS":
report.status = "PASS"
report.status_extended = f"Kafka cluster {cluster.name} is serverless and always private by default."
# For provisioned clusters, check the public access configuration
elif not cluster.public_access:
report.status = "PASS"
report.status_extended = (
f"Kafka cluster {cluster.name} is not publicly accessible."

View File

@@ -11,7 +11,12 @@ class kafka_cluster_mutual_tls_authentication_enabled(Check):
report.status = "FAIL"
report.status_extended = f"Kafka cluster '{cluster.name}' does not have mutual TLS authentication enabled."
if cluster.tls_authentication:
# Serverless clusters always have TLS authentication enabled by default
if cluster.kafka_version == "SERVERLESS":
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' is serverless and always has TLS authentication enabled by default."
# For provisioned clusters, check the TLS configuration
elif cluster.tls_authentication:
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' has mutual TLS authentication enabled."

View File

@@ -13,7 +13,12 @@ class kafka_cluster_unrestricted_access_disabled(Check):
f"Kafka cluster '{cluster.name}' has unrestricted access enabled."
)
if not cluster.unauthentication_access:
# Serverless clusters always require authentication by default
if cluster.kafka_version == "SERVERLESS":
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' is serverless and always requires authentication by default."
# For provisioned clusters, check the unauthenticated access configuration
elif not cluster.unauthentication_access:
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' does not have unrestricted access enabled."

View File

@@ -13,7 +13,12 @@ class kafka_cluster_uses_latest_version(Check):
f"Kafka cluster '{cluster.name}' is using the latest version."
)
if cluster.kafka_version != kafka_client.kafka_versions[-1].version:
# Serverless clusters don't have specific Kafka versions - AWS manages them automatically
if cluster.kafka_version == "SERVERLESS":
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' is serverless and AWS automatically manages the Kafka version."
# For provisioned clusters, check if they're using the latest version
elif cluster.kafka_version != kafka_client.kafka_versions[-1].version:
report.status = "FAIL"
report.status_extended = (
f"Kafka cluster '{cluster.name}' is not using the latest version."

View File

@@ -15,61 +15,133 @@ class Kafka(AWSService):
self.__threading_call__(self._list_kafka_versions)
def _list_clusters(self, regional_client):
logger.info(f"Kafka - Listing clusters in region {regional_client.region}...")
try:
cluster_paginator = regional_client.get_paginator("list_clusters")
# Use list_clusters_v2 to support both provisioned and serverless clusters
cluster_paginator = regional_client.get_paginator("list_clusters_v2")
logger.info(
f"Kafka - Paginator created for region {regional_client.region}"
)
for page in cluster_paginator.paginate():
logger.info(
f"Kafka - Processing page with {len(page.get('ClusterInfoList', []))} clusters in region {regional_client.region}"
)
for cluster in page["ClusterInfoList"]:
logger.info(
f"Kafka - Found cluster: {cluster.get('ClusterName', 'Unknown')} in region {regional_client.region}"
)
arn = cluster.get(
"ClusterArn",
f"{self.account_arn_template}/{cluster.get('ClusterName', '')}",
)
cluster_type = cluster.get("ClusterType", "UNKNOWN")
if not self.audit_resources or is_resource_filtered(
arn, self.audit_resources
):
self.clusters[cluster.get("ClusterArn", "")] = Cluster(
id=arn.split(":")[-1].split("/")[-1],
name=cluster.get("ClusterName", ""),
arn=arn,
region=regional_client.region,
tags=list(cluster.get("Tags", {})),
state=cluster.get("State", ""),
kafka_version=cluster.get(
"CurrentBrokerSoftwareInfo", {}
).get("KafkaVersion", ""),
data_volume_kms_key_id=cluster.get("EncryptionInfo", {})
.get("EncryptionAtRest", {})
.get("DataVolumeKMSKeyId", ""),
encryption_in_transit=EncryptionInTransit(
client_broker=cluster.get("EncryptionInfo", {})
.get("EncryptionInTransit", {})
.get("ClientBroker", "PLAINTEXT"),
in_cluster=cluster.get("EncryptionInfo", {})
.get("EncryptionInTransit", {})
.get("InCluster", False),
),
tls_authentication=cluster.get("ClientAuthentication", {})
.get("Tls", {})
.get("Enabled", False),
public_access=cluster.get("BrokerNodeGroupInfo", {})
.get("ConnectivityInfo", {})
.get("PublicAccess", {})
.get("Type", "SERVICE_PROVIDED_EIPS")
!= "DISABLED",
unauthentication_access=cluster.get(
"ClientAuthentication", {}
# Handle provisioned clusters
if cluster_type == "PROVISIONED" and "Provisioned" in cluster:
provisioned = cluster["Provisioned"]
self.clusters[cluster.get("ClusterArn", "")] = Cluster(
id=arn.split(":")[-1].split("/")[-1],
name=cluster.get("ClusterName", ""),
arn=arn,
region=regional_client.region,
tags=(
list(cluster.get("Tags", {}).values())
if cluster.get("Tags")
else []
),
state=cluster.get("State", ""),
kafka_version=provisioned.get(
"CurrentBrokerSoftwareInfo", {}
).get("KafkaVersion", ""),
data_volume_kms_key_id=provisioned.get(
"EncryptionInfo", {}
)
.get("EncryptionAtRest", {})
.get("DataVolumeKMSKeyId", ""),
encryption_in_transit=EncryptionInTransit(
client_broker=provisioned.get("EncryptionInfo", {})
.get("EncryptionInTransit", {})
.get("ClientBroker", "PLAINTEXT"),
in_cluster=provisioned.get("EncryptionInfo", {})
.get("EncryptionInTransit", {})
.get("InCluster", False),
),
tls_authentication=provisioned.get(
"ClientAuthentication", {}
)
.get("Tls", {})
.get("Enabled", False),
public_access=provisioned.get("BrokerNodeGroupInfo", {})
.get("ConnectivityInfo", {})
.get("PublicAccess", {})
.get("Type", "SERVICE_PROVIDED_EIPS")
!= "DISABLED",
unauthentication_access=provisioned.get(
"ClientAuthentication", {}
)
.get("Unauthenticated", {})
.get("Enabled", False),
enhanced_monitoring=provisioned.get(
"EnhancedMonitoring", "DEFAULT"
),
)
.get("Unauthenticated", {})
.get("Enabled", False),
enhanced_monitoring=cluster.get(
"EnhancedMonitoring", "DEFAULT"
),
logger.info(
f"Kafka - Added provisioned cluster {cluster.get('ClusterName', 'Unknown')} to clusters dict"
)
# Handle serverless clusters
elif cluster_type == "SERVERLESS" and "Serverless" in cluster:
# For serverless clusters, encryption is always enabled by default
# We'll create a Cluster object with default encryption values
self.clusters[cluster.get("ClusterArn", "")] = Cluster(
id=arn.split(":")[-1].split("/")[-1],
name=cluster.get("ClusterName", ""),
arn=arn,
region=regional_client.region,
tags=(
list(cluster.get("Tags", {}).values())
if cluster.get("Tags")
else []
),
state=cluster.get("State", ""),
kafka_version="SERVERLESS", # Serverless doesn't have specific Kafka version
data_volume_kms_key_id="AWS_MANAGED", # Serverless uses AWS managed keys
encryption_in_transit=EncryptionInTransit(
client_broker="TLS", # Serverless always has TLS enabled
in_cluster=True, # Serverless always has in-cluster encryption
),
tls_authentication=True, # Serverless always has TLS authentication
public_access=False, # Serverless clusters are always private
unauthentication_access=False, # Serverless requires authentication
enhanced_monitoring="DEFAULT",
)
logger.info(
f"Kafka - Added serverless cluster {cluster.get('ClusterName', 'Unknown')} to clusters dict"
)
else:
logger.warning(
f"Kafka - Unknown cluster type {cluster_type} for cluster {cluster.get('ClusterName', 'Unknown')}"
)
else:
logger.info(
f"Kafka - Cluster {cluster.get('ClusterName', 'Unknown')} filtered out by audit_resources"
)
logger.info(
f"Kafka - Total clusters found in region {regional_client.region}: {len(self.clusters)}"
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
logger.error(
f"Kafka - Error details in region {regional_client.region}: {str(error)}"
)
def _list_kafka_versions(self, regional_client):
try:

View File

@@ -1,29 +1,39 @@
{
"Provider": "aws",
"CheckID": "neptune_cluster_backup_enabled",
"CheckTitle": "Check for Neptune Clusters Backup Retention Period.",
"CheckType": [],
"CheckTitle": "Neptune cluster has automated backups enabled with retention period equal to or greater than the configured minimum",
"CheckType": [
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices"
],
"ServiceName": "neptune",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:rds:region:account-id:db-instance",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsRdsDbCluster",
"Description": "Check if Neptune Clusters have backup enabled.",
"Risk": "Ensure that your Amazon Neptune graph database clusters have set a minimum backup retention period of 7 days or greater in order to achieve your organization compliance requirements. The retention period represents the number of days to retain automated snapshots.",
"RelatedUrl": "https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-5",
"Description": "Neptune DB cluster automated backup is enabled and retention days are more than the required minimum retention period (default to `7` days).",
"Risk": "**Insufficient backup retention** reduces the ability to recover from data corruption, accidental deletion, or ransomware, impacting **availability** and **integrity**.\n\n- Prevents point-in-time recovery to required dates\n- Increases downtime, irreversible data loss, and compliance violations",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-5",
"https://trendmicro.com/cloudoneconformity/knowledge-base/aws/Neptune/sufficient-backup-retention-period.html",
"https://support.icompaas.com/support/solutions/articles/62000233327-check-for-neptune-clusters-backup-retention-period",
"https://asecure.cloud/a/p_configrule_neptune_cluster_backup_retention_check/"
],
"Remediation": {
"Code": {
"CLI": "aws neptune modify-db-cluster --db-cluster-identifier <DB_CLUSTER_ID> --backup-retention-period 7",
"NativeIaC": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Neptune/sufficient-backup-retention-period.html#",
"Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Neptune/sufficient-backup-retention-period.html#",
"Terraform": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Neptune/sufficient-backup-retention-period.html#"
"CLI": "aws neptune modify-db-cluster --db-cluster-identifier <DB_CLUSTER_ID> --backup-retention-period 7 --apply-immediately",
"NativeIaC": "```yaml\nParameters:\n DBClusterId:\n Type: String\nResources:\n NeptuneCluster:\n Type: AWS::Neptune::DBCluster\n Properties:\n DBClusterIdentifier: !Ref DBClusterId\n BackupRetentionPeriod: 7 # Enable automated backups with 7-day retention minimum\n```",
"Terraform": "```hcl\nresource \"aws_neptune_cluster\" \"example_resource\" {\n cluster_identifier = var.cluster_id\n backup_retention_period = 7 # Enable automated backups with 7-day retention minimum\n}\n```",
"Other": "1. Sign in to the AWS Management Console\n2. Services → Amazon Neptune → Databases\n3. Select the DB cluster and click Modify\n4. In Backup retention period set the value to 7 (or higher)\n5. Choose Apply immediately and click Modify cluster"
},
"Recommendation": {
"Text": "Enable automated backup for production data. Define a retention period and periodically test backup restoration. A Disaster Recovery process should be in place to govern Data Protection approach.",
"Url": "https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-5"
"Text": "Ensure automated backups are enabled and retention aligns with your **RPO/RTO** and regulatory requirements (at least `7` days).\n\n- Define backup lifecycle and storage retention policies\n- Regularly test restore procedures and monitor backup health\n- Incorporate backups into Disaster Recovery and retention governance",
"Url": "https://hub.prowler.com/check/neptune_cluster_backup_enabled"
}
},
"Categories": [],
"Categories": [
"resilience"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -1,33 +1,37 @@
{
"Provider": "aws",
"CheckID": "neptune_cluster_copy_tags_to_snapshots",
"CheckTitle": "Check if Neptune DB clusters are configured to copy tags to snapshots.",
"CheckTitle": "Neptune DB cluster is configured to copy tags to snapshots.",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"ServiceName": "neptune",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:rds:region:account-id:cluster:db-cluster-identifier",
"ResourceIdTemplate": "",
"Severity": "low",
"ResourceType": "AwsRdsDbCluster",
"Description": "This check ensures that Neptune DB clusters are configured to copy all tags to snapshots when the snapshots are created.",
"Risk": "If tags are not copied to snapshots, the snapshots may lack necessary metadata for identification, governance, and access control, leading to potential mismanagement and security risks.",
"RelatedUrl": "https://docs.aws.amazon.com/neptune/latest/userguide/tagging.html#tagging-overview",
"Description": "Neptune DB cluster is configured to copy all tags to snapshots when snapshots are created.",
"Risk": "**Missing snapshot tags** weakens governance across confidentiality, integrity, and availability.\n\n- **Access control**: Tag-based IAM conditions may not apply to snapshots, enabling unauthorized restore or copy\n- **Operational**: Recovery, retention, and cost tracking can fail due to unidentifiable or orphaned snapshots",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/neptune/latest/userguide/tagging.html#tagging-overview",
"https://www.cloudanix.com/docs/aws/audit/rdsmonitoring/rules/neptune_cluster_copy_tags_to_snapshot_enabled",
"https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-8",
"https://docs.prismacloud.io/en/enterprise-edition/policy-reference/aws-policies/aws-general-policies/bc-aws-2-60"
],
"Remediation": {
"Code": {
"CLI": "aws neptune modify-db-cluster --db-cluster-identifier <db-cluster-identifier> --copy-tags-to-snapshot --apply-immediately",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-8",
"Terraform": ""
"CLI": "aws neptune modify-db-cluster --db-cluster-identifier <DB_CLUSTER_ID> --copy-tags-to-snapshot --apply-immediately",
"NativeIaC": "```yaml\nResources:\n NeptuneCluster:\n Type: AWS::RDS::DBCluster\n Properties:\n DBClusterIdentifier: <DB_CLUSTER_ID>\n EngineVersion: neptune\n CopyTagsToSnapshot: true # Inherit tags for snapshot governance and access control\n```",
"Terraform": "```hcl\nresource \"aws_neptune_cluster\" \"example_resource\" {\n cluster_identifier = \"<DB_CLUSTER_ID>\"\n copy_tags_to_snapshot = true # Inherit tags for snapshot governance and access control\n}\n```",
"Other": "1. Sign in to the AWS Management Console and open Amazon Neptune\n2. Click Clusters and select the cluster\n3. Click Modify\n4. In Backup, enable \"Copy tags to snapshots\"\n5. Check \"Apply immediately\"\n6. Click Modify Cluster"
},
"Recommendation": {
"Text": "Configure your Neptune DB clusters to copy tags to snapshots when the snapshots are created.",
"Url": "https://docs.aws.amazon.com/neptune/latest/userguide/tagging.html#tagging-overview"
"Text": "Preserve metadata by enabling tag inheritance for snapshots and enforcing a consistent tagging strategy.\n\n- Adopt a standardized tag taxonomy\n- Use tag-based access controls and apply least privilege\n- Automate tagging and policy checks in provisioning to prevent untagged snapshots",
"Url": "https://hub.prowler.com/check/neptune_cluster_copy_tags_to_snapshots"
}
},
"Categories": [
"trustboundaries"
],
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -1,29 +1,38 @@
{
"Provider": "aws",
"CheckID": "neptune_cluster_deletion_protection",
"CheckTitle": "Check if Neptune Clusters storage has deletion protection enabled.",
"CheckType": [],
"CheckTitle": "Neptune cluster has deletion protection enabled",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices",
"Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Effects/Data Destruction"
],
"ServiceName": "neptune",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:rds:region:account-id:db-cluster",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsRdsDbCluster",
"Description": "Check if Neptune Clusters storage has deletion protection enabled.",
"Risk": "Enabling cluster deletion protection offers an additional layer of protection against accidental database deletion or deletion by an unauthorized user. A Neptune DB cluster can't be deleted while deletion protection is enabled. You must first disable deletion protection before a delete request can succeed.",
"RelatedUrl": "https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-4",
"ResourceType": "Other",
"Description": "Neptune DB cluster has **deletion protection** enabled.",
"Risk": "Absence of **deletion protection** weakens **availability** and **integrity**: clusters can be removed by accidental admin actions, rogue automation, or compromised credentials.\n\nCluster deletion causes immediate service outage, potential permanent data loss, and extended recovery time if backups or restores are insufficient.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-4"
],
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "",
"Other": "",
"Terraform": ""
"CLI": "aws neptune modify-db-cluster --db-cluster-identifier <DB_CLUSTER_IDENTIFIER> --deletion-protection --apply-immediately",
"NativeIaC": "```yaml\nResources:\n NeptuneCluster:\n Type: AWS::Neptune::DBCluster\n Properties:\n DBClusterIdentifier: <CLUSTER_ID>\n DeletionProtection: true # Prevent accidental or malicious cluster deletion\n```",
"Terraform": "```hcl\nresource \"aws_neptune_cluster\" \"example_resource\" {\n cluster_identifier = \"<CLUSTER_ID>\"\n deletion_protection = true # Prevent accidental or malicious cluster deletion\n}\n```",
"Other": "1. Sign in to the AWS Management Console and open Amazon Neptune\n2. In the navigation pane, choose Databases\n3. Select the DB cluster and choose Modify\n4. Enable Deletion protection\n5. Choose Apply immediately (if shown) and then Modify DB cluster"
},
"Recommendation": {
"Text": "Enable deletion protection for production Neptune Clusters.",
"Url": "https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-4"
"Text": "Enable **deletion protection** for production Neptune clusters and apply the principles of **least privilege** and **separation of duties** for delete operations.\n\nEnforce change-control approvals, restrict delete permissions to audited roles, and limit automated workflows that can perform destructive actions to prevent accidental or malicious deletions.",
"Url": "https://hub.prowler.com/check/neptune_cluster_deletion_protection"
}
},
"Categories": [],
"Categories": [
"resilience"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -1,29 +1,41 @@
{
"Provider": "aws",
"CheckID": "neptune_cluster_iam_authentication_enabled",
"CheckTitle": "Check if Neptune Clusters have IAM authentication enabled.",
"CheckType": [],
"CheckTitle": "Neptune cluster has IAM authentication enabled",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"TTPs/Credential Access"
],
"ServiceName": "neptune",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:rds:region:account-id:db-cluster",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsRdsDbCluster",
"Description": "Check if Neptune Clusters have IAM authentication enabled.",
"Risk": "Ensure that IAM Database Authentication feature is enabled for your Amazon Neptune database clusters in order to make use of AWS Identity and Access Management (IAM) service to manage database access.",
"RelatedUrl": "https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-7",
"Description": "Neptune DB clusters are evaluated for **IAM database authentication**. \n\nIf this setting is enabled, the cluster supports IAM-based authentication.\nIf disabled, the cluster requires traditional database credentials instead.",
"Risk": "**Disabled IAM database authentication** weakens confidentiality and integrity of the database.\n\n- Static or embedded DB credentials can be stolen or reused, enabling unauthorized queries and data exfiltration\n- Attackers may bypass centralized access controls, escalate privileges, and move laterally without IAM-based audit trails",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-7",
"https://docs.aws.amazon.com/config/latest/developerguide/neptune-cluster-iam-database-authentication.html",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Neptune/iam-db-authentication.html#",
"https://hub.steampipe.io/plugins/turbot/terraform/queries/neptune/neptune_cluster_iam_authentication_enabled"
],
"Remediation": {
"Code": {
"CLI": "aws neptune modify-db-cluster --db-cluster-identifier <DB_CLUSTER_ID> --enable-iam-database-authentication",
"NativeIaC": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Neptune/iam-db-authentication.html#",
"Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Neptune/iam-db-authentication.html#",
"Terraform": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Neptune/iam-db-authentication.html#"
"CLI": "aws neptune modify-db-cluster --db-cluster-identifier <DB_CLUSTER_ID> --enable-iam-database-authentication --apply-immediately",
"NativeIaC": "```yaml\nResources:\n NeptuneCluster:\n Type: AWS::Neptune::DBCluster\n Properties:\n DBClusterIdentifier: <DB_CLUSTER_ID>\n IamAuthEnabled: true # Enable IAM authentication instead of static DB credentials\n```",
"Terraform": "```hcl\nresource \"aws_neptune_cluster\" \"example_resource\" {\n cluster_identifier = \"<DB_CLUSTER_ID>\"\n iam_database_authentication_enabled = true # Enable IAM authentication instead of static DB credentials\n}\n```",
"Other": "1. Sign in to the AWS Management Console and open Amazon Neptune > Databases\n2. Select the DB cluster and choose **Actions** > **Modify**\n3. In **Authentication**, enable **IAM DB authentication** and check **Apply immediately**\n4. Click **Continue** then **Modify DB cluster**"
},
"Recommendation": {
"Text": "Enable IAM authentication for Neptune Clusters.",
"Url": "https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-7"
"Text": "Adopt **IAM database authentication** and centralized identity management to remove static DB credentials and improve auditability.\n\n- Enforce **least privilege** for database roles\n- Use short-lived credentials, centralized rotation and logging\n- Apply defense-in-depth and integrate DB access with IAM for accountability",
"Url": "https://hub.prowler.com/check/neptune_cluster_iam_authentication_enabled"
}
},
"Categories": [],
"Categories": [
"identity-access"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -1,32 +1,40 @@
{
"Provider": "aws",
"CheckID": "neptune_cluster_integration_cloudwatch_logs",
"CheckTitle": "Check if Neptune Clusters have audit cloudwatch logs enabled.",
"CheckTitle": "Neptune cluster has CloudWatch audit logs enabled",
"CheckType": [
"Software and Configuration Checks, AWS Security Best Practices"
"Software and Configuration Checks/AWS Security Best Practices/Runtime Behavior Analysis",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices"
],
"ServiceName": "neptune",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:rds:region:account-id:db-cluster",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsRdsDbCluster",
"Description": "Check if Neptune Clusters have audit cloudwatch logs enabled.",
"Risk": "If audit logs are not enabled, it is difficult to determine the root cause of security incidents.",
"RelatedUrl": "https://docs.aws.amazon.com/neptune/latest/userguide/auditing.html",
"ResourceType": "Other",
"Description": "Neptune DB cluster is inspected for CloudWatch export of **audit** events. The finding indicates whether the cluster publishes `audit` logs to CloudWatch; a failed status in the report means the `audit` export is not enabled and audit records are not being forwarded to CloudWatch for centralized logging and review.",
"Risk": "Missing **audit logs** reduces **detectability** and **accountability**: \n\n- Investigators cannot reconstruct queries, client origins, or timeline\n- Unauthorized queries, data exfiltration, or privilege misuse may go undetected\n\nThis degrades confidentiality and integrity and slows incident response.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/neptune/latest/userguide/auditing.html",
"https://docs.aws.amazon.com/neptune/latest/userguide/cloudwatch-logs.html",
"https://cloudanix.com/docs/aws/audit/rdsmonitoring/rules/neptune_cluster_cloudwatch_log_export_enabled_remediation",
"https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-2"
],
"Remediation": {
"Code": {
"CLI": "aws neptune modify-db-cluster --db-cluster-identifier <value> --cloudwatch-logs-export-configuration '{\"EnableLogTypes\":[\"audit\"]}'",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-2",
"Terraform": ""
"CLI": "aws neptune modify-db-cluster --db-cluster-identifier <DB_CLUSTER_IDENTIFIER> --cloudwatch-logs-export-configuration '{\"EnableLogTypes\":[\"audit\"]}'",
"NativeIaC": "```yaml\nResources:\n NeptuneCluster:\n Type: AWS::Neptune::DBCluster\n Properties:\n DBClusterIdentifier: \"<DB_CLUSTER_IDENTIFIER>\"\n EnableCloudwatchLogsExports:\n - audit # Export audit logs to CloudWatch for monitoring and forensics\n```",
"Terraform": "```hcl\nresource \"aws_neptune_cluster\" \"example_resource\" {\n cluster_identifier = \"<db_cluster_identifier>\"\n enabled_cloudwatch_logs_exports = [\"audit\"] # Export audit logs to CloudWatch for monitoring and forensics\n}\n```",
"Other": "1. Sign in to the AWS Management Console and open Amazon Neptune\n2. Go to Databases and select the Neptune DB cluster\n3. Actions > Modify\n4. In Log exports, check \"Audit\"\n5. Continue > Modify DB Cluster"
},
"Recommendation": {
"Text": "Enable audit logs for Neptune Clusters.",
"Url": "https://docs.aws.amazon.com/neptune/latest/userguide/cloudwatch-logs.html"
"Text": "Enable and centralize **audit logging** for Neptune by exporting `audit` events to CloudWatch Logs and integrating with monitoring or SIEM.\n\n- Enforce **least privilege** on log access\n- Configure retention, encryption, and alerting for anomalous queries\n\nThis supports proactive detection and forensic readiness.",
"Url": "https://hub.prowler.com/check/neptune_cluster_integration_cloudwatch_logs"
}
},
"Categories": [
"logging"
"logging",
"forensics-ready"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -1,30 +1,38 @@
{
"Provider": "aws",
"CheckID": "neptune_cluster_multi_az",
"CheckTitle": "Check if Neptune Clusters have multi-AZ enabled.",
"CheckType": [],
"CheckTitle": "Neptune cluster has Multi-AZ enabled",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Effects/Denial of Service"
],
"ServiceName": "neptune",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:rds:region:account-id:db-cluster",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsRdsDbCluster",
"Description": "Check if Neptune Clusters have multi-AZ enabled.",
"Risk": "Ensure that your Amazon Neptune graph database clusters are using Multi-AZ deployment configurations to enhance High Availability (HA) through automatic failover to read replicas in the event of a failure such as an Availability Zone (AZ) outage, an internal hardware or network outage, a software failure or in case of planned system maintenance.",
"RelatedUrl": "https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-9",
"Description": "Amazon Neptune DB clusters are evaluated for `Multi-AZ` deployment by checking whether the cluster has read-replica instances distributed across multiple Availability Zones.\n\nA failing result indicates the cluster is deployed in a single AZ and lacks read-replicas that enable automatic promotion and cross-AZ failover.",
"Risk": "**Single-AZ deployment** creates a clear availability single point of failure.\n\n- **Availability**: AZ outage or maintenance can cause prolonged downtime until the primary is rebuilt.\n- **Integrity/Recovery**: Manual recovery increases risk of configuration errors and longer RTOs, impacting operations and compliance.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-9",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Neptune/multi-az.html#"
],
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Neptune/multi-az.html#",
"Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Neptune/multi-az.html#",
"Terraform": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Neptune/multi-az.html#"
"NativeIaC": "```yaml\nResources:\n NeptuneCluster:\n Type: AWS::Neptune::DBCluster\n Properties:\n DBClusterIdentifier: \"<DB_CLUSTER_IDENTIFIER>\"\n # Deploy across multiple AZs for high availability and failover\n AvailabilityZones:\n - \"<AZ_1>\"\n - \"<AZ_2>\"\n - \"<AZ_3>\"\n```",
"Terraform": "```hcl\nresource \"aws_neptune_cluster\" \"example\" {\n cluster_identifier = \"<db_cluster_identifier>\"\n availability_zones = [\"<AZ_1>\", \"<AZ_2>\", \"<AZ_3>\"] # Deploy across multiple AZs for high availability\n}\n```",
"Other": ""
},
"Recommendation": {
"Text": "Enable multi-AZ deployment for production Neptune Clusters.",
"Url": "https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-9"
"Text": "Adopt a **high availability** deployment model for production Neptune clusters by placing read-replicas in separate Availability Zones to avoid single points of failure.\n\nRegularly test automated failover and combine HA with robust backup and recovery practices as part of a defense-in-depth strategy.",
"Url": "https://hub.prowler.com/check/neptune_cluster_multi_az"
}
},
"Categories": [
"redundancy"
"resilience"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -1,26 +1,34 @@
{
"Provider": "aws",
"CheckID": "neptune_cluster_public_snapshot",
"CheckTitle": "Check if NeptuneDB manual cluster snapshot is public.",
"CheckType": [],
"CheckTitle": "NeptuneDB cluster snapshot is not publicly shared",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices/Network Reachability",
"Effects/Data Exposure",
"TTPs/Initial Access/Unauthorized Access"
],
"ServiceName": "neptune",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "critical",
"ResourceType": "AwsRdsDbClusterSnapshot",
"Description": "Check if NeptuneDB manual cluster snapshot is public.",
"Risk": "If you share an unencrypted manual snapshot as public, the snapshot is available to all AWS accounts. Public snapshots may result in unintended data exposure.",
"RelatedUrl": "https://docs.aws.amazon.com/neptune/latest/userguide/security-considerations.html",
"Description": "Neptune DB manual cluster snapshot is evaluated to determine if its restore attributes allow access to all AWS accounts *(public)*.\n\nA failed status in the report means the snapshot is publicly shared and can be copied or restored by any AWS account; **PASS** means it is not shared publicly.",
"Risk": "**Public snapshots** compromise confidentiality of stored data and metadata.\n\nAttackers or third parties can:\n- Copy or restore snapshots to external accounts.\n- Access sensitive data contained in the snapshot.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-3",
"https://docs.aws.amazon.com/config/latest/developerguide/neptune-cluster-snapshot-public-prohibited.html"
],
"Remediation": {
"Code": {
"CLI": "aws neptune modify-db-cluster-snapshot-attribute --db-cluster-snapshot-identifier <snapshot_id> --attribute-name restore --values-to-remove all",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-3",
"Terraform": ""
"Terraform": "",
"Other": "1. Sign in to the AWS Management Console and open the Amazon RDS console\n2. In the left navigation, choose Snapshots > DB cluster snapshots\n3. Select the snapshot, choose Actions > Manage snapshot permissions\n4. In the permissions dialog remove the Public/all-accounts permission and click Save"
},
"Recommendation": {
"Text": "To remove public access from a manual snapshot, follow the AWS documentation on NeptuneDB snapshots.",
"Url": "https://docs.aws.amazon.com/neptune/latest/userguide/security-considerations.html"
"Text": "Avoid public sharing and apply **least privilege** when granting snapshot access: share only with specific AWS accounts or roles.\n\nUse **encryption**, enforce automated policies and regular audits, and apply **separation of duties** and tagging to control and track snapshot access.",
"Url": "https://hub.prowler.com/check/neptune_cluster_public_snapshot"
}
},
"Categories": [

View File

@@ -1,28 +1,34 @@
{
"Provider": "aws",
"CheckID": "neptune_cluster_snapshot_encrypted",
"CheckTitle": "Check if Neptune DB cluster snapshots are encrypted at rest.",
"CheckTitle": "Neptune DB cluster snapshot is encrypted at rest",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices"
"Software and Configuration Checks/AWS Security Best Practices/Encryption at Rest",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Effects/Data Exposure"
],
"ServiceName": "neptune",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:rds:region:account-id:cluster-snapshot:db-cluster-snapshot-identifier",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsRdsDbClusterSnapshot",
"Description": "This check ensures that Neptune DB cluster snapshots are encrypted at rest to protect sensitive data from unauthorized access.",
"Risk": "If Neptune DB cluster snapshots are not encrypted, sensitive data might be exposed in case of unauthorized access, leading to potential data breaches and non-compliance with data protection regulations.",
"RelatedUrl": "https://docs.aws.amazon.com/neptune/latest/userguide/backup-restore-create-snapshot.html",
"Description": "Neptune DB cluster snapshot is encrypted at rest. The evaluation looks at whether each snapshot's encrypted attribute is enabled, confirming that the data is protected while stored.",
"Risk": "**Unencrypted Neptune snapshots** undermine data confidentiality. If accessed or shared due to compromised credentials or misconfiguration, attackers can restore or download snapshot contents, enabling **data exfiltration**, and exposure of sensitive records. This weakens overall data protection posture.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-6",
"https://docs.aws.amazon.com/neptune/latest/userguide/backup-restore-share-snapshot.html"
],
"Remediation": {
"Code": {
"CLI": "aws rds copy-db-cluster-snapshot --source-db-cluster-snapshot-identifier <source-snapshot> --target-db-cluster-snapshot-identifier <encrypted-snapshot> --kms-key-id <kms-key-id>",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-6",
"Terraform": ""
"Terraform": "```hcl\nresource \"aws_neptune_cluster\" \"restored\" {\n cluster_identifier = \"restored-cluster\"\n snapshot_identifier = \"<source-snapshot>\"\n storage_encrypted = true # Ensure restored cluster from snapshot is encrypted\n}\n```",
"Other": "1. Sign in to the AWS Management Console and open Amazon Neptune\n2. In the left pane choose **Snapshots**\n3. Select the unencrypted snapshot and click **Actions** > **Restore snapshot**\n4. In the Restore page enable **Encryption** and select a KMS key\n5. Click **Restore DB cluster**\n6. After the cluster is restored, create a new snapshot of the restored (encrypted) cluster"
},
"Recommendation": {
"Text": "Ensure that all Neptune DB cluster snapshots are encrypted at rest by enabling encryption on the cluster before creating snapshots or by copying unencrypted snapshots to encrypted ones.",
"Url": "https://docs.aws.amazon.com/neptune/latest/userguide/backup-restore-create-snapshot.html"
"Text": "Protect snapshot data by enforcing **encryption at rest** and strong key governance.\n\n- Use **customer-managed keys** with controlled lifecycle and rotation\n- Apply **least privilege** to snapshot access and sharing\n- Prevent creation of unencrypted snapshots via organizational configuration and policy controls",
"Url": "https://hub.prowler.com/check/neptune_cluster_snapshot_encrypted"
}
},
"Categories": [

View File

@@ -1,29 +1,38 @@
{
"Provider": "aws",
"CheckID": "neptune_cluster_storage_encrypted",
"CheckTitle": "Check if Neptune Clusters storage is encrypted at rest.",
"CheckType": [],
"CheckTitle": "Neptune cluster storage is encrypted at rest",
"CheckType": [
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Sensitive Data Identifications/Security"
],
"ServiceName": "neptune",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:rds:region:account-id:db-cluster",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "AwsRdsDbCluster",
"Description": "Check if Neptune Clusters storage is encrypted at rest.",
"Risk": "Ensure that the data available on your Amazon Neptune database instances is encrypted in order to meet regulatory requirements and prevent unauthorized users from accessing sensitive information. Encryption provides an additional layer of protection by securing your Neptune databases from unauthorized access to the underlying storage. Neptune is a fast, scalable, highly secure and fully-managed graph database service that makes it easy to build and run applications that work with deeply connected datasets.",
"RelatedUrl": "https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-1",
"ResourceType": "Other",
"Description": "Neptune DB cluster is evaluated for **encryption at rest**. Indicating the cluster's underlying storage is not encrypted.",
"Risk": "**Unencrypted Neptune storage** reduces confidentiality of stored data and metadata and increases attack surface.\n\nPossible impacts:\n- Unauthorized access or data exfiltration from underlying volumes or snapshots\n- Greater blast radius from leaked or shared snapshots",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-1",
"https://docs.aws.amazon.com/neptune/latest/userguide/encrypt.html"
],
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "https://docs.prowler.com/checks/aws/general-policies/general_18#cloudformation",
"Other": "https://docs.prowler.com/checks/aws/general-policies/general_18/",
"Terraform": "https://docs.prowler.com/checks/aws/general-policies/general_18#terraform"
"NativeIaC": "```yaml\nResources:\n EncryptedNeptuneCluster:\n Type: AWS::Neptune::DBCluster\n Properties:\n DBClusterIdentifier: !Sub ${DBClusterIdentifier}\n StorageEncrypted: true # Enable encryption at rest for data protection\n```",
"Terraform": "```hcl\nresource \"aws_neptune_cluster\" \"example_resource\" {\n cluster_identifier = \"<cluster-id>\"\n storage_encrypted = true # Enable encryption at rest for data protection\n}\n```",
"Other": ""
},
"Recommendation": {
"Text": "Enable Encryption. Use a CMK where possible. It will provide additional management and privacy benefits.",
"Url": "https://docs.aws.amazon.com/securityhub/latest/userguide/neptune-controls.html#neptune-1"
"Text": "Provision all new Neptune DB clusters with **encryption at rest** and prefer **Customer-Managed Keys (CMK)** for key ownership and auditability.\n\nEnforce **least privilege** on KMS keys, implement key lifecycle practices (rotation, revocation) and ensure backups/snapshots remain encrypted to prevent exposure.",
"Url": "https://hub.prowler.com/check/neptune_cluster_storage_encrypted"
}
},
"Categories": [],
"Categories": [
"encryption"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""

View File

@@ -1,30 +1,38 @@
{
"Provider": "aws",
"CheckID": "neptune_cluster_uses_public_subnet",
"CheckTitle": "Ensure Neptune Cluster is not using a public subnet",
"CheckType": [],
"CheckTitle": "Neptune cluster is not using public subnets",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices/Network Reachability",
"TTPs/Initial Access/Unauthorized Access"
],
"ServiceName": "neptune",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:rds:<region>:<account>:cluster:<resource_name>",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsRdsDbCluster",
"Description": "Ensure Neptune Cluster is not using a public subnet",
"Risk": "There is a risk of exposing sensitive data if Neptune Cluster uses a public subnet.",
"RelatedUrl": "https://docs.aws.amazon.com/neptune/latest/userguide/get-started-vpc.html",
"Description": "Neptune cluster is associated with one or more **public subnets**.",
"Risk": "A Neptune cluster in a **public subnet** increases exposure across the CIA triad:\n\n- **Confidentiality**: Direct access enables credential attacks and data exfiltration\n- **Integrity**: Attackers may modify or inject graph data\n- **Availability**: Public reachability allows DDoS or remote exploitation, causing downtime",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/neptune/latest/userguide/get-started-vpc.html",
"https://docs.aws.amazon.com/neptune/latest/userguide/feature-overview-endpoints.html"
],
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "",
"Other": "",
"Terraform": ""
"NativeIaC": "```yaml\nResources:\n NeptuneSubnetGroup:\n Type: AWS::Neptune::DBSubnetGroup\n Properties:\n DBSubnetGroupDescription: \"Private subnets for Neptune\"\n SubnetIds: # Use only private subnet IDs to prevent public access\n - <PRIVATE_SUBNET_ID_1>\n - <PRIVATE_SUBNET_ID_2>\n\n NeptuneDBCluster:\n Type: AWS::Neptune::DBCluster\n Properties:\n DBSubnetGroupName: !Ref NeptuneSubnetGroup # Associate cluster with private subnet group\n```",
"Terraform": "```hcl\nresource \"aws_neptune_subnet_group\" \"neptune\" {\n name = \"neptune-private-subnets\"\n subnet_ids = [\"<PRIVATE_SUBNET_ID_1>\", \"<PRIVATE_SUBNET_ID_2>\"] # Use only private subnet IDs to prevent public access\n}\n\nresource \"aws_neptune_cluster\" \"example_cluster\" {\n neptune_subnet_group_name = aws_neptune_subnet_group.neptune.name # Associate cluster with private subnet group\n}\n```",
"Other": "1. Open the AWS Console and go to Amazon Neptune > Subnet groups\n2. Click Create DB Subnet Group\n3. Enter a name and description, select the VPC, and add only private subnet IDs (at least two)\n4. Click Create\n5. Go to Amazon Neptune > DB clusters > Select the cluster > Actions > Modify\n6. Set DB subnet group to the newly created subnet group and save (Apply immediately if required)\n7. Verify the cluster subnet group now lists only private subnets"
},
"Recommendation": {
"Text": "To ensure your Neptune cluster is not using a public subnet, follow the recommended remediation steps based on your preferred method.",
"Url": "https://docs.aws.amazon.com/neptune/latest/userguide/get-started-vpc.html"
"Text": "Place Neptune clusters in **private subnets** and remove public routability to reduce attack surface.\n\n- Apply **least privilege** and network segmentation\n- Restrict inbound access with scoped network controls and minimal trusted paths\n- Enforce logging, monitoring, and private connectivity for administrative and application access",
"Url": "https://hub.prowler.com/check/neptune_cluster_uses_public_subnet"
}
},
"Categories": [
"internet-exposed"
"internet-exposed",
"trust-boundaries"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -91,7 +91,7 @@ class Defender(M365Service):
malware_rule = [malware_rule]
for rule in malware_rule:
if rule:
malware_rules[rule.get("Name", "")] = MalwareRule(
malware_rules[rule.get("MalwareFilterPolicy", "")] = MalwareRule(
state=rule.get("State", ""),
priority=rule.get("Priority", 0),
users=rule.get("SentTo", None),
@@ -152,12 +152,14 @@ class Defender(M365Service):
antiphishing_rule = [antiphishing_rule]
for rule in antiphishing_rule:
if rule:
antiphishing_rules[rule.get("Name", "")] = AntiphishingRule(
state=rule.get("State", ""),
priority=rule.get("Priority", 0),
users=rule.get("SentTo", None),
groups=rule.get("SentToMemberOf", None),
domains=rule.get("RecipientDomainIs", None),
antiphishing_rules[rule.get("AntiPhishPolicy", "")] = (
AntiphishingRule(
state=rule.get("State", ""),
priority=rule.get("Priority", 0),
users=rule.get("SentTo", None),
groups=rule.get("SentToMemberOf", None),
domains=rule.get("RecipientDomainIs", None),
)
)
except Exception as error:
logger.error(
@@ -250,7 +252,9 @@ class Defender(M365Service):
outbound_spam_rule = [outbound_spam_rule]
for rule in outbound_spam_rule:
if rule:
outbound_spam_rules[rule.get("Name", "")] = OutboundSpamRule(
outbound_spam_rules[
rule.get("HostedOutboundSpamFilterPolicy", "")
] = OutboundSpamRule(
state=rule.get("State", "Disabled"),
priority=rule.get("Priority", 0),
users=rule.get("From", None),
@@ -330,12 +334,14 @@ class Defender(M365Service):
inbound_spam_rule = [inbound_spam_rule]
for rule in inbound_spam_rule:
if rule:
inbound_spam_rules[rule.get("Name", "")] = InboundSpamRule(
state=rule.get("State", "Disabled"),
priority=rule.get("Priority", 0),
users=rule.get("SentTo", None),
groups=rule.get("SentToMemberOf", None),
domains=rule.get("RecipientDomainIs", None),
inbound_spam_rules[rule.get("HostedContentFilterPolicy", "")] = (
InboundSpamRule(
state=rule.get("State", "Disabled"),
priority=rule.get("Priority", 0),
users=rule.get("SentTo", None),
groups=rule.get("SentToMemberOf", None),
domains=rule.get("RecipientDomainIs", None),
)
)
except Exception as error:
logger.error(

View File

@@ -1,9 +1,10 @@
import sys
from unittest import mock
import pytest
from pydantic.v1 import ValidationError
from prowler.lib.check.models import CheckMetadata
from prowler.lib.check.models import Check, CheckMetadata
from tests.lib.check.compliance_check_test import custom_compliance_metadata
mock_metadata = CheckMetadata(
@@ -716,3 +717,96 @@ class TestCheckMetada:
)
# Should contain the validation error we set in the validator
assert "AdditionalURLs must be a list" in str(exc_info.value)
class TestCheck:
@mock.patch("prowler.lib.check.models.CheckMetadata.parse_file")
def test_verify_names_consistency_all_match(self, mock_parse_file):
"""Case where everything matches: CheckID == class_name == file_name"""
mock_parse_file.return_value = mock_metadata.copy(
update={
"CheckID": "accessanalyzer_enabled",
"ServiceName": "accessanalyzer",
}
)
class accessanalyzer_enabled(Check):
def execute(self):
pass
fake_module = mock.Mock()
fake_module.__file__ = "/path/to/accessanalyzer_enabled.py"
sys.modules[accessanalyzer_enabled.__module__] = fake_module
accessanalyzer_enabled()
@mock.patch("prowler.lib.check.models.CheckMetadata.parse_file")
def test_verify_names_consistency_class_mismatch(self, mock_parse_file):
"""CheckID != class name, but matches file_name"""
mock_parse_file.return_value = mock_metadata.copy(
update={
"CheckID": "accessanalyzer_enabled",
"ServiceName": "accessanalyzer",
}
)
class WrongClass(Check):
def execute(self):
pass
fake_module = mock.Mock()
fake_module.__file__ = "/path/to/accessanalyzer_enabled.py"
sys.modules[WrongClass.__module__] = fake_module
with pytest.raises(ValidationError) as excinfo:
WrongClass()
assert "!= class name" in str(excinfo.value)
@mock.patch("prowler.lib.check.models.CheckMetadata.parse_file")
def test_verify_names_consistency_file_mismatch(self, mock_parse_file):
"""CheckID == class name, but != file_name"""
mock_parse_file.return_value = mock_metadata.copy(
update={
"CheckID": "accessanalyzer_enabled",
"ServiceName": "accessanalyzer",
}
)
class accessanalyzer_enabled(Check):
def execute(self):
pass
fake_module = mock.Mock()
fake_module.__file__ = "/path/to/OtherFile.py"
sys.modules[accessanalyzer_enabled.__module__] = fake_module
with pytest.raises(ValidationError) as excinfo:
accessanalyzer_enabled()
assert "!= file name" in str(excinfo.value)
@mock.patch("prowler.lib.check.models.CheckMetadata.parse_file")
def test_verify_names_consistency_both_mismatch(self, mock_parse_file):
"""Neither class name nor file name match the CheckID"""
mock_parse_file.return_value = mock_metadata.copy(
update={
"CheckID": "accessanalyzer_enabled",
"ServiceName": "accessanalyzer",
}
)
class WrongClass(Check):
def execute(self):
pass
fake_module = mock.Mock()
fake_module.__file__ = "/path/to/OtherFile.py"
sys.modules[WrongClass.__module__] = fake_module
with pytest.raises(ValidationError) as excinfo:
WrongClass()
msg = str(excinfo.value)
assert "!= class name" in msg
assert "!= file name" in msg

View File

@@ -105,7 +105,9 @@ class TestASFF:
resource_uid="test-arn",
resource_tags={"key1": "value1"},
)
finding.metadata.Remediation.Recommendation.Url = ""
finding.metadata.Remediation.Recommendation.Url = (
"https://hub.prowler.com/check/check-id"
)
timestamp = timestamp_utc.strftime("%Y-%m-%dT%H:%M:%SZ")
@@ -147,7 +149,7 @@ class TestASFF:
Remediation=Remediation(
Recommendation=Recommendation(
Text=finding.metadata.Remediation.Recommendation.Text,
Url="https://docs.aws.amazon.com/securityhub/latest/userguide/what-is-securityhub.html",
Url="https://hub.prowler.com/check/check-id",
)
),
Description=finding.status_extended,
@@ -170,7 +172,9 @@ class TestASFF:
resource_name="test-resource",
resource_uid="test-arn",
)
finding.metadata.Remediation.Recommendation.Url = ""
finding.metadata.Remediation.Recommendation.Url = (
"https://hub.prowler.com/check/check-id"
)
timestamp = timestamp_utc.strftime("%Y-%m-%dT%H:%M:%SZ")
@@ -212,7 +216,7 @@ class TestASFF:
Remediation=Remediation(
Recommendation=Recommendation(
Text=finding.metadata.Remediation.Recommendation.Text,
Url="https://docs.aws.amazon.com/securityhub/latest/userguide/what-is-securityhub.html",
Url="https://hub.prowler.com/check/check-id",
)
),
Description=finding.status_extended,
@@ -238,7 +242,9 @@ class TestASFF:
resource_uid="test-arn",
resource_tags={"key1": "value1"},
)
finding.metadata.Remediation.Recommendation.Url = ""
finding.metadata.Remediation.Recommendation.Url = (
"https://hub.prowler.com/check/check-id"
)
finding.metadata.Remediation.Recommendation.Text = "x" * 513
timestamp = timestamp_utc.strftime("%Y-%m-%dT%H:%M:%SZ")
@@ -281,7 +287,7 @@ class TestASFF:
Remediation=Remediation(
Recommendation=Recommendation(
Text=f"{'x' * 509}...",
Url="https://docs.aws.amazon.com/securityhub/latest/userguide/what-is-securityhub.html",
Url="https://hub.prowler.com/check/check-id",
)
),
Description=finding.status_extended,
@@ -517,7 +523,9 @@ class TestASFF:
resource_uid="test-arn",
resource_tags={"key1": "value1"},
)
finding.metadata.Remediation.Recommendation.Url = ""
finding.metadata.Remediation.Recommendation.Url = (
"https://hub.prowler.com/check/check-id"
)
timestamp = timestamp_utc.strftime("%Y-%m-%dT%H:%M:%SZ")
@@ -560,7 +568,7 @@ class TestASFF:
"Remediation": {
"Recommendation": {
"Text": "",
"Url": "https://docs.aws.amazon.com/securityhub/latest/userguide/what-is-securityhub.html",
"Url": "https://hub.prowler.com/check/check-id",
}
},
}

View File

@@ -40,6 +40,10 @@ class TestCSV:
categories=["categorya", "categoryb"],
depends_on=["dependency"],
related_to=["related"],
additional_urls=[
"https://docs.aws.amazon.com/prescriptive-guidance/latest/migration-operations-integration/best-practices.html",
"https://docs.aws.amazon.com/prescriptive-guidance/latest/migration-operations-integration/introduction.html",
],
notes="Notes about the finding",
)
]
@@ -97,6 +101,10 @@ class TestCSV:
assert output_data["CATEGORIES"] == "categorya | categoryb"
assert output_data["DEPENDS_ON"] == "dependency"
assert output_data["RELATED_TO"] == "related"
assert (
output_data["ADDITIONAL_URLS"]
== "https://docs.aws.amazon.com/prescriptive-guidance/latest/migration-operations-integration/best-practices.html | https://docs.aws.amazon.com/prescriptive-guidance/latest/migration-operations-integration/introduction.html"
)
assert output_data["NOTES"] == "Notes about the finding"
assert output_data["PROWLER_VERSION"] == prowler_version
@@ -113,7 +121,7 @@ class TestCSV:
output.batch_write_data_to_file()
mock_file.seek(0)
expected_csv = f"AUTH_METHOD;TIMESTAMP;ACCOUNT_UID;ACCOUNT_NAME;ACCOUNT_EMAIL;ACCOUNT_ORGANIZATION_UID;ACCOUNT_ORGANIZATION_NAME;ACCOUNT_TAGS;FINDING_UID;PROVIDER;CHECK_ID;CHECK_TITLE;CHECK_TYPE;STATUS;STATUS_EXTENDED;MUTED;SERVICE_NAME;SUBSERVICE_NAME;SEVERITY;RESOURCE_TYPE;RESOURCE_UID;RESOURCE_NAME;RESOURCE_DETAILS;RESOURCE_TAGS;PARTITION;REGION;DESCRIPTION;RISK;RELATED_URL;REMEDIATION_RECOMMENDATION_TEXT;REMEDIATION_RECOMMENDATION_URL;REMEDIATION_CODE_NATIVEIAC;REMEDIATION_CODE_TERRAFORM;REMEDIATION_CODE_CLI;REMEDIATION_CODE_OTHER;COMPLIANCE;CATEGORIES;DEPENDS_ON;RELATED_TO;NOTES;PROWLER_VERSION\r\nprofile: default;{datetime.now()};123456789012;123456789012;;test-organization-id;test-organization;test-tag:test-value;test-unique-finding;aws;service_test_check_id;service_test_check_id;test-type;PASS;;False;service;;high;test-resource;;;;;aws;eu-west-1;check description;test-risk;test-url;;;;;;;test-compliance: test-compliance;test-category;test-dependency;test-related-to;test-notes;{prowler_version}\r\n"
expected_csv = f"AUTH_METHOD;TIMESTAMP;ACCOUNT_UID;ACCOUNT_NAME;ACCOUNT_EMAIL;ACCOUNT_ORGANIZATION_UID;ACCOUNT_ORGANIZATION_NAME;ACCOUNT_TAGS;FINDING_UID;PROVIDER;CHECK_ID;CHECK_TITLE;CHECK_TYPE;STATUS;STATUS_EXTENDED;MUTED;SERVICE_NAME;SUBSERVICE_NAME;SEVERITY;RESOURCE_TYPE;RESOURCE_UID;RESOURCE_NAME;RESOURCE_DETAILS;RESOURCE_TAGS;PARTITION;REGION;DESCRIPTION;RISK;RELATED_URL;REMEDIATION_RECOMMENDATION_TEXT;REMEDIATION_RECOMMENDATION_URL;REMEDIATION_CODE_NATIVEIAC;REMEDIATION_CODE_TERRAFORM;REMEDIATION_CODE_CLI;REMEDIATION_CODE_OTHER;COMPLIANCE;CATEGORIES;DEPENDS_ON;RELATED_TO;NOTES;PROWLER_VERSION;ADDITIONAL_URLS\r\nprofile: default;{datetime.now()};123456789012;123456789012;;test-organization-id;test-organization;test-tag:test-value;test-unique-finding;aws;service_test_check_id;service_test_check_id;test-type;PASS;;False;service;;high;test-resource;;;;;aws;eu-west-1;check description;test-risk;test-url;;;;;;;test-compliance: test-compliance;test-category;test-dependency;test-related-to;test-notes;{prowler_version};https://docs.aws.amazon.com/prescriptive-guidance/latest/migration-operations-integration/best-practices.html | https://docs.aws.amazon.com/prescriptive-guidance/latest/migration-operations-integration/introduction.html\r\n"
content = mock_file.read()
assert content == expected_csv
@@ -191,7 +199,7 @@ class TestCSV:
with patch.object(temp_file, "close", return_value=None):
csv.batch_write_data_to_file()
expected_csv = f"AUTH_METHOD;TIMESTAMP;ACCOUNT_UID;ACCOUNT_NAME;ACCOUNT_EMAIL;ACCOUNT_ORGANIZATION_UID;ACCOUNT_ORGANIZATION_NAME;ACCOUNT_TAGS;FINDING_UID;PROVIDER;CHECK_ID;CHECK_TITLE;CHECK_TYPE;STATUS;STATUS_EXTENDED;MUTED;SERVICE_NAME;SUBSERVICE_NAME;SEVERITY;RESOURCE_TYPE;RESOURCE_UID;RESOURCE_NAME;RESOURCE_DETAILS;RESOURCE_TAGS;PARTITION;REGION;DESCRIPTION;RISK;RELATED_URL;REMEDIATION_RECOMMENDATION_TEXT;REMEDIATION_RECOMMENDATION_URL;REMEDIATION_CODE_NATIVEIAC;REMEDIATION_CODE_TERRAFORM;REMEDIATION_CODE_CLI;REMEDIATION_CODE_OTHER;COMPLIANCE;CATEGORIES;DEPENDS_ON;RELATED_TO;NOTES;PROWLER_VERSION\nprofile: default;{datetime.now()};123456789012;123456789012;;test-organization-id;test-organization;test-tag:test-value;test-unique-finding;aws;service_test_check_id;service_test_check_id;test-type;PASS;;False;service;;high;test-resource;;;;;aws;eu-west-1;check description;test-risk;test-url;;;;;;;test-compliance: test-compliance;test-category;test-dependency;test-related-to;test-notes;{prowler_version}\n"
expected_csv = f"AUTH_METHOD;TIMESTAMP;ACCOUNT_UID;ACCOUNT_NAME;ACCOUNT_EMAIL;ACCOUNT_ORGANIZATION_UID;ACCOUNT_ORGANIZATION_NAME;ACCOUNT_TAGS;FINDING_UID;PROVIDER;CHECK_ID;CHECK_TITLE;CHECK_TYPE;STATUS;STATUS_EXTENDED;MUTED;SERVICE_NAME;SUBSERVICE_NAME;SEVERITY;RESOURCE_TYPE;RESOURCE_UID;RESOURCE_NAME;RESOURCE_DETAILS;RESOURCE_TAGS;PARTITION;REGION;DESCRIPTION;RISK;RELATED_URL;REMEDIATION_RECOMMENDATION_TEXT;REMEDIATION_RECOMMENDATION_URL;REMEDIATION_CODE_NATIVEIAC;REMEDIATION_CODE_TERRAFORM;REMEDIATION_CODE_CLI;REMEDIATION_CODE_OTHER;COMPLIANCE;CATEGORIES;DEPENDS_ON;RELATED_TO;NOTES;PROWLER_VERSION;ADDITIONAL_URLS\nprofile: default;{datetime.now()};123456789012;123456789012;;test-organization-id;test-organization;test-tag:test-value;test-unique-finding;aws;service_test_check_id;service_test_check_id;test-type;PASS;;False;service;;high;test-resource;;;;;aws;eu-west-1;check description;test-risk;test-url;;;;;;;test-compliance: test-compliance;test-category;test-dependency;test-related-to;test-notes;{prowler_version};https://docs.aws.amazon.com/prescriptive-guidance/latest/migration-operations-integration/best-practices.html | https://docs.aws.amazon.com/prescriptive-guidance/latest/migration-operations-integration/introduction.html\n"
temp_file.seek(0)

View File

@@ -36,6 +36,10 @@ def generate_finding_output(
depends_on: list[str] = ["test-dependency"],
related_to: list[str] = ["test-related-to"],
notes: str = "test-notes",
additional_urls: list[str] = [
"https://docs.aws.amazon.com/prescriptive-guidance/latest/migration-operations-integration/best-practices.html",
"https://docs.aws.amazon.com/prescriptive-guidance/latest/migration-operations-integration/introduction.html",
],
service_name: str = "service",
check_id: str = "service_test_check_id",
check_title: str = "service_test_check_id",
@@ -90,6 +94,7 @@ def generate_finding_output(
RelatedTo=related_to,
Categories=categories,
Notes=notes,
AdditionalURLs=additional_urls,
),
prowler_version=prowler_version,
)

View File

@@ -40,7 +40,7 @@ pass_html_finding = """
<td></td>
<td></td>
<td><p class="show-read-more">test-risk</p></td>
<td><p class="show-read-more"></p> <a class="read-more" href=""><i class="fas fa-external-link-alt"></i></a></td>
<td><p class="show-read-more"></p> <a class="read-more" href="https://hub.prowler.com/check/check-id"><i class="fas fa-external-link-alt"></i></a></td>
<td><p class="show-read-more">
&#x2022;test-compliance: test-compliance
</p></td>
@@ -62,7 +62,7 @@ fail_html_finding = """
</td>
<td>test-status-extended</td>
<td><p class="show-read-more">test-risk</p></td>
<td><p class="show-read-more">test-remediation-recommendation-text</p> <a class="read-more" href=""><i class="fas fa-external-link-alt"></i></a></td>
<td><p class="show-read-more">test-remediation-recommendation-text</p> <a class="read-more" href="https://hub.prowler.com/check/check-id"><i class="fas fa-external-link-alt"></i></a></td>
<td><p class="show-read-more">
&#x2022;test-compliance: test-compliance
</p></td>
@@ -80,7 +80,7 @@ muted_html_finding = """
<td></td>
<td></td>
<td><p class="show-read-more">test-risk</p></td>
<td><p class="show-read-more"></p> <a class="read-more" href=""><i class="fas fa-external-link-alt"></i></a></td>
<td><p class="show-read-more"></p> <a class="read-more" href="https://hub.prowler.com/check/check-id"><i class="fas fa-external-link-alt"></i></a></td>
<td><p class="show-read-more">
&#x2022;test-compliance: test-compliance
</p></td>
@@ -98,7 +98,7 @@ manual_html_finding = """
<td></td>
<td></td>
<td><p class="show-read-more">test-risk</p></td>
<td><p class="show-read-more"></p> <a class="read-more" href=""><i class="fas fa-external-link-alt"></i></a></td>
<td><p class="show-read-more"></p> <a class="read-more" href="https://hub.prowler.com/check/check-id"><i class="fas fa-external-link-alt"></i></a></td>
<td><p class="show-read-more">
&#x2022;test-compliance: test-compliance
</p></td>
@@ -573,6 +573,7 @@ class TestHTML:
status_extended="test-status-extended",
risk="test-risk",
remediation_recommendation_text="test-remediation-recommendation-text",
remediation_recommendation_url="https://hub.prowler.com/check/check-id",
compliance={"test-compliance": "test-compliance"},
)
]
@@ -583,21 +584,35 @@ class TestHTML:
assert output_data == fail_html_finding
def test_transform_pass_finding(self):
findings = [generate_finding_output()]
findings = [
generate_finding_output(
remediation_recommendation_url="https://hub.prowler.com/check/check-id"
)
]
html = HTML(findings)
output_data = html.data[0]
assert isinstance(output_data, str)
assert output_data == pass_html_finding
def test_transform_muted_finding(self):
findings = [generate_finding_output(muted=True)]
findings = [
generate_finding_output(
muted=True,
remediation_recommendation_url="https://hub.prowler.com/check/check-id",
)
]
html = HTML(findings)
output_data = html.data[0]
assert isinstance(output_data, str)
assert output_data == muted_html_finding
def test_transform_manual_finding(self):
findings = [generate_finding_output(status="MANUAL")]
findings = [
generate_finding_output(
status="MANUAL",
remediation_recommendation_url="https://hub.prowler.com/check/check-id",
)
]
html = HTML(findings)
output_data = html.data[0]
assert isinstance(output_data, str)
@@ -605,7 +620,11 @@ class TestHTML:
def test_batch_write_data_to_file(self):
mock_file = StringIO()
findings = [generate_finding_output()]
findings = [
generate_finding_output(
remediation_recommendation_url="https://hub.prowler.com/check/check-id"
)
]
output = HTML(findings)
output._file_descriptor = mock_file
provider = set_mocked_aws_provider(audited_regions=[AWS_REGION_EU_WEST_1])
@@ -623,7 +642,11 @@ class TestHTML:
def test_write_header(self):
mock_file = StringIO()
findings = [generate_finding_output()]
findings = [
generate_finding_output(
remediation_recommendation_url="https://hub.prowler.com/check/check-id"
)
]
output = HTML(findings)
output._file_descriptor = mock_file
provider = set_mocked_aws_provider(audited_regions=[AWS_REGION_EU_WEST_1])
@@ -637,7 +660,11 @@ class TestHTML:
def test_write_footer(self):
mock_file = StringIO()
findings = [generate_finding_output()]
findings = [
generate_finding_output(
remediation_recommendation_url="https://hub.prowler.com/check/check-id"
)
]
output = HTML(findings)
output._file_descriptor = mock_file
@@ -648,7 +675,11 @@ class TestHTML:
assert content == html_footer
def test_aws_get_assessment_summary(self):
findings = [generate_finding_output()]
findings = [
generate_finding_output(
remediation_recommendation_url="https://hub.prowler.com/check/check-id"
)
]
output = HTML(findings)
provider = set_mocked_aws_provider(audited_regions=[AWS_REGION_EU_WEST_1])
@@ -657,7 +688,11 @@ class TestHTML:
assert summary == aws_html_assessment_summary
def test_azure_get_assessment_summary(self):
findings = [generate_finding_output()]
findings = [
generate_finding_output(
remediation_recommendation_url="https://hub.prowler.com/check/check-id"
)
]
output = HTML(findings)
provider = set_mocked_azure_provider()
@@ -666,7 +701,11 @@ class TestHTML:
assert summary == summary
def test_gcp_get_assessment_summary(self):
findings = [generate_finding_output()]
findings = [
generate_finding_output(
remediation_recommendation_url="https://hub.prowler.com/check/check-id"
)
]
output = HTML(findings)
provider = set_mocked_gcp_provider(project_ids=[GCP_PROJECT_ID])
@@ -675,7 +714,11 @@ class TestHTML:
assert summary == gcp_html_assessment_summary
def test_kubernetes_get_assessment_summary(self):
findings = [generate_finding_output()]
findings = [
generate_finding_output(
remediation_recommendation_url="https://hub.prowler.com/check/check-id"
)
]
output = HTML(findings)
provider = set_mocked_kubernetes_provider()
@@ -684,7 +727,11 @@ class TestHTML:
assert summary == kubernetes_html_assessment_summary
def test_m365_get_assessment_summary(self):
findings = [generate_finding_output()]
findings = [
generate_finding_output(
remediation_recommendation_url="https://hub.prowler.com/check/check-id"
)
]
output = HTML(findings)
provider = set_mocked_m365_provider()
@@ -695,7 +742,11 @@ class TestHTML:
def test_github_personal_access_token_get_assessment_summary(self):
"""Test GitHub HTML assessment summary generation with Personal Access Token authentication."""
findings = [generate_finding_output()]
findings = [
generate_finding_output(
remediation_recommendation_url="https://hub.prowler.com/check/check-id"
)
]
output = HTML(findings)
provider = set_mocked_github_provider(auth_method="Personal Access Token")
@@ -710,7 +761,11 @@ class TestHTML:
def test_github_app_get_assessment_summary(self):
"""Test GitHub HTML assessment summary generation with GitHub App authentication."""
findings = [generate_finding_output()]
findings = [
generate_finding_output(
remediation_recommendation_url="https://hub.prowler.com/check/check-id"
)
]
output = HTML(findings)
provider = set_mocked_github_provider(

View File

@@ -104,6 +104,7 @@ class TestOCSF:
"categories": findings[0].metadata.Categories,
"depends_on": findings[0].metadata.DependsOn,
"related_to": findings[0].metadata.RelatedTo,
"additional_urls": findings[0].metadata.AdditionalURLs,
"notes": findings[0].metadata.Notes,
"compliance": findings[0].compliance,
}
@@ -189,6 +190,10 @@ class TestOCSF:
"categories": ["test-category"],
"depends_on": ["test-dependency"],
"related_to": ["test-related-to"],
"additional_urls": [
"https://docs.aws.amazon.com/prescriptive-guidance/latest/migration-operations-integration/best-practices.html",
"https://docs.aws.amazon.com/prescriptive-guidance/latest/migration-operations-integration/introduction.html",
],
"notes": "test-notes",
"compliance": {"test-compliance": "test-compliance"},
},
@@ -316,6 +321,7 @@ class TestOCSF:
"categories": finding_output.metadata.Categories,
"depends_on": finding_output.metadata.DependsOn,
"related_to": finding_output.metadata.RelatedTo,
"additional_urls": finding_output.metadata.AdditionalURLs,
"notes": finding_output.metadata.Notes,
"compliance": finding_output.compliance,
}

View File

@@ -162,3 +162,64 @@ class Test_kafka_cluster_encryption_at_rest_uses_cmk:
)
assert result[0].resource_tags == []
assert result[0].region == AWS_REGION_US_EAST_1
def test_kafka_cluster_serverless_encryption_at_rest(self):
kafka_client = MagicMock
kafka_client.clusters = {
"arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6": Cluster(
id="6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
name="serverless-cluster-1",
arn="arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
region=AWS_REGION_US_EAST_1,
tags=[],
state="ACTIVE",
kafka_version="SERVERLESS",
data_volume_kms_key_id="AWS_MANAGED",
encryption_in_transit=EncryptionInTransit(
client_broker="TLS",
in_cluster=True,
),
tls_authentication=True,
public_access=False,
unauthentication_access=False,
enhanced_monitoring="DEFAULT",
)
}
kms_client = MagicMock
kms_client.keys = []
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_cluster_encryption_at_rest_uses_cmk.kafka_cluster_encryption_at_rest_uses_cmk.kms_client",
new=kms_client,
),
):
from prowler.providers.aws.services.kafka.kafka_cluster_encryption_at_rest_uses_cmk.kafka_cluster_encryption_at_rest_uses_cmk import (
kafka_cluster_encryption_at_rest_uses_cmk,
)
check = kafka_cluster_encryption_at_rest_uses_cmk()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Kafka cluster 'serverless-cluster-1' is serverless and always has encryption at rest enabled by default."
)
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert (
result[0].resource_arn
== "arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
)
assert result[0].resource_tags == []
assert result[0].region == AWS_REGION_US_EAST_1

View File

@@ -4,7 +4,7 @@ from prowler.providers.aws.services.kafka.kafka_service import (
Cluster,
EncryptionInTransit,
)
from tests.providers.aws.utils import AWS_REGION_US_EAST_1, set_mocked_aws_provider
from tests.providers.aws.utils import AWS_REGION_US_EAST_1
class Test_kafka_cluster_enhanced_monitoring_enabled:
@@ -14,11 +14,11 @@ class Test_kafka_cluster_enhanced_monitoring_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -56,11 +56,11 @@ class Test_kafka_cluster_enhanced_monitoring_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -110,11 +110,11 @@ class Test_kafka_cluster_enhanced_monitoring_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -138,3 +138,57 @@ class Test_kafka_cluster_enhanced_monitoring_enabled:
)
assert result[0].resource_tags == []
assert result[0].region == AWS_REGION_US_EAST_1
def test_kafka_cluster_serverless_enhanced_monitoring(self):
kafka_client = MagicMock
kafka_client.clusters = {
"arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6": Cluster(
id="6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
name="serverless-cluster-1",
arn="arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
region=AWS_REGION_US_EAST_1,
tags=[],
state="ACTIVE",
kafka_version="SERVERLESS",
data_volume_kms_key_id="AWS_MANAGED",
encryption_in_transit=EncryptionInTransit(
client_broker="TLS",
in_cluster=True,
),
tls_authentication=True,
public_access=False,
unauthentication_access=False,
enhanced_monitoring="DEFAULT",
)
}
with (
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
from prowler.providers.aws.services.kafka.kafka_cluster_enhanced_monitoring_enabled.kafka_cluster_enhanced_monitoring_enabled import (
kafka_cluster_enhanced_monitoring_enabled,
)
check = kafka_cluster_enhanced_monitoring_enabled()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Kafka cluster 'serverless-cluster-1' is serverless and always has enhanced monitoring enabled by default."
)
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert (
result[0].resource_arn
== "arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
)
assert result[0].resource_tags == []
assert result[0].region == AWS_REGION_US_EAST_1

View File

@@ -4,7 +4,7 @@ from prowler.providers.aws.services.kafka.kafka_service import (
Cluster,
EncryptionInTransit,
)
from tests.providers.aws.utils import AWS_REGION_US_EAST_1, set_mocked_aws_provider
from tests.providers.aws.utils import AWS_REGION_US_EAST_1
class Test_kafka_cluster_in_transit_encryption_enabled:
@@ -14,11 +14,11 @@ class Test_kafka_cluster_in_transit_encryption_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -56,11 +56,11 @@ class Test_kafka_cluster_in_transit_encryption_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -110,11 +110,11 @@ class Test_kafka_cluster_in_transit_encryption_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -164,11 +164,11 @@ class Test_kafka_cluster_in_transit_encryption_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -191,3 +191,57 @@ class Test_kafka_cluster_in_transit_encryption_enabled:
== "arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5"
)
assert result[0].region == AWS_REGION_US_EAST_1
def test_kafka_cluster_serverless_in_transit_encryption(self):
kafka_client = MagicMock
kafka_client.clusters = {
"arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6": Cluster(
id="6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
name="serverless-cluster-1",
arn="arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
region=AWS_REGION_US_EAST_1,
tags=[],
state="ACTIVE",
kafka_version="SERVERLESS",
data_volume_kms_key_id="AWS_MANAGED",
encryption_in_transit=EncryptionInTransit(
client_broker="TLS",
in_cluster=True,
),
tls_authentication=True,
public_access=False,
unauthentication_access=False,
enhanced_monitoring="DEFAULT",
)
}
with (
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
from prowler.providers.aws.services.kafka.kafka_cluster_in_transit_encryption_enabled.kafka_cluster_in_transit_encryption_enabled import (
kafka_cluster_in_transit_encryption_enabled,
)
check = kafka_cluster_in_transit_encryption_enabled()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Kafka cluster 'serverless-cluster-1' is serverless and always has encryption in transit enabled by default."
)
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert (
result[0].resource_arn
== "arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
)
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == []

View File

@@ -4,7 +4,7 @@ from prowler.providers.aws.services.kafka.kafka_service import (
Cluster,
EncryptionInTransit,
)
from tests.providers.aws.utils import AWS_REGION_US_EAST_1, set_mocked_aws_provider
from tests.providers.aws.utils import AWS_REGION_US_EAST_1
class Test_kafka_cluster_is_public:
@@ -14,11 +14,11 @@ class Test_kafka_cluster_is_public:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -56,11 +56,11 @@ class Test_kafka_cluster_is_public:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -110,11 +110,11 @@ class Test_kafka_cluster_is_public:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -138,3 +138,57 @@ class Test_kafka_cluster_is_public:
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5"
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == []
def test_kafka_cluster_serverless_public(self):
kafka_client = MagicMock
kafka_client.clusters = {
"arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6": Cluster(
id="6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
name="serverless-cluster-1",
arn="arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
region=AWS_REGION_US_EAST_1,
tags=[],
state="ACTIVE",
kafka_version="SERVERLESS",
data_volume_kms_key_id="AWS_MANAGED",
encryption_in_transit=EncryptionInTransit(
client_broker="TLS",
in_cluster=True,
),
tls_authentication=True,
public_access=False,
unauthentication_access=False,
enhanced_monitoring="DEFAULT",
)
}
with (
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
from prowler.providers.aws.services.kafka.kafka_cluster_is_public.kafka_cluster_is_public import (
kafka_cluster_is_public,
)
check = kafka_cluster_is_public()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Kafka cluster serverless-cluster-1 is serverless and always private by default."
)
assert (
result[0].resource_arn
== "arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
)
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == []

View File

@@ -4,7 +4,7 @@ from prowler.providers.aws.services.kafka.kafka_service import (
Cluster,
EncryptionInTransit,
)
from tests.providers.aws.utils import AWS_REGION_US_EAST_1, set_mocked_aws_provider
from tests.providers.aws.utils import AWS_REGION_US_EAST_1
class Test_kafka_cluster_mutual_tls_authentication_enabled:
@@ -14,11 +14,11 @@ class Test_kafka_cluster_mutual_tls_authentication_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -56,11 +56,11 @@ class Test_kafka_cluster_mutual_tls_authentication_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -110,11 +110,11 @@ class Test_kafka_cluster_mutual_tls_authentication_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -138,3 +138,57 @@ class Test_kafka_cluster_mutual_tls_authentication_enabled:
)
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == []
def test_kafka_cluster_serverless_mutual_tls_authentication(self):
kafka_client = MagicMock
kafka_client.clusters = {
"arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6": Cluster(
id="6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
name="serverless-cluster-1",
arn="arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
region=AWS_REGION_US_EAST_1,
tags=[],
state="ACTIVE",
kafka_version="SERVERLESS",
data_volume_kms_key_id="AWS_MANAGED",
encryption_in_transit=EncryptionInTransit(
client_broker="TLS",
in_cluster=True,
),
tls_authentication=True,
public_access=False,
unauthentication_access=False,
enhanced_monitoring="DEFAULT",
)
}
with (
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
from prowler.providers.aws.services.kafka.kafka_cluster_mutual_tls_authentication_enabled.kafka_cluster_mutual_tls_authentication_enabled import (
kafka_cluster_mutual_tls_authentication_enabled,
)
check = kafka_cluster_mutual_tls_authentication_enabled()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Kafka cluster 'serverless-cluster-1' is serverless and always has TLS authentication enabled by default."
)
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert (
result[0].resource_arn
== "arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
)
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == []

View File

@@ -4,7 +4,7 @@ from prowler.providers.aws.services.kafka.kafka_service import (
Cluster,
EncryptionInTransit,
)
from tests.providers.aws.utils import AWS_REGION_US_EAST_1, set_mocked_aws_provider
from tests.providers.aws.utils import AWS_REGION_US_EAST_1
class Test_kafka_cluster_unrestricted_access_disabled:
@@ -14,11 +14,11 @@ class Test_kafka_cluster_unrestricted_access_disabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -56,11 +56,11 @@ class Test_kafka_cluster_unrestricted_access_disabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -110,11 +110,11 @@ class Test_kafka_cluster_unrestricted_access_disabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -138,3 +138,57 @@ class Test_kafka_cluster_unrestricted_access_disabled:
)
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == []
def test_kafka_cluster_serverless_unrestricted_access_disabled(self):
kafka_client = MagicMock
kafka_client.clusters = {
"arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6": Cluster(
id="6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
name="serverless-cluster-1",
arn="arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
region=AWS_REGION_US_EAST_1,
tags=[],
state="ACTIVE",
kafka_version="SERVERLESS",
data_volume_kms_key_id="AWS_MANAGED",
encryption_in_transit=EncryptionInTransit(
client_broker="TLS",
in_cluster=True,
),
tls_authentication=True,
public_access=False,
unauthentication_access=False,
enhanced_monitoring="DEFAULT",
)
}
with (
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
from prowler.providers.aws.services.kafka.kafka_cluster_unrestricted_access_disabled.kafka_cluster_unrestricted_access_disabled import (
kafka_cluster_unrestricted_access_disabled,
)
check = kafka_cluster_unrestricted_access_disabled()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Kafka cluster 'serverless-cluster-1' is serverless and always requires authentication by default."
)
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert (
result[0].resource_arn
== "arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
)
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == []

View File

@@ -5,7 +5,7 @@ from prowler.providers.aws.services.kafka.kafka_service import (
EncryptionInTransit,
KafkaVersion,
)
from tests.providers.aws.utils import AWS_REGION_US_EAST_1, set_mocked_aws_provider
from tests.providers.aws.utils import AWS_REGION_US_EAST_1
class Test_kafka_cluster_latest_version:
@@ -15,11 +15,11 @@ class Test_kafka_cluster_latest_version:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -62,11 +62,11 @@ class Test_kafka_cluster_latest_version:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -121,11 +121,11 @@ class Test_kafka_cluster_latest_version:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -149,3 +149,62 @@ class Test_kafka_cluster_latest_version:
)
assert result[0].resource_tags == []
assert result[0].region == AWS_REGION_US_EAST_1
def test_kafka_cluster_serverless_uses_latest_version(self):
kafka_client = MagicMock
kafka_client.clusters = {
"arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6": Cluster(
id="6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
name="serverless-cluster-1",
arn="arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
region=AWS_REGION_US_EAST_1,
tags=[],
state="ACTIVE",
kafka_version="SERVERLESS",
data_volume_kms_key_id="AWS_MANAGED",
encryption_in_transit=EncryptionInTransit(
client_broker="TLS",
in_cluster=True,
),
tls_authentication=True,
public_access=False,
unauthentication_access=False,
enhanced_monitoring="DEFAULT",
)
}
kafka_client.kafka_versions = [
KafkaVersion(version="1.0.0", status="DEPRECATED"),
KafkaVersion(version="2.8.0", status="ACTIVE"),
]
with (
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
from prowler.providers.aws.services.kafka.kafka_cluster_uses_latest_version.kafka_cluster_uses_latest_version import (
kafka_cluster_uses_latest_version,
)
check = kafka_cluster_uses_latest_version()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Kafka cluster 'serverless-cluster-1' is serverless and AWS automatically manages the Kafka version."
)
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert (
result[0].resource_arn
== "arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
)
assert result[0].resource_tags == []
assert result[0].region == AWS_REGION_US_EAST_1

View File

@@ -13,47 +13,67 @@ make_api_call = botocore.client.BaseClient._make_api_call
def mock_make_api_call(self, operation_name, kwarg):
if operation_name == "ListClusters":
if operation_name == "ListClustersV2":
return {
"ClusterInfoList": [
{
"BrokerNodeGroupInfo": {
"BrokerAZDistribution": "DEFAULT",
"ClientSubnets": ["subnet-cbfff283", "subnet-6746046b"],
"InstanceType": "kafka.m5.large",
"SecurityGroups": ["sg-f839b688"],
"StorageInfo": {"EbsStorageInfo": {"VolumeSize": 100}},
},
"ClusterType": "PROVISIONED",
"ClusterArn": f"arn:aws:kafka:{AWS_REGION_US_EAST_1}:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5",
"ClusterName": "demo-cluster-1",
"CreationTime": "2020-07-09T02:31:36.223000+00:00",
"CurrentBrokerSoftwareInfo": {"KafkaVersion": "2.2.1"},
"CurrentVersion": "K3AEGXETSR30VB",
"EncryptionInfo": {
"EncryptionAtRest": {
"DataVolumeKMSKeyId": f"arn:aws:kms:{AWS_REGION_US_EAST_1}:123456789012:key/a7ca56d5-0768-4b64-a670-339a9fbef81c"
},
"EncryptionInTransit": {
"ClientBroker": "TLS_PLAINTEXT",
"InCluster": True,
},
},
"ClientAuthentication": {
"Tls": {"CertificateAuthorityArnList": [], "Enabled": True},
"Unauthenticated": {"Enabled": False},
},
"EnhancedMonitoring": "DEFAULT",
"OpenMonitoring": {
"Prometheus": {
"JmxExporter": {"EnabledInBroker": False},
"NodeExporter": {"EnabledInBroker": False},
}
},
"NumberOfBrokerNodes": 2,
"State": "ACTIVE",
"Tags": {},
"ZookeeperConnectString": f"z-2.demo-cluster-1.xuy0sb.c5.kafka.{AWS_REGION_US_EAST_1}.amazonaws.com:2181,z-1.demo-cluster-1.xuy0sb.c5.kafka.{AWS_REGION_US_EAST_1}.amazonaws.com:2181,z-3.demo-cluster-1.xuy0sb.c5.kafka.{AWS_REGION_US_EAST_1}.amazonaws.com:2181",
}
"Provisioned": {
"BrokerNodeGroupInfo": {
"BrokerAZDistribution": "DEFAULT",
"ClientSubnets": ["subnet-cbfff283", "subnet-6746046b"],
"InstanceType": "kafka.m5.large",
"SecurityGroups": ["sg-f839b688"],
"StorageInfo": {"EbsStorageInfo": {"VolumeSize": 100}},
"ConnectivityInfo": {
"PublicAccess": {"Type": "SERVICE_PROVIDED_EIPS"}
},
},
"CurrentBrokerSoftwareInfo": {"KafkaVersion": "2.2.1"},
"CurrentVersion": "K3AEGXETSR30VB",
"EncryptionInfo": {
"EncryptionAtRest": {
"DataVolumeKMSKeyId": f"arn:aws:kms:{AWS_REGION_US_EAST_1}:123456789012:key/a7ca56d5-0768-4b64-a670-339a9fbef81c"
},
"EncryptionInTransit": {
"ClientBroker": "TLS_PLAINTEXT",
"InCluster": True,
},
},
"ClientAuthentication": {
"Tls": {"CertificateAuthorityArnList": [], "Enabled": True},
"Unauthenticated": {"Enabled": False},
},
"EnhancedMonitoring": "DEFAULT",
"OpenMonitoring": {
"Prometheus": {
"JmxExporter": {"EnabledInBroker": False},
"NodeExporter": {"EnabledInBroker": False},
}
},
"NumberOfBrokerNodes": 2,
"ZookeeperConnectString": f"z-2.demo-cluster-1.xuy0sb.c5.kafka.{AWS_REGION_US_EAST_1}.amazonaws.com:2181,z-1.demo-cluster-1.xuy0sb.c5.kafka.{AWS_REGION_US_EAST_1}.amazonaws.com:2181,z-3.demo-cluster-1.xuy0sb.c5.kafka.{AWS_REGION_US_EAST_1}.amazonaws.com:2181",
},
},
{
"ClusterType": "SERVERLESS",
"ClusterArn": f"arn:aws:kafka:{AWS_REGION_US_EAST_1}:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
"ClusterName": "serverless-cluster-1",
"State": "ACTIVE",
"Tags": {},
"Serverless": {
"VpcConfigs": [
{
"SubnetIds": ["subnet-cbfff283", "subnet-6746046b"],
"SecurityGroups": ["sg-f839b688"],
}
],
},
},
]
}
elif operation_name == "ListKafkaVersions":
@@ -86,32 +106,53 @@ class TestKafkaService:
assert kafka.__class__.__name__ == "Kafka"
assert kafka.session.__class__.__name__ == "Session"
assert kafka.audited_account == AWS_ACCOUNT_NUMBER
# Clusters assertions
assert len(kafka.clusters) == 1
cluster_arn = f"arn:aws:kafka:{AWS_REGION_US_EAST_1}:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5"
assert cluster_arn in kafka.clusters
# Clusters assertions - should now include both provisioned and serverless
assert len(kafka.clusters) == 2
# Check provisioned cluster
provisioned_cluster_arn = f"arn:aws:kafka:{AWS_REGION_US_EAST_1}:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5"
assert provisioned_cluster_arn in kafka.clusters
provisioned_cluster = kafka.clusters[provisioned_cluster_arn]
assert provisioned_cluster.id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5"
assert provisioned_cluster.arn == provisioned_cluster_arn
assert provisioned_cluster.name == "demo-cluster-1"
assert provisioned_cluster.region == AWS_REGION_US_EAST_1
assert provisioned_cluster.tags == []
assert provisioned_cluster.state == "ACTIVE"
assert provisioned_cluster.kafka_version == "2.2.1"
assert (
kafka.clusters[cluster_arn].id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5"
)
assert kafka.clusters[cluster_arn].arn == cluster_arn
assert kafka.clusters[cluster_arn].name == "demo-cluster-1"
assert kafka.clusters[cluster_arn].region == AWS_REGION_US_EAST_1
assert kafka.clusters[cluster_arn].tags == []
assert kafka.clusters[cluster_arn].state == "ACTIVE"
assert kafka.clusters[cluster_arn].kafka_version == "2.2.1"
assert (
kafka.clusters[cluster_arn].data_volume_kms_key_id
provisioned_cluster.data_volume_kms_key_id
== f"arn:aws:kms:{AWS_REGION_US_EAST_1}:123456789012:key/a7ca56d5-0768-4b64-a670-339a9fbef81c"
)
assert (
kafka.clusters[cluster_arn].encryption_in_transit.client_broker
== "TLS_PLAINTEXT"
provisioned_cluster.encryption_in_transit.client_broker == "TLS_PLAINTEXT"
)
assert kafka.clusters[cluster_arn].encryption_in_transit.in_cluster
assert kafka.clusters[cluster_arn].enhanced_monitoring == "DEFAULT"
assert kafka.clusters[cluster_arn].tls_authentication
assert kafka.clusters[cluster_arn].public_access
assert not kafka.clusters[cluster_arn].unauthentication_access
assert provisioned_cluster.encryption_in_transit.in_cluster
assert provisioned_cluster.enhanced_monitoring == "DEFAULT"
assert provisioned_cluster.tls_authentication
assert provisioned_cluster.public_access
assert not provisioned_cluster.unauthentication_access
# Check serverless cluster
serverless_cluster_arn = f"arn:aws:kafka:{AWS_REGION_US_EAST_1}:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert serverless_cluster_arn in kafka.clusters
serverless_cluster = kafka.clusters[serverless_cluster_arn]
assert serverless_cluster.id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert serverless_cluster.arn == serverless_cluster_arn
assert serverless_cluster.name == "serverless-cluster-1"
assert serverless_cluster.region == AWS_REGION_US_EAST_1
assert serverless_cluster.tags == []
assert serverless_cluster.state == "ACTIVE"
assert serverless_cluster.kafka_version == "SERVERLESS"
assert serverless_cluster.data_volume_kms_key_id == "AWS_MANAGED"
assert serverless_cluster.encryption_in_transit.client_broker == "TLS"
assert serverless_cluster.encryption_in_transit.in_cluster
assert serverless_cluster.enhanced_monitoring == "DEFAULT"
assert serverless_cluster.tls_authentication
assert not serverless_cluster.public_access
assert not serverless_cluster.unauthentication_access
# Kafka versions assertions
assert len(kafka.kafka_versions) == 2
assert kafka.kafka_versions[0].version == "1.0.0"

View File

@@ -1 +1,2 @@
package-lock=true
save-exact=true

View File

@@ -2,6 +2,18 @@
All notable changes to the **Prowler UI** are documented in this file.
## [1.12.1] (Prowler v5.12.1)
### 🚀 Added
- `Prowler Hub` menu item with tooltip [(#8692)](https://github.com/prowler-cloud/prowler/pull/8692)
- Copy link button to finding detail page [(#8685)](https://github.com/prowler-cloud/prowler/pull/8685)
### 🐞 Fixed
- Field-level email validation message [(#8698)](https://github.com/prowler-cloud/prowler/pull/8698)
- POST method on auth form [(#8699)](https://github.com/prowler-cloud/prowler/pull/8699)
## [1.12.0] (Prowler v5.12.0)
### 🚀 Added

View File

@@ -57,7 +57,6 @@ WORKDIR /app
# Set up environment for production
ENV NODE_ENV=production
ENV NEXT_TELEMETRY_DISABLED=1
ENV NEXT_CACHE_DIR=/app/.next/cache
RUN addgroup --system --gid 1001 nodejs &&\
adduser --system --uid 1001 nextjs
@@ -66,10 +65,6 @@ COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
COPY --from=builder --chown=nextjs:nodejs /app/public ./public
# Ensure Next.js cache directory exists and is writable
RUN mkdir -p /app/.next/cache && \
chown -R nextjs:nodejs /app/.next
USER nextjs
EXPOSE 3000

View File

@@ -35,7 +35,7 @@ export async function authenticate(
message: "Credentials error",
errors: {
...defaultValues,
credentials: "Incorrect email or password",
credentials: "Invalid email or password",
},
};
case "CallbackRouteError":

View File

@@ -12,7 +12,7 @@ import { authenticate, createNewUser } from "@/actions/auth";
import { initiateSamlAuth } from "@/actions/integrations/saml";
import { PasswordRequirementsMessage } from "@/components/auth/oss/password-validator";
import { SocialButtons } from "@/components/auth/oss/social-buttons";
import { NotificationIcon, ProwlerExtended } from "@/components/icons";
import { ProwlerExtended } from "@/components/icons";
import { ThemeSwitch } from "@/components/ThemeSwitch";
import { useToast } from "@/components/ui";
import { CustomButton, CustomInput } from "@/components/ui/custom";
@@ -65,6 +65,8 @@ export const AuthForm = ({
const form = useForm<z.infer<typeof formSchema>>({
resolver: zodResolver(formSchema),
mode: "onSubmit",
reValidateMode: "onSubmit",
defaultValues: {
email: "",
password: "",
@@ -111,10 +113,11 @@ export const AuthForm = ({
if (result?.message === "Success") {
router.push("/");
} else if (result?.errors && "credentials" in result.errors) {
form.setError("email", {
type: "server",
message: result.errors.credentials ?? "Incorrect email or password",
});
const message =
result.errors.credentials ?? "Invalid email or password";
form.setError("email", { type: "server", message });
form.setError("password", { type: "server", message });
} else if (result?.message === "User email is not verified") {
router.push("/email-verification");
} else {
@@ -206,6 +209,8 @@ export const AuthForm = ({
<Form {...form}>
<form
noValidate
method="post"
className="flex flex-col gap-4"
onSubmit={form.handleSubmit(onSubmit)}
>
@@ -237,7 +242,8 @@ export const AuthForm = ({
label="Email"
placeholder="Enter your email"
isInvalid={!!form.formState.errors.email}
showFormMessage={type !== "sign-in"}
// Always show field validation message, including on sign-in
showFormMessage
/>
{!isSamlMode && (
<>
@@ -245,10 +251,8 @@ export const AuthForm = ({
control={form.control}
name="password"
password
isInvalid={
!!form.formState.errors.password ||
!!form.formState.errors.email
}
// Only mark invalid when the password field has an error
isInvalid={!!form.formState.errors.password}
/>
{type === "sign-up" && (
<PasswordRequirementsMessage
@@ -319,12 +323,7 @@ export const AuthForm = ({
)}
</>
)}
{type === "sign-in" && form.formState.errors?.email && (
<div className="flex flex-row items-center text-system-error">
<NotificationIcon size={16} />
<p className="text-small">Invalid email or password</p>
</div>
)}
<CustomButton
type="submit"
ariaLabel={type === "sign-in" ? "Log in" : "Sign up"}

View File

@@ -5,7 +5,11 @@ import { Snippet } from "@nextui-org/react";
import { CodeSnippet } from "@/components/ui/code-snippet/code-snippet";
import { CustomSection } from "@/components/ui/custom";
import { CustomLink } from "@/components/ui/custom/custom-link";
import { EntityInfoShort, InfoField } from "@/components/ui/entities";
import {
CopyLinkButton,
EntityInfoShort,
InfoField,
} from "@/components/ui/entities";
import { DateWithTime } from "@/components/ui/entities/date-with-time";
import { SeverityBadge } from "@/components/ui/table/severity-badge";
import { FindingProps, ProviderType } from "@/types";
@@ -42,6 +46,10 @@ export const FindingDetail = ({
const resource = finding.relationships.resource.attributes;
const scan = finding.relationships.scan.attributes;
const providerDetails = finding.relationships.provider.attributes;
const currentUrl = new URL(window.location.href);
const params = new URLSearchParams(currentUrl.search);
params.set("id", findingDetails.id);
const url = `${window.location.origin}${currentUrl.pathname}?${params.toString()}`;
return (
<div className="flex flex-col gap-6 rounded-lg">
@@ -50,6 +58,7 @@ export const FindingDetail = ({
<div>
<h2 className="line-clamp-2 text-lg font-medium leading-tight text-gray-800 dark:text-prowler-theme-pale/90">
{renderValue(attributes.check_metadata.checktitle)}
<CopyLinkButton url={url} />
</h2>
</div>
<div className="flex items-center gap-x-4">

View File

@@ -134,7 +134,7 @@ export const Chat = ({ hasConfig, isActive }: ChatProps) => {
// Global keyboard shortcut handler
useEffect(() => {
const handleKeyDown = (e: KeyboardEvent) => {
if ((e.metaKey || e.ctrlKey) && e.key === "Enter") {
if (e.key === "Enter" && !e.shiftKey) {
e.preventDefault();
if (messageValue?.trim()) {
onFormSubmit();
@@ -146,16 +146,6 @@ export const Chat = ({ hasConfig, isActive }: ChatProps) => {
return () => document.removeEventListener("keydown", handleKeyDown);
}, [messageValue, onFormSubmit]);
useEffect(() => {
if (messagesContainerRef.current && latestUserMsgRef.current) {
const container = messagesContainerRef.current;
const userMsg = latestUserMsgRef.current;
const containerPadding = 16; // p-4 in Tailwind = 16px
container.scrollTop =
userMsg.offsetTop - container.offsetTop - containerPadding;
}
}, [messages]);
const suggestedActions: SuggestedAction[] = [
{
title: "Are there any exposed S3",

View File

@@ -0,0 +1,41 @@
"use client";
import { Tooltip } from "@nextui-org/react";
import { CheckCheck, ExternalLink } from "lucide-react";
import { useState } from "react";
type CopyLinkButtonProps = {
url: string;
};
export const CopyLinkButton = ({ url }: CopyLinkButtonProps) => {
const [copied, setCopied] = useState(false);
const handleCopy = async () => {
try {
await navigator.clipboard.writeText(url);
setCopied(true);
setTimeout(() => setCopied(false), 500);
} catch (err) {
console.error("Failed to copy URL to clipboard:", err);
}
};
return (
<Tooltip content="Copy URL to clipboard" size="sm">
<button
type="button"
onClick={handleCopy}
className="ml-2 cursor-pointer p-0"
aria-label="Copy URL to clipboard"
>
{copied ? (
<CheckCheck size={16} className="inline" />
) : (
<ExternalLink size={16} className="inline" />
)}
</button>
</Tooltip>
);
};

View File

@@ -1,3 +1,4 @@
export * from "./copy-link-button";
export * from "./date-with-time";
export * from "./entity-info-short";
export * from "./get-provider-logo";

View File

@@ -105,7 +105,7 @@ export const Menu = ({ isOpen }: { isOpen: boolean }) => {
className={cn(
"w-full",
groupLabel ? "pt-2" : "",
"last:!mt-auto",
index === filteredMenuList.length - 2 && "!mt-auto",
)}
key={index}
>
@@ -138,8 +138,9 @@ export const Menu = ({ isOpen }: { isOpen: boolean }) => {
active,
submenus,
defaultOpen,
target,
tooltip,
} = menu;
return !submenus || submenus.length === 0 ? (
<div className="w-full" key={index}>
<TooltipProvider disableHoverableContent>
@@ -156,7 +157,7 @@ export const Menu = ({ isOpen }: { isOpen: boolean }) => {
className="mb-1 h-8 w-full justify-start"
asChild
>
<Link href={href}>
<Link href={href} target={target}>
<span
className={cn(isOpen === false ? "" : "mr-4")}
>
@@ -175,9 +176,9 @@ export const Menu = ({ isOpen }: { isOpen: boolean }) => {
</Link>
</Button>
</TooltipTrigger>
{isOpen === false && (
{tooltip && (
<TooltipContent side="right">
{label}
{tooltip}
</TooltipContent>
)}
</Tooltip>

634
ui/dependency-log.json Normal file
View File

@@ -0,0 +1,634 @@
[
{
"section": "dependencies",
"name": "@hookform/resolvers",
"from": "3.10.0",
"to": "3.10.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@langchain/langgraph-supervisor",
"from": "0.0.12",
"to": "0.0.12",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@langchain/openai",
"from": "0.6.9",
"to": "0.6.9",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@next/third-parties",
"from": "15.3.5",
"to": "15.3.5",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@nextui-org/react",
"from": "2.4.8",
"to": "2.4.8",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@nextui-org/system",
"from": "2.2.1",
"to": "2.2.1",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@nextui-org/theme",
"from": "2.2.5",
"to": "2.2.5",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@radix-ui/react-alert-dialog",
"from": "1.1.14",
"to": "1.1.14",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@radix-ui/react-dialog",
"from": "1.1.14",
"to": "1.1.14",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@radix-ui/react-dropdown-menu",
"from": "2.1.15",
"to": "2.1.15",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@radix-ui/react-icons",
"from": "1.3.2",
"to": "1.3.2",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@radix-ui/react-label",
"from": "2.1.7",
"to": "2.1.7",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@radix-ui/react-select",
"from": "2.2.5",
"to": "2.2.5",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@radix-ui/react-slot",
"from": "1.2.3",
"to": "1.2.3",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@radix-ui/react-toast",
"from": "1.2.14",
"to": "1.2.14",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@react-aria/ssr",
"from": "3.9.4",
"to": "3.9.4",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@react-aria/visually-hidden",
"from": "3.8.12",
"to": "3.8.12",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@tailwindcss/typography",
"from": "0.5.16",
"to": "0.5.16",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@tanstack/react-table",
"from": "8.21.3",
"to": "8.21.3",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "@types/js-yaml",
"from": "4.0.9",
"to": "4.0.9",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "ai",
"from": "4.3.16",
"to": "4.3.16",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "alert",
"from": "6.0.2",
"to": "6.0.2",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "class-variance-authority",
"from": "0.7.1",
"to": "0.7.1",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "clsx",
"from": "2.1.1",
"to": "2.1.1",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "date-fns",
"from": "4.1.0",
"to": "4.1.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "framer-motion",
"from": "11.18.2",
"to": "11.18.2",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "intl-messageformat",
"from": "10.7.16",
"to": "10.7.16",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "jose",
"from": "5.10.0",
"to": "5.10.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "js-yaml",
"from": "4.1.0",
"to": "4.1.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "jwt-decode",
"from": "4.0.0",
"to": "4.0.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "lucide-react",
"from": "0.543.0",
"to": "0.543.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "marked",
"from": "15.0.12",
"to": "15.0.12",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "next",
"from": "14.2.32",
"to": "14.2.32",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "next-auth",
"from": "5.0.0-beta.29",
"to": "5.0.0-beta.29",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "next-themes",
"from": "0.2.1",
"to": "0.2.1",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "radix-ui",
"from": "1.4.2",
"to": "1.4.2",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "react",
"from": "18.3.1",
"to": "18.3.1",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "react-dom",
"from": "18.3.1",
"to": "18.3.1",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "react-hook-form",
"from": "7.62.0",
"to": "7.62.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "react-markdown",
"from": "10.1.0",
"to": "10.1.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "recharts",
"from": "2.15.4",
"to": "2.15.4",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "rss-parser",
"from": "3.13.0",
"to": "3.13.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "server-only",
"from": "0.0.1",
"to": "0.0.1",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "shadcn",
"from": "3.2.1",
"to": "3.2.1",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "sharp",
"from": "0.33.5",
"to": "0.33.5",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "tailwind-merge",
"from": "3.3.1",
"to": "3.3.1",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "tailwindcss-animate",
"from": "1.0.7",
"to": "1.0.7",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "uuid",
"from": "11.1.0",
"to": "11.1.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "zod",
"from": "3.25.73",
"to": "3.25.73",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "zustand",
"from": "4.5.7",
"to": "4.5.7",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "devDependencies",
"name": "@iconify/react",
"from": "5.2.1",
"to": "5.2.1",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "@playwright/test",
"from": "1.53.2",
"to": "1.53.2",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "@types/node",
"from": "20.5.7",
"to": "20.5.7",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "@types/react",
"from": "18.3.3",
"to": "18.3.3",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "@types/react-dom",
"from": "18.3.0",
"to": "18.3.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "@types/uuid",
"from": "10.0.0",
"to": "10.0.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "@typescript-eslint/eslint-plugin",
"from": "7.18.0",
"to": "7.18.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "@typescript-eslint/parser",
"from": "7.18.0",
"to": "7.18.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "autoprefixer",
"from": "10.4.19",
"to": "10.4.19",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "eslint",
"from": "8.57.1",
"to": "8.57.1",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "eslint-config-next",
"from": "14.2.32",
"to": "14.2.32",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "eslint-config-prettier",
"from": "10.1.5",
"to": "10.1.5",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "eslint-plugin-import",
"from": "2.32.0",
"to": "2.32.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "eslint-plugin-jsx-a11y",
"from": "6.10.2",
"to": "6.10.2",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "eslint-plugin-node",
"from": "11.1.0",
"to": "11.1.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "eslint-plugin-prettier",
"from": "5.5.1",
"to": "5.5.1",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "eslint-plugin-react",
"from": "7.37.5",
"to": "7.37.5",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "eslint-plugin-react-hooks",
"from": "4.6.2",
"to": "4.6.2",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "eslint-plugin-security",
"from": "3.0.1",
"to": "3.0.1",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "eslint-plugin-simple-import-sort",
"from": "12.1.1",
"to": "12.1.1",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "eslint-plugin-unused-imports",
"from": "3.2.0",
"to": "3.2.0",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "husky",
"from": "9.1.7",
"to": "9.1.7",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "lint-staged",
"from": "15.5.2",
"to": "15.5.2",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "postcss",
"from": "8.4.38",
"to": "8.4.38",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "prettier",
"from": "3.6.2",
"to": "3.6.2",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "prettier-plugin-tailwindcss",
"from": "0.6.13",
"to": "0.6.13",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "tailwind-variants",
"from": "0.1.20",
"to": "0.1.20",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "tailwindcss",
"from": "3.4.3",
"to": "3.4.3",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
},
{
"section": "devDependencies",
"name": "typescript",
"from": "5.5.4",
"to": "5.5.4",
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.554Z"
}
]

View File

@@ -1,4 +1,3 @@
import { produce } from "immer";
import { create } from "zustand";
import { createJSONStorage, persist } from "zustand/middleware";
@@ -11,7 +10,6 @@ type SidebarStore = {
setIsOpen: (isOpen: boolean) => void;
setIsHover: (isHover: boolean) => void;
getOpenState: () => boolean;
setSettings: (settings: Partial<SidebarSettings>) => void;
};
export const useSidebar = create(
@@ -33,13 +31,6 @@ export const useSidebar = create(
const state = get();
return state.isOpen || (state.settings.isHoverOpen && state.isHover);
},
setSettings: (settings: Partial<SidebarSettings>) => {
set(
produce((state: SidebarStore) => {
state.settings = { ...state.settings, ...settings };
}),
);
},
}),
{
name: "sidebar",

View File

@@ -21,6 +21,7 @@ import {
Warehouse,
} from "lucide-react";
import { ProwlerShort } from "@/components/icons";
import {
APIdocIcon,
AWSIcon,
@@ -197,6 +198,18 @@ export const getMenuList = ({
},
],
},
{
groupLabel: "",
menus: [
{
href: "https://hub.prowler.com/",
label: "Prowler Hub",
icon: ProwlerShort,
target: "_blank",
tooltip: "Looking for all available checks? learn more.",
},
],
},
{
groupLabel: "",
menus: [

3205
ui/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,93 +1,4 @@
{
"dependencies": {
"@hookform/resolvers": "^3.9.0",
"@langchain/langgraph-supervisor": "^0.0.12",
"@langchain/openai": "^0.6.9",
"@next/third-parties": "^15.3.3",
"@nextui-org/react": "2.4.8",
"@nextui-org/system": "2.2.1",
"@nextui-org/theme": "2.2.5",
"@radix-ui/react-alert-dialog": "^1.1.1",
"@radix-ui/react-dialog": "^1.1.2",
"@radix-ui/react-dropdown-menu": "^2.1.1",
"@radix-ui/react-icons": "^1.3.0",
"@radix-ui/react-label": "^2.1.0",
"@radix-ui/react-select": "^2.1.1",
"@radix-ui/react-slot": "^1.1.1",
"@radix-ui/react-toast": "^1.2.4",
"@react-aria/ssr": "3.9.4",
"@react-aria/visually-hidden": "3.8.12",
"@tailwindcss/typography": "^0.5.16",
"@tanstack/react-table": "^8.19.3",
"@types/js-yaml": "^4.0.9",
"add": "^2.0.6",
"ai": "^4.3.16",
"alert": "^6.0.2",
"bcryptjs": "^2.4.3",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.1",
"cmdk": "^1.0.0",
"date-fns": "^4.1.0",
"framer-motion": "^11.16.0",
"immer": "^10.1.1",
"intl-messageformat": "^10.5.0",
"jose": "^5.9.3",
"js-yaml": "^4.1.0",
"jwt-decode": "^4.0.0",
"lucide-react": "^0.471.0",
"marked": "^15.0.12",
"next": "^14.2.32",
"next-auth": "^5.0.0-beta.25",
"next-themes": "^0.2.1",
"radix-ui": "^1.1.3",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-hook-form": "^7.52.2",
"react-markdown": "^10.1.0",
"recharts": "^2.15.2",
"rss-parser": "^3.13.0",
"server-only": "^0.0.1",
"shadcn-ui": "^0.2.3",
"sharp": "^0.33.5",
"tailwind-merge": "^3.2.0",
"tailwindcss-animate": "^1.0.7",
"uuid": "^11.0.5",
"zod": "^3.23.8",
"zustand": "^4.5.5"
},
"devDependencies": {
"@iconify/react": "^5.2.0",
"@playwright/test": "^1.53.1",
"@types/bcryptjs": "^2.4.6",
"@types/node": "20.5.7",
"@types/react": "18.3.3",
"@types/react-dom": "18.3.0",
"@types/uuid": "^10.0.0",
"@typescript-eslint/eslint-plugin": "^7.10.0",
"@typescript-eslint/parser": "^7.10.0",
"autoprefixer": "10.4.19",
"eslint": "^8.56.0",
"eslint-config-next": "^14.2.32",
"eslint-config-prettier": "^10.0.1",
"eslint-plugin-import": "^2.31.0",
"eslint-plugin-jsx-a11y": "^6.9.0",
"eslint-plugin-node": "^11.1.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-react": "^7.23.2",
"eslint-plugin-react-hooks": "^4.6.0",
"eslint-plugin-security": "^3.0.1",
"eslint-plugin-simple-import-sort": "^12.1.1",
"eslint-plugin-unused-imports": "^3.2.0",
"husky": "^9.0.11",
"lint-staged": "^15.2.7",
"postcss": "8.4.38",
"prettier": "^3.3.3",
"prettier-plugin-tailwindcss": "^0.6.8",
"tailwind-variants": "0.1.20",
"tailwindcss": "3.4.3",
"typescript": "5.5.4",
"usehooks-ts": "^3.1.0"
},
"name": "prowler-next-app",
"private": true,
"scripts": {
@@ -95,6 +6,8 @@
"build": "next build",
"start": "next start",
"start:standalone": "node .next/standalone/server.js",
"deps:log": "node scripts/update-dependency-log.js",
"postinstall": "node -e \"const fs=require('fs'); if(fs.existsSync('scripts/update-dependency-log.js')) require('./scripts/update-dependency-log.js'); else console.log('skip deps:log (script missing)');\"",
"typecheck": "tsc",
"healthcheck": "npm run typecheck && npm run lint:check",
"lint:check": "eslint . --ext .ts,.tsx -c .eslintrc.cjs",
@@ -109,6 +22,89 @@
"test:e2e:report": "playwright show-report",
"test:e2e:install": "playwright install"
},
"dependencies": {
"@hookform/resolvers": "3.10.0",
"@langchain/langgraph-supervisor": "0.0.12",
"@langchain/openai": "0.6.9",
"@next/third-parties": "15.3.5",
"@nextui-org/react": "2.4.8",
"@nextui-org/system": "2.2.1",
"@nextui-org/theme": "2.2.5",
"@radix-ui/react-alert-dialog": "1.1.14",
"@radix-ui/react-dialog": "1.1.14",
"@radix-ui/react-dropdown-menu": "2.1.15",
"@radix-ui/react-icons": "1.3.2",
"@radix-ui/react-label": "2.1.7",
"@radix-ui/react-select": "2.2.5",
"@radix-ui/react-slot": "1.2.3",
"@radix-ui/react-toast": "1.2.14",
"@react-aria/ssr": "3.9.4",
"@react-aria/visually-hidden": "3.8.12",
"@tailwindcss/typography": "0.5.16",
"@tanstack/react-table": "8.21.3",
"@types/js-yaml": "4.0.9",
"ai": "4.3.16",
"alert": "6.0.2",
"class-variance-authority": "0.7.1",
"clsx": "2.1.1",
"date-fns": "4.1.0",
"framer-motion": "11.18.2",
"intl-messageformat": "10.7.16",
"jose": "5.10.0",
"js-yaml": "4.1.0",
"jwt-decode": "4.0.0",
"lucide-react": "0.543.0",
"marked": "15.0.12",
"next": "14.2.32",
"next-auth": "5.0.0-beta.29",
"next-themes": "0.2.1",
"radix-ui": "1.4.2",
"react": "18.3.1",
"react-dom": "18.3.1",
"react-hook-form": "7.62.0",
"react-markdown": "10.1.0",
"recharts": "2.15.4",
"rss-parser": "3.13.0",
"server-only": "0.0.1",
"shadcn": "3.2.1",
"sharp": "0.33.5",
"tailwind-merge": "3.3.1",
"tailwindcss-animate": "1.0.7",
"uuid": "11.1.0",
"zod": "3.25.73",
"zustand": "4.5.7"
},
"devDependencies": {
"@iconify/react": "5.2.1",
"@playwright/test": "1.53.2",
"@types/node": "20.5.7",
"@types/react": "18.3.3",
"@types/react-dom": "18.3.0",
"@types/uuid": "10.0.0",
"@typescript-eslint/eslint-plugin": "7.18.0",
"@typescript-eslint/parser": "7.18.0",
"autoprefixer": "10.4.19",
"eslint": "8.57.1",
"eslint-config-next": "14.2.32",
"eslint-config-prettier": "10.1.5",
"eslint-plugin-import": "2.32.0",
"eslint-plugin-jsx-a11y": "6.10.2",
"eslint-plugin-node": "11.1.0",
"eslint-plugin-prettier": "5.5.1",
"eslint-plugin-react": "7.37.5",
"eslint-plugin-react-hooks": "4.6.2",
"eslint-plugin-security": "3.0.1",
"eslint-plugin-simple-import-sort": "12.1.1",
"eslint-plugin-unused-imports": "3.2.0",
"husky": "9.1.7",
"lint-staged": "15.5.2",
"postcss": "8.4.38",
"prettier": "3.6.2",
"prettier-plugin-tailwindcss": "0.6.13",
"tailwind-variants": "0.1.20",
"tailwindcss": "3.4.3",
"typescript": "5.5.4"
},
"overrides": {
"@react-types/shared": "3.26.0"
},

View File

@@ -0,0 +1,95 @@
const fs = require('fs');
const path = require('path');
// Read a UTF-8 JSON document from disk and return the parsed value.
function readJSON(p) {
  const text = fs.readFileSync(p, 'utf8');
  return JSON.parse(text);
}
// Resolve the version actually installed in node_modules for a package
// (scoped names like "@scope/pkg" become nested directories), or null when
// the package or its manifest cannot be read.
function getInstalledVersion(pkgName) {
  try {
    const manifest = path.join(
      'node_modules',
      ...pkgName.split('/'),
      'package.json',
    );
    return JSON.parse(fs.readFileSync(manifest, 'utf8')).version;
  } catch (e) {
    return null;
  }
}
// Turn one package.json section ({ name: declaredRange }) into log entries.
// Absent sections (null/undefined) produce no entries.
function collect(sectionName, obj) {
  if (!obj) return [];
  const entries = [];
  for (const [name, declared] of Object.entries(obj)) {
    entries.push({
      section: sectionName,
      name,
      from: declared,
      to: getInstalledVersion(name) || null,
      strategy: 'installed',
    });
  }
  return entries;
}
// Regenerate dependency-log.json in the current working directory from
// package.json plus what is actually installed in node_modules, preserving
// timestamps of unchanged entries so the file only diffs when versions move.
function main() {
  // If node_modules is missing, skip to avoid generating noisy diffs
  if (!fs.existsSync('node_modules')) {
    console.log('Skip: node_modules not found. Run npm install first.');
    return;
  }
  const pkg = readJSON('package.json');
  // One entry per declared package, across both sections.
  const entries = [
    ...collect('dependencies', pkg.dependencies),
    ...collect('devDependencies', pkg.devDependencies),
  ];
  // Stable sort by section then name
  entries.sort((a, b) =>
    a.section === b.section ? a.name.localeCompare(b.name) : a.section.localeCompare(b.section)
  );
  const outPath = path.join(process.cwd(), 'dependency-log.json');
  // Merge with previous to preserve generatedAt when unchanged
  let prevMap = new Map();
  if (fs.existsSync(outPath)) {
    try {
      const prev = JSON.parse(fs.readFileSync(outPath, 'utf8'));
      // Index previous entries by "section::name" for O(1) lookup below.
      for (const e of prev) {
        prevMap.set(`${e.section}::${e.name}`, e);
      }
    } catch {}
    // Unreadable/corrupt previous log: fall through with an empty map,
    // treating every entry as new.
  }
  const now = new Date().toISOString();
  const merged = entries.map((e) => {
    const key = `${e.section}::${e.name}`;
    const prev = prevMap.get(key);
    if (!prev) {
      // New entry: keep declared as from
      return { ...e, generatedAt: now };
    }
    // If installed version changed, set from to previous installed version
    if (prev.to !== e.to) {
      return { ...e, from: prev.to, generatedAt: now };
    }
    // Otherwise preserve previous 'from' and timestamp
    return { ...e, from: prev.from, generatedAt: prev.generatedAt || now };
  });
  const nextContent = JSON.stringify(merged, null, 2) + '\n';
  // Byte-compare against the existing file and bail out early so an
  // unchanged log does not get rewritten (keeps mtimes and VCS diffs clean).
  if (fs.existsSync(outPath)) {
    try {
      const prevContent = fs.readFileSync(outPath, 'utf8');
      if (prevContent === nextContent) {
        console.log(`No changes for ${outPath} (entries: ${entries.length}).`);
        return;
      }
    } catch {}
  }
  fs.writeFileSync(outPath, nextContent);
  console.log(`Updated ${outPath} with ${entries.length} entries.`);
}

main();

View File

@@ -48,22 +48,20 @@ test.describe("Login Flow", () => {
test("should handle empty form submission", async ({ page }) => {
// Submit empty form
await submitLoginForm(page);
await verifyLoginError(page, ERROR_MESSAGES.INVALID_CREDENTIALS);
await verifyLoginError(page, ERROR_MESSAGES.INVALID_EMAIL);
// Verify we're still on login page
await expect(page).toHaveURL(URLS.LOGIN);
});
/*
TODO: This test is failing, need UI work before.
test("should validate email format", async ({ page }) => {
// Attempt login with invalid email format
await login(page, TEST_CREDENTIALS.INVALID_EMAIL_FORMAT);
// Verify error message (application shows generic error for invalid email format too)
await verifyLoginError(page, ERROR_MESSAGES.INVALID_CREDENTIALS);
// Verify field-level email validation message
await verifyLoginError(page, ERROR_MESSAGES.INVALID_EMAIL);
// Verify we're still on login page
await expect(page).toHaveURL(URLS.LOGIN);
});
*/
test("should toggle SAML SSO mode", async ({ page }) => {
// Toggle to SAML mode

View File

@@ -2,6 +2,7 @@ import { Page, expect } from "@playwright/test";
export const ERROR_MESSAGES = {
INVALID_CREDENTIALS: "Invalid email or password",
INVALID_EMAIL: "Please enter a valid email address.",
} as const;
export const URLS = {
@@ -69,7 +70,8 @@ export async function verifyLoginError(
page: Page,
errorMessage = "Invalid email or password",
) {
await expect(page.getByText(errorMessage)).toBeVisible();
// There may be multiple field-level errors with the same text; assert at least one is visible
await expect(page.getByText(errorMessage).first()).toBeVisible();
await expect(page).toHaveURL("/sign-in");
}

View File

@@ -96,7 +96,12 @@ export const authFormSchema = (type: string) =>
}),
// Fields for Sign In and Sign Up
email: z.string().email(),
// Trim and normalize email, and provide consistent message
email: z
.string()
.trim()
.toLowerCase()
.email({ message: "Please enter a valid email address." }),
password: type === "sign-in" ? z.string() : validatePassword(),
isSamlMode: z.boolean().optional(),
})

View File

@@ -31,6 +31,8 @@ export type MenuProps = {
icon: IconComponent;
submenus?: SubmenuProps[];
defaultOpen?: boolean;
target?: string;
tooltip?: string;
};
export type GroupProps = {