Compare commits

...

34 Commits

Author SHA1 Message Date
pedrooot 261841e786 fix(docs): rewrite introduction 2025-07-28 14:18:15 +02:00
pedrooot f598a15ab9 fix(docs): remove typo from getting started - github 2025-07-28 14:14:52 +02:00
Aviad Levy a85b89ffb5 fix(ec2): add check that protocol is matched in security group checks (#8374)
Co-authored-by: MrCloudSec <hello@mistercloudsec.com>
2025-07-28 19:53:08 +08:00
César Arroba 87da11b712 chore(gha): delete repo limitation for bump workflow (#8379) 2025-07-28 13:22:19 +02:00
César Arroba 8b57f178e0 chore(gha): improve e2e pipeline (#8378) 2025-07-28 13:22:12 +02:00
Prowler Bot 7830ed8b9f chore(regions_update): Changes in regions for AWS services (#8376)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2025-07-28 17:56:48 +08:00
Kay Agahd d4e66c4a6f chore(sqs): clean up code (#8366) 2025-07-25 20:10:34 +08:00
Rubén De la Torre Vico 1cfe610d47 feat(azure/vm): add new check vm_scaleset_not_empty (#8192)
Co-authored-by: Sergio Garcia <hello@mistercloudsec.com>
2025-07-25 18:42:03 +08:00
Rubén De la Torre Vico d9a9236ab7 feat(azure/vm): add new check vm_desired_sku_size (#8191)
Co-authored-by: Sergio Garcia <hello@mistercloudsec.com>
2025-07-25 17:51:01 +08:00
Hugo Pereira Brito 285aea3458 fix(docs): change Exchange Administrator role to Global Reader for M365 (#8360) 2025-07-25 15:45:30 +08:00
César Arroba b051aeeb64 chore(gha): automate e2e tests with new workflow (#8361) 2025-07-24 16:54:01 +02:00
Pedro Martín b99dce6a43 feat(azure): add CIS 4.0 (#7782) 2025-07-24 22:29:46 +08:00
Andoni Alonso 04749c1da1 fix(aws): sns_topics_not_publicly_accessible false positive with aws:SourceArn conditions (#8340)
Co-authored-by: MrCloudSec <hello@mistercloudsec.com>
2025-07-24 18:03:30 +08:00
Chandrapal Badshah 44d70f8467 fix(lighthouse): update prompt and tool schema for checks tool (#8265)
Co-authored-by: Chandrapal Badshah <12944530+Chan9390@users.noreply.github.com>
2025-07-24 10:50:36 +02:00
Andoni Alonso 95791a9909 chore(aws): replace known errors with warnings (#8347)
Co-authored-by: Sergio Garcia <hello@mistercloudsec.com>
2025-07-24 15:34:45 +08:00
sumit-tft ad0b8a4208 feat(ui): create CustomLink component and refactor links to use it (#8341)
Co-authored-by: alejandrobailo <alejandrobailo94@gmail.com>
2025-07-23 19:10:51 +02:00
Cole Murray 5669a42039 fix(wazuh): patch command injection vulnerability in prowler-wrapper.py (#8331)
Co-authored-by: Test User <test@example.com>
Co-authored-by: MrCloudSec <hello@mistercloudsec.com>
2025-07-23 16:06:55 +02:00
Kay Agahd 83b328ea92 fix(aws): avoid false positives in SQS encryption check for ephemeral queues (#8330)
Co-authored-by: Hugo Pereira Brito <101209179+HugoPBrito@users.noreply.github.com>
2025-07-23 21:03:02 +08:00
Alejandro Bailo a6c88c0d9e test: timeout updated for E2E (#8351) 2025-07-23 13:11:32 +02:00
Sergio Garcia 922f9d2f91 docs(gcp): update GCP permissions (#8350) 2025-07-23 17:43:42 +08:00
Rubén De la Torre Vico a69d0d16c0 fix(azure/storage): handle when Azure API set values to None (#8325)
Co-authored-by: Pedro Martín <pedromarting3@gmail.com>
Co-authored-by: Sergio Garcia <hello@mistercloudsec.com>
2025-07-23 17:11:04 +08:00
Alejandro Bailo 676cc44fe2 feat: env keys behavior updated (#8348) 2025-07-23 10:44:28 +02:00
Alejandro Bailo 3840e40870 test(e2e): Sign-in (#8337)
Co-authored-by: César Arroba <cesar@prowler.com>
2025-07-22 18:04:54 +02:00
dependabot[bot] ab2d57554a chore(deps): bump form-data from 4.0.3 to 4.0.4 in /ui (#8346)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-22 17:53:32 +02:00
César Arroba cbb5b21e6c chore(gha): e2e tests pipeline with API services (#8338) 2025-07-22 17:49:23 +02:00
Sergio Garcia 1efd5668ce feat(api): add GitHub provider support (#8271) 2025-07-22 23:26:02 +08:00
Sergio Garcia ca86aeb1d7 feat(aws): new check bedrock_api_key_no_administrative_privileges (#8321) 2025-07-22 22:06:17 +08:00
Víctor Fernández Poyatos 4f2a8b71bb feat(performance): resources scenario (#8345) 2025-07-22 13:01:19 +02:00
Prowler Bot 3b0cb3db85 chore(regions_update): Changes in regions for AWS services (#8333)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2025-07-22 17:23:24 +08:00
Víctor Fernández Poyatos 00c527ff79 chore: update Prowler changelog for v5.9.2 (#8342) 2025-07-22 10:53:22 +02:00
Víctor Fernández Poyatos ab348d5752 feat(resources): Optimize findings prefetching during resource views (#8336) 2025-07-21 16:33:07 +02:00
Daniel Barranquero dd713351dc fix(defender): avoid duplicated findings in check defender_domain_dkim_enabled (#8334) 2025-07-21 13:07:26 +02:00
sumit-tft fa722f1dc7 feat(ui): add 32-character limit validation for scan name in create a… (#8319)
Co-authored-by: alejandrobailo <alejandrobailo94@gmail.com>
2025-07-21 10:00:25 +02:00
Pedro Martín b0cc3978d0 feat(docs): add info about updating Prowler App (#8320) 2025-07-21 07:44:07 +02:00
118 changed files with 7757 additions and 410 deletions
-1
@@ -12,7 +12,6 @@ env:
jobs:
bump-version:
name: Bump Version
if: github.repository == 'prowler-cloud/prowler'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+98
@@ -0,0 +1,98 @@
name: UI - E2E Tests
on:
pull_request:
branches:
- master
- "v5.*"
paths:
- '.github/workflows/ui-e2e-tests.yml'
- 'ui/**'
jobs:
e2e-tests:
if: github.repository == 'prowler-cloud/prowler'
runs-on: ubuntu-latest
env:
AUTH_SECRET: 'fallback-ci-secret-for-testing'
AUTH_TRUST_HOST: true
NEXTAUTH_URL: 'http://localhost:3000'
NEXT_PUBLIC_API_BASE_URL: 'http://localhost:8080/api/v1'
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Start API services
run: |
# Override docker-compose image tag to use latest instead of stable
# This overrides any PROWLER_API_VERSION set in .env file
export PROWLER_API_VERSION=latest
echo "Using PROWLER_API_VERSION=${PROWLER_API_VERSION}"
docker compose up -d api worker worker-beat
- name: Wait for API to be ready
run: |
echo "Waiting for prowler-api..."
timeout=150 # 2.5 minutes max
elapsed=0
while [ $elapsed -lt $timeout ]; do
if curl -s ${NEXT_PUBLIC_API_BASE_URL}/docs >/dev/null 2>&1; then
echo "Prowler API is ready!"
exit 0
fi
echo "Waiting for prowler-api... (${elapsed}s elapsed)"
sleep 5
elapsed=$((elapsed + 5))
done
echo "Timeout waiting for prowler-api to start"
exit 1
- name: Load database fixtures for E2E tests
run: |
docker compose exec -T api sh -c '
echo "Loading all fixtures from api/fixtures/dev/..."
for fixture in api/fixtures/dev/*.json; do
if [ -f "$fixture" ]; then
echo "Loading $fixture"
poetry run python manage.py loaddata "$fixture" --database admin
fi
done
echo "All database fixtures loaded successfully!"
'
- name: Setup Node.js environment
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
with:
node-version: '20.x'
cache: 'npm'
cache-dependency-path: './ui/package-lock.json'
- name: Install UI dependencies
working-directory: ./ui
run: npm ci
- name: Build UI application
working-directory: ./ui
run: npm run build
- name: Cache Playwright browsers
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
id: playwright-cache
with:
path: ~/.cache/ms-playwright
key: ${{ runner.os }}-playwright-${{ hashFiles('ui/package-lock.json') }}
restore-keys: |
${{ runner.os }}-playwright-
- name: Install Playwright browsers
working-directory: ./ui
if: steps.playwright-cache.outputs.cache-hit != 'true'
run: npm run test:e2e:install
- name: Run E2E tests
working-directory: ./ui
run: npm run test:e2e
- name: Upload test reports
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
if: failure()
with:
name: playwright-report
path: ui/playwright-report/
retention-days: 30
- name: Cleanup services
if: always()
run: |
echo "Shutting down services..."
docker compose down -v || true
echo "Cleanup completed"
-46
@@ -46,52 +46,6 @@ jobs:
working-directory: ./ui
run: npm run build
e2e-tests:
runs-on: ubuntu-latest
env:
AUTH_SECRET: 'fallback-ci-secret-for-testing'
AUTH_TRUST_HOST: true
NEXTAUTH_URL: http://localhost:3000
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: Setup Node.js
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
with:
node-version: '20.x'
cache: 'npm'
cache-dependency-path: './ui/package-lock.json'
- name: Install dependencies
working-directory: ./ui
run: npm ci
- name: Cache Playwright browsers
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
id: playwright-cache
with:
path: ~/.cache/ms-playwright
key: ${{ runner.os }}-playwright-${{ hashFiles('ui/package-lock.json') }}
restore-keys: |
${{ runner.os }}-playwright-
- name: Install Playwright browsers
working-directory: ./ui
if: steps.playwright-cache.outputs.cache-hit != 'true'
run: npm run test:e2e:install
- name: Build the application
working-directory: ./ui
run: npm run build
- name: Run Playwright tests
working-directory: ./ui
run: npm run test:e2e
- name: Upload Playwright report
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
if: failure()
with:
name: playwright-report
path: ui/playwright-report/
retention-days: 30
test-container-build:
runs-on: ubuntu-latest
steps:
+1 -1
@@ -88,7 +88,7 @@ prowler dashboard
|---|---|---|---|---|
| AWS | 567 | 82 | 36 | 10 |
| GCP | 79 | 13 | 10 | 3 |
| Azure | 142 | 18 | 10 | 3 |
| Azure | 142 | 18 | 11 | 3 |
| Kubernetes | 83 | 7 | 5 | 7 |
| GitHub | 16 | 2 | 1 | 0 |
| M365 | 69 | 7 | 3 | 2 |
+14
@@ -2,6 +2,20 @@
All notable changes to the **Prowler API** are documented in this file.
## [1.11.0] (Prowler UNRELEASED)
### Added
- GitHub provider support [(#8271)](https://github.com/prowler-cloud/prowler/pull/8271)
---
## [1.10.2] (Prowler v5.9.2)
### Changed
- Optimized queries for resources views [(#8336)](https://github.com/prowler-cloud/prowler/pull/8336)
---
## [v1.10.1] (Prowler v5.9.1)
### Fixed
+1 -1
@@ -38,7 +38,7 @@ name = "prowler-api"
package-mode = false
# Needed for the SDK compatibility
requires-python = ">=3.11,<3.13"
version = "1.10.1"
version = "1.10.2"
[project.scripts]
celery = "src.backend.config.settings.celery"
@@ -24,5 +24,18 @@
"is_active": true,
"date_joined": "2024-09-18T09:04:20.850Z"
}
},
{
"model": "api.user",
"pk": "6d4f8a91-3c2e-4b5a-8f7d-1e9c5b2a4d6f",
"fields": {
"password": "pbkdf2_sha256$870000$Z63pGJ7nre48hfcGbk5S0O$rQpKczAmijs96xa+gPVJifpT3Fetb8DOusl5Eq6gxac=",
"last_login": null,
"name": "E2E Test User",
"email": "e2e@prowler.com",
"company_name": "Prowler E2E Tests",
"is_active": true,
"date_joined": "2024-01-01T00:00:00.850Z"
}
}
]
@@ -46,5 +46,24 @@
"role": "member",
"date_joined": "2024-09-19T11:03:59.712Z"
}
},
{
"model": "api.tenant",
"pk": "7c8f94a3-e2d1-4b3a-9f87-2c4d5e6f1a2b",
"fields": {
"inserted_at": "2024-01-01T00:00:00Z",
"updated_at": "2024-01-01T00:00:00Z",
"name": "E2E Test Tenant"
}
},
{
"model": "api.membership",
"pk": "9b1a2c3d-4e5f-6789-abc1-23456789def0",
"fields": {
"user": "6d4f8a91-3c2e-4b5a-8f7d-1e9c5b2a4d6f",
"tenant": "7c8f94a3-e2d1-4b3a-9f87-2c4d5e6f1a2b",
"role": "owner",
"date_joined": "2024-01-01T00:00:00.000Z"
}
}
]
@@ -149,5 +149,32 @@
"user": "8b38e2eb-6689-4f1e-a4ba-95b275130200",
"inserted_at": "2024-11-20T15:36:14.302Z"
}
},
{
"model": "api.role",
"pk": "a5b6c7d8-9e0f-1234-5678-90abcdef1234",
"fields": {
"tenant": "7c8f94a3-e2d1-4b3a-9f87-2c4d5e6f1a2b",
"name": "e2e_admin",
"manage_users": true,
"manage_account": true,
"manage_billing": true,
"manage_providers": true,
"manage_integrations": true,
"manage_scans": true,
"unlimited_visibility": true,
"inserted_at": "2024-01-01T00:00:00.000Z",
"updated_at": "2024-01-01T00:00:00.000Z"
}
},
{
"model": "api.userrolerelationship",
"pk": "f1e2d3c4-b5a6-9876-5432-10fedcba9876",
"fields": {
"tenant": "7c8f94a3-e2d1-4b3a-9f87-2c4d5e6f1a2b",
"role": "a5b6c7d8-9e0f-1234-5678-90abcdef1234",
"user": "6d4f8a91-3c2e-4b5a-8f7d-1e9c5b2a4d6f",
"inserted_at": "2024-01-01T00:00:00.000Z"
}
}
]
@@ -0,0 +1,30 @@
from functools import partial
from django.db import migrations
from api.db_utils import create_index_on_partitions, drop_index_on_partitions
class Migration(migrations.Migration):
atomic = False
dependencies = [
("api", "0039_resource_resources_failed_findings_idx"),
]
operations = [
migrations.RunPython(
partial(
create_index_on_partitions,
parent_table="resource_finding_mappings",
index_name="rfm_tenant_resource_idx",
columns="tenant_id, resource_id",
method="BTREE",
),
reverse_code=partial(
drop_index_on_partitions,
parent_table="resource_finding_mappings",
index_name="rfm_tenant_resource_idx",
),
),
]
@@ -0,0 +1,17 @@
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("api", "0040_rfm_tenant_resource_index_partitions"),
]
operations = [
migrations.AddIndex(
model_name="resourcefindingmapping",
index=models.Index(
fields=["tenant_id", "resource_id"],
name="rfm_tenant_resource_idx",
),
),
]
@@ -0,0 +1,23 @@
from django.contrib.postgres.operations import AddIndexConcurrently
from django.db import migrations, models
class Migration(migrations.Migration):
atomic = False
dependencies = [
("api", "0041_rfm_tenant_resource_parent_partitions"),
("django_celery_beat", "0019_alter_periodictasks_options"),
]
operations = [
AddIndexConcurrently(
model_name="scan",
index=models.Index(
condition=models.Q(("state", "completed")),
fields=["tenant_id", "provider_id", "-inserted_at"],
include=("id",),
name="scans_prov_ins_desc_idx",
),
),
]
@@ -0,0 +1,33 @@
# Generated by Django 5.1.7 on 2025-07-09 14:44
from django.db import migrations
import api.db_utils
class Migration(migrations.Migration):
dependencies = [
("api", "0042_scan_scans_prov_ins_desc_idx"),
]
operations = [
migrations.AlterField(
model_name="provider",
name="provider",
field=api.db_utils.ProviderEnumField(
choices=[
("aws", "AWS"),
("azure", "Azure"),
("gcp", "GCP"),
("kubernetes", "Kubernetes"),
("m365", "M365"),
("github", "GitHub"),
],
default="aws",
),
),
migrations.RunSQL(
"ALTER TYPE provider ADD VALUE IF NOT EXISTS 'github';",
reverse_sql=migrations.RunSQL.noop,
),
]
+22
@@ -205,6 +205,7 @@ class Provider(RowLevelSecurityProtectedModel):
GCP = "gcp", _("GCP")
KUBERNETES = "kubernetes", _("Kubernetes")
M365 = "m365", _("M365")
GITHUB = "github", _("GitHub")
@staticmethod
def validate_aws_uid(value):
@@ -265,6 +266,16 @@ class Provider(RowLevelSecurityProtectedModel):
pointer="/data/attributes/uid",
)
@staticmethod
def validate_github_uid(value):
if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9-]{0,38}$", value):
raise ModelValidationError(
detail="GitHub provider ID must be a valid GitHub username or organization name (1-39 characters, "
"starting with alphanumeric, containing only alphanumeric characters and hyphens).",
code="github-uid",
pointer="/data/attributes/uid",
)
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
updated_at = models.DateTimeField(auto_now=True, editable=False)
@@ -476,6 +487,13 @@ class Scan(RowLevelSecurityProtectedModel):
condition=Q(state=StateChoices.COMPLETED),
name="scans_prov_state_ins_desc_idx",
),
# TODO This might replace `scans_prov_state_ins_desc_idx` completely. Review usage
models.Index(
fields=["tenant_id", "provider_id", "-inserted_at"],
condition=Q(state=StateChoices.COMPLETED),
include=["id"],
name="scans_prov_ins_desc_idx",
),
]
class JSONAPIMeta:
@@ -860,6 +878,10 @@ class ResourceFindingMapping(PostgresPartitionedModel, RowLevelSecurityProtected
fields=["tenant_id", "finding_id"],
name="rfm_tenant_finding_idx",
),
models.Index(
fields=["tenant_id", "resource_id"],
name="rfm_tenant_resource_idx",
),
]
constraints = [
models.UniqueConstraint(
+161 -1
@@ -1,7 +1,7 @@
openapi: 3.0.3
info:
title: Prowler API
version: 1.10.1
version: 1.11.0
description: |-
Prowler API specification.
@@ -544,6 +544,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -552,6 +553,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
- in: query
name: filter[provider_type__in]
schema:
@@ -562,6 +564,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -572,6 +575,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
explode: false
style: form
- in: query
@@ -1061,6 +1065,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -1069,6 +1074,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
- in: query
name: filter[provider_type__in]
schema:
@@ -1079,6 +1085,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -1089,6 +1096,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
explode: false
style: form
- in: query
@@ -1486,6 +1494,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -1494,6 +1503,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
- in: query
name: filter[provider_type__in]
schema:
@@ -1504,6 +1514,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -1514,6 +1525,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
explode: false
style: form
- in: query
@@ -1909,6 +1921,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -1917,6 +1930,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
- in: query
name: filter[provider_type__in]
schema:
@@ -1927,6 +1941,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -1937,6 +1952,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
explode: false
style: form
- in: query
@@ -2320,6 +2336,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -2328,6 +2345,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
- in: query
name: filter[provider_type__in]
schema:
@@ -2338,6 +2356,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -2348,6 +2367,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
explode: false
style: form
- in: query
@@ -3121,6 +3141,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -3129,6 +3150,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
- in: query
name: filter[provider_type__in]
schema:
@@ -3139,6 +3161,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -3149,6 +3172,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
explode: false
style: form
- in: query
@@ -3282,6 +3306,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -3290,6 +3315,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
- in: query
name: filter[provider_type__in]
schema:
@@ -3300,6 +3326,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -3310,6 +3337,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
explode: false
style: form
- in: query
@@ -3459,6 +3487,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -3467,6 +3496,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
- in: query
name: filter[provider_type__in]
schema:
@@ -3477,6 +3507,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -3487,6 +3518,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
explode: false
style: form
- in: query
@@ -4165,6 +4197,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -4173,6 +4206,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
- in: query
name: filter[provider__in]
schema:
@@ -4746,6 +4780,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -4754,6 +4789,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
- in: query
name: filter[provider_type__in]
schema:
@@ -4764,6 +4800,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -4774,6 +4811,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
explode: false
style: form
- in: query
@@ -6457,6 +6495,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -6465,6 +6504,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
- in: query
name: filter[provider_type__in]
schema:
@@ -6475,6 +6515,7 @@ paths:
- aws
- azure
- gcp
- github
- kubernetes
- m365
description: |-
@@ -6485,6 +6526,7 @@ paths:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
explode: false
style: form
- in: query
@@ -11130,6 +11172,34 @@ components:
encoded as a string.
required:
- kubeconfig_content
- type: object
title: GitHub Personal Access Token
properties:
personal_access_token:
type: string
description: GitHub personal access token for authentication.
required:
- personal_access_token
- type: object
title: GitHub OAuth App Token
properties:
oauth_app_token:
type: string
description: GitHub OAuth App token for authentication.
required:
- oauth_app_token
- type: object
title: GitHub App Credentials
properties:
github_app_id:
type: integer
description: GitHub App ID for authentication.
github_app_key:
type: string
description: Path to the GitHub App private key file.
required:
- github_app_id
- github_app_key
writeOnly: true
required:
- secret
@@ -12035,6 +12105,7 @@ components:
- gcp
- kubernetes
- m365
- github
type: string
description: |-
* `aws` - AWS
@@ -12042,6 +12113,7 @@ components:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
uid:
type: string
title: Unique identifier for the provider, set by the provider
@@ -12149,6 +12221,7 @@ components:
- gcp
- kubernetes
- m365
- github
type: string
description: |-
* `aws` - AWS
@@ -12156,6 +12229,7 @@ components:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
uid:
type: string
title: Unique identifier for the provider, set by the provider
@@ -12194,6 +12268,7 @@ components:
- gcp
- kubernetes
- m365
- github
type: string
description: |-
* `aws` - AWS
@@ -12201,6 +12276,7 @@ components:
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
uid:
type: string
minLength: 3
@@ -12852,6 +12928,34 @@ components:
as a string.
required:
- kubeconfig_content
- type: object
title: GitHub Personal Access Token
properties:
personal_access_token:
type: string
description: GitHub personal access token for authentication.
required:
- personal_access_token
- type: object
title: GitHub OAuth App Token
properties:
oauth_app_token:
type: string
description: GitHub OAuth App token for authentication.
required:
- oauth_app_token
- type: object
title: GitHub App Credentials
properties:
github_app_id:
type: integer
description: GitHub App ID for authentication.
github_app_key:
type: string
description: Path to the GitHub App private key file.
required:
- github_app_id
- github_app_key
writeOnly: true
required:
- secret_type
@@ -13071,6 +13175,34 @@ components:
encoded as a string.
required:
- kubeconfig_content
- type: object
title: GitHub Personal Access Token
properties:
personal_access_token:
type: string
description: GitHub personal access token for authentication.
required:
- personal_access_token
- type: object
title: GitHub OAuth App Token
properties:
oauth_app_token:
type: string
description: GitHub OAuth App token for authentication.
required:
- oauth_app_token
- type: object
title: GitHub App Credentials
properties:
github_app_id:
type: integer
description: GitHub App ID for authentication.
github_app_key:
type: string
description: Path to the GitHub App private key file.
required:
- github_app_id
- github_app_key
writeOnly: true
required:
- secret_type
@@ -13305,6 +13437,34 @@ components:
as a string.
required:
- kubeconfig_content
- type: object
title: GitHub Personal Access Token
properties:
personal_access_token:
type: string
description: GitHub personal access token for authentication.
required:
- personal_access_token
- type: object
title: GitHub OAuth App Token
properties:
oauth_app_token:
type: string
description: GitHub OAuth App token for authentication.
required:
- oauth_app_token
- type: object
title: GitHub App Credentials
properties:
github_app_id:
type: integer
description: GitHub App ID for authentication.
github_app_key:
type: string
description: Path to the GitHub App private key file.
required:
- github_app_id
- github_app_key
writeOnly: true
required:
- secret
+61
@@ -966,6 +966,31 @@ class TestProviderViewSet:
"uid": "subdomain1.subdomain2.subdomain3.subdomain4.domain.net",
"alias": "test",
},
{
"provider": "github",
"uid": "test-user",
"alias": "test",
},
{
"provider": "github",
"uid": "test-organization",
"alias": "GitHub Org",
},
{
"provider": "github",
"uid": "prowler-cloud",
"alias": "Prowler",
},
{
"provider": "github",
"uid": "microsoft",
"alias": "Microsoft",
},
{
"provider": "github",
"uid": "a12345678901234567890123456789012345678",
"alias": "Long Username",
},
]
),
)
@@ -1079,6 +1104,42 @@ class TestProviderViewSet:
"m365-uid",
"uid",
),
(
{
"provider": "github",
"uid": "-invalid-start",
"alias": "test",
},
"github-uid",
"uid",
),
(
{
"provider": "github",
"uid": "invalid@username",
"alias": "test",
},
"github-uid",
"uid",
),
(
{
"provider": "github",
"uid": "invalid_username",
"alias": "test",
},
"github-uid",
"uid",
),
(
{
"provider": "github",
"uid": "a" * 40,
"alias": "test",
},
"github-uid",
"uid",
),
]
),
)
+22 -5
@@ -13,6 +13,7 @@ from prowler.providers.aws.aws_provider import AwsProvider
from prowler.providers.azure.azure_provider import AzureProvider
from prowler.providers.common.models import Connection
from prowler.providers.gcp.gcp_provider import GcpProvider
from prowler.providers.github.github_provider import GithubProvider
from prowler.providers.kubernetes.kubernetes_provider import KubernetesProvider
from prowler.providers.m365.m365_provider import M365Provider
@@ -55,14 +56,21 @@ def merge_dicts(default_dict: dict, replacement_dict: dict) -> dict:
def return_prowler_provider(
provider: Provider,
) -> [AwsProvider | AzureProvider | GcpProvider | KubernetesProvider | M365Provider]:
) -> [
AwsProvider
| AzureProvider
| GcpProvider
| GithubProvider
| KubernetesProvider
| M365Provider
]:
"""Return the Prowler provider class based on the given provider type.
Args:
provider (Provider): The provider object containing the provider type and associated secrets.
Returns:
AwsProvider | AzureProvider | GcpProvider | KubernetesProvider | M365Provider: The corresponding provider class.
AwsProvider | AzureProvider | GcpProvider | GithubProvider | KubernetesProvider | M365Provider: The corresponding provider class.
Raises:
ValueError: If the provider type specified in `provider.provider` is not supported.
@@ -78,6 +86,8 @@ def return_prowler_provider(
prowler_provider = KubernetesProvider
case Provider.ProviderChoices.M365.value:
prowler_provider = M365Provider
case Provider.ProviderChoices.GITHUB.value:
prowler_provider = GithubProvider
case _:
raise ValueError(f"Provider type {provider.provider} not supported")
return prowler_provider
@@ -120,7 +130,14 @@ def get_prowler_provider_kwargs(
def initialize_prowler_provider(
provider: Provider,
mutelist_processor: Processor | None = None,
) -> AwsProvider | AzureProvider | GcpProvider | KubernetesProvider | M365Provider:
) -> (
AwsProvider
| AzureProvider
| GcpProvider
| GithubProvider
| KubernetesProvider
| M365Provider
):
"""Initialize a Prowler provider instance based on the given provider type.
Args:
@@ -128,8 +145,8 @@ def initialize_prowler_provider(
mutelist_processor (Processor): The mutelist processor object containing the mutelist configuration.
Returns:
AwsProvider | AzureProvider | GcpProvider | KubernetesProvider | M365Provider: An instance of the corresponding provider class
(`AwsProvider`, `AzureProvider`, `GcpProvider`, `KubernetesProvider` or `M365Provider`) initialized with the
AwsProvider | AzureProvider | GcpProvider | GithubProvider | KubernetesProvider | M365Provider: An instance of the corresponding provider class
(`AwsProvider`, `AzureProvider`, `GcpProvider`, `GithubProvider`, `KubernetesProvider` or `M365Provider`) initialized with the
provider's secrets.
"""
prowler_provider = return_prowler_provider(provider)
@@ -176,6 +176,43 @@ from rest_framework_json_api import serializers
},
"required": ["kubeconfig_content"],
},
{
"type": "object",
"title": "GitHub Personal Access Token",
"properties": {
"personal_access_token": {
"type": "string",
"description": "GitHub personal access token for authentication.",
}
},
"required": ["personal_access_token"],
},
{
"type": "object",
"title": "GitHub OAuth App Token",
"properties": {
"oauth_app_token": {
"type": "string",
"description": "GitHub OAuth App token for authentication.",
}
},
"required": ["oauth_app_token"],
},
{
"type": "object",
"title": "GitHub App Credentials",
"properties": {
"github_app_id": {
"type": "integer",
"description": "GitHub App ID for authentication.",
},
"github_app_key": {
"type": "string",
"description": "Path to the GitHub App private key file.",
},
},
"required": ["github_app_id", "github_app_key"],
},
]
}
)
+12
@@ -1217,6 +1217,8 @@ class BaseWriteProviderSecretSerializer(BaseWriteSerializer):
serializer = AzureProviderSecret(data=secret)
elif provider_type == Provider.ProviderChoices.GCP.value:
serializer = GCPProviderSecret(data=secret)
elif provider_type == Provider.ProviderChoices.GITHUB.value:
serializer = GithubProviderSecret(data=secret)
elif provider_type == Provider.ProviderChoices.KUBERNETES.value:
serializer = KubernetesProviderSecret(data=secret)
elif provider_type == Provider.ProviderChoices.M365.value:
@@ -1296,6 +1298,16 @@ class KubernetesProviderSecret(serializers.Serializer):
resource_name = "provider-secrets"
class GithubProviderSecret(serializers.Serializer):
personal_access_token = serializers.CharField(required=False)
oauth_app_token = serializers.CharField(required=False)
github_app_id = serializers.IntegerField(required=False)
github_app_key_content = serializers.CharField(required=False)
class Meta:
resource_name = "provider-secrets"
class AWSRoleAssumptionProviderSecret(serializers.Serializer):
role_arn = serializers.CharField()
external_id = serializers.CharField()
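A minimal sketch of how the `GithubProviderSecret` serializer above validates a PAT-based secret; the payload is hypothetical and only standard DRF calls are used:

```python
# Hypothetical payload; GithubProviderSecret as defined above.
serializer = GithubProviderSecret(data={"personal_access_token": "ghp_example"})
assert serializer.is_valid(), serializer.errors
print(serializer.validated_data)  # {'personal_access_token': 'ghp_example'}
```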
+35 -8
@@ -22,7 +22,7 @@ from django.conf import settings as django_settings
from django.contrib.postgres.aggregates import ArrayAgg
from django.contrib.postgres.search import SearchQuery
from django.db import transaction
from django.db.models import Count, F, Prefetch, Q, Sum
from django.db.models import Count, F, Prefetch, Q, Subquery, Sum
from django.db.models.functions import Coalesce
from django.http import HttpResponse
from django.shortcuts import redirect
@@ -292,7 +292,7 @@ class SchemaView(SpectacularAPIView):
def get(self, request, *args, **kwargs):
spectacular_settings.TITLE = "Prowler API"
spectacular_settings.VERSION = "1.10.1"
spectacular_settings.VERSION = "1.10.2"
spectacular_settings.DESCRIPTION = (
"Prowler API specification.\n\nThis file is auto-generated."
)
@@ -1994,6 +1994,21 @@ class ResourceViewSet(PaginateByPkMixin, BaseRLSViewSet):
)
)
def _should_prefetch_findings(self) -> bool:
fields_param = self.request.query_params.get("fields[resources]", "")
include_param = self.request.query_params.get("include", "")
return (
fields_param == ""
or "findings" in fields_param.split(",")
or "findings" in include_param.split(",")
)
def _get_findings_prefetch(self):
findings_queryset = Finding.all_objects.defer("scan", "resources").filter(
tenant_id=self.request.tenant_id
)
return [Prefetch("findings", queryset=findings_queryset)]
def get_serializer_class(self):
if self.action in ["metadata", "metadata_latest"]:
return ResourceMetadataSerializer
@@ -2017,7 +2032,11 @@ class ResourceViewSet(PaginateByPkMixin, BaseRLSViewSet):
filtered_queryset,
manager=Resource.all_objects,
select_related=["provider"],
prefetch_related=["findings"],
prefetch_related=(
self._get_findings_prefetch()
if self._should_prefetch_findings()
else []
),
)
def retrieve(self, request, *args, **kwargs):
@@ -2042,14 +2061,18 @@ class ResourceViewSet(PaginateByPkMixin, BaseRLSViewSet):
tenant_id = request.tenant_id
filtered_queryset = self.filter_queryset(self.get_queryset())
latest_scan_ids = (
Scan.all_objects.filter(tenant_id=tenant_id, state=StateChoices.COMPLETED)
latest_scans = (
Scan.all_objects.filter(
tenant_id=tenant_id,
state=StateChoices.COMPLETED,
)
.order_by("provider_id", "-inserted_at")
.distinct("provider_id")
.values_list("id", flat=True)
.values("provider_id")
)
filtered_queryset = filtered_queryset.filter(
tenant_id=tenant_id, provider__scan__in=latest_scan_ids
provider_id__in=Subquery(latest_scans)
)
return self.paginate_by_pk(
@@ -2057,7 +2080,11 @@ class ResourceViewSet(PaginateByPkMixin, BaseRLSViewSet):
filtered_queryset,
manager=Resource.all_objects,
select_related=["provider"],
prefetch_related=["findings"],
prefetch_related=(
self._get_findings_prefetch()
if self._should_prefetch_findings()
else []
),
)
@action(detail=False, methods=["get"], url_name="metadata")
+4
@@ -20,6 +20,7 @@ from prowler.lib.outputs.compliance.aws_well_architected.aws_well_architected im
from prowler.lib.outputs.compliance.cis.cis_aws import AWSCIS
from prowler.lib.outputs.compliance.cis.cis_azure import AzureCIS
from prowler.lib.outputs.compliance.cis.cis_gcp import GCPCIS
from prowler.lib.outputs.compliance.cis.cis_github import GithubCIS
from prowler.lib.outputs.compliance.cis.cis_kubernetes import KubernetesCIS
from prowler.lib.outputs.compliance.cis.cis_m365 import M365CIS
from prowler.lib.outputs.compliance.ens.ens_aws import AWSENS
@@ -93,6 +94,9 @@ COMPLIANCE_CLASS_MAP = {
(lambda name: name == "prowler_threatscore_m365", ProwlerThreatScoreM365),
(lambda name: name.startswith("iso27001_"), M365ISO27001),
],
"github": [
(lambda name: name.startswith("cis_"), GithubCIS),
],
}
@@ -0,0 +1,234 @@
from locust import events, task
from utils.config import (
L_PROVIDER_NAME,
M_PROVIDER_NAME,
RESOURCES_UI_FIELDS,
S_PROVIDER_NAME,
TARGET_INSERTED_AT,
)
from utils.helpers import (
APIUserBase,
get_api_token,
get_auth_headers,
get_dynamic_filters_pairs,
get_next_resource_filter,
get_scan_id_from_provider_name,
)
GLOBAL = {
"token": None,
"scan_ids": {},
"resource_filters": None,
"large_resource_filters": None,
}
@events.test_start.add_listener
def on_test_start(environment, **kwargs):
GLOBAL["token"] = get_api_token(environment.host)
GLOBAL["scan_ids"]["small"] = get_scan_id_from_provider_name(
environment.host, GLOBAL["token"], S_PROVIDER_NAME
)
GLOBAL["scan_ids"]["medium"] = get_scan_id_from_provider_name(
environment.host, GLOBAL["token"], M_PROVIDER_NAME
)
GLOBAL["scan_ids"]["large"] = get_scan_id_from_provider_name(
environment.host, GLOBAL["token"], L_PROVIDER_NAME
)
GLOBAL["resource_filters"] = get_dynamic_filters_pairs(
environment.host, GLOBAL["token"], "resources"
)
GLOBAL["large_resource_filters"] = get_dynamic_filters_pairs(
environment.host, GLOBAL["token"], "resources", GLOBAL["scan_ids"]["large"]
)
class APIUser(APIUserBase):
def on_start(self):
self.token = GLOBAL["token"]
self.s_scan_id = GLOBAL["scan_ids"]["small"]
self.m_scan_id = GLOBAL["scan_ids"]["medium"]
self.l_scan_id = GLOBAL["scan_ids"]["large"]
self.available_resource_filters = GLOBAL["resource_filters"]
self.available_resource_filters_large_scan = GLOBAL["large_resource_filters"]
@task
def resources_default(self):
name = "/resources"
page_number = self._next_page(name)
endpoint = (
f"/resources?page[number]={page_number}"
f"&filter[updated_at]={TARGET_INSERTED_AT}"
)
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
@task(3)
def resources_default_ui_fields(self):
name = "/resources?fields"
page_number = self._next_page(name)
endpoint = (
f"/resources?page[number]={page_number}"
f"&fields[resources]={','.join(RESOURCES_UI_FIELDS)}"
f"&filter[updated_at]={TARGET_INSERTED_AT}"
)
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
@task(3)
def resources_default_include(self):
name = "/resources?include"
page = self._next_page(name)
endpoint = (
f"/resources?page[number]={page}"
f"&filter[updated_at]={TARGET_INSERTED_AT}"
f"&include=provider"
)
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
@task(3)
def resources_metadata(self):
name = "/resources/metadata"
endpoint = f"/resources/metadata?filter[updated_at]={TARGET_INSERTED_AT}"
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
@task
def resources_scan_small(self):
name = "/resources?filter[scan_id] - 50k"
page_number = self._next_page(name)
endpoint = (
f"/resources?page[number]={page_number}" f"&filter[scan]={self.s_scan_id}"
)
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
@task
def resources_metadata_scan_small(self):
name = "/resources/metadata?filter[scan_id] - 50k"
endpoint = f"/resources/metadata?&filter[scan]={self.s_scan_id}"
self.client.get(
endpoint,
headers=get_auth_headers(self.token),
name=name,
)
@task(2)
def resources_scan_medium(self):
name = "/resources?filter[scan_id] - 250k"
page_number = self._next_page(name)
endpoint = (
f"/resources?page[number]={page_number}" f"&filter[scan]={self.m_scan_id}"
)
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
@task
def resources_metadata_scan_medium(self):
name = "/resources/metadata?filter[scan_id] - 250k"
endpoint = f"/resources/metadata?&filter[scan]={self.m_scan_id}"
self.client.get(
endpoint,
headers=get_auth_headers(self.token),
name=name,
)
@task
def resources_scan_large(self):
name = "/resources?filter[scan_id] - 500k"
page_number = self._next_page(name)
endpoint = (
f"/resources?page[number]={page_number}" f"&filter[scan]={self.l_scan_id}"
)
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
@task
def resources_scan_large_include(self):
name = "/resources?filter[scan_id]&include - 500k"
page_number = self._next_page(name)
endpoint = (
f"/resources?page[number]={page_number}"
f"&filter[scan]={self.l_scan_id}"
f"&include=provider"
)
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
@task
def resources_metadata_scan_large(self):
endpoint = f"/resources/metadata?&filter[scan]={self.l_scan_id}"
self.client.get(
endpoint,
headers=get_auth_headers(self.token),
name="/resources/metadata?filter[scan_id] - 500k",
)
@task(2)
def resources_filters(self):
name = "/resources?filter[resource_filter]&include"
filter_name, filter_value = get_next_resource_filter(
self.available_resource_filters
)
endpoint = (
f"/resources?filter[{filter_name}]={filter_value}"
f"&filter[updated_at]={TARGET_INSERTED_AT}"
f"&include=provider"
)
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
@task(3)
def resources_metadata_filters(self):
name = "/resources/metadata?filter[resource_filter]"
filter_name, filter_value = get_next_resource_filter(
self.available_resource_filters
)
endpoint = (
f"/resources/metadata?filter[{filter_name}]={filter_value}"
f"&filter[updated_at]={TARGET_INSERTED_AT}"
)
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
@task(3)
def resources_metadata_filters_scan_large(self):
name = "/resources/metadata?filter[resource_filter]&filter[scan_id] - 500k"
filter_name, filter_value = get_next_resource_filter(
self.available_resource_filters
)
endpoint = (
f"/resources/metadata?filter[{filter_name}]={filter_value}"
f"&filter[scan]={self.l_scan_id}"
)
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
@task(2)
def resources_filter_large_scan_include(self):
name = "/resources?filter[resource_filter][scan]&include - 500k"
filter_name, filter_value = get_next_resource_filter(
self.available_resource_filters
)
endpoint = (
f"/resources?filter[{filter_name}]={filter_value}"
f"&filter[scan]={self.l_scan_id}"
f"&include=provider"
)
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
@task(3)
def resources_latest_default_ui_fields(self):
name = "/resources/latest?fields"
page_number = self._next_page(name)
endpoint = (
f"/resources/latest?page[number]={page_number}"
f"&fields[resources]={','.join(RESOURCES_UI_FIELDS)}"
)
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
@task(3)
def resources_latest_metadata_filters(self):
name = "/resources/metadata/latest?filter[resource_filter]"
filter_name, filter_value = get_next_resource_filter(
self.available_resource_filters
)
endpoint = f"/resources/metadata/latest?filter[{filter_name}]={filter_value}"
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
+17
@@ -13,6 +13,23 @@ FINDINGS_RESOURCE_METADATA = {
"resource_types": "resource_type",
"services": "service",
}
RESOURCE_METADATA = {
"regions": "region",
"types": "type",
"services": "service",
}
RESOURCES_UI_FIELDS = [
"name",
"failed_findings_count",
"region",
"service",
"type",
"provider",
"inserted_at",
"updated_at",
"uid",
]
S_PROVIDER_NAME = "provider-50k"
M_PROVIDER_NAME = "provider-250k"
+16 -6
@@ -7,6 +7,7 @@ from locust import HttpUser, between
from utils.config import (
BASE_HEADERS,
FINDINGS_RESOURCE_METADATA,
RESOURCE_METADATA,
TARGET_INSERTED_AT,
USER_EMAIL,
USER_PASSWORD,
@@ -121,13 +122,16 @@ def get_scan_id_from_provider_name(host: str, token: str, provider_name: str) ->
return response.json()["data"][0]["id"]
def get_resource_filters_pairs(host: str, token: str, scan_id: str = "") -> dict:
def get_dynamic_filters_pairs(
host: str, token: str, endpoint: str, scan_id: str = ""
) -> dict:
"""
Retrieves and maps resource metadata filter values from the findings endpoint.
Retrieves and maps metadata filter values from a given endpoint.
Args:
host (str): The host URL of the API.
token (str): Bearer token for authentication.
endpoint (str): The API endpoint to query for metadata.
scan_id (str, optional): Optional scan ID to filter metadata. Defaults to using inserted_at timestamp.
Returns:
@@ -136,22 +140,28 @@ def get_resource_filters_pairs(host: str, token: str, scan_id: str = "") -> dict
Raises:
AssertionError: If the request fails or does not return a 200 status code.
"""
metadata_mapping = (
FINDINGS_RESOURCE_METADATA if endpoint == "findings" else RESOURCE_METADATA
)
date_filter = "inserted_at" if endpoint == "findings" else "updated_at"
metadata_filters = (
f"filter[scan]={scan_id}"
if scan_id
else f"filter[inserted_at]={TARGET_INSERTED_AT}"
else f"filter[{date_filter}]={TARGET_INSERTED_AT}"
)
response = requests.get(
f"{host}/findings/metadata?{metadata_filters}", headers=get_auth_headers(token)
f"{host}/{endpoint}/metadata?{metadata_filters}",
headers=get_auth_headers(token),
)
assert (
response.status_code == 200
), f"Failed to get resource filters values: {response.text}"
attributes = response.json()["data"]["attributes"]
return {
FINDINGS_RESOURCE_METADATA[key]: values
metadata_mapping[key]: values
for key, values in attributes.items()
if key in FINDINGS_RESOURCE_METADATA.keys()
if key in metadata_mapping.keys()
}
+5 -4
@@ -23,6 +23,7 @@ import argparse
import json
import os
import re
import shlex
import signal
import socket
import subprocess
@@ -145,11 +146,11 @@ def _get_script_arguments():
def _run_prowler(prowler_args):
_debug("Running prowler with args: {0}".format(prowler_args), 1)
_prowler_command = "{prowler}/prowler {args}".format(
prowler=PATH_TO_PROWLER, args=prowler_args
_prowler_command = shlex.split(
"{prowler}/prowler {args}".format(prowler=PATH_TO_PROWLER, args=prowler_args)
)
_debug("Running command: {0}".format(_prowler_command), 2)
_process = subprocess.Popen(_prowler_command, stdout=subprocess.PIPE, shell=True)
_debug("Running command: {0}".format(" ".join(_prowler_command)), 2)
_process = subprocess.Popen(_prowler_command, stdout=subprocess.PIPE)
_output, _error = _process.communicate()
_debug("Raw prowler output: {0}".format(_output), 3)
_debug("Raw prowler error: {0}".format(_error), 3)
+25
@@ -0,0 +1,25 @@
import warnings
from dashboard.common_methods import get_section_containers_cis
warnings.filterwarnings("ignore")
def get_table(data):
aux = data[
[
"REQUIREMENTS_ID",
"REQUIREMENTS_DESCRIPTION",
"REQUIREMENTS_ATTRIBUTES_SECTION",
"CHECKID",
"STATUS",
"REGION",
"ACCOUNTID",
"RESOURCEID",
]
].copy()
return get_section_containers_cis(
aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
)
+1 -2
@@ -109,13 +109,12 @@ Prowler will follow the same credentials search as [Google authentication librar
Prowler for Google Cloud needs the following permissions to be set (a consolidated CLI sketch follows this list):
- **Viewer (`roles/viewer`) IAM role**: granted at the project / folder / org level in order to scan the target projects
- **Reader (`roles/reader`) IAM role**: granted at the project / folder / org level in order to scan the target projects
- **Project level settings**: you need to have at least one project with the below settings:
- Identity and Access Management (IAM) API (`iam.googleapis.com`) enabled by either using the
[Google Cloud API UI](https://console.cloud.google.com/apis/api/iam.googleapis.com/metrics) or
by using the gcloud CLI `gcloud services enable iam.googleapis.com --project <your-project-id>` command
- Service Usage Consumer (`roles/serviceusage.serviceUsageConsumer`) IAM role
- Set the quota project to be this project by either running `gcloud auth application-default set-quota-project <project-id>` or by setting an environment variable:
`export GOOGLE_CLOUD_QUOTA_PROJECT=<project-id>`
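For reference, the grants above can also be applied from the CLI; a hedged sketch, where `<project-id>` and `<member>` are placeholders:

```bash
# Grant the Reader role on the target project (repeat at folder/org level as needed)
gcloud projects add-iam-policy-binding <project-id> \
  --member="serviceAccount:<member>" --role="roles/reader"

# Grant Service Usage Consumer on the quota project
gcloud projects add-iam-policy-binding <project-id> \
  --member="serviceAccount:<member>" --role="roles/serviceusage.serviceUsageConsumer"

# Enable the IAM API and set the quota project
gcloud services enable iam.googleapis.com --project <project-id>
gcloud auth application-default set-quota-project <project-id>
```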
+45
@@ -312,6 +312,51 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/),
prowler azure --az-cli-auth
```
### Prowler App Update
You have two options to upgrade your Prowler App installation:
#### Option 1: Update the `.env` file with the following values
Edit your `.env` file and change the version values:
```env
PROWLER_UI_VERSION="5.9.0"
PROWLER_API_VERSION="5.9.0"
```
#### Option 2: Run the following command
```bash
docker compose pull --policy always
```
The `--policy always` flag ensures that Docker pulls the latest images even if they already exist locally.
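Note that pulling alone does not restart running containers; a reasonable follow-up, mirroring the rollback steps below, is to recreate them:

```bash
docker compose up -d
```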
???+ note "What Gets Preserved During Upgrade"
All data is preserved; nothing is deleted during the update.
#### Troubleshooting
If containers don't start, check logs for errors:
```bash
# Check logs for errors
docker compose logs
# Verify image versions
docker images | grep prowler
```
If you encounter issues, you can roll back by setting the `.env` file back to the previous version and running:
```bash
docker compose pull
docker compose up -d
```
## Prowler container versions
The available versions of Prowler CLI are the following:
+12 -1
@@ -78,7 +78,8 @@ The following list includes all the Azure checks with configurable variables tha
| `app_ensure_python_version_is_latest` | `python_latest_version` | String |
| `app_ensure_java_version_is_latest` | `java_latest_version` | String |
| `sqlserver_recommended_minimal_tls_version` | `recommended_minimal_tls_versions` | List of Strings |
| `defender_attack_path_notifications_properly_configured` | `defender_attack_path_minimal_risk_level` | String |
| `vm_desired_sku_size` | `desired_vm_sku_sizes` | List of Strings |
| `defender_attack_path_notifications_properly_configured` | `defender_attack_path_minimal_risk_level` | String |
## GCP
@@ -481,6 +482,16 @@ azure:
"1.3"
]
# Azure Virtual Machines
# azure.vm_desired_sku_size
# List of desired VM SKU sizes that are allowed in the organization
desired_vm_sku_sizes:
[
"Standard_A8_v2",
"Standard_DS3_v2",
"Standard_D4s_v3",
]
# GCP Configuration
gcp:
# GCP Compute Configuration
+2 -3
@@ -51,7 +51,7 @@ Prowler follows the same search order as [Google authentication libraries](https
???+ note
The credentials must belong to a user or service account with the necessary permissions.
To ensure full access, assign the roles/viewer IAM role to the identity being used.
To ensure full access, assign the roles/reader IAM role to the identity being used.
???+ note
Prowler will use the enabled Google Cloud APIs to get the information needed to perform the checks.
@@ -63,13 +63,12 @@ Prowler follows the same search order as [Google authentication libraries](https
Prowler for Google Cloud needs the following permissions to be set:
- **Viewer (`roles/viewer`) IAM role**: granted at the project / folder / org level in order to scan the target projects
- **Reader (`roles/reader`) IAM role**: granted at the project / folder / org level in order to scan the target projects
- **Project level settings**: you need to have at least one project with the below settings:
- Identity and Access Management (IAM) API (`iam.googleapis.com`) enabled by either using the
[Google Cloud API UI](https://console.cloud.google.com/apis/api/iam.googleapis.com/metrics) or
by using the gcloud CLI `gcloud services enable iam.googleapis.com --project <your-project-id>` command
- Service Usage Consumer (`roles/serviceusage.serviceUsageConsumer`) IAM role
- Set the quota project to be this project by either running `gcloud auth application-default set-quota-project <project-id>` or by setting an environment variable:
`export GOOGLE_CLOUD_QUOTA_PROJECT=<project-id>`
+1 -1
@@ -9,7 +9,7 @@ prowler gcp --organization-id organization-id
```
???+ warning
Make sure that the used credentials have the role Cloud Asset Viewer (`roles/cloudasset.viewer`) or Cloud Asset Owner (`roles/cloudasset.owner`) on the organization level.
Make sure that the used credentials have a role with the `cloudasset.assets.listResource` permission on the organization level like `roles/cloudasset.viewer` (Cloud Asset Viewer) or `roles/cloudasset.owner` (Cloud Asset Owner).
???+ note
With this option, Prowler retrieves all projects within the specified organization, including those organized in folders and nested subfolders. This ensures that every project under the organizations hierarchy is scanned, providing full visibility across the entire organization.
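A hedged sketch of granting such a role at the organization level with gcloud, where `<org-id>` and `<member>` are placeholders:

```bash
gcloud organizations add-iam-policy-binding <org-id> \
  --member="serviceAccount:<member>" --role="roles/cloudasset.viewer"
```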
@@ -1,6 +1,6 @@
# Getting Started with GitHub Authentication
# Getting Started with GitHub
This guide explains how to set up authentication with GitHub for Prowler. The documentation covers credential retrieval processes for each supported authentication method.
This guide explains how to set up authentication with GitHub for Prowler. Learn about different authentication methods, security best practices, and troubleshooting common issues.
## Prerequisites
@@ -156,19 +156,21 @@ To grant the permissions for the PowerShell modules via application authenticati
![Exchange.ManageAsApp Permission](./img/exchange-permission.png)
You also need to assign the `Exchange Administrator` role to the app. For that go to `Roles and administrators` and in the `Administrative roles` section click `here` to go to the directory level assignment:
You also need to assign the `Global Reader` role to the app. For that go to `Roles and administrators` and in the `Administrative roles` section click `here` to go to the directory level assignment:
![Roles and administrators](./img/here.png)
Once in the directory level assignment, search for `Exchange Administrator` and click on it to open the assignments page of that role.
Once in the directory level assignment, search for `Global Reader` and click on it to open the assignments page of that role.
![Exchange Administrator Role](./img/exchange-administrator-role.png)
![Global Reader Role](./img/global-reader-role.png)
Click on `Add assignments`, search for your app and click on `Assign`.
You have to select it as `Active` and click on `Assign` to assign the role to the app.
![Assign Exchange Administrator Role](./img/assign-exchange-administrator-role.png)
![Assign Global Reader Role](./img/assign-global-reader-role.png)
For more information about why this role is needed, see [Microsoft documentation](https://learn.microsoft.com/en-us/powershell/exchange/app-only-auth-powershell-v2?view=exchange-ps#step-5-assign-microsoft-entra-roles-to-the-application). You can select any other of the roles specified there.
2. Add Teams API:
Two binary screenshots replaced (files not shown): 142 KiB → 98 KiB and 111 KiB → 202 KiB.
+33 -6
@@ -2,6 +2,39 @@
All notable changes to the **Prowler SDK** are documented in this file.
## [v5.10.0] (Prowler UNRELEASED)
### Added
- `bedrock_api_key_no_administrative_privileges` check for AWS provider [(#8321)](https://github.com/prowler-cloud/prowler/pull/8321)
- Support App Key Content in GitHub provider [(#8271)](https://github.com/prowler-cloud/prowler/pull/8271)
- CIS 4.0 for the Azure provider [(#7782)](https://github.com/prowler-cloud/prowler/pull/7782)
- `vm_desired_sku_size` check for Azure provider [(#8191)](https://github.com/prowler-cloud/prowler/pull/8191)
- `vm_scaleset_not_empty` check for Azure provider [(#8192)](https://github.com/prowler-cloud/prowler/pull/8192)
### Changed
- Handle some AWS errors as warnings instead of errors [(#8347)](https://github.com/prowler-cloud/prowler/pull/8347)
### Fixed
- False positives in SQS encryption check for ephemeral queues [(#8330)](https://github.com/prowler-cloud/prowler/pull/8330)
- Add protocol validation check in security group checks to ensure proper protocol matching [(#8374)](https://github.com/prowler-cloud/prowler/pull/8374)
---
## [v5.9.3] (Prowler UNRELEASED)
### Fixed
- Add more validations to Azure Storage models when some values are None to avoid serialization issues [(#8325)](https://github.com/prowler-cloud/prowler/pull/8325)
- `sns_topics_not_publicly_accessible` false positive with `aws:SourceArn` conditions [(#8326)](https://github.com/prowler-cloud/prowler/issues/8326)
---
## [v5.9.2] (Prowler v5.9.2)
### Fixed
- Use the correct resource name in `defender_domain_dkim_enabled` check [(#8334)](https://github.com/prowler-cloud/prowler/pull/8334)
---
## [v5.9.0] (Prowler v5.9.0)
### Added
@@ -32,12 +65,6 @@ All notable changes to the **Prowler SDK** are documented in this file.
- Update `entra_users_mfa_capable` check to use the correct resource name and ID [(#8288)](https://github.com/prowler-cloud/prowler/pull/8288)
- Handle multiple services and severities while listing checks [(#8302)](https://github.com/prowler-cloud/prowler/pull/8302)
- Handle `tenant_id` for M365 Mutelist [(#8306)](https://github.com/prowler-cloud/prowler/pull/8306)
---
## [v5.8.2] (Prowler 5.8.2)
### Fixed
- Fix error in Dashboard Overview page when reading CSV files [(#8257)](https://github.com/prowler-cloud/prowler/pull/8257)
---
File diff suppressed because one or more lines are too long
+10
@@ -450,6 +450,16 @@ azure:
"1.3",
]
# Azure Virtual Machines
# azure.vm_desired_sku_size
# List of desired VM SKU sizes that are allowed in the organization
desired_vm_sku_sizes:
[
"Standard_A8_v2",
"Standard_DS3_v2",
"Standard_D4s_v3",
]
# GCP Configuration
gcp:
# GCP Compute Configuration
+2
@@ -353,6 +353,8 @@ class Finding(BaseModel):
finding.region = resource.region
# Azure, GCP specified field
finding.location = resource.region
# GitHub specified field
finding.owner = resource.region
# K8s specified field
if provider.type == "kubernetes":
finding.namespace = resource.region.removeprefix("namespace: ")
@@ -1374,6 +1374,7 @@
"sa-east-1",
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2"
],
"aws-cn": [],
@@ -1405,6 +1406,7 @@
"sa-east-1",
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2"
],
"aws-cn": [],
@@ -1417,6 +1419,11 @@
"bedrock-data-automation": {
"regions": {
"aws": [
"ap-south-1",
"ap-southeast-2",
"eu-central-1",
"eu-west-1",
"eu-west-2",
"us-east-1",
"us-west-2"
],
@@ -2490,6 +2497,7 @@
"ap-southeast-3",
"ap-southeast-4",
"ap-southeast-5",
"ap-southeast-7",
"ca-central-1",
"ca-west-1",
"eu-central-1",
@@ -2503,6 +2511,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -2531,6 +2540,7 @@
"ap-southeast-3",
"ap-southeast-4",
"ap-southeast-5",
"ap-southeast-7",
"ca-central-1",
"ca-west-1",
"eu-central-1",
@@ -2544,6 +2554,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -2574,6 +2585,7 @@
"ap-southeast-3",
"ap-southeast-4",
"ap-southeast-5",
"ap-southeast-7",
"ca-central-1",
"ca-west-1",
"eu-central-1",
@@ -2587,6 +2599,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -2845,6 +2858,7 @@
"aws": [
"af-south-1",
"ap-east-1",
"ap-east-2",
"ap-northeast-1",
"ap-northeast-2",
"ap-northeast-3",
@@ -2888,6 +2902,7 @@
"aws": [
"af-south-1",
"ap-east-1",
"ap-east-2",
"ap-northeast-1",
"ap-northeast-2",
"ap-northeast-3",
@@ -5075,6 +5090,7 @@
"ap-southeast-2",
"ap-southeast-3",
"ap-southeast-4",
"ap-southeast-5",
"ca-central-1",
"ca-west-1",
"eu-central-1",
@@ -5088,6 +5104,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -5670,6 +5687,38 @@
]
}
},
"iot-jobs-data": {
"regions": {
"aws": [
"ap-east-1",
"ap-northeast-1",
"ap-northeast-2",
"ap-south-1",
"ap-southeast-1",
"ap-southeast-2",
"ca-central-1",
"eu-central-1",
"eu-north-1",
"eu-west-1",
"eu-west-2",
"eu-west-3",
"me-south-1",
"sa-east-1",
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2"
],
"aws-cn": [
"cn-north-1",
"cn-northwest-1"
],
"aws-us-gov": [
"us-gov-east-1",
"us-gov-west-1"
]
}
},
"iotanalytics": {
"regions": {
"aws": [
@@ -5994,6 +6043,7 @@
"aws": [
"af-south-1",
"ap-east-1",
"ap-east-2",
"ap-northeast-1",
"ap-northeast-2",
"ap-northeast-3",
@@ -6559,6 +6609,7 @@
"aws": [
"af-south-1",
"ap-east-1",
"ap-east-2",
"ap-northeast-1",
"ap-northeast-2",
"ap-northeast-3",
@@ -7396,6 +7447,8 @@
"ap-southeast-2",
"ap-southeast-3",
"ap-southeast-4",
"ap-southeast-5",
"ap-southeast-7",
"ca-central-1",
"eu-central-1",
"eu-central-2",
@@ -7492,6 +7545,7 @@
"aws": [
"af-south-1",
"ap-east-1",
"ap-east-2",
"ap-northeast-1",
"ap-northeast-2",
"ap-northeast-3",
@@ -7720,18 +7774,24 @@
"ap-northeast-2",
"ap-northeast-3",
"ap-south-1",
"ap-south-2",
"ap-southeast-1",
"ap-southeast-2",
"ap-southeast-3",
"ap-southeast-4",
"ap-southeast-5",
"ca-central-1",
"ca-west-1",
"eu-central-1",
"eu-central-2",
"eu-north-1",
"eu-south-1",
"eu-south-2",
"eu-west-1",
"eu-west-2",
"eu-west-3",
"il-central-1",
"me-central-1",
"me-south-1",
"sa-east-1",
"us-east-1",
@@ -8181,6 +8241,7 @@
"ap-southeast-3",
"ap-southeast-4",
"ap-southeast-5",
"ap-southeast-7",
"ca-central-1",
"ca-west-1",
"eu-central-1",
@@ -9540,7 +9601,9 @@
"ap-southeast-2",
"ap-southeast-3",
"ap-southeast-4",
"ap-southeast-5",
"ca-central-1",
"ca-west-1",
"eu-central-1",
"eu-central-2",
"eu-north-1",
@@ -10090,6 +10153,7 @@
"aws": [
"af-south-1",
"ap-east-1",
"ap-east-2",
"ap-northeast-1",
"ap-northeast-2",
"ap-northeast-3",
@@ -0,0 +1,36 @@
{
"Provider": "aws",
"CheckID": "bedrock_api_key_no_administrative_privileges",
"CheckTitle": "Ensure Amazon Bedrock API keys do not have administrative privileges or privilege escalation",
"CheckType": [
"Software and Configuration Checks",
"Industry and Regulatory Standards"
],
"ServiceName": "bedrock",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:iam:region:account-id:user/{user-name}/credential/{api-key-id}",
"Severity": "high",
"ResourceType": "AwsIamServiceSpecificCredential",
"Description": "Ensure that Amazon Bedrock API keys do not have administrative privileges or privilege escalation capabilities. API keys with administrative privileges can perform any action on any resource in your AWS environment, while privilege escalation allows users to grant themselves additional permissions, both posing significant security risks.",
"Risk": "Amazon Bedrock API keys with administrative privileges can perform any action on any resource in your AWS environment. Privilege escalation capabilities allow users to grant themselves additional permissions beyond their intended scope. Both violations of the principle of least privilege can lead to security vulnerabilities, data leaks, data loss, or unexpected charges if the API key is compromised or misused.",
"RelatedUrl": "https://docs.aws.amazon.com/bedrock/latest/userguide/api-keys.html",
"Remediation": {
"Code": {
"CLI": "aws iam delete-service-specific-credential --user-name <username> --service-specific-credential-id <credential-id>",
"NativeIaC": "",
"Other": "",
"Terraform": ""
},
"Recommendation": {
"Text": "Apply the principle of least privilege to Amazon Bedrock API keys. Instead of granting administrative privileges or privilege escalation capabilities, assign only the permissions necessary for specific tasks. Create custom IAM policies with minimal permissions based on the principle of least privilege. Regularly review and audit API key permissions to ensure they cannot be used for privilege escalation.",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege"
}
},
"Categories": [
"gen-ai",
"trustboundaries"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "This check verifies that Amazon Bedrock API keys do not have administrative privileges or privilege escalation capabilities through attached IAM policies or inline policies. It follows the principle of least privilege to ensure API keys only have the minimum necessary permissions and cannot be used to escalate privileges."
}
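Per the recommendation above, a least-privilege alternative can be sketched as a policy document; the action and model ARN below are illustrative examples, not the check's required shape:

# Hypothetical least-privilege policy for a Bedrock API key.
least_privilege_bedrock_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": ["bedrock:InvokeModel"],
            "Resource": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-v2",
        }
    ],
}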
@@ -0,0 +1,57 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.iam.iam_client import iam_client
from prowler.providers.aws.services.iam.lib.policy import (
check_admin_access,
check_full_service_access,
)
from prowler.providers.aws.services.iam.lib.privilege_escalation import (
check_privilege_escalation,
)
class bedrock_api_key_no_administrative_privileges(Check):
def execute(self):
findings = []
for api_key in iam_client.service_specific_credentials:
if api_key.service_name != "bedrock.amazonaws.com":
continue
report = Check_Report_AWS(metadata=self.metadata(), resource=api_key)
report.status = "PASS"
report.status_extended = f"API key {api_key.id} in user {api_key.user.name} has no administrative privileges."
for policy in api_key.user.attached_policies:
policy_arn = policy["PolicyArn"]
if policy_arn in iam_client.policies:
policy_document = iam_client.policies[policy_arn].document
if policy_document:
if check_admin_access(policy_document):
report.status = "FAIL"
report.status_extended = f"API key {api_key.id} in user {api_key.user.name} has administrative privileges through attached policy {policy['PolicyName']}."
break
elif check_privilege_escalation(policy_document):
report.status = "FAIL"
report.status_extended = f"API key {api_key.id} in user {api_key.user.name} has privilege escalation through attached policy {policy['PolicyName']}."
break
elif check_full_service_access("bedrock", policy_document):
report.status = "FAIL"
report.status_extended = f"API key {api_key.id} in user {api_key.user.name} has full service access through attached policy {policy['PolicyName']}."
break
for inline_policy_name in api_key.user.inline_policies:
inline_policy_arn = f"{api_key.user.arn}:policy/{inline_policy_name}"
if inline_policy_arn in iam_client.policies:
policy_document = iam_client.policies[inline_policy_arn].document
if policy_document:
if check_admin_access(policy_document):
report.status = "FAIL"
report.status_extended = f"API key {api_key.id} in user {api_key.user.name} has administrative privileges through inline policy {inline_policy_name}."
break
elif check_privilege_escalation(policy_document):
report.status = "FAIL"
report.status_extended = f"API key {api_key.id} in user {api_key.user.name} has privilege escalation through inline policy {inline_policy_name}."
break
elif check_full_service_access("bedrock", policy_document):
report.status = "FAIL"
report.status_extended = f"API key {api_key.id} in user {api_key.user.name} has full service access through inline policy {inline_policy_name}."
break
findings.append(report)
return findings
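To make the FAIL branches above concrete, here is a minimal sketch of policy documents each helper would flag, with the expected results noted as comments (behavior assumed from the helper names and the imports at the top of the file):

from prowler.providers.aws.services.iam.lib.policy import (
    check_admin_access,
    check_full_service_access,
)

# Administrative privileges: any action on any resource.
admin_policy = {
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Action": ["*"], "Resource": "*"}],
}
# check_admin_access(admin_policy)                       -> True (FAIL branch)

# Full service access: every bedrock action on every resource.
full_bedrock_policy = {
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Action": ["bedrock:*"], "Resource": "*"}],
}
# check_full_service_access("bedrock", full_bedrock_policy) -> True (FAIL branch)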
@@ -45,6 +45,13 @@ def check_security_group(
if _is_cidr_public(ip_ingress_rule["CidrIpv6"], any_address):
return True
if (
ingress_rule["IpProtocol"] != "-1"
and protocol != "-1"
and ingress_rule["IpProtocol"] != protocol
):
return False
# Check for specific ports in ingress rules
if "FromPort" in ingress_rule:
# If there is a port range
@@ -1,5 +1,6 @@
from typing import Optional
from botocore.client import ClientError
from pydantic.v1 import BaseModel
from prowler.lib.logger import logger
@@ -71,6 +72,17 @@ class ElasticBeanstalk(AWSService):
and option["OptionName"] == "StreamLogs"
):
environment.cloudwatch_stream_logs = option.get("Value", "false")
except ClientError as error:
if error.response["Error"]["Code"] in [
"InvalidParameterValue",
]:
logger.warning(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
else:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -84,10 +96,17 @@ class ElasticBeanstalk(AWSService):
"ResourceTags"
]
resource.tags = response
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except ClientError as error:
if error.response["Error"]["Code"] in [
"ResourceNotFoundException",
]:
logger.warning(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
else:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -39,7 +39,7 @@ class emr_cluster_publicly_accesible(Check):
for sg in ec2_client.security_groups.values():
if sg.id == master_sg:
for ingress_rule in sg.ingress_rules:
if check_security_group(ingress_rule, -1):
if check_security_group(ingress_rule, "-1"):
master_sg_public = True
break
if master_sg_public:
@@ -61,7 +61,7 @@ class emr_cluster_publicly_accesible(Check):
for sg in ec2_client.security_groups.values():
if sg.id == slave_sg:
for ingress_rule in sg.ingress_rules:
if check_security_group(ingress_rule, -1):
if check_security_group(ingress_rule, "-1"):
slave_sg_public = True
break
if slave_sg_public:
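Note the EMR call sites now pass the protocol as the string "-1" rather than the integer -1, since the new guard in check_security_group above compares ingress_rule["IpProtocol"] (a string) against the protocol argument. A short sketch of the intended behavior, using a hypothetical rule dict:

# Hypothetical ingress rule open to the world on TCP/22.
open_tcp_rule = {
    "IpProtocol": "tcp",
    "FromPort": 22,
    "ToPort": 22,
    "IpRanges": [{"CidrIp": "0.0.0.0/0"}],
}
# check_security_group(open_tcp_rule, "-1")  -> True  ("-1" matches any protocol)
# check_security_group(open_tcp_rule, "udp") -> False (protocol mismatch short-circuits)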
@@ -6,7 +6,7 @@ from prowler.providers.aws.services.iam.lib.policy import check_admin_access
class iam_aws_attached_policy_no_administrative_privileges(Check):
def execute(self) -> Check_Report_AWS:
findings = []
for policy in iam_client.policies:
for policy in iam_client.policies.values():
# Check only for attached AWS policies
if policy.attached and policy.type == "AWS":
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
@@ -6,7 +6,7 @@ from prowler.providers.aws.services.iam.lib.policy import check_admin_access
class iam_customer_attached_policy_no_administrative_privileges(Check):
def execute(self) -> Check_Report_AWS:
findings = []
for policy in iam_client.policies:
for policy in iam_client.policies.values():
# Check only for attached custom policies
if policy.attached and policy.type == "Custom":
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
@@ -6,7 +6,7 @@ from prowler.providers.aws.services.iam.lib.policy import check_admin_access
class iam_customer_unattached_policy_no_administrative_privileges(Check):
def execute(self) -> Check_Report_AWS:
findings = []
for policy in iam_client.policies:
for policy in iam_client.policies.values():
# Check only for customer unattached policies
if not policy.attached and policy.type == "Custom":
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
@@ -9,7 +9,7 @@ class iam_inline_policy_allows_privilege_escalation(Check):
def execute(self) -> Check_Report_AWS:
findings = []
for policy in iam_client.policies:
for policy in iam_client.policies.values():
if policy.type == "Inline":
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
report.resource_id = f"{policy.entity}/{policy.name}"
@@ -6,7 +6,7 @@ from prowler.providers.aws.services.iam.lib.policy import check_admin_access
class iam_inline_policy_no_administrative_privileges(Check):
def execute(self) -> Check_Report_AWS:
findings = []
for policy in iam_client.policies:
for policy in iam_client.policies.values():
if policy.type == "Inline":
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
report.region = iam_client.region
@@ -9,7 +9,7 @@ class iam_inline_policy_no_full_access_to_cloudtrail(Check):
def execute(self) -> Check_Report_AWS:
findings = []
for policy in iam_client.policies:
for policy in iam_client.policies.values():
# Check only inline policies
if policy.type == "Inline":
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
@@ -9,7 +9,7 @@ class iam_inline_policy_no_full_access_to_kms(Check):
def execute(self):
findings = []
for policy in iam_client.policies:
for policy in iam_client.policies.values():
if policy.type == "Inline":
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
report.region = iam_client.region
@@ -13,7 +13,7 @@ class iam_no_custom_policy_permissive_role_assumption(Check):
return any("*" in r for r in resource)
return False
for policy in iam_client.policies:
for policy in iam_client.policies.values():
# Check only custom policies
if policy.type == "Custom":
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
@@ -9,7 +9,7 @@ class iam_policy_allows_privilege_escalation(Check):
def execute(self) -> Check_Report_AWS:
findings = []
for policy in iam_client.policies:
for policy in iam_client.policies.values():
if policy.type == "Custom":
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
report.region = iam_client.region
@@ -8,7 +8,7 @@ critical_service = "cloudtrail"
class iam_policy_no_full_access_to_cloudtrail(Check):
def execute(self) -> Check_Report_AWS:
findings = []
for policy in iam_client.policies:
for policy in iam_client.policies.values():
# Check only custom policies
if policy.type == "Custom":
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
@@ -8,7 +8,7 @@ critical_service = "kms"
class iam_policy_no_full_access_to_kms(Check):
def execute(self) -> Check_Report_AWS:
findings = []
for policy in iam_client.policies:
for policy in iam_client.policies.values():
# Check only custom policies
if policy.type == "Custom":
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
@@ -77,13 +77,15 @@ class IAM(AWSService):
cloudshell_admin_policy_arn
)
# List both Customer (attached and unattached) and AWS Managed (only attached) policies
self.policies = []
self.policies.extend(self._list_policies("AWS"))
self.policies.extend(self._list_policies("Local"))
self.policies = {}
self.policies.update(self._list_policies("AWS"))
self.policies.update(self._list_policies("Local"))
self._list_policies_version(self.policies)
self._list_inline_user_policies()
self._list_inline_group_policies()
self._list_inline_role_policies()
self.service_specific_credentials = []
self._list_service_specific_credentials()
self.saml_providers = self._list_saml_providers()
self.server_certificates = self._list_server_certificates()
self.access_keys_metadata = {}
@@ -99,7 +101,7 @@ class IAM(AWSService):
self.__threading_call__(self._list_tags, self.roles)
self.__threading_call__(
self._list_tags,
[policy for policy in self.policies if policy.type == "Custom"],
[policy for policy in self.policies.values() if policy.type == "Custom"],
)
self.__threading_call__(self._list_tags, self.server_certificates)
if self.saml_providers is not None:
@@ -514,16 +516,15 @@ class IAM(AWSService):
UserName=user.name, PolicyName=policy
)
inline_user_policy_doc = inline_policy["PolicyDocument"]
self.policies.append(
Policy(
name=policy,
arn=user.arn,
entity=user.name,
type="Inline",
attached=True,
version_id="v1",
document=inline_user_policy_doc,
)
inline_user_policy_arn = f"{user.arn}:policy/{policy}"
self.policies[inline_user_policy_arn] = Policy(
name=policy,
arn=user.arn,
entity=user.name,
type="Inline",
attached=True,
version_id="v1",
document=inline_user_policy_doc,
)
except ClientError as error:
if error.response["Error"]["Code"] == "NoSuchEntity":
@@ -572,16 +573,15 @@ class IAM(AWSService):
GroupName=group.name, PolicyName=policy
)
inline_group_policy_doc = inline_policy["PolicyDocument"]
self.policies.append(
Policy(
name=policy,
arn=group.arn,
entity=group.name,
type="Inline",
attached=True,
version_id="v1",
document=inline_group_policy_doc,
)
inline_group_policy_arn = f"{group.arn}:policy/{policy}"
self.policies[inline_group_policy_arn] = Policy(
name=policy,
arn=group.arn,
entity=group.name,
type="Inline",
attached=True,
version_id="v1",
document=inline_group_policy_doc,
)
except ClientError as error:
if error.response["Error"]["Code"] == "NoSuchEntity":
@@ -633,16 +633,15 @@ class IAM(AWSService):
RoleName=role.name, PolicyName=policy
)
inline_role_policy_doc = inline_policy["PolicyDocument"]
self.policies.append(
Policy(
name=policy,
arn=role.arn,
entity=role.name,
type="Inline",
attached=True,
version_id="v1",
document=inline_role_policy_doc,
)
inline_role_policy_arn = f"{role.arn}:policy/{policy}"
self.policies[inline_role_policy_arn] = Policy(
name=policy,
arn=role.arn,
entity=role.name,
type="Inline",
attached=True,
version_id="v1",
document=inline_role_policy_doc,
)
except ClientError as error:
if error.response["Error"]["Code"] == "NoSuchEntity":
@@ -742,7 +741,7 @@ class IAM(AWSService):
def _list_policies(self, scope):
logger.info("IAM - List Policies...")
try:
policies = []
policies = {}
list_policies_paginator = self.client.get_paginator("list_policies")
for page in list_policies_paginator.paginate(
Scope=scope, OnlyAttached=False if scope == "Local" else True
@@ -751,17 +750,13 @@ class IAM(AWSService):
if not self.audit_resources or (
is_resource_filtered(policy["Arn"], self.audit_resources)
):
policies.append(
Policy(
name=policy["PolicyName"],
arn=policy["Arn"],
entity=policy["PolicyId"],
version_id=policy["DefaultVersionId"],
type="Custom" if scope == "Local" else "AWS",
attached=(
True if policy["AttachmentCount"] > 0 else False
),
)
policies[policy["Arn"]] = Policy(
name=policy["PolicyName"],
arn=policy["Arn"],
entity=policy["PolicyId"],
version_id=policy["DefaultVersionId"],
type="Custom" if scope == "Local" else "AWS",
attached=(True if policy["AttachmentCount"] > 0 else False),
)
except Exception as error:
logger.error(
@@ -773,7 +768,7 @@ class IAM(AWSService):
def _list_policies_version(self, policies):
logger.info("IAM - List Policies Version...")
try:
for policy in policies:
for policy in policies.values():
try:
policy_version = self.client.get_policy_version(
PolicyArn=policy.arn, VersionId=policy.version_id
@@ -870,7 +865,10 @@ class IAM(AWSService):
SAMLProviderArn=resource.arn
).get("Tags", [])
except Exception as error:
if error.response["Error"]["Code"] == "NoSuchEntityException":
if error.response["Error"]["Code"] in [
"NoSuchEntity",
"NoSuchEntityException",
]:
logger.warning(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
@@ -1019,6 +1017,43 @@ class IAM(AWSService):
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
def _list_service_specific_credentials(self):
logger.info("IAM - List Service Specific Credentials...")
try:
for user in self.users:
service_specific_credentials = (
self.client.list_service_specific_credentials(UserName=user.name)
)
for credential in service_specific_credentials.get(
"ServiceSpecificCredentials", []
):
credential["Arn"] = (
f"arn:{self.audited_partition}:iam:{self.region}:{self.audited_account}:user/{user.name}/credential/{credential['ServiceSpecificCredentialId']}"
)
if not self.audit_resources or (
is_resource_filtered(credential["Arn"], self.audit_resources)
):
self.service_specific_credentials.append(
ServiceSpecificCredential(
arn=credential["Arn"],
user=user,
status=credential["Status"],
create_date=credential["CreateDate"],
service_user_name=credential.get("ServiceUserName"),
service_credential_alias=credential.get(
"ServiceCredentialAlias"
),
expiration_date=credential.get("ExpirationDate"),
id=credential.get("ServiceSpecificCredentialId"),
service_name=credential.get("ServiceName"),
region=self.region,
)
)
except Exception as error:
logger.error(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
class MFADevice(BaseModel):
serial_number: str
@@ -1046,6 +1081,19 @@ class Role(BaseModel):
tags: Optional[list]
class ServiceSpecificCredential(BaseModel):
arn: str
user: User
status: str
create_date: datetime
service_user_name: Optional[str]
service_credential_alias: Optional[str]
expiration_date: Optional[datetime]
id: str
service_name: str
region: str
class Group(BaseModel):
name: str
arn: str
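The net effect of this refactor is that self.policies is now a dict keyed by policy ARN, so the checks iterate with .values() (as updated above) and resolve a policy document by ARN in constant time. A sketch of the lookup pattern, with a hypothetical ARN:

policy_arn = "arn:aws:iam::123456789012:policy/MyPolicy"  # hypothetical ARN
policy = iam_client.policies.get(policy_arn)  # O(1) lookup instead of a list scan
if policy and policy.document:
    print(policy.name, policy.type, policy.attached)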
@@ -223,6 +223,108 @@ def check_full_service_access(service: str, policy: dict) -> bool:
return all_target_service_actions.issubset(actions_allowed_on_all_resources)
def has_public_principal(statement: dict) -> bool:
"""
Check if a policy statement has a public principal.
Args:
statement (dict): IAM policy statement
Returns:
bool: True if the statement has a public principal, False otherwise
"""
principal = statement.get("Principal", "")
return (
"*" in principal
or "arn:aws:iam::*:root" in principal
or (
isinstance(principal, dict)
and (
"*" in principal.get("AWS", "")
or "arn:aws:iam::*:root" in principal.get("AWS", "")
or (
isinstance(principal.get("AWS"), list)
and (
"*" in principal["AWS"]
or "arn:aws:iam::*:root" in principal["AWS"]
)
)
or "*" in principal.get("CanonicalUser", "")
or "arn:aws:iam::*:root" in principal.get("CanonicalUser", "")
)
)
)
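A quick sketch of how has_public_principal treats common Principal shapes, based on the logic above:

# Statements reduced to their Principal field for brevity.
assert has_public_principal({"Principal": "*"})
assert has_public_principal({"Principal": {"AWS": "*"}})
assert has_public_principal({"Principal": {"AWS": ["arn:aws:iam::*:root"]}})
assert not has_public_principal(
    {"Principal": {"AWS": "arn:aws:iam::123456789012:root"}}
)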
def has_restrictive_source_arn_condition(
statement: dict, source_account: str = ""
) -> bool:
"""
Check if a policy statement has a restrictive aws:SourceArn condition.
A SourceArn condition is considered restrictive if:
1. It doesn't contain overly permissive wildcards (like "*" or "arn:aws:s3:::*")
2. When source_account is provided, the ARN either contains no account field (like S3 buckets)
or contains the source_account
Args:
statement (dict): IAM policy statement
source_account (str): The account to check restrictions for (optional)
Returns:
bool: True if the statement has a restrictive aws:SourceArn condition, False otherwise
"""
if "Condition" not in statement:
return False
for condition_operator in statement["Condition"]:
for condition_key, condition_value in statement["Condition"][
condition_operator
].items():
if condition_key.lower() == "aws:sourcearn":
arn_values = (
condition_value
if isinstance(condition_value, list)
else [condition_value]
)
for arn_value in arn_values:
if (
arn_value == "*" # Global wildcard
or arn_value.count("*")
>= 3 # Too many wildcards (e.g., arn:aws:*:*:*:*)
or (
isinstance(arn_value, str)
and (
arn_value.endswith(
":::*"
) # Service-wide wildcard (e.g., arn:aws:s3:::*)
or arn_value.endswith(
":*"
) # Resource wildcard (e.g., arn:aws:sns:us-east-1:123456789012:*)
)
)
):
return False
if source_account:
arn_parts = arn_value.split(":")
if len(arn_parts) > 4 and arn_parts[4] and arn_parts[4] != "*":
if arn_parts[4].isdigit():
if source_account not in arn_value:
return False
else:
if arn_parts[4] != source_account:
return False
elif len(arn_parts) > 4 and arn_parts[4] == "*":
return False
# else: ARN doesn't contain account field (like S3 bucket), so it's restrictive
return True
return False
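A sketch of the rules in the docstring (the bucket and topic ARNs are hypothetical; expected results noted as comments):

# No account field in the ARN (S3 bucket) -> restrictive.
s3_cond = {"Condition": {"ArnEquals": {"aws:SourceArn": "arn:aws:s3:::my-bucket"}}}
# has_restrictive_source_arn_condition(s3_cond, "123456789012") -> True

# Resource wildcard -> not restrictive.
wild_cond = {
    "Condition": {"ArnLike": {"aws:SourceArn": "arn:aws:sns:us-east-1:123456789012:*"}}
}
# has_restrictive_source_arn_condition(wild_cond) -> False

# Account field present but different from source_account -> not restrictive.
other_cond = {
    "Condition": {"ArnEquals": {"aws:SourceArn": "arn:aws:sns:us-east-1:999988887777:alerts"}}
}
# has_restrictive_source_arn_condition(other_cond, "123456789012") -> False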
def is_condition_restricting_from_private_ip(condition_statement: dict) -> bool:
"""Check if the policy condition is coming from a private IP address.
@@ -303,61 +405,49 @@ def is_policy_public(
for statement in policy.get("Statement", []):
# Only check allow statements
if statement["Effect"] == "Allow":
has_public_access = has_public_principal(statement)
principal = statement.get("Principal", "")
if not has_public_access and isinstance(principal, dict):
    # Check for cross-account access when not allowed
    if (
        isinstance(principal.get("AWS"), str)
        and source_account
        and not is_cross_account_allowed
        and source_account not in principal.get("AWS", "")
    ) or (
        isinstance(principal.get("AWS"), list)
        and source_account
        and not is_cross_account_allowed
        and not any(
            source_account in principal_aws
            for principal_aws in principal["AWS"]
        )
    ):
        has_public_access = True
# Check for cross-service confused deputy
if check_cross_service_confused_deputy and (
    # Check if function can be invoked by other AWS services if check_cross_service_confused_deputy is True
    (
        ".amazonaws.com" in principal.get("Service", "")
        or ".amazon.com" in principal.get("Service", "")
        or "*" in principal.get("Service", "")
    )
    and (
        "secretsmanager.amazonaws.com"
        not in principal.get("Service", "")  # AWS ensures that resources called by SecretsManager are executed in the same AWS account
        or "eks.amazonaws.com"
        not in principal.get("Service", "")  # AWS ensures that resources called by EKS are executed in the same AWS account
    )
):
    has_public_access = True
if has_public_access and (
not not_allowed_actions # If not_allowed_actions is empty, the function will not consider the actions in the policy
or (
statement.get(
@@ -498,9 +588,29 @@ def is_condition_block_restrictive(
"aws:sourcevpc" != value
and "aws:sourcevpce" != value
):
if source_account not in item:
is_condition_key_restrictive = False
break
if value == "aws:sourcearn":
# Use the specialized function to properly validate SourceArn restrictions
# Create a minimal statement to test with our function
test_statement = {
"Condition": {
condition_operator: {
value: condition_statement[
condition_operator
][value]
}
}
}
is_condition_key_restrictive = (
has_restrictive_source_arn_condition(
test_statement, source_account
)
)
if not is_condition_key_restrictive:
break
else:
if source_account not in item:
is_condition_key_restrictive = False
break
if is_condition_key_restrictive:
is_condition_valid = True
@@ -516,11 +626,31 @@ def is_condition_block_restrictive(
if is_cross_account_allowed:
is_condition_valid = True
else:
if (
source_account
in condition_statement[condition_operator][value]
):
is_condition_valid = True
if value == "aws:sourcearn":
# Use the specialized function to properly validate SourceArn restrictions
# Create a minimal statement to test with our function
test_statement = {
"Condition": {
condition_operator: {
value: condition_statement[
condition_operator
][value]
}
}
}
is_condition_valid = (
has_restrictive_source_arn_condition(
test_statement, source_account
)
)
else:
if (
source_account
in condition_statement[condition_operator][
value
]
):
is_condition_valid = True
return is_condition_valid
@@ -2,6 +2,7 @@ import json
from datetime import datetime, timezone
from typing import Dict, List, Optional
from botocore.client import ClientError
from pydantic.v1 import BaseModel, Field
from prowler.lib.logger import logger
@@ -67,6 +68,21 @@ class SecretsManager(AWSService):
)
if secret_policy.get("ResourcePolicy"):
secret.policy = json.loads(secret_policy["ResourcePolicy"])
except ClientError as error:
if error.response["Error"]["Code"] in [
"ResourceNotFoundException",
]:
logger.warning(
f"{self.region} --"
f" {error.__class__.__name__}[{error.__traceback__.tb_lineno}]:"
f" {error}"
)
else:
logger.error(
f"{self.region} --"
f" {error.__class__.__name__}[{error.__traceback__.tb_lineno}]:"
f" {error}"
)
except Exception as error:
logger.error(
f"{self.region} --"
@@ -1,5 +1,7 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.iam.lib.policy import (
has_public_principal,
has_restrictive_source_arn_condition,
is_condition_block_restrictive,
is_condition_block_restrictive_organization,
is_condition_block_restrictive_sns_endpoint,
@@ -16,46 +18,26 @@ class sns_topics_not_publicly_accessible(Check):
report.status_extended = (
f"SNS topic {topic.name} is not publicly accessible."
)
if topic.policy:
for statement in topic.policy["Statement"]:
# Only check allow statements
if statement["Effect"] == "Allow" and has_public_principal(
    statement
):
    if has_restrictive_source_arn_condition(statement):
        break
    elif "Condition" in statement:
        condition_account = is_condition_block_restrictive(
            statement["Condition"], sns_client.audited_account
        )
        condition_org = is_condition_block_restrictive_organization(
            statement["Condition"]
        )
        condition_endpoint = (
            is_condition_block_restrictive_sns_endpoint(
                statement["Condition"]
            )
        )
if condition_account and condition_org:
report.status_extended = f"SNS topic {topic.name} is not public because its policy only allows access from the account {sns_client.audited_account} and an organization."
@@ -69,7 +51,11 @@ class sns_topics_not_publicly_accessible(Check):
report.status = "FAIL"
report.status_extended = f"SNS topic {topic.name} is public because its policy allows public access."
break
else:
# Public principal with no conditions = public
report.status = "FAIL"
report.status_extended = f"SNS topic {topic.name} is public because its policy allows public access."
break
findings.append(report)
return findings
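Put together, the rewrite means a topic policy statement like the following (a hypothetical S3-events policy) no longer trips the check, which was the false positive reported in #8326:

# Public principal, but locked to a single source bucket via aws:SourceArn:
# has_public_principal(...) is True, has_restrictive_source_arn_condition(...)
# is also True, so the statement is skipped and the topic stays PASS.
s3_events_statement = {
    "Effect": "Allow",
    "Principal": "*",
    "Action": "SNS:Publish",
    "Resource": "arn:aws:sns:us-east-1:123456789012:alerts",
    "Condition": {"ArnEquals": {"aws:SourceArn": "arn:aws:s3:::my-bucket"}},
}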
@@ -51,6 +51,7 @@ class SQS(AWSService):
def _get_queue_attributes(self):
try:
logger.info("SQS - describing queue attributes...")
non_existing_queues = []
for queue in self.queues:
try:
regional_client = self.regional_clients[queue.region]
@@ -80,6 +81,7 @@ class SQS(AWSService):
logger.warning(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
non_existing_queues.append(queue)
else:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -88,6 +90,7 @@ class SQS(AWSService):
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
self.queues = [q for q in self.queues if q not in non_existing_queues]
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -99,6 +99,21 @@ class SSM(AWSService):
"AccountIds"
]
except ClientError as error:
if error.response["Error"]["Code"] in [
"InvalidDocumentOperation",
]:
logger.warning(
f"{regional_client.region} --"
f" {error.__class__.__name__}[{error.__traceback__.tb_lineno}]:"
f" {error}"
)
else:
logger.error(
f"{regional_client.region} --"
f" {error.__class__.__name__}[{error.__traceback__.tb_lineno}]:"
f" {error}"
)
except Exception as error:
logger.error(
f"{regional_client.region} --"
@@ -70,17 +70,44 @@ class Storage(AzureService):
],
key_expiration_period_in_days=key_expiration_period_in_days,
location=storage_account.location,
default_to_entra_authorization=getattr(
storage_account,
"default_to_o_auth_authentication",
False,
default_to_entra_authorization=(
False
if getattr(
storage_account,
"default_to_o_auth_authentication",
False,
)
is None
else getattr(
storage_account,
"default_to_o_auth_authentication",
False,
)
),
replication_settings=replication_settings,
allow_cross_tenant_replication=getattr(
storage_account, "allow_cross_tenant_replication", True
allow_cross_tenant_replication=(
True
if getattr(
storage_account,
"allow_cross_tenant_replication",
True,
)
is None
else getattr(
storage_account,
"allow_cross_tenant_replication",
True,
)
),
allow_shared_key_access=getattr(
storage_account, "allow_shared_key_access", True
allow_shared_key_access=(
True
if getattr(
storage_account, "allow_shared_key_access", True
)
is None
else getattr(
storage_account, "allow_shared_key_access", True
)
),
)
)
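The repeated pattern above coalesces both a missing attribute and an attribute explicitly set to None into a default. A more compact helper could express the same thing (a sketch, not the shipped code):

def attr_or_default(obj, name, default):
    """Return default when the attribute is missing OR set to None."""
    value = getattr(obj, name, default)
    return default if value is None else value

# e.g. allow_shared_key_access=attr_or_default(storage_account, "allow_shared_key_access", True)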
@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "vm_desired_sku_size",
"CheckTitle": "Ensure that your virtual machine instances are using SKU sizes that are approved by your organization",
"CheckType": [],
"ServiceName": "vm",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Microsoft.Compute/virtualMachines",
"Description": "Ensure that your virtual machine instances are using SKU sizes that are approved by your organization. This check requires configuration of the desired VM SKU sizes in the Prowler configuration file.",
"Risk": "Setting limits for the SKU size(s) of the virtual machine instances provisioned in your Microsoft Azure account can help you to manage better your cloud compute power, address internal compliance requirements and prevent unexpected charges on your Azure monthly bill. Without proper SKU size controls, organizations may face cost overruns and compliance violations.",
"RelatedUrl": "https://learn.microsoft.com/en-us/azure/virtual-machines/sizes/overview",
"Remediation": {
"Code": {
"CLI": "az policy assignment create --display-name 'Allowed VM SKU Sizes' --policy cccc23c7-8427-4f53-ad12-b6a63eb452b3 -p '{\"listOfAllowedSKUs\": {\"value\": [\"<desired-sku-1>\", \"<desired-sku-2>\"]}}' --scope /subscriptions/<subscription-id>",
"NativeIaC": "",
"Other": "",
"Terraform": ""
},
"Recommendation": {
"Text": "1. Define and document your organization's approved VM SKU sizes based on workload requirements, cost constraints, and compliance needs. 2. Implement Azure Policy to enforce VM size restrictions across your subscriptions. 3. Use the 'Allowed virtual machine size SKUs' built-in policy to restrict VM creation to approved sizes. 4. Regularly review and update your approved SKU list based on changing business requirements and cost optimization goals. 5. Monitor VM usage and costs to ensure compliance with your SKU size policies.",
"Url": "https://learn.microsoft.com/en-us/azure/virtual-machines/sizes/resize-vm"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "This check requires configuration of the desired VM SKU sizes in the Prowler configuration file. Configure the azure.desired_vm_sku_sizes list in your Prowler configuration file (see https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/configuration_file/) with the SKU sizes approved by your organization."
}
@@ -0,0 +1,49 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.vm.vm_client import vm_client
class vm_desired_sku_size(Check):
"""
Ensure that Azure virtual machines are using SKU sizes that are approved by your organization.
This check evaluates whether each virtual machine's SKU size is included in the organization's approved list of VM sizes.
The approved SKU sizes are configured in the Prowler configuration file under azure.desired_vm_sku_sizes.
- PASS: The VM is using a SKU size that is approved by the organization.
- FAIL: The VM is using a SKU size that is not approved by the organization.
"""
def execute(self) -> list[Check_Report_Azure]:
"""
Execute the check to verify that virtual machines are using desired SKU sizes.
Returns:
A list of check reports for each virtual machine
"""
findings = []
# Get the desired SKU sizes from configuration
DESIRED_SKU_SIZES = vm_client.audit_config.get(
"desired_vm_sku_sizes",
[
"Standard_A8_v2",
"Standard_DS3_v2",
"Standard_D4s_v3",
],
)
for subscription_name, vms in vm_client.virtual_machines.items():
for vm in vms.values():
report = Check_Report_Azure(metadata=self.metadata(), resource=vm)
report.subscription = subscription_name
if vm.vm_size in DESIRED_SKU_SIZES:
report.status = "PASS"
report.status_extended = f"VM {vm.resource_name} is using desired SKU size {vm.vm_size} in subscription {subscription_name}."
else:
report.status = "FAIL"
report.status_extended = f"VM {vm.resource_name} is using {vm.vm_size} which is not a desired SKU size in subscription {subscription_name}."
findings.append(report)
return findings
@@ -0,0 +1,30 @@
{
"Provider": "azure",
"CheckID": "vm_scaleset_not_empty",
"CheckTitle": "Check for Empty Virtual Machine Scale Sets",
"CheckType": [],
"ServiceName": "vm",
"SubServiceName": "scaleset",
"ResourceIdTemplate": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}",
"Severity": "low",
"ResourceType": "Microsoft.Compute/virtualMachineScaleSets",
"Description": "Identify and remove empty virtual machine scale sets from your Azure cloud account.",
"Risk": "Empty virtual machine scale sets may incur unnecessary costs and complicate cloud resource management, impacting cost optimization and compliance.",
"RelatedUrl": "https://learn.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview",
"Remediation": {
"Code": {
"CLI": "az vmss delete --name <scale-set-name> --resource-group <resource-group>",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/VirtualMachines/empty-vm-scale-sets.html",
"Terraform": ""
},
"Recommendation": {
"Text": "Remove empty Azure virtual machine scale sets to optimize costs and simplify management.",
"Url": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/VirtualMachines/empty-vm-scale-sets.html"
}
},
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
}
@@ -0,0 +1,29 @@
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.vm.vm_client import vm_client
class vm_scaleset_not_empty(Check):
"""
Ensure that Azure virtual machine scale sets are not empty.
This check evaluates whether each VM scale set contains at least one VM instance.
- PASS: The scale set has at least one VM instance.
- FAIL: The scale set has no VM instances (i.e., it is empty).
"""
def execute(self):
findings = []
for subscription, scale_sets in vm_client.vm_scale_sets.items():
for scale_set in scale_sets.values():
report = Check_Report_Azure(
metadata=self.metadata(), resource=scale_set
)
report.subscription = subscription
if not scale_set.instance_ids:
report.status = "FAIL"
report.status_extended = f"Scale set '{scale_set.resource_name}' in subscription '{subscription}' is empty: no VM instances present."
else:
report.status = "PASS"
report.status_extended = f"Scale set '{scale_set.resource_name}' in subscription '{subscription}' has {len(scale_set.instance_ids)} VM instances."
findings.append(report)
return findings
@@ -75,6 +75,30 @@ class VirtualMachines(AzureService):
)
)
# Convert Azure SDK SecurityProfile to custom SecurityProfile dataclass
azure_security_profile = getattr(vm, "security_profile", None)
security_profile = None
if azure_security_profile:
uefi_settings = None
azure_uefi_settings = getattr(
azure_security_profile, "uefi_settings", None
)
if azure_uefi_settings:
uefi_settings = UefiSettings(
secure_boot_enabled=getattr(
azure_uefi_settings, "secure_boot_enabled", False
),
v_tpm_enabled=getattr(
azure_uefi_settings, "v_tpm_enabled", False
),
)
security_profile = SecurityProfile(
security_type=getattr(
azure_security_profile, "security_type", None
),
uefi_settings=uefi_settings,
)
virtual_machines[subscription_name].update(
{
vm.id: VirtualMachine(
@@ -103,8 +127,13 @@ class VirtualMachines(AzureService):
else None
),
location=vm.location,
security_profile=getattr(vm, "security_profile", None),
security_profile=security_profile,
extensions=extensions,
vm_size=getattr(
getattr(vm, "hardware_profile", None),
"vm_size",
None,
),
image_reference=getattr(
getattr(storage_profile, "image_reference", None),
"id",
@@ -205,12 +234,17 @@ class VirtualMachines(AzureService):
for pool in pools:
if getattr(pool, "id", None):
backend_pools.append(pool.id)
# Get instance IDs using the private method
instance_ids = self._get_vmss_instance_ids(
subscription_name, scale_set.id
)
vm_scale_sets[subscription_name][scale_set.id] = (
VirtualMachineScaleSet(
resource_id=scale_set.id,
resource_name=scale_set.name,
location=scale_set.location,
load_balancer_backend_pools=backend_pools,
instance_ids=instance_ids,
)
)
except Exception as error:
@@ -219,6 +253,46 @@ class VirtualMachines(AzureService):
)
return vm_scale_sets
def _get_vmss_instance_ids(
self, subscription_name: str, scale_set_id: str
) -> list[str]:
"""
Given a subscription and scale set ID, return the list of VM instance IDs in the scale set.
Args:
subscription_name: The name of the subscription.
scale_set_id: The ID of the scale set.
Returns:
A list of VM instance IDs that compose the scale set.
"""
logger.info(
f"VirtualMachines - Getting VM scale set instance IDs for {scale_set_id} in {subscription_name}..."
)
vm_instance_ids = []
client = self.clients.get(subscription_name, None)
try:
resource_id_parts = scale_set_id.split("/")
resource_group = ""
scale_set_name = ""
for i, part in enumerate(resource_id_parts):
if part.lower() == "resourcegroups" and i + 1 < len(resource_id_parts):
resource_group = resource_id_parts[i + 1]
if part.lower() == "virtualmachinescalesets" and i + 1 < len(
resource_id_parts
):
scale_set_name = resource_id_parts[i + 1]
if resource_group and scale_set_name:
instances = client.virtual_machine_scale_set_vms.list(
resource_group, scale_set_name
)
vm_instance_ids = [instance.instance_id for instance in instances]
except Exception as e:
logger.error(
f"Failed to list instances for scale set {scale_set_name} in {resource_group}: {e}"
)
return vm_instance_ids
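If taking a dependency on azure-mgmt-core is acceptable, parse_resource_id could replace the manual split above (a sketch; behavior assumed from that library's documented API):

from azure.mgmt.core.tools import parse_resource_id

parts = parse_resource_id(scale_set_id)
resource_group = parts.get("resource_group", "")
scale_set_name = parts.get("name", "")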
@dataclass
class UefiSettings:
@@ -273,6 +347,7 @@ class VirtualMachine(BaseModel):
security_profile: Optional[SecurityProfile]
extensions: list[VirtualMachineExtension]
storage_profile: Optional[StorageProfile] = None
vm_size: Optional[str] = None
image_reference: Optional[str] = None
linux_configuration: Optional[LinuxConfiguration] = None
@@ -290,3 +365,4 @@ class VirtualMachineScaleSet(BaseModel):
resource_name: str
location: str
load_balancer_backend_pools: list[str]
instance_ids: list[str]
@@ -99,6 +99,7 @@ class GithubProvider(Provider):
personal_access_token: str = "",
oauth_app_token: str = "",
github_app_key: str = "",
github_app_key_content: str = "",
github_app_id: int = 0,
# Provider configuration
config_path: str = None,
@@ -114,6 +115,7 @@ class GithubProvider(Provider):
personal_access_token (str): GitHub personal access token.
oauth_app_token (str): GitHub OAuth App token.
github_app_key (str): GitHub App key.
github_app_key_content (str): GitHub App key content.
github_app_id (int): GitHub App ID.
config_path (str): Path to the audit configuration file.
config_content (dict): Audit configuration content.
@@ -128,6 +130,7 @@ class GithubProvider(Provider):
oauth_app_token,
github_app_id,
github_app_key,
github_app_key_content,
)
# Set the authentication method
@@ -26,7 +26,7 @@ class defender_domain_dkim_enabled(Check):
report = CheckReportM365(
metadata=self.metadata(),
resource=config,
resource_name="DKIM Configuration",
resource_name=config.id,
resource_id=config.id,
)
report.status = "FAIL"
@@ -321,6 +321,11 @@ config_azure = {
"python_latest_version": "3.12",
"java_latest_version": "17",
"recommended_minimal_tls_versions": ["1.2", "1.3"],
"desired_vm_sku_sizes": [
"Standard_A8_v2",
"Standard_DS3_v2",
"Standard_D4s_v3",
],
"defender_attack_path_minimal_risk_level": "High",
}
@@ -395,6 +395,16 @@ azure:
"1.3"
]
# Azure Virtual Machines
# azure.vm_desired_sku_size
# List of desired VM SKU sizes that are allowed in the organization
desired_vm_sku_sizes:
[
"Standard_A8_v2",
"Standard_DS3_v2",
"Standard_D4s_v3",
]
# GCP Configuration
gcp:
# GCP Compute Configuration
@@ -0,0 +1,256 @@
#!/usr/bin/env python
"""
Security test for prowler-wrapper.py command injection vulnerability
This test demonstrates the command injection vulnerability and validates the fix
"""
import os
import shutil
import sys
import tempfile
import unittest
from unittest.mock import MagicMock, patch
class TestProwlerWrapperSecurity(unittest.TestCase):
"""Test cases for command injection vulnerability in prowler-wrapper.py"""
def setUp(self):
"""Set up test environment"""
# Create a temporary directory for testing
self.test_dir = tempfile.mkdtemp()
self.prowler_wrapper_path = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
),
"contrib",
"wazuh",
"prowler-wrapper.py",
)
def tearDown(self):
"""Clean up test environment"""
shutil.rmtree(self.test_dir, ignore_errors=True)
def _import_prowler_wrapper(self):
"""Helper to import prowler_wrapper with mocked WAZUH_PATH"""
sys.path.insert(0, os.path.dirname(self.prowler_wrapper_path))
# Mock the WAZUH_PATH that's read at module level
with patch("builtins.open", create=True) as mock_open:
mock_open.return_value.readline.return_value = 'DIRECTORY="/opt/wazuh"'
import importlib.util
spec = importlib.util.spec_from_file_location(
"prowler_wrapper", self.prowler_wrapper_path
)
prowler_wrapper = importlib.util.module_from_spec(spec)
spec.loader.exec_module(prowler_wrapper)
return prowler_wrapper._run_prowler
def test_command_injection_semicolon(self):
"""Test command injection using semicolon"""
# Create a test file that should not be created if injection is prevented
test_file = os.path.join(self.test_dir, "pwned.txt")
# Malicious profile that attempts to create a file
malicious_profile = f"test; touch {test_file}"
# Mock the subprocess.Popen to capture the command
with patch("subprocess.Popen") as mock_popen:
mock_process = MagicMock()
mock_process.communicate.return_value = (b"test output", None)
mock_popen.return_value = mock_process
# Import and run the vulnerable function
_run_prowler = self._import_prowler_wrapper()
# Run with malicious input
_run_prowler(f'-p "{malicious_profile}" -V')
# Check that Popen was called
self.assertTrue(mock_popen.called)
# Get the actual command that was passed to Popen
actual_command = mock_popen.call_args[0][0]
# With the fix, the command should be a list (from shlex.split)
# and should NOT have shell=True
self.assertIsInstance(
actual_command, list, "Command should be a list after shlex.split"
)
# Check that shell=True is not in the call
call_kwargs = mock_popen.call_args[1]
self.assertNotIn(
"shell",
call_kwargs,
"shell parameter should not be present (defaults to False)",
)
def test_command_injection_ampersand(self):
"""Test command injection using ampersand"""
# Create a test file that should not be created if injection is prevented
test_file = os.path.join(self.test_dir, "pwned2.txt")
# Malicious profile that attempts to create a file
malicious_profile = f"test && touch {test_file}"
with patch("subprocess.Popen") as mock_popen:
mock_process = MagicMock()
mock_process.communicate.return_value = (b"test output", None)
mock_popen.return_value = mock_process
# Import and run the function
_run_prowler = self._import_prowler_wrapper()
# Run with malicious input
_run_prowler(f'-p "{malicious_profile}" -V')
# Get the actual command
actual_command = mock_popen.call_args[0][0]
# Verify it's a list (safe execution)
self.assertIsInstance(actual_command, list)
# The malicious characters should be preserved as part of the argument
# not interpreted as shell commands
command_str = " ".join(actual_command)
self.assertIn(
"&&",
command_str,
"Shell metacharacters should be preserved as literals",
)
def test_command_injection_pipe(self):
"""Test command injection using pipe"""
malicious_profile = 'test | echo "injected"'
with patch("subprocess.Popen") as mock_popen:
mock_process = MagicMock()
mock_process.communicate.return_value = (b"test output", None)
mock_popen.return_value = mock_process
# Import and run the function
_run_prowler = self._import_prowler_wrapper()
# Run with malicious input
_run_prowler(f'-p "{malicious_profile}" -V')
# Get the actual command
actual_command = mock_popen.call_args[0][0]
# Verify safe execution
self.assertIsInstance(actual_command, list)
# Pipe should be preserved as literal
command_str = " ".join(actual_command)
self.assertIn("|", command_str)
def test_command_injection_backticks(self):
"""Test command injection using backticks"""
malicious_profile = "test `echo injected`"
with patch("subprocess.Popen") as mock_popen:
mock_process = MagicMock()
mock_process.communicate.return_value = (b"test output", None)
mock_popen.return_value = mock_process
# Import and run the function
_run_prowler = self._import_prowler_wrapper()
# Run with malicious input
_run_prowler(f'-p "{malicious_profile}" -V')
# Get the actual command
actual_command = mock_popen.call_args[0][0]
# Verify safe execution
self.assertIsInstance(actual_command, list)
# Backticks should be preserved as literals
command_str = " ".join(actual_command)
self.assertIn("`", command_str)
def test_command_injection_dollar_parentheses(self):
"""Test command injection using $() syntax"""
malicious_profile = "test $(echo injected)"
with patch("subprocess.Popen") as mock_popen:
mock_process = MagicMock()
mock_process.communicate.return_value = (b"test output", None)
mock_popen.return_value = mock_process
# Import and run the function
_run_prowler = self._import_prowler_wrapper()
# Run with malicious input
_run_prowler(f'-p "{malicious_profile}" -V')
# Get the actual command
actual_command = mock_popen.call_args[0][0]
# Verify safe execution
self.assertIsInstance(actual_command, list)
# $() should be preserved as literals
command_str = " ".join(actual_command)
self.assertIn("$(", command_str)
def test_legitimate_profile_name(self):
"""Test that legitimate profile names still work correctly"""
legitimate_profile = "production-aws-profile"
with patch("subprocess.Popen") as mock_popen:
mock_process = MagicMock()
mock_process.communicate.return_value = (b"test output", None)
mock_popen.return_value = mock_process
# Import and run the function
_run_prowler = self._import_prowler_wrapper()
# Run with legitimate input
result = _run_prowler(f"-p {legitimate_profile} -V")
# Verify the function returns output
self.assertEqual(result, b"test output")
# Verify Popen was called correctly
actual_command = mock_popen.call_args[0][0]
self.assertIsInstance(actual_command, list)
# Check the profile is passed correctly
command_str = " ".join(actual_command)
self.assertIn(legitimate_profile, command_str)
def test_shlex_split_behavior(self):
"""Test that shlex properly handles quoted arguments"""
profile_with_spaces = "my profile name"
with patch("subprocess.Popen") as mock_popen:
mock_process = MagicMock()
mock_process.communicate.return_value = (b"test output", None)
mock_popen.return_value = mock_process
# Import and run the function
_run_prowler = self._import_prowler_wrapper()
# Run with profile containing spaces
_run_prowler(f'-p "{profile_with_spaces}" -V')
# Get the actual command
actual_command = mock_popen.call_args[0][0]
# Verify it's properly split
self.assertIsInstance(actual_command, list)
# The profile name should be preserved as a single argument
# despite containing spaces
self.assertIn("my profile name", actual_command)
if __name__ == "__main__":
unittest.main()
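For reference, the safe execution pattern these tests assert looks roughly like this (a hypothetical reconstruction; the real prowler-wrapper.py wiring differs):

import shlex
import subprocess

PROWLER = "/opt/prowler/prowler"  # hypothetical install path

def _run_prowler(args: str) -> bytes:
    # shlex.split keeps quoted arguments intact; with a list command and no
    # shell=True, metacharacters (;, &&, |, `...`, $(...)) stay literal arguments.
    command = [PROWLER] + shlex.split(args)
    process = subprocess.Popen(command, stdout=subprocess.PIPE)
    output, _ = process.communicate()
    return output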
@@ -0,0 +1,618 @@
from datetime import timezone
from json import dumps
from unittest import mock
from boto3 import client
from moto import mock_aws
from tests.providers.aws.utils import AWS_REGION_US_EAST_1, set_mocked_aws_provider
# Test policy documents
ADMIN_POLICY = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Allow", "Action": ["*"], "Resource": "*"}],
}
NON_ADMIN_POLICY = {
"Version": "2012-10-17",
"Statement": [{"Effect": "Allow", "Action": ["bedrock:*"], "Resource": "*"}],
}
PRIVILEGE_ESCALATION_POLICY = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"iam:CreateAccessKey",
"iam:CreateUser",
"iam:AttachUserPolicy",
],
"Resource": "*",
}
],
}
class Test_bedrock_api_key_no_administrative_privileges:
@mock_aws
def test_no_bedrock_api_keys(self):
from prowler.providers.aws.services.iam.iam_service import IAM
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
),
mock.patch(
"prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges.iam_client",
new=IAM(aws_provider),
),
):
from prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges import (
bedrock_api_key_no_administrative_privileges,
)
check = bedrock_api_key_no_administrative_privileges()
result = check.execute()
assert len(result) == 0
@mock_aws
def test_bedrock_api_key_with_admin_attached_policy(self):
iam_client = client("iam", region_name=AWS_REGION_US_EAST_1)
# Create user
user_name = "test_user"
user_arn = iam_client.create_user(UserName=user_name)["User"]["Arn"]
# Create admin policy
admin_policy_arn = iam_client.create_policy(
PolicyName="AdminPolicy",
PolicyDocument=dumps(ADMIN_POLICY),
Path="/",
)["Policy"]["Arn"]
# Attach admin policy to user
iam_client.attach_user_policy(UserName=user_name, PolicyArn=admin_policy_arn)
from prowler.providers.aws.services.iam.iam_service import IAM
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
iam = IAM(aws_provider)
# Mock service-specific credentials
from datetime import datetime
from prowler.providers.aws.services.iam.iam_service import (
ServiceSpecificCredential,
User,
)
# Create a mock user with the attached policy
mock_user = User(
name=user_name,
arn=user_arn,
attached_policies=[
{"PolicyArn": admin_policy_arn, "PolicyName": "AdminPolicy"}
],
inline_policies=[],
)
# Create a mock service-specific credential
mock_credential = ServiceSpecificCredential(
arn=f"arn:aws:iam:{AWS_REGION_US_EAST_1}:123456789012:user/{user_name}/credential/test-credential-id",
user=mock_user,
status="Active",
create_date=datetime.now(timezone.utc),
service_user_name=None,
service_credential_alias=None,
expiration_date=None,
id="test-credential-id",
service_name="bedrock.amazonaws.com",
region=AWS_REGION_US_EAST_1,
)
iam.service_specific_credentials = [mock_credential]
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
),
mock.patch(
"prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges.iam_client",
new=iam,
),
):
from prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges import (
bedrock_api_key_no_administrative_privileges,
)
check = bedrock_api_key_no_administrative_privileges()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== "API key test-credential-id in user test_user has administrative privileges through attached policy AdminPolicy."
)
assert result[0].resource_id == "test-credential-id"
assert result[0].region == AWS_REGION_US_EAST_1
@mock_aws
def test_bedrock_api_key_with_admin_inline_policy(self):
iam_client = client("iam", region_name=AWS_REGION_US_EAST_1)
# Create user
user_name = "test_user"
user_arn = iam_client.create_user(UserName=user_name)["User"]["Arn"]
# Create inline admin policy
iam_client.put_user_policy(
UserName=user_name,
PolicyName="AdminInlinePolicy",
PolicyDocument=dumps(ADMIN_POLICY),
)
from prowler.providers.aws.services.iam.iam_service import IAM
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
iam = IAM(aws_provider)
# Mock service-specific credentials
from datetime import datetime, timezone
from prowler.providers.aws.services.iam.iam_service import (
ServiceSpecificCredential,
User,
)
# Create a mock user with the inline policy
mock_user = User(
name=user_name,
arn=user_arn,
attached_policies=[],
inline_policies=["AdminInlinePolicy"],
)
# Create a mock service-specific credential
mock_credential = ServiceSpecificCredential(
arn=f"arn:aws:iam:{AWS_REGION_US_EAST_1}:123456789012:user/{user_name}/credential/test-credential-id",
user=mock_user,
status="Active",
create_date=datetime.now(timezone.utc),
service_user_name=None,
service_credential_alias=None,
expiration_date=None,
id="test-credential-id",
service_name="bedrock.amazonaws.com",
region=AWS_REGION_US_EAST_1,
)
iam.service_specific_credentials = [mock_credential]
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
),
mock.patch(
"prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges.iam_client",
new=iam,
),
):
from prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges import (
bedrock_api_key_no_administrative_privileges,
)
check = bedrock_api_key_no_administrative_privileges()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== "API key test-credential-id in user test_user has administrative privileges through inline policy AdminInlinePolicy."
)
assert result[0].resource_id == "test-credential-id"
assert result[0].region == AWS_REGION_US_EAST_1
@mock_aws
def test_bedrock_api_key_with_privilege_escalation_attached_policy(self):
iam_client = client("iam", region_name=AWS_REGION_US_EAST_1)
# Create user
user_name = "test_user"
user_arn = iam_client.create_user(UserName=user_name)["User"]["Arn"]
# Create privilege escalation policy
escalation_policy_arn = iam_client.create_policy(
PolicyName="EscalationPolicy",
PolicyDocument=dumps(PRIVILEGE_ESCALATION_POLICY),
Path="/",
)["Policy"]["Arn"]
# Attach privilege escalation policy to user
iam_client.attach_user_policy(
UserName=user_name, PolicyArn=escalation_policy_arn
)
from prowler.providers.aws.services.iam.iam_service import IAM
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
iam = IAM(aws_provider)
# Mock service-specific credentials
from datetime import datetime, timezone
from prowler.providers.aws.services.iam.iam_service import (
ServiceSpecificCredential,
User,
)
# Create a mock user with the attached policy
mock_user = User(
name=user_name,
arn=user_arn,
attached_policies=[
{"PolicyArn": escalation_policy_arn, "PolicyName": "EscalationPolicy"}
],
inline_policies=[],
)
# Create a mock service-specific credential
mock_credential = ServiceSpecificCredential(
arn=f"arn:aws:iam:{AWS_REGION_US_EAST_1}:123456789012:user/{user_name}/credential/test-credential-id",
user=mock_user,
status="Active",
create_date=datetime.now(timezone.utc),
service_user_name=None,
service_credential_alias=None,
expiration_date=None,
id="test-credential-id",
service_name="bedrock.amazonaws.com",
region=AWS_REGION_US_EAST_1,
)
iam.service_specific_credentials = [mock_credential]
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
),
mock.patch(
"prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges.iam_client",
new=iam,
),
):
from prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges import (
bedrock_api_key_no_administrative_privileges,
)
check = bedrock_api_key_no_administrative_privileges()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== "API key test-credential-id in user test_user has privilege escalation through attached policy EscalationPolicy."
)
assert result[0].resource_id == "test-credential-id"
assert result[0].region == AWS_REGION_US_EAST_1
@mock_aws
def test_bedrock_api_key_with_privilege_escalation_inline_policy(self):
iam_client = client("iam", region_name=AWS_REGION_US_EAST_1)
# Create user
user_name = "test_user"
user_arn = iam_client.create_user(UserName=user_name)["User"]["Arn"]
# Create inline privilege escalation policy
iam_client.put_user_policy(
UserName=user_name,
PolicyName="EscalationInlinePolicy",
PolicyDocument=dumps(PRIVILEGE_ESCALATION_POLICY),
)
from prowler.providers.aws.services.iam.iam_service import IAM
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
iam = IAM(aws_provider)
# Mock service-specific credentials
from datetime import datetime, timezone
from prowler.providers.aws.services.iam.iam_service import (
ServiceSpecificCredential,
User,
)
# Create a mock user with the inline policy
mock_user = User(
name=user_name,
arn=user_arn,
attached_policies=[],
inline_policies=["EscalationInlinePolicy"],
)
# Create a mock service-specific credential
mock_credential = ServiceSpecificCredential(
arn=f"arn:aws:iam:{AWS_REGION_US_EAST_1}:123456789012:user/{user_name}/credential/test-credential-id",
user=mock_user,
status="Active",
create_date=datetime.now(timezone.utc),
service_user_name=None,
service_credential_alias=None,
expiration_date=None,
id="test-credential-id",
service_name="bedrock.amazonaws.com",
region=AWS_REGION_US_EAST_1,
)
iam.service_specific_credentials = [mock_credential]
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
),
mock.patch(
"prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges.iam_client",
new=iam,
),
):
from prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges import (
bedrock_api_key_no_administrative_privileges,
)
check = bedrock_api_key_no_administrative_privileges()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== "API key test-credential-id in user test_user has privilege escalation through inline policy EscalationInlinePolicy."
)
assert result[0].resource_id == "test-credential-id"
assert result[0].region == AWS_REGION_US_EAST_1
@mock_aws
def test_bedrock_api_key_with_non_admin_policy(self):
iam_client = client("iam", region_name=AWS_REGION_US_EAST_1)
# Create user
user_name = "test_user"
user_arn = iam_client.create_user(UserName=user_name)["User"]["Arn"]
# Create non-admin policy
non_admin_policy_arn = iam_client.create_policy(
PolicyName="NonAdminPolicy",
PolicyDocument=dumps(NON_ADMIN_POLICY),
Path="/",
)["Policy"]["Arn"]
# Attach non-admin policy to user
iam_client.attach_user_policy(
UserName=user_name, PolicyArn=non_admin_policy_arn
)
from prowler.providers.aws.services.iam.iam_service import IAM
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
iam = IAM(aws_provider)
# Mock service-specific credentials
from datetime import datetime, timezone
from prowler.providers.aws.services.iam.iam_service import (
ServiceSpecificCredential,
User,
)
# Create a mock user with the attached policy
mock_user = User(
name=user_name,
arn=user_arn,
attached_policies=[
{"PolicyArn": non_admin_policy_arn, "PolicyName": "NonAdminPolicy"}
],
inline_policies=[],
)
# Create a mock service-specific credential
mock_credential = ServiceSpecificCredential(
arn=f"arn:aws:iam:{AWS_REGION_US_EAST_1}:123456789012:user/{user_name}/credential/test-credential-id",
user=mock_user,
status="Active",
create_date=datetime.now(timezone.utc),
service_user_name=None,
service_credential_alias=None,
expiration_date=None,
id="test-credential-id",
service_name="bedrock.amazonaws.com",
region=AWS_REGION_US_EAST_1,
)
iam.service_specific_credentials = [mock_credential]
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
),
mock.patch(
"prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges.iam_client",
new=iam,
),
):
from prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges import (
bedrock_api_key_no_administrative_privileges,
)
check = bedrock_api_key_no_administrative_privileges()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== "API key test-credential-id in user test_user has full service access through attached policy NonAdminPolicy."
)
assert result[0].resource_id == "test-credential-id"
assert result[0].region == AWS_REGION_US_EAST_1
@mock_aws
def test_bedrock_api_key_with_no_policies(self):
iam_client = client("iam", region_name=AWS_REGION_US_EAST_1)
# Create user
user_name = "test_user"
user_arn = iam_client.create_user(UserName=user_name)["User"]["Arn"]
from prowler.providers.aws.services.iam.iam_service import IAM
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
iam = IAM(aws_provider)
# Mock service-specific credentials
from datetime import datetime, timezone
from prowler.providers.aws.services.iam.iam_service import (
ServiceSpecificCredential,
User,
)
# Create a mock user with no policies
mock_user = User(
name=user_name,
arn=user_arn,
attached_policies=[],
inline_policies=[],
)
# Create a mock service-specific credential
mock_credential = ServiceSpecificCredential(
arn=f"arn:aws:iam:{AWS_REGION_US_EAST_1}:123456789012:user/{user_name}/credential/test-credential-id",
user=mock_user,
status="Active",
create_date=datetime.now(timezone.utc),
service_user_name=None,
service_credential_alias=None,
expiration_date=None,
id="test-credential-id",
service_name="bedrock.amazonaws.com",
region=AWS_REGION_US_EAST_1,
)
iam.service_specific_credentials = [mock_credential]
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
),
mock.patch(
"prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges.iam_client",
new=iam,
),
):
from prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges import (
bedrock_api_key_no_administrative_privileges,
)
check = bedrock_api_key_no_administrative_privileges()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "API key test-credential-id in user test_user has no administrative privileges."
)
assert result[0].resource_id == "test-credential-id"
assert result[0].region == AWS_REGION_US_EAST_1
@mock_aws
def test_non_bedrock_api_key_ignored(self):
iam_client = client("iam", region_name=AWS_REGION_US_EAST_1)
# Create user
user_name = "test_user"
user_arn = iam_client.create_user(UserName=user_name)["User"]["Arn"]
# Create admin policy
admin_policy_arn = iam_client.create_policy(
PolicyName="AdminPolicy",
PolicyDocument=dumps(ADMIN_POLICY),
Path="/",
)["Policy"]["Arn"]
# Attach admin policy to user
iam_client.attach_user_policy(UserName=user_name, PolicyArn=admin_policy_arn)
from prowler.providers.aws.services.iam.iam_service import IAM
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
iam = IAM(aws_provider)
# Mock service-specific credentials
from datetime import datetime, timezone
from prowler.providers.aws.services.iam.iam_service import (
ServiceSpecificCredential,
User,
)
# Create a mock user with the attached policy
mock_user = User(
name=user_name,
arn=user_arn,
attached_policies=[
{"PolicyArn": admin_policy_arn, "PolicyName": "AdminPolicy"}
],
inline_policies=[],
)
# Create a mock service-specific credential for a different service (not Bedrock)
mock_credential = ServiceSpecificCredential(
arn=f"arn:aws:iam:{AWS_REGION_US_EAST_1}:123456789012:user/{user_name}/credential/test-credential-id",
user=mock_user,
status="Active",
create_date=datetime.now(timezone.utc),
service_user_name=None,
service_credential_alias=None,
expiration_date=None,
id="test-credential-id",
service_name="codecommit.amazonaws.com",
region=AWS_REGION_US_EAST_1,
)
iam.service_specific_credentials = [mock_credential]
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
),
mock.patch(
"prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges.iam_client",
new=iam,
),
):
from prowler.providers.aws.services.bedrock.bedrock_api_key_no_administrative_privileges.bedrock_api_key_no_administrative_privileges import (
bedrock_api_key_no_administrative_privileges,
)
check = bedrock_api_key_no_administrative_privileges()
result = check.execute()
# Should return 0 results since the API key is not for Bedrock
assert len(result) == 0
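# The codecommit case above pins the scoping rule: the check only evaluates
# service-specific credentials whose service_name is Bedrock's. A hedged
# one-line sketch of that filter (bedrock_credentials is an illustrative
# helper, not the check's real structure):
def bedrock_credentials(credentials):
    return [c for c in credentials if c.service_name == "bedrock.amazonaws.com"]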
@@ -404,7 +404,7 @@ class Test_ec2_securitygroup_allow_ingress_from_internet_to_all_ports:
new=EC2(aws_provider),
),
mock.patch(
"prowler.providers.aws.services.vpc.vpc_service.VPC",
"prowler.providers.aws.services.ec2.ec2_securitygroup_allow_ingress_from_internet_to_all_ports.ec2_securitygroup_allow_ingress_from_internet_to_all_ports.vpc_client",
new=VPC(aws_provider),
),
mock.patch(
@@ -6,6 +6,7 @@ from prowler.providers.aws.services.ec2.lib.security_groups import (
)
TRANSPORT_PROTOCOL_TCP = "tcp"
TRANSPORT_PROTOCOL_UDP = "udp"
TRANSPORT_PROTOCOL_ALL = "-1"
IP_V4_ALL_CIDRS = "0.0.0.0/0"
@@ -362,6 +363,26 @@ class Test_check_security_group:
)
assert check_security_group(ingress_rule, TRANSPORT_PROTOCOL_ALL, None, True)
# UDP Protocol - IP_V4_ALL_CIDRS - Any Port - check None - Any Address - Open
def test_all_public_ipv4_address_open_any_port_check_none_any_address_udp(
self,
):
ingress_rule = self.ingress_rule_generator(
0, 65535, TRANSPORT_PROTOCOL_UDP, [IP_V4_ALL_CIDRS], []
)
assert check_security_group(ingress_rule, TRANSPORT_PROTOCOL_UDP, None, True)
# UDP Protocol - IP_V4_ALL_CIDRS - Any Port - check TCP - Any Address - Open
def test_all_public_ipv4_address_open_any_port_udp_protocol_check_tcp_any_address(
self,
):
ingress_rule = self.ingress_rule_generator(
0, 65535, TRANSPORT_PROTOCOL_UDP, [IP_V4_ALL_CIDRS], []
)
assert not check_security_group(
ingress_rule, TRANSPORT_PROTOCOL_TCP, None, True
)
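# A hedged sketch of the protocol-matching rule the two UDP cases above
# exercise (protocol_matches is an illustrative name, not the library's API):
# an ingress rule matches when the requested protocol is None (any), the rule
# covers all protocols ("-1"), or the two protocols are equal.
def protocol_matches(rule_protocol, wanted_protocol):
    return (
        wanted_protocol is None
        or rule_protocol == "-1"
        or rule_protocol == wanted_protocol
    )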
# ALL (-1) Protocol - IP_V6_ALL_CIDRS - Any Port - check None - Any Address - Open
def test_all_public_ipv6_address_open_any_port_check_none_any_address(
self,
@@ -760,7 +760,7 @@ class Test_IAM_Service:
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
iam = IAM(aws_provider)
custom_policies = 0
for policy in iam.policies.values():
if policy.type == "Custom":
custom_policies += 1
assert policy.name == "policy1"
@@ -786,7 +786,7 @@ class Test_IAM_Service:
iam = IAM(aws_provider)
custom_policies = 0
for policy in iam.policies.values():
if policy.type == "Custom":
custom_policies += 1
assert policy.name == "policy2"
@@ -872,7 +872,7 @@ nTTxU4a7x1naFxzYXK1iQ1vMARKMjDb19QEJIEJKZlDK4uS7yMlf1nFS
assert iam.users[0].tags == []
# TODO: Workaround until this gets fixed https://github.com/getmoto/moto/issues/6712
for policy in iam.policies.values():
if policy.name == policy_name:
assert policy == Policy(
name=policy_name,
@@ -914,7 +914,7 @@ nTTxU4a7x1naFxzYXK1iQ1vMARKMjDb19QEJIEJKZlDK4uS7yMlf1nFS
assert iam.groups[0].users == []
# TODO: Workaround until this gets fixed https://github.com/getmoto/moto/issues/6712
for policy in iam.policies.values():
if policy.name == policy_name:
assert policy == Policy(
name=policy_name,
@@ -960,7 +960,7 @@ nTTxU4a7x1naFxzYXK1iQ1vMARKMjDb19QEJIEJKZlDK4uS7yMlf1nFS
assert iam.roles[0].tags == []
# TODO: Workaround until this gets fixed https://github.com/getmoto/moto/issues/6712
for policy in iam.policies.values():
if policy.name == policy_name:
assert policy == Policy(
name=policy_name,
@@ -6,6 +6,8 @@ from prowler.providers.aws.services.iam.lib.policy import (
check_full_service_access,
get_effective_actions,
has_codebuild_trusted_principal,
has_public_principal,
has_restrictive_source_arn_condition,
is_codebuild_using_allowed_github_org,
is_condition_block_restrictive,
is_condition_block_restrictive_organization,
@@ -2451,3 +2453,266 @@ def test_has_codebuild_trusted_principal_list():
],
}
assert has_codebuild_trusted_principal(trust_policy) is True
class Test_has_public_principal:
"""Tests for the has_public_principal function"""
def test_has_public_principal_wildcard_string(self):
"""Test public principal detection with wildcard string"""
statement = {"Principal": "*"}
assert has_public_principal(statement) is True
def test_has_public_principal_root_arn_string(self):
"""Test public principal detection with root ARN string"""
statement = {"Principal": "arn:aws:iam::*:root"}
assert has_public_principal(statement) is True
def test_has_public_principal_aws_dict_wildcard(self):
"""Test public principal detection with AWS dict containing wildcard"""
statement = {"Principal": {"AWS": "*"}}
assert has_public_principal(statement) is True
def test_has_public_principal_aws_dict_root_arn(self):
"""Test public principal detection with AWS dict containing root ARN"""
statement = {"Principal": {"AWS": "arn:aws:iam::*:root"}}
assert has_public_principal(statement) is True
def test_has_public_principal_aws_list_wildcard(self):
"""Test public principal detection with AWS list containing wildcard"""
statement = {"Principal": {"AWS": ["arn:aws:iam::123456789012:user/test", "*"]}}
assert has_public_principal(statement) is True
def test_has_public_principal_aws_list_root_arn(self):
"""Test public principal detection with AWS list containing root ARN"""
statement = {
"Principal": {
"AWS": ["arn:aws:iam::123456789012:user/test", "arn:aws:iam::*:root"]
}
}
assert has_public_principal(statement) is True
def test_has_public_principal_canonical_user_wildcard(self):
"""Test public principal detection with CanonicalUser wildcard"""
statement = {"Principal": {"CanonicalUser": "*"}}
assert has_public_principal(statement) is True
def test_has_public_principal_canonical_user_root_arn(self):
"""Test public principal detection with CanonicalUser root ARN"""
statement = {"Principal": {"CanonicalUser": "arn:aws:iam::*:root"}}
assert has_public_principal(statement) is True
def test_has_public_principal_no_principal(self):
"""Test with statement that has no Principal field"""
statement = {"Effect": "Allow", "Action": "s3:GetObject"}
assert has_public_principal(statement) is False
def test_has_public_principal_empty_principal(self):
"""Test with empty principal"""
statement = {"Principal": ""}
assert has_public_principal(statement) is False
def test_has_public_principal_specific_account(self):
"""Test with specific account principal (not public)"""
statement = {"Principal": {"AWS": "arn:aws:iam::123456789012:root"}}
assert has_public_principal(statement) is False
def test_has_public_principal_service_principal(self):
"""Test with service principal (not public)"""
statement = {"Principal": {"Service": "lambda.amazonaws.com"}}
assert has_public_principal(statement) is False
def test_has_public_principal_mixed_principals(self):
"""Test with mixed principals including public one"""
statement = {
"Principal": {
"AWS": ["arn:aws:iam::123456789012:user/test"],
"Service": "lambda.amazonaws.com",
"CanonicalUser": "*",
}
}
assert has_public_principal(statement) is True
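# Taken together, these cases pin the contract: a principal is public when the
# AWS or CanonicalUser entry contains "*" or "arn:aws:iam::*:root"; service
# principals and concrete account ARNs are not public. A minimal sketch under
# those assumptions (illustrative only; the real helper lives in
# prowler.providers.aws.services.iam.lib.policy and may differ):
PUBLIC_PRINCIPALS = {"*", "arn:aws:iam::*:root"}


def has_public_principal_sketch(statement: dict) -> bool:
    principal = statement.get("Principal")
    if isinstance(principal, str):
        return principal in PUBLIC_PRINCIPALS
    if isinstance(principal, dict):
        for key in ("AWS", "CanonicalUser"):
            values = principal.get(key, [])
            values = [values] if isinstance(values, str) else values
            if any(value in PUBLIC_PRINCIPALS for value in values):
                return True
    return False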
class Test_has_restrictive_source_arn_condition:
"""Tests for the has_restrictive_source_arn_condition function"""
def test_no_condition_block(self):
"""Test statement without Condition block"""
statement = {"Effect": "Allow", "Principal": "*", "Action": "s3:GetObject"}
assert has_restrictive_source_arn_condition(statement) is False
def test_no_source_arn_condition(self):
"""Test with condition block but no aws:SourceArn"""
statement = {
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Condition": {"StringEquals": {"aws:SourceAccount": "123456789012"}},
}
assert has_restrictive_source_arn_condition(statement) is False
def test_restrictive_source_arn_s3_bucket(self):
"""Test restrictive SourceArn condition with S3 bucket"""
statement = {
"Effect": "Allow",
"Principal": "*",
"Action": "sns:Publish",
"Condition": {"ArnLike": {"aws:SourceArn": "arn:aws:s3:::my-bucket"}},
}
assert has_restrictive_source_arn_condition(statement) is True
def test_restrictive_source_arn_lambda_function(self):
"""Test restrictive SourceArn condition with Lambda function"""
statement = {
"Effect": "Allow",
"Principal": "*",
"Action": "sns:Publish",
"Condition": {
"ArnEquals": {
"aws:SourceArn": "arn:aws:lambda:us-east-1:123456789012:function:MyFunction"
}
},
}
assert has_restrictive_source_arn_condition(statement) is True
def test_non_restrictive_global_wildcard(self):
"""Test non-restrictive SourceArn with global wildcard"""
statement = {
"Effect": "Allow",
"Principal": "*",
"Action": "sns:Publish",
"Condition": {"ArnLike": {"aws:SourceArn": "*"}},
}
assert has_restrictive_source_arn_condition(statement) is False
def test_non_restrictive_service_wildcard(self):
"""Test non-restrictive SourceArn with service wildcard"""
statement = {
"Effect": "Allow",
"Principal": "*",
"Action": "sns:Publish",
"Condition": {"ArnLike": {"aws:SourceArn": "arn:aws:s3:::*"}},
}
assert has_restrictive_source_arn_condition(statement) is False
def test_non_restrictive_multi_wildcard(self):
"""Test non-restrictive SourceArn with multiple wildcards"""
statement = {
"Effect": "Allow",
"Principal": "*",
"Action": "sns:Publish",
"Condition": {"ArnLike": {"aws:SourceArn": "arn:aws:*:*:*:*"}},
}
assert has_restrictive_source_arn_condition(statement) is False
def test_non_restrictive_resource_wildcard(self):
"""Test non-restrictive SourceArn with resource wildcard"""
statement = {
"Effect": "Allow",
"Principal": "*",
"Action": "sns:Publish",
"Condition": {
"ArnLike": {"aws:SourceArn": "arn:aws:lambda:us-east-1:123456789012:*"}
},
}
assert has_restrictive_source_arn_condition(statement) is False
def test_source_arn_list_with_valid_arn(self):
"""Test SourceArn condition with list containing valid ARN"""
statement = {
"Effect": "Allow",
"Principal": "*",
"Action": "sns:Publish",
"Condition": {
"ArnLike": {
"aws:SourceArn": ["arn:aws:s3:::bucket1", "arn:aws:s3:::bucket2"]
}
},
}
assert has_restrictive_source_arn_condition(statement) is True
def test_source_arn_list_with_wildcard(self):
"""Test SourceArn condition with list containing wildcard"""
statement = {
"Effect": "Allow",
"Principal": "*",
"Action": "sns:Publish",
"Condition": {"ArnLike": {"aws:SourceArn": ["arn:aws:s3:::bucket1", "*"]}},
}
assert has_restrictive_source_arn_condition(statement) is False
def test_source_arn_with_account_validation_match(self):
"""Test SourceArn with account validation - matching account"""
statement = {
"Effect": "Allow",
"Principal": "*",
"Action": "sns:Publish",
"Condition": {
"ArnLike": {
"aws:SourceArn": "arn:aws:lambda:us-east-1:123456789012:function:MyFunction"
}
},
}
assert has_restrictive_source_arn_condition(statement, "123456789012") is True
def test_source_arn_with_account_validation_mismatch(self):
"""Test SourceArn with account validation - non-matching account"""
statement = {
"Effect": "Allow",
"Principal": "*",
"Action": "sns:Publish",
"Condition": {
"ArnLike": {
"aws:SourceArn": "arn:aws:lambda:us-east-1:123456789012:function:MyFunction"
}
},
}
assert has_restrictive_source_arn_condition(statement, "987654321098") is False
def test_source_arn_with_account_wildcard(self):
"""Test SourceArn with account wildcard"""
statement = {
"Effect": "Allow",
"Principal": "*",
"Action": "sns:Publish",
"Condition": {
"ArnLike": {
"aws:SourceArn": "arn:aws:lambda:us-east-1:*:function:MyFunction"
}
},
}
assert has_restrictive_source_arn_condition(statement, "123456789012") is False
def test_source_arn_s3_bucket_no_account_field(self):
"""Test SourceArn with S3 bucket (no account field) - should be restrictive"""
statement = {
"Effect": "Allow",
"Principal": "*",
"Action": "sns:Publish",
"Condition": {"ArnLike": {"aws:SourceArn": "arn:aws:s3:::my-bucket"}},
}
assert has_restrictive_source_arn_condition(statement, "123456789012") is True
def test_source_arn_case_insensitive(self):
"""Test SourceArn condition key is case insensitive"""
statement = {
"Effect": "Allow",
"Principal": "*",
"Action": "sns:Publish",
"Condition": {"ArnLike": {"AWS:SourceArn": "arn:aws:s3:::my-bucket"}},
}
assert has_restrictive_source_arn_condition(statement) is True
def test_source_arn_mixed_operators(self):
"""Test SourceArn with multiple condition operators"""
statement = {
"Effect": "Allow",
"Principal": "*",
"Action": "sns:Publish",
"Condition": {
"ArnLike": {"aws:SourceArn": "arn:aws:s3:::my-bucket"},
"StringEquals": {"aws:SourceAccount": "123456789012"},
},
}
assert has_restrictive_source_arn_condition(statement) is True
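# The pattern across these cases: a SourceArn condition is restrictive only if
# every ARN it lists is wildcard-free and, when an audited account is passed,
# any account field present in the ARN matches it (S3 bucket ARNs have an
# empty account field and stay restrictive). A sketch under those assumptions
# (restrictive_source_arn_sketch is illustrative, not the real helper):
def restrictive_source_arn_sketch(statement: dict, source_account: str = None) -> bool:
    arns = []
    for operator_values in statement.get("Condition", {}).values():
        for condition_key, value in operator_values.items():
            # Condition keys are matched case-insensitively.
            if condition_key.lower() == "aws:sourcearn":
                arns.extend([value] if isinstance(value, str) else value)
    if not arns:
        return False
    for arn in arns:
        if "*" in arn:
            return False  # any wildcard defeats the restriction
        parts = arn.split(":")
        account = parts[4] if len(parts) > 4 else ""
        if source_account and account and account != source_account:
            return False  # pinned to a different account
    return True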
@@ -2,9 +2,10 @@ from typing import Any, Dict
from unittest import mock
from uuid import uuid4
import pytest
from prowler.providers.aws.services.sns.sns_service import Topic
from tests.providers.aws.utils import AWS_ACCOUNT_NUMBER, AWS_REGION_EU_WEST_1
kms_key_id = str(uuid4())
topic_name = "test-topic"
@@ -98,6 +99,73 @@ test_policy_restricted_principal_account_organization = {
]
}
test_policy_restricted_source_arn = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "SNS:Publish",
"Resource": f"arn:aws:sns:{AWS_REGION_EU_WEST_1}:{AWS_ACCOUNT_NUMBER}:{topic_name}",
"Condition": {
"ArnLike": {"aws:SourceArn": "arn:aws:s3:::test-bucket-name"}
},
}
],
}
test_policy_invalid_source_arn = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "SNS:Publish",
"Resource": f"arn:aws:sns:{AWS_REGION_EU_WEST_1}:{AWS_ACCOUNT_NUMBER}:{topic_name}",
"Condition": {"ArnLike": {"aws:SourceArn": "invalid-arn-format"}},
}
],
}
test_policy_unrestricted_source_arn_wildcard = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "SNS:Publish",
"Resource": f"arn:aws:sns:{AWS_REGION_EU_WEST_1}:{AWS_ACCOUNT_NUMBER}:{topic_name}",
"Condition": {"ArnLike": {"aws:SourceArn": "*"}},
}
],
}
test_policy_unrestricted_source_arn_service_wildcard = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "SNS:Publish",
"Resource": f"arn:aws:sns:{AWS_REGION_EU_WEST_1}:{AWS_ACCOUNT_NUMBER}:{topic_name}",
"Condition": {"ArnLike": {"aws:SourceArn": "arn:aws:s3:::*"}},
}
],
}
test_policy_unrestricted_source_arn_multi_wildcard = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "SNS:Publish",
"Resource": f"arn:aws:sns:{AWS_REGION_EU_WEST_1}:{AWS_ACCOUNT_NUMBER}:{topic_name}",
"Condition": {"ArnLike": {"aws:SourceArn": "arn:aws:*:*:*:*"}},
}
],
}
def generate_policy_restricted_on_sns_endpoint(endpoint: str) -> Dict[str, Any]:
return {
@@ -396,6 +464,78 @@ class Test_sns_topics_not_publicly_accessible:
assert result[0].region == AWS_REGION_EU_WEST_1
assert result[0].resource_tags == []
def test_topic_public_with_source_arn_restriction(self):
sns_client = mock.MagicMock()
sns_client.audited_account = AWS_ACCOUNT_NUMBER
sns_client.topics = []
sns_client.topics.append(
Topic(
arn=topic_arn,
name=topic_name,
policy=test_policy_restricted_source_arn,
region=AWS_REGION_EU_WEST_1,
)
)
sns_client.provider = mock.MagicMock()
sns_client.provider.organizations_metadata = mock.MagicMock()
sns_client.provider.organizations_metadata.organization_id = org_id
with mock.patch(
"prowler.providers.aws.services.sns.sns_service.SNS",
sns_client,
):
from prowler.providers.aws.services.sns.sns_topics_not_publicly_accessible.sns_topics_not_publicly_accessible import (
sns_topics_not_publicly_accessible,
)
check = sns_topics_not_publicly_accessible()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== f"SNS topic {topic_name} is not publicly accessible."
)
assert result[0].resource_id == topic_name
assert result[0].resource_arn == topic_arn
assert result[0].region == AWS_REGION_EU_WEST_1
assert result[0].resource_tags == []
def test_topic_public_with_invalid_source_arn(self):
sns_client = mock.MagicMock()
sns_client.audited_account = AWS_ACCOUNT_NUMBER
sns_client.topics = []
sns_client.topics.append(
Topic(
arn=topic_arn,
name=topic_name,
policy=test_policy_invalid_source_arn,
region=AWS_REGION_EU_WEST_1,
)
)
sns_client.provider = mock.MagicMock()
sns_client.provider.organizations_metadata = mock.MagicMock()
sns_client.provider.organizations_metadata.organization_id = org_id
with mock.patch(
"prowler.providers.aws.services.sns.sns_service.SNS",
sns_client,
):
from prowler.providers.aws.services.sns.sns_topics_not_publicly_accessible.sns_topics_not_publicly_accessible import (
sns_topics_not_publicly_accessible,
)
check = sns_topics_not_publicly_accessible()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== f"SNS topic {topic_name} is not publicly accessible."
)
assert result[0].resource_id == topic_name
assert result[0].resource_arn == topic_arn
assert result[0].region == AWS_REGION_EU_WEST_1
assert result[0].resource_tags == []
@pytest.mark.parametrize(
"endpoint",
[
@@ -443,6 +583,114 @@ class Test_sns_topics_not_publicly_accessible:
assert result[0].region == AWS_REGION_EU_WEST_1
assert result[0].resource_tags == []
def test_topic_public_with_unrestricted_source_arn_wildcard(self):
sns_client = mock.MagicMock()
sns_client.audited_account = AWS_ACCOUNT_NUMBER
sns_client.topics = []
sns_client.topics.append(
Topic(
arn=topic_arn,
name=topic_name,
policy=test_policy_unrestricted_source_arn_wildcard,
region=AWS_REGION_EU_WEST_1,
)
)
sns_client.provider = mock.MagicMock()
sns_client.provider.organizations_metadata = mock.MagicMock()
sns_client.provider.organizations_metadata.organization_id = org_id
with mock.patch(
"prowler.providers.aws.services.sns.sns_service.SNS",
sns_client,
):
from prowler.providers.aws.services.sns.sns_topics_not_publicly_accessible.sns_topics_not_publicly_accessible import (
sns_topics_not_publicly_accessible,
)
check = sns_topics_not_publicly_accessible()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== f"SNS topic {topic_name} is public because its policy allows public access."
)
assert result[0].resource_id == topic_name
assert result[0].resource_arn == topic_arn
assert result[0].region == AWS_REGION_EU_WEST_1
assert result[0].resource_tags == []
def test_topic_public_with_unrestricted_source_arn_service_wildcard(self):
sns_client = mock.MagicMock()
sns_client.audited_account = AWS_ACCOUNT_NUMBER
sns_client.topics = []
sns_client.topics.append(
Topic(
arn=topic_arn,
name=topic_name,
policy=test_policy_unrestricted_source_arn_service_wildcard,
region=AWS_REGION_EU_WEST_1,
)
)
sns_client.provider = mock.MagicMock()
sns_client.provider.organizations_metadata = mock.MagicMock()
sns_client.provider.organizations_metadata.organization_id = org_id
with mock.patch(
"prowler.providers.aws.services.sns.sns_service.SNS",
sns_client,
):
from prowler.providers.aws.services.sns.sns_topics_not_publicly_accessible.sns_topics_not_publicly_accessible import (
sns_topics_not_publicly_accessible,
)
check = sns_topics_not_publicly_accessible()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== f"SNS topic {topic_name} is public because its policy allows public access."
)
assert result[0].resource_id == topic_name
assert result[0].resource_arn == topic_arn
assert result[0].region == AWS_REGION_EU_WEST_1
assert result[0].resource_tags == []
def test_topic_public_with_unrestricted_source_arn_multi_wildcard(self):
sns_client = mock.MagicMock()
sns_client.audited_account = AWS_ACCOUNT_NUMBER
sns_client.topics = []
sns_client.topics.append(
Topic(
arn=topic_arn,
name=topic_name,
policy=test_policy_unrestricted_source_arn_multi_wildcard,
region=AWS_REGION_EU_WEST_1,
)
)
sns_client.provider = mock.MagicMock()
sns_client.provider.organizations_metadata = mock.MagicMock()
sns_client.provider.organizations_metadata.organization_id = org_id
with mock.patch(
"prowler.providers.aws.services.sns.sns_service.SNS",
sns_client,
):
from prowler.providers.aws.services.sns.sns_topics_not_publicly_accessible.sns_topics_not_publicly_accessible import (
sns_topics_not_publicly_accessible,
)
check = sns_topics_not_publicly_accessible()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== f"SNS topic {topic_name} is public because its policy allows public access."
)
assert result[0].resource_id == topic_name
assert result[0].resource_arn == topic_arn
assert result[0].region == AWS_REGION_EU_WEST_1
assert result[0].resource_tags == []
@pytest.mark.parametrize(
"endpoint",
[
@@ -4,6 +4,7 @@ from uuid import uuid4
import botocore
from boto3 import client
from botocore.exceptions import ClientError
from moto import mock_aws
from prowler.providers.aws.services.sqs.sqs_service import SQS
@@ -114,3 +115,42 @@ class Test_SQS_Service:
assert sqs.queues[0].region == AWS_REGION_EU_WEST_1
assert sqs.queues[0].policy
assert sqs.queues[0].kms_key_id == test_key
@mock_aws
def test_get_queue_attributes_nonexistent_queue(self):
aws_provider = set_mocked_aws_provider([AWS_REGION_EU_WEST_1])
sqs_service = SQS(aws_provider)
queue_url = f"https://sqs.{AWS_REGION_EU_WEST_1}.amazonaws.com/{AWS_ACCOUNT_NUMBER}/{test_queue}"
sqs_service.queues = [
type(
"Queue",
(),
{
"id": queue_url,
"name": test_queue,
"arn": test_queue_arn,
"region": AWS_REGION_EU_WEST_1,
},
)()
]
def mock_get_queue_attributes(**kwargs):
raise ClientError(
{
"Error": {
"Code": "AWS.SimpleQueueService.NonExistentQueue",
"Message": "The specified queue does not exist.",
}
},
"GetQueueAttributes",
)
with patch.object(
sqs_service.regional_clients[AWS_REGION_EU_WEST_1],
"get_queue_attributes",
side_effect=mock_get_queue_attributes,
):
sqs_service._get_queue_attributes()
assert sqs_service.queues == []
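# The mock above simulates an ephemeral queue deleted between ListQueues and
# GetQueueAttributes. A hedged sketch of the behavior the assertion pins down
# (get_queue_attributes_sketch is illustrative, not the real
# _get_queue_attributes): swallow NonExistentQueue, drop the queue, and
# re-raise anything else.
from botocore.exceptions import ClientError  # already imported at the top of this file


def get_queue_attributes_sketch(sqs_service):
    for queue in list(sqs_service.queues):
        try:
            sqs_service.regional_clients[queue.region].get_queue_attributes(
                QueueUrl=queue.id, AttributeNames=["All"]
            )
        except ClientError as error:
            if (
                error.response["Error"]["Code"]
                == "AWS.SimpleQueueService.NonExistentQueue"
            ):
                sqs_service.queues.remove(queue)  # queue vanished; skip it
            else:
                raise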
@@ -85,6 +85,11 @@ class TestAzureProvider:
"python_latest_version": "3.12",
"java_latest_version": "17",
"recommended_minimal_tls_versions": ["1.2", "1.3"],
"desired_vm_sku_sizes": [
"Standard_A8_v2",
"Standard_DS3_v2",
"Standard_D4s_v3",
],
"defender_attack_path_minimal_risk_level": "High",
}
@@ -0,0 +1,660 @@
from unittest import mock
from uuid import uuid4
from prowler.providers.azure.services.vm.vm_service import (
SecurityProfile,
StorageProfile,
UefiSettings,
VirtualMachine,
)
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
)
class Test_vm_desired_sku_size:
def test_vm_no_subscriptions(self):
"""Test when there are no subscriptions."""
vm_client = mock.MagicMock()
vm_client.virtual_machines = {}
vm_client.audit_config = {}
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size.vm_client",
new=vm_client,
),
):
from prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size import (
vm_desired_sku_size,
)
check = vm_desired_sku_size()
result = check.execute()
assert len(result) == 0
def test_vm_subscriptions_empty(self):
"""Test when subscriptions exist but have no VMs."""
vm_client = mock.MagicMock()
vm_client.virtual_machines = {AZURE_SUBSCRIPTION_ID: {}}
vm_client.audit_config = {}
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size.vm_client",
new=vm_client,
),
):
from prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size import (
vm_desired_sku_size,
)
check = vm_desired_sku_size()
result = check.execute()
assert len(result) == 0
def test_vm_using_desired_sku_size_default_config(self):
"""Test VM using a SKU size that is in the default configuration."""
vm_id = str(uuid4())
vm_client = mock.MagicMock()
vm_client.virtual_machines = {
AZURE_SUBSCRIPTION_ID: {
vm_id: VirtualMachine(
resource_id=vm_id,
resource_name="VMTest",
location="location",
security_profile=SecurityProfile(
security_type="TrustedLaunch",
uefi_settings=UefiSettings(
secure_boot_enabled=True,
v_tpm_enabled=True,
),
),
extensions=[],
storage_profile=StorageProfile(
os_disk=None,
data_disks=[],
),
vm_size="Standard_A8_v2",
),
}
}
vm_client.audit_config = {}
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size.vm_client",
new=vm_client,
),
):
from prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size import (
vm_desired_sku_size,
)
check = vm_desired_sku_size()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "VMTest"
assert result[0].resource_id == vm_id
assert (
result[0].status_extended
== f"VM VMTest is using desired SKU size Standard_A8_v2 in subscription {AZURE_SUBSCRIPTION_ID}."
)
def test_vm_using_desired_sku_size_custom_config(self):
"""Test VM using a SKU size that is in the custom configuration."""
vm_id = str(uuid4())
vm_client = mock.MagicMock()
vm_client.virtual_machines = {
AZURE_SUBSCRIPTION_ID: {
vm_id: VirtualMachine(
resource_id=vm_id,
resource_name="VMTest",
location="location",
security_profile=SecurityProfile(
security_type="TrustedLaunch",
uefi_settings=UefiSettings(
secure_boot_enabled=True,
v_tpm_enabled=True,
),
),
extensions=[],
storage_profile=StorageProfile(
os_disk=None,
data_disks=[],
),
vm_size="Standard_B1s",
),
}
}
vm_client.audit_config = {
"desired_vm_sku_sizes": ["Standard_B1s", "Standard_B2s"]
}
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size.vm_client",
new=vm_client,
),
):
from prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size import (
vm_desired_sku_size,
)
check = vm_desired_sku_size()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "VMTest"
assert result[0].resource_id == vm_id
assert (
result[0].status_extended
== f"VM VMTest is using desired SKU size Standard_B1s in subscription {AZURE_SUBSCRIPTION_ID}."
)
def test_vm_using_non_desired_sku_size_default_config(self):
"""Test VM using a SKU size that is not in the default configuration."""
vm_id = str(uuid4())
vm_client = mock.MagicMock()
vm_client.virtual_machines = {
AZURE_SUBSCRIPTION_ID: {
vm_id: VirtualMachine(
resource_id=vm_id,
resource_name="VMTest",
location="location",
security_profile=SecurityProfile(
security_type="TrustedLaunch",
uefi_settings=UefiSettings(
secure_boot_enabled=True,
v_tpm_enabled=True,
),
),
extensions=[],
storage_profile=StorageProfile(
os_disk=None,
data_disks=[],
),
vm_size="Standard_B1s",
),
}
}
vm_client.audit_config = {}
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size.vm_client",
new=vm_client,
),
):
from prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size import (
vm_desired_sku_size,
)
check = vm_desired_sku_size()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "VMTest"
assert result[0].resource_id == vm_id
assert (
result[0].status_extended
== f"VM VMTest is using Standard_B1s which is not a desired SKU size in subscription {AZURE_SUBSCRIPTION_ID}."
)
def test_vm_using_non_desired_sku_size_custom_config(self):
"""Test VM using a SKU size that is not in the custom configuration."""
vm_id = str(uuid4())
vm_client = mock.MagicMock()
vm_client.virtual_machines = {
AZURE_SUBSCRIPTION_ID: {
vm_id: VirtualMachine(
resource_id=vm_id,
resource_name="VMTest",
location="location",
security_profile=SecurityProfile(
security_type="TrustedLaunch",
uefi_settings=UefiSettings(
secure_boot_enabled=True,
v_tpm_enabled=True,
),
),
extensions=[],
storage_profile=StorageProfile(
os_disk=None,
data_disks=[],
),
vm_size="Standard_A8_v2",
),
}
}
vm_client.audit_config = {
"desired_vm_sku_sizes": ["Standard_B1s", "Standard_B2s"]
}
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size.vm_client",
new=vm_client,
),
):
from prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size import (
vm_desired_sku_size,
)
check = vm_desired_sku_size()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "VMTest"
assert result[0].resource_id == vm_id
assert (
result[0].status_extended
== f"VM VMTest is using Standard_A8_v2 which is not a desired SKU size in subscription {AZURE_SUBSCRIPTION_ID}."
)
def test_vm_with_none_vm_size(self):
"""Test VM with None vm_size."""
vm_id = str(uuid4())
vm_client = mock.MagicMock()
vm_client.virtual_machines = {
AZURE_SUBSCRIPTION_ID: {
vm_id: VirtualMachine(
resource_id=vm_id,
resource_name="VMTest",
location="location",
security_profile=SecurityProfile(
security_type="TrustedLaunch",
uefi_settings=UefiSettings(
secure_boot_enabled=True,
v_tpm_enabled=True,
),
),
extensions=[],
storage_profile=StorageProfile(
os_disk=None,
data_disks=[],
),
vm_size=None,
),
}
}
vm_client.audit_config = {"desired_vm_sku_sizes": ["Standard_A8_v2"]}
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size.vm_client",
new=vm_client,
),
):
from prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size import (
vm_desired_sku_size,
)
check = vm_desired_sku_size()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "VMTest"
assert result[0].resource_id == vm_id
assert (
result[0].status_extended
== f"VM VMTest is using None which is not a desired SKU size in subscription {AZURE_SUBSCRIPTION_ID}."
)
def test_multiple_vms_different_statuses(self):
"""Test multiple VMs with different statuses."""
vm_id_1 = str(uuid4())
vm_id_2 = str(uuid4())
vm_id_3 = str(uuid4())
vm_client = mock.MagicMock()
vm_client.virtual_machines = {
AZURE_SUBSCRIPTION_ID: {
vm_id_1: VirtualMachine(
resource_id=vm_id_1,
resource_name="VMApproved",
location="location",
security_profile=SecurityProfile(
security_type="TrustedLaunch",
uefi_settings=UefiSettings(
secure_boot_enabled=True,
v_tpm_enabled=True,
),
),
extensions=[],
storage_profile=StorageProfile(
os_disk=None,
data_disks=[],
),
vm_size="Standard_A8_v2",
),
vm_id_2: VirtualMachine(
resource_id=vm_id_2,
resource_name="VMNotApproved",
location="location",
security_profile=SecurityProfile(
security_type="TrustedLaunch",
uefi_settings=UefiSettings(
secure_boot_enabled=True,
v_tpm_enabled=True,
),
),
extensions=[],
storage_profile=StorageProfile(
os_disk=None,
data_disks=[],
),
vm_size="Standard_B1s",
),
vm_id_3: VirtualMachine(
resource_id=vm_id_3,
resource_name="VMAnotherApproved",
location="location",
security_profile=SecurityProfile(
security_type="TrustedLaunch",
uefi_settings=UefiSettings(
secure_boot_enabled=True,
v_tpm_enabled=True,
),
),
extensions=[],
storage_profile=StorageProfile(
os_disk=None,
data_disks=[],
),
vm_size="Standard_DS3_v2",
),
}
}
vm_client.audit_config = {
"desired_vm_sku_sizes": ["Standard_A8_v2", "Standard_DS3_v2"]
}
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size.vm_client",
new=vm_client,
),
):
from prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size import (
vm_desired_sku_size,
)
check = vm_desired_sku_size()
result = check.execute()
assert len(result) == 3
# Find the PASS result
pass_result = next(
r
for r in result
if r.status == "PASS" and r.resource_name == "VMApproved"
)
assert pass_result.subscription == AZURE_SUBSCRIPTION_ID
assert pass_result.resource_id == vm_id_1
assert (
pass_result.status_extended
== f"VM VMApproved is using desired SKU size Standard_A8_v2 in subscription {AZURE_SUBSCRIPTION_ID}."
)
# Find the FAIL result
fail_result = next(
r
for r in result
if r.status == "FAIL" and r.resource_name == "VMNotApproved"
)
assert fail_result.subscription == AZURE_SUBSCRIPTION_ID
assert fail_result.resource_id == vm_id_2
assert (
fail_result.status_extended
== f"VM VMNotApproved is using Standard_B1s which is not a desired SKU size in subscription {AZURE_SUBSCRIPTION_ID}."
)
# Find the second PASS result
pass_result_2 = next(
r
for r in result
if r.status == "PASS" and r.resource_name == "VMAnotherApproved"
)
assert pass_result_2.subscription == AZURE_SUBSCRIPTION_ID
assert pass_result_2.resource_id == vm_id_3
assert (
pass_result_2.status_extended
== f"VM VMAnotherApproved is using desired SKU size Standard_DS3_v2 in subscription {AZURE_SUBSCRIPTION_ID}."
)
def test_multiple_subscriptions(self):
"""Test multiple subscriptions with different VMs."""
vm_id_1 = str(uuid4())
vm_id_2 = str(uuid4())
subscription_2 = "subscription-2"
vm_client = mock.MagicMock()
vm_client.virtual_machines = {
AZURE_SUBSCRIPTION_ID: {
vm_id_1: VirtualMachine(
resource_id=vm_id_1,
resource_name="VMSub1",
location="location",
security_profile=SecurityProfile(
security_type="TrustedLaunch",
uefi_settings=UefiSettings(
secure_boot_enabled=True,
v_tpm_enabled=True,
),
),
extensions=[],
storage_profile=StorageProfile(
os_disk=None,
data_disks=[],
),
vm_size="Standard_A8_v2",
),
},
subscription_2: {
vm_id_2: VirtualMachine(
resource_id=vm_id_2,
resource_name="VMSub2",
location="location",
security_profile=SecurityProfile(
security_type="TrustedLaunch",
uefi_settings=UefiSettings(
secure_boot_enabled=True,
v_tpm_enabled=True,
),
),
extensions=[],
storage_profile=StorageProfile(
os_disk=None,
data_disks=[],
),
vm_size="Standard_B1s",
),
},
}
vm_client.audit_config = {"desired_vm_sku_sizes": ["Standard_A8_v2"]}
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size.vm_client",
new=vm_client,
),
):
from prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size import (
vm_desired_sku_size,
)
check = vm_desired_sku_size()
result = check.execute()
assert len(result) == 2
# Find the PASS result from subscription 1
pass_result = next(
r
for r in result
if r.status == "PASS" and r.subscription == AZURE_SUBSCRIPTION_ID
)
assert pass_result.resource_name == "VMSub1"
assert pass_result.resource_id == vm_id_1
# Find the FAIL result from subscription 2
fail_result = next(
r
for r in result
if r.status == "FAIL" and r.subscription == subscription_2
)
assert fail_result.resource_name == "VMSub2"
assert fail_result.resource_id == vm_id_2
def test_empty_desired_sku_sizes_config(self):
"""Test when the desired SKU sizes configuration is empty."""
vm_id = str(uuid4())
vm_client = mock.MagicMock()
vm_client.virtual_machines = {
AZURE_SUBSCRIPTION_ID: {
vm_id: VirtualMachine(
resource_id=vm_id,
resource_name="VMTest",
location="location",
security_profile=SecurityProfile(
security_type="TrustedLaunch",
uefi_settings=UefiSettings(
secure_boot_enabled=True,
v_tpm_enabled=True,
),
),
extensions=[],
storage_profile=StorageProfile(
os_disk=None,
data_disks=[],
),
vm_size="Standard_A8_v2",
),
}
}
vm_client.audit_config = {"desired_vm_sku_sizes": []}
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size.vm_client",
new=vm_client,
),
):
from prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size import (
vm_desired_sku_size,
)
check = vm_desired_sku_size()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "VMTest"
assert result[0].resource_id == vm_id
assert (
result[0].status_extended
== f"VM VMTest is using Standard_A8_v2 which is not a desired SKU size in subscription {AZURE_SUBSCRIPTION_ID}."
)
def test_case_sensitive_sku_size_matching(self):
"""Test that SKU size matching is case sensitive."""
vm_id = str(uuid4())
vm_client = mock.MagicMock()
vm_client.virtual_machines = {
AZURE_SUBSCRIPTION_ID: {
vm_id: VirtualMachine(
resource_id=vm_id,
resource_name="VMTest",
location="location",
security_profile=SecurityProfile(
security_type="TrustedLaunch",
uefi_settings=UefiSettings(
secure_boot_enabled=True,
v_tpm_enabled=True,
),
),
extensions=[],
storage_profile=StorageProfile(
os_disk=None,
data_disks=[],
),
vm_size="standard_a8_v2", # lowercase
),
}
}
vm_client.audit_config = {
"desired_vm_sku_sizes": ["Standard_A8_v2"]
} # proper case
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size.vm_client",
new=vm_client,
),
):
from prowler.providers.azure.services.vm.vm_desired_sku_size.vm_desired_sku_size import (
vm_desired_sku_size,
)
check = vm_desired_sku_size()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL" # Should fail due to case mismatch
assert result[0].subscription == AZURE_SUBSCRIPTION_ID
assert result[0].resource_name == "VMTest"
assert result[0].resource_id == vm_id
assert (
result[0].status_extended
== f"VM VMTest is using standard_a8_v2 which is not a desired SKU size in subscription {AZURE_SUBSCRIPTION_ID}."
)
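# Across these cases the check's core rule is an exact, case-sensitive
# membership test against the configured list, falling back to a built-in
# default when "desired_vm_sku_sizes" is absent (the fixture change earlier in
# this diff shows the same three defaults). A sketch under those assumptions;
# evaluate_vm_sku and DEFAULT_SKU_SIZES are illustrative names:
DEFAULT_SKU_SIZES = ["Standard_A8_v2", "Standard_DS3_v2", "Standard_D4s_v3"]


def evaluate_vm_sku(vm, subscription, audit_config):
    desired = audit_config.get("desired_vm_sku_sizes", DEFAULT_SKU_SIZES)
    if vm.vm_size in desired:  # exact, case-sensitive match
        return (
            "PASS",
            f"VM {vm.resource_name} is using desired SKU size {vm.vm_size} "
            f"in subscription {subscription}.",
        )
    return (
        "FAIL",
        f"VM {vm.resource_name} is using {vm.vm_size} which is not a desired "
        f"SKU size in subscription {subscription}.",
    )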
@@ -59,6 +59,7 @@ class Test_vm_scaleset_associated_with_load_balancer:
resource_name="compliant-vmss",
location="eastus",
load_balancer_backend_pools=[backend_pool_id],
instance_ids=[],
)
}
}
@@ -98,6 +99,7 @@ class Test_vm_scaleset_associated_with_load_balancer:
resource_name="noncompliant-vmss",
location="westeurope",
load_balancer_backend_pools=[],
instance_ids=[],
)
}
}
@@ -139,12 +141,14 @@ class Test_vm_scaleset_associated_with_load_balancer:
resource_name="compliant-vmss",
location="eastus",
load_balancer_backend_pools=[backend_pool_id],
instance_ids=[],
),
noncompliant_id: VirtualMachineScaleSet(
resource_id=noncompliant_id,
resource_name="noncompliant-vmss",
location="westeurope",
load_balancer_backend_pools=[],
instance_ids=[],
),
}
}
@@ -191,6 +195,7 @@ class Test_vm_scaleset_associated_with_load_balancer:
resource_name="",
location="",
load_balancer_backend_pools=[],
instance_ids=[],
)
}
}
@@ -0,0 +1,174 @@
from unittest import mock
from uuid import uuid4
from prowler.providers.azure.services.vm.vm_service import VirtualMachineScaleSet
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
set_mocked_azure_provider,
)
class Test_vm_scaleset_not_empty:
def test_no_subscriptions(self):
vm_scale_sets = {}
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_client.vm_client.vm_scale_sets",
new=vm_scale_sets,
),
):
from prowler.providers.azure.services.vm.vm_scaleset_not_empty.vm_scaleset_not_empty import (
vm_scaleset_not_empty,
)
check = vm_scaleset_not_empty()
result = check.execute()
assert len(result) == 0
def test_empty_scale_sets(self):
vm_scale_sets = {AZURE_SUBSCRIPTION_ID: {}}
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_client.vm_client.vm_scale_sets",
new=vm_scale_sets,
),
):
from prowler.providers.azure.services.vm.vm_scaleset_not_empty.vm_scaleset_not_empty import (
vm_scaleset_not_empty,
)
check = vm_scaleset_not_empty()
result = check.execute()
assert len(result) == 0
def test_scale_set_with_no_instances(self):
vmss_id = str(uuid4())
vm_scale_sets = {
AZURE_SUBSCRIPTION_ID: {
vmss_id: VirtualMachineScaleSet(
resource_id=vmss_id,
resource_name="empty-vmss",
location="eastus",
load_balancer_backend_pools=[],
instance_ids=[],
)
}
}
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_client.vm_client.vm_scale_sets",
new=vm_scale_sets,
),
):
from prowler.providers.azure.services.vm.vm_scaleset_not_empty.vm_scaleset_not_empty import (
vm_scaleset_not_empty,
)
check = vm_scaleset_not_empty()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].resource_id == vmss_id
assert result[0].resource_name == "empty-vmss"
assert result[0].location == "eastus"
expected_status_extended = f"Scale set 'empty-vmss' in subscription '{AZURE_SUBSCRIPTION_ID}' is empty: no VM instances present."
assert result[0].status_extended == expected_status_extended
def test_scale_set_with_instances(self):
vmss_id = str(uuid4())
instance_ids = ["1", "2"]
vm_scale_sets = {
AZURE_SUBSCRIPTION_ID: {
vmss_id: VirtualMachineScaleSet(
resource_id=vmss_id,
resource_name="nonempty-vmss",
location="westeurope",
load_balancer_backend_pools=[],
instance_ids=instance_ids,
)
}
}
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_client.vm_client.vm_scale_sets",
new=vm_scale_sets,
),
):
from prowler.providers.azure.services.vm.vm_scaleset_not_empty.vm_scaleset_not_empty import (
vm_scaleset_not_empty,
)
check = vm_scaleset_not_empty()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert result[0].resource_id == vmss_id
assert result[0].resource_name == "nonempty-vmss"
assert result[0].location == "westeurope"
expected_status_extended = f"Scale set 'nonempty-vmss' in subscription '{AZURE_SUBSCRIPTION_ID}' has {len(instance_ids)} VM instances."
assert result[0].status_extended == expected_status_extended
def test_multiple_scale_sets(self):
empty_id = str(uuid4())
nonempty_id = str(uuid4())
instance_ids = ["1"]
vm_scale_sets = {
AZURE_SUBSCRIPTION_ID: {
empty_id: VirtualMachineScaleSet(
resource_id=empty_id,
resource_name="empty-vmss",
location="eastus",
load_balancer_backend_pools=[],
instance_ids=[],
),
nonempty_id: VirtualMachineScaleSet(
resource_id=nonempty_id,
resource_name="nonempty-vmss",
location="westeurope",
load_balancer_backend_pools=[],
instance_ids=instance_ids,
),
}
}
with (
mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_azure_provider(),
),
mock.patch(
"prowler.providers.azure.services.vm.vm_client.vm_client.vm_scale_sets",
new=vm_scale_sets,
),
):
from prowler.providers.azure.services.vm.vm_scaleset_not_empty.vm_scaleset_not_empty import (
vm_scaleset_not_empty,
)
check = vm_scaleset_not_empty()
result = check.execute()
assert len(result) == 2
for r in result:
if r.resource_name == "empty-vmss":
expected_status_extended = f"Scale set 'empty-vmss' in subscription '{AZURE_SUBSCRIPTION_ID}' is empty: no VM instances present."
assert r.status == "FAIL"
assert r.status_extended == expected_status_extended
elif r.resource_name == "nonempty-vmss":
expected_status_extended = f"Scale set 'nonempty-vmss' in subscription '{AZURE_SUBSCRIPTION_ID}' has {len(instance_ids)} VM instances."
assert r.status == "PASS"
assert r.status_extended == expected_status_extended
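These tests pin down the contract of the new check: iterate every scale set in every subscription, report FAIL (with the exact `status_extended` string asserted above) when `instance_ids` is empty, and PASS otherwise. For reference, a minimal implementation consistent with these assertions might look like the sketch below. It follows the usual Prowler check pattern (a `Check` subclass producing one `Check_Report_Azure` per resource); the actual merged check may differ in details such as the report constructor.

```python
from prowler.lib.check.models import Check, Check_Report_Azure
from prowler.providers.azure.services.vm.vm_client import vm_client


class vm_scaleset_not_empty(Check):
    def execute(self):
        findings = []
        # vm_client.vm_scale_sets maps subscription -> {resource_id: VirtualMachineScaleSet}
        for subscription, scale_sets in vm_client.vm_scale_sets.items():
            for scale_set in scale_sets.values():
                # Sketch: recent Prowler versions derive resource_id, resource_name
                # and location from the resource passed to the report constructor.
                report = Check_Report_Azure(metadata=self.metadata(), resource=scale_set)
                report.subscription = subscription
                if scale_set.instance_ids:
                    report.status = "PASS"
                    report.status_extended = (
                        f"Scale set '{scale_set.resource_name}' in subscription "
                        f"'{subscription}' has {len(scale_set.instance_ids)} VM instances."
                    )
                else:
                    report.status = "FAIL"
                    report.status_extended = (
                        f"Scale set '{scale_set.resource_name}' in subscription "
                        f"'{subscription}' is empty: no VM instances present."
                    )
                findings.append(report)
        return findings
```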
@@ -41,6 +41,7 @@ def mock_vm_get_virtual_machines(_):
),
data_disks=[],
),
vm_size="Standard_A8_v2",
linux_configuration=None,
)
}
@@ -57,6 +58,7 @@ def mock_vm_get_virtual_machines_with_none(_):
security_profile=None,
extensions=[],
storage_profile=None,
+vm_size=None,
linux_configuration=None,
),
"vm_id-2": VirtualMachine(
@@ -69,6 +71,7 @@ def mock_vm_get_virtual_machines_with_none(_):
os_disk=None,
data_disks=[],
),
vm_size="Standard_B1s",
linux_configuration=None,
),
}
@@ -177,6 +180,10 @@ class Test_VirtualMachines_Service:
)
== 0
)
+assert (
+virtual_machines.virtual_machines[AZURE_SUBSCRIPTION_ID]["vm_id-1"].vm_size
+== "Standard_A8_v2"
+)
def test__get_disks(self):
disks = VirtualMachines(set_mocked_azure_provider()).disks
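The hunks above extend the mocked `VirtualMachine` objects with a `vm_size` attribute and assert that the service surfaces it, so the service model presumably gained an optional string field, left as `None` when the Azure API returns no hardware profile (hence the `vm_size=None` mock). A minimal sketch of the model change, assuming Prowler's usual pydantic models and with the untouched fields elided:

```python
from typing import Optional

from pydantic import BaseModel


class VirtualMachine(BaseModel):
    # Existing fields such as storage_profile, security_profile, extensions
    # and linux_configuration are elided from this sketch.
    resource_id: str
    resource_name: str
    location: str
    # Assumed addition: populated from the SDK's hardware_profile.vm_size
    # and left as None when the API omits the hardware profile.
    vm_size: Optional[str] = None
```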
@@ -43,7 +43,7 @@ class Test_defender_domain_dkim_enabled:
== "DKIM is enabled for domain with ID domain1."
)
assert result[0].resource == defender_client.dkim_configurations[0].dict()
-assert result[0].resource_name == "DKIM Configuration"
+assert result[0].resource_name == "domain1"
assert result[0].resource_id == "domain1"
assert result[0].location == "global"
@@ -86,7 +86,7 @@ class Test_defender_domain_dkim_enabled:
== "DKIM is not enabled for domain with ID domain2."
)
assert result[0].resource == defender_client.dkim_configurations[0].dict()
-assert result[0].resource_name == "DKIM Configuration"
+assert result[0].resource_name == "domain2"
assert result[0].resource_id == "domain2"
assert result[0].location == "global"
@@ -12,6 +12,7 @@ All notable changes to the **Prowler UI** are documented in this file.
- Navigation link in Scans view to access Compliance Overview [(#8251)](https://github.com/prowler-cloud/prowler/pull/8251)
- Status column for findings table in the Compliance Detail view [(#8244)](https://github.com/prowler-cloud/prowler/pull/8244)
- Allow restricting access to routes based on user permissions [(#8287)](https://github.com/prowler-cloud/prowler/pull/8287)
+- Max character limit validation for Scan label [(#8319)](https://github.com/prowler-cloud/prowler/pull/8319)
### Security
@@ -21,6 +22,7 @@ All notable changes to the **Prowler UI** are documented in this file.
- Upgrade to Next.js 14.2.30 and lock TypeScript to 5.5.4 for ESLint compatibility [(#8189)](https://github.com/prowler-cloud/prowler/pull/8189)
- Improved active step highlighting and updated step titles and descriptions in the Cloud Provider credentials update flow [(#8303)](https://github.com/prowler-cloud/prowler/pull/8303)
+- Refactored all existing links across the app to use the new custom-link component for consistent styling [(#8341)](https://github.com/prowler-cloud/prowler/pull/8341)
### 🐞 Fixed
@@ -1,10 +1,10 @@
"use client";
-import Link from "next/link";
import { useEffect } from "react";
import { RocketIcon } from "@/components/icons";
import { Alert, AlertDescription, AlertTitle } from "@/components/ui";
+import { CustomLink } from "@/components/ui/custom/custom-link";
export default function Error({
error,
@@ -27,9 +27,9 @@ export default function Error({
We&apos;re sorry for the inconvenience. Please try again or contact
support if the problem persists.
</AlertDescription>
<Link href="/" className="font-bold">
<CustomLink href="/" target="_self" className="font-bold">
Go to the homepage
</Link>
</CustomLink>
</Alert>
);
}
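The `CustomLink` component itself is not part of this compare view, only its call sites. From the props used across the diff (`href`; an optional `target`, where internal routes pass `"_self"`, suggesting `"_blank"` as the default; `size` values of `"sm"` and `"base"`; and a pass-through `className`), a minimal shape might look like the following sketch. Everything beyond the visible props, including all styling, is an assumption rather than the actual implementation:

```tsx
// Hypothetical sketch of components/ui/custom/custom-link.tsx, inferred
// solely from the call sites in this diff.
import Link from "next/link";
import type { ReactNode } from "react";

import { cn } from "@/lib/utils";

interface CustomLinkProps {
  href: string;
  target?: "_blank" | "_self"; // call sites pass target="_self" for internal routes
  size?: "sm" | "base";
  className?: string;
  children: ReactNode;
}

export const CustomLink = ({
  href,
  target = "_blank",
  size = "sm",
  className,
  children,
}: CustomLinkProps) => (
  <Link
    href={href}
    target={target}
    // rel is only needed when the link opens a new tab
    rel={target === "_blank" ? "noopener noreferrer" : undefined}
    className={cn(
      "font-medium text-primary transition-colors hover:opacity-80",
      size === "sm" ? "text-sm" : "text-base",
      className,
    )}
  >
    {children}
  </Link>
);
```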
@@ -2,7 +2,7 @@
import { zodResolver } from "@hookform/resolvers/zod";
import { Icon } from "@iconify/react";
-import { Button, Checkbox, Divider, Link, Tooltip } from "@nextui-org/react";
+import { Button, Checkbox, Divider, Tooltip } from "@nextui-org/react";
import { useRouter, useSearchParams } from "next/navigation";
import { useEffect } from "react";
import { useForm } from "react-hook-form";
@@ -15,6 +15,7 @@ import { NotificationIcon, ProwlerExtended } from "@/components/icons";
import { ThemeSwitch } from "@/components/ThemeSwitch";
import { useToast } from "@/components/ui";
import { CustomButton, CustomInput } from "@/components/ui/custom";
+import { CustomLink } from "@/components/ui/custom/custom-link";
import {
Form,
FormControl,
@@ -301,13 +302,12 @@ export const AuthForm = ({
onChange={(e) => field.onChange(e.target.checked)}
>
I agree with the&nbsp;
<Link
<CustomLink
href="https://prowler.com/terms-of-service/"
size="sm"
target="_blank"
>
Terms of Service
</Link>
</CustomLink>
&nbsp;of Prowler
</Checkbox>
</FormControl>
@@ -359,13 +359,9 @@ export const AuthForm = ({
content={
<div className="flex-inline text-small">
Social Login with Google is not enabled.{" "}
<Link
target="_blank"
rel="noopener noreferrer"
className="text-xs font-medium text-primary"
>
<CustomLink href="https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/prowler-app-social-login/#google-oauth-configuration">
Read the docs
</Link>
</CustomLink>
</div>
}
placement="right-start"
@@ -392,13 +388,9 @@ export const AuthForm = ({
content={
<div className="flex-inline text-small">
Social Login with Github is not enabled.{" "}
<Link
target="_blank"
rel="noopener noreferrer"
className="text-xs font-medium text-primary"
>
<CustomLink href="https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/prowler-app-social-login/#github-oauth-configuration">
Read the docs
</Link>
</CustomLink>
</div>
}
placement="right-start"
@@ -451,12 +443,16 @@ export const AuthForm = ({
{type === "sign-in" ? (
<p className="text-center text-small">
Need to create an account?&nbsp;
<Link href="/sign-up">Sign up</Link>
<CustomLink size="base" href="/sign-up" target="_self">
Sign up
</CustomLink>
</p>
) : (
<p className="text-center text-small">
Already have an account?&nbsp;
<Link href="/sign-in">Log in</Link>
<CustomLink size="base" href="/sign-in" target="_self">
Log in
</CustomLink>
</p>
)}
</div>
@@ -1,3 +1,4 @@
+import { CustomLink } from "@/components/ui/custom/custom-link";
import { SeverityBadge } from "@/components/ui/table";
import { Requirement } from "@/types/compliance";
@@ -7,7 +8,6 @@ import {
ComplianceDetailContainer,
ComplianceDetailSection,
ComplianceDetailText,
-ComplianceLink,
} from "./shared-components";
export const AWSWellArchitectedCustomDetails = ({
@@ -75,11 +75,9 @@ export const AWSWellArchitectedCustomDetails = ({
{requirement.implementation_guidance_url && (
<ComplianceDetailSection title="Implementation Guidance">
<ComplianceLink
href={requirement.implementation_guidance_url as string}
>
<CustomLink href={requirement.implementation_guidance_url as string}>
{requirement.implementation_guidance_url as string}
</ComplianceLink>
</CustomLink>
</ComplianceDetailSection>
)}
</ComplianceDetailContainer>
@@ -1,5 +1,6 @@
import ReactMarkdown from "react-markdown";
+import { CustomLink } from "@/components/ui/custom/custom-link";
import { Requirement } from "@/types/compliance";
import {
@@ -8,7 +9,6 @@ import {
ComplianceDetailContainer,
ComplianceDetailSection,
ComplianceDetailText,
-ComplianceLink,
} from "./shared-components";
interface CISDetailsProps {
@@ -121,7 +121,7 @@ export const CISCustomDetails = ({ requirement }: CISDetailsProps) => {
{processReferences(requirement.references).map(
(url: string, index: number) => (
<div key={index}>
<ComplianceLink href={url}>{url}</ComplianceLink>
<CustomLink href={url}>{url}</CustomLink>
</div>
),
)}
@@ -1,3 +1,4 @@
+import { CustomLink } from "@/components/ui/custom/custom-link";
import { Requirement } from "@/types/compliance";
import {
@@ -7,7 +8,6 @@ import {
ComplianceDetailContainer,
ComplianceDetailSection,
ComplianceDetailText,
-ComplianceLink,
} from "./shared-components";
export const MITRECustomDetails = ({
@@ -63,9 +63,9 @@ export const MITRECustomDetails = ({
{requirement.technique_url && (
<ComplianceDetailSection title="MITRE ATT&CK Reference">
<ComplianceLink href={requirement.technique_url as string}>
<CustomLink href={requirement.technique_url as string}>
{requirement.technique_url as string}
</ComplianceLink>
</CustomLink>
</ComplianceDetailSection>
)}
@@ -1,26 +1,5 @@
-import Link from "next/link";
import { cn } from "@/lib/utils";
-export const ComplianceLink = ({
-href,
-children,
-}: {
-href: string;
-children: React.ReactNode;
-}) => {
-return (
-<Link
-href={href}
-target="_blank"
-rel="noopener noreferrer"
-className="break-all text-sm text-blue-600 decoration-1 transition-colors hover:text-blue-800 dark:text-blue-400 dark:hover:text-blue-300"
->
-{children}
-</Link>
-);
-};
export const ComplianceDetailContainer = ({
children,
}: {
@@ -1,10 +1,10 @@
"use client";
import { Snippet } from "@nextui-org/react";
-import Link from "next/link";
import { CodeSnippet } from "@/components/ui/code-snippet/code-snippet";
import { CustomSection } from "@/components/ui/custom";
+import { CustomLink } from "@/components/ui/custom/custom-link";
import { EntityInfoShort, InfoField } from "@/components/ui/entities";
import { DateWithTime } from "@/components/ui/entities/date-with-time";
import { SeverityBadge } from "@/components/ui/table/severity-badge";
@@ -151,15 +151,14 @@ export const FindingDetail = ({
{attributes.check_metadata.remediation.recommendation.text}
</p>
{attributes.check_metadata.remediation.recommendation.url && (
<Link
<CustomLink
href={
attributes.check_metadata.remediation.recommendation.url
}
target="_blank"
className="text-sm text-blue-500 hover:underline"
size="sm"
>
Learn more
</Link>
</CustomLink>
)}
</div>
</InfoField>
@@ -179,13 +178,12 @@ export const FindingDetail = ({
{/* Additional Resources section */}
{attributes.check_metadata.remediation.code.other && (
<InfoField label="Additional Resources">
<Link
<CustomLink
href={attributes.check_metadata.remediation.code.other}
target="_blank"
className="text-sm text-blue-500 hover:underline"
size="sm"
>
View documentation
</Link>
</CustomLink>
</InfoField>
)}
</div>
@@ -1,6 +1,5 @@
"use client";
-import Link from "next/link";
import { Dispatch, SetStateAction, useEffect, useRef, useState } from "react";
import { useFormState } from "react-dom";
import { z } from "zod";
@@ -9,6 +8,7 @@ import { createSamlConfig, updateSamlConfig } from "@/actions/integrations";
import { AddIcon } from "@/components/icons";
import { useToast } from "@/components/ui";
import { CustomButton, CustomServerInput } from "@/components/ui/custom";
+import { CustomLink } from "@/components/ui/custom/custom-link";
import { SnippetChip } from "@/components/ui/entities";
import { FormButtons } from "@/components/ui/form";
import { apiBaseUrl } from "@/lib";
@@ -239,9 +239,9 @@ export const SamlConfigForm = ({
assign the user&apos;s role. If the role does not exist, one will
be created with minimal permissions. You can assign permissions to
roles on the{" "}
<Link href="/roles">
<span className="underline">Roles</span>
</Link>{" "}
<CustomLink href="/roles" target="_self">
<span>Roles</span>
</CustomLink>{" "}
page.
</p>
</div>

Some files were not shown because too many files have changed in this diff.