Compare commits
117 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7f53ae7f62 | ||
|
|
6cd1bcaca8 | ||
|
|
3fbff8c8cd | ||
|
|
c96e8eeeb1 | ||
|
|
953d518bf3 | ||
|
|
8cdcbc7e60 | ||
|
|
e969b24652 | ||
|
|
ec54a00f1d | ||
|
|
05eba69058 | ||
|
|
ff900a2a45 | ||
|
|
a41663fb0d | ||
|
|
033e9fd58c | ||
|
|
240b02b498 | ||
|
|
87eb2dfdf7 | ||
|
|
b4d8d64f0e | ||
|
|
7944ebe83a | ||
|
|
bd138114c9 | ||
|
|
d527a3f12b | ||
|
|
260fada3eb | ||
|
|
0ee0fc082a | ||
|
|
9d66d86f66 | ||
|
|
825e53c38f | ||
|
|
196c17d44d | ||
|
|
fc69e195e4 | ||
|
|
5f53a9ec6f | ||
|
|
5e72a40898 | ||
|
|
496ada3cba | ||
|
|
481a43f3f6 | ||
|
|
58298706d4 | ||
|
|
e75a760da0 | ||
|
|
c313757ef2 | ||
|
|
284678fe48 | ||
|
|
c3d25e6f39 | ||
|
|
a9d16bbbce | ||
|
|
92bc992e7f | ||
|
|
903e4f8b9f | ||
|
|
2c09076f91 | ||
|
|
3d4902b057 | ||
|
|
b30eab7935 | ||
|
|
cf8402e013 | ||
|
|
af8fbaf2cd | ||
|
|
c748e57878 | ||
|
|
a5187c6a42 | ||
|
|
e19ed30ac7 | ||
|
|
96ce1461b9 | ||
|
|
9da5fb67c3 | ||
|
|
eb1c1791e4 | ||
|
|
581afd38e6 | ||
|
|
19a735aafe | ||
|
|
2170fbb1ab | ||
|
|
90c6c6b98d | ||
|
|
02b416b4f8 | ||
|
|
1022b5e413 | ||
|
|
d1bad9d9ab | ||
|
|
178f3850be | ||
|
|
d239d299e2 | ||
|
|
88fae9ecae | ||
|
|
a3bff9705c | ||
|
|
75989b09d7 | ||
|
|
9a622f60fe | ||
|
|
7cd1966066 | ||
|
|
77e59203ae | ||
|
|
0a449c7e13 | ||
|
|
163fbaff19 | ||
|
|
7ec514d9dd | ||
|
|
b63f70ac82 | ||
|
|
2c86b3a990 | ||
|
|
12443f7cbb | ||
|
|
3a8c635b75 | ||
|
|
8bc6e8b7ab | ||
|
|
9ca1899ebf | ||
|
|
1bdcf2c7f1 | ||
|
|
92a804bf88 | ||
|
|
f85ad9a7a2 | ||
|
|
308c778bad | ||
|
|
ee06d3a68a | ||
|
|
8dc4bd0be8 | ||
|
|
bf9e38dc5c | ||
|
|
a85b89ffb5 | ||
|
|
87da11b712 | ||
|
|
8b57f178e0 | ||
|
|
7830ed8b9f | ||
|
|
d4e66c4a6f | ||
|
|
1cfe610d47 | ||
|
|
d9a9236ab7 | ||
|
|
285aea3458 | ||
|
|
b051aeeb64 | ||
|
|
b99dce6a43 | ||
|
|
04749c1da1 | ||
|
|
44d70f8467 | ||
|
|
95791a9909 | ||
|
|
ad0b8a4208 | ||
|
|
5669a42039 | ||
|
|
83b328ea92 | ||
|
|
a6c88c0d9e | ||
|
|
922f9d2f91 | ||
|
|
a69d0d16c0 | ||
|
|
676cc44fe2 | ||
|
|
3840e40870 | ||
|
|
ab2d57554a | ||
|
|
cbb5b21e6c | ||
|
|
1efd5668ce | ||
|
|
ca86aeb1d7 | ||
|
|
4f2a8b71bb | ||
|
|
3b0cb3db85 | ||
|
|
00c527ff79 | ||
|
|
ab348d5752 | ||
|
|
dd713351dc | ||
|
|
fa722f1dc7 | ||
|
|
b0cc3978d0 | ||
|
|
aa843b823c | ||
|
|
020edc0d1d | ||
|
|
036da81bbd | ||
|
|
4428bcb2c0 | ||
|
|
21de9a2f6f | ||
|
|
231d933b9e | ||
|
|
2ad360a7f9 |
4
.env
@@ -10,6 +10,8 @@ NEXT_PUBLIC_API_BASE_URL=${API_BASE_URL}
|
||||
NEXT_PUBLIC_API_DOCS_URL=http://prowler-api:8080/api/v1/docs
|
||||
AUTH_TRUST_HOST=true
|
||||
UI_PORT=3000
|
||||
# Temp URL for feeds need to use actual
|
||||
RSS_FEED_URL=https://prowler.com/blog/rss
|
||||
# openssl rand -base64 32
|
||||
AUTH_SECRET="N/c6mnaS5+SWq81+819OrzQZlmx1Vxtp/orjttJSmw8="
|
||||
# Google Tag Manager ID
|
||||
@@ -131,7 +133,7 @@ SENTRY_ENVIRONMENT=local
|
||||
SENTRY_RELEASE=local
|
||||
|
||||
#### Prowler release version ####
|
||||
NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.7.5
|
||||
NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.10.0
|
||||
|
||||
# Social login credentials
|
||||
SOCIAL_GOOGLE_OAUTH_CALLBACK_URL="${AUTH_URL}/api/auth/callback/google"
|
||||
|
||||
5
.github/labeler.yml
vendored
@@ -22,6 +22,11 @@ provider/kubernetes:
|
||||
- any-glob-to-any-file: "prowler/providers/kubernetes/**"
|
||||
- any-glob-to-any-file: "tests/providers/kubernetes/**"
|
||||
|
||||
provider/m365:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: "prowler/providers/m365/**"
|
||||
- any-glob-to-any-file: "tests/providers/m365/**"
|
||||
|
||||
provider/github:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: "prowler/providers/github/**"
|
||||
|
||||
4
.github/workflows/api-codeql.yml
vendored
@@ -48,12 +48,12 @@ jobs:
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
|
||||
uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
config-file: ./.github/codeql/api-codeql-config.yml
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
|
||||
uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5
|
||||
with:
|
||||
category: "/language:${{matrix.language}}"
|
||||
|
||||
3
.github/workflows/api-pull-request.yml
vendored
@@ -164,8 +164,9 @@ jobs:
|
||||
working-directory: ./api
|
||||
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
|
||||
# 76352, 76353, 77323 come from SDK, but they cannot upgrade it yet. It does not affect API
|
||||
# TODO: Botocore needs urllib3 1.X so we need to ignore these vulnerabilities 77744,77745. Remove this once we upgrade to urllib3 2.X
|
||||
run: |
|
||||
poetry run safety check --ignore 70612,66963,74429,76352,76353,77323
|
||||
poetry run safety check --ignore 70612,66963,74429,76352,76353,77323,77744,77745
|
||||
|
||||
- name: Vulture
|
||||
working-directory: ./api
|
||||
|
||||
2
.github/workflows/find-secrets.yml
vendored
@@ -11,7 +11,7 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: TruffleHog OSS
|
||||
uses: trufflesecurity/trufflehog@6641d4ba5b684fffe195b9820345de1bf19f3181 # v3.89.2
|
||||
uses: trufflesecurity/trufflehog@a05cf0859455b5b16317ee22d809887a4043cdf0 # v3.90.2
|
||||
with:
|
||||
path: ./
|
||||
base: ${{ github.event.repository.default_branch }}
|
||||
|
||||
@@ -19,11 +19,28 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: '3.12'
|
||||
|
||||
- name: Install Poetry
|
||||
run: |
|
||||
python3 -m pip install --user poetry
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config --global user.name "prowler-bot"
|
||||
git config --global user.email "179230569+prowler-bot@users.noreply.github.com"
|
||||
|
||||
- name: Parse version and determine branch
|
||||
run: |
|
||||
@@ -107,11 +124,12 @@ jobs:
|
||||
echo "✓ api/pyproject.toml version: $CURRENT_API_VERSION"
|
||||
|
||||
- name: Verify prowler dependency in api/pyproject.toml
|
||||
if: ${{ env.PATCH_VERSION != '0' }}
|
||||
run: |
|
||||
CURRENT_PROWLER_REF=$(grep 'prowler @ git+https://github.com/prowler-cloud/prowler.git@' api/pyproject.toml | sed -E 's/.*@([^"]+)".*/\1/' | tr -d '[:space:]')
|
||||
PROWLER_VERSION_TRIMMED=$(echo "$PROWLER_VERSION" | tr -d '[:space:]')
|
||||
if [ "$CURRENT_PROWLER_REF" != "$PROWLER_VERSION_TRIMMED" ]; then
|
||||
echo "ERROR: Prowler dependency mismatch in api/pyproject.toml (expected: '$PROWLER_VERSION_TRIMMED', found: '$CURRENT_PROWLER_REF')"
|
||||
BRANCH_NAME_TRIMMED=$(echo "$BRANCH_NAME" | tr -d '[:space:]')
|
||||
if [ "$CURRENT_PROWLER_REF" != "$BRANCH_NAME_TRIMMED" ]; then
|
||||
echo "ERROR: Prowler dependency mismatch in api/pyproject.toml (expected: '$BRANCH_NAME_TRIMMED', found: '$CURRENT_PROWLER_REF')"
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ api/pyproject.toml prowler dependency: $CURRENT_PROWLER_REF"
|
||||
@@ -135,6 +153,39 @@ jobs:
|
||||
exit 1
|
||||
fi
|
||||
git checkout -b "$BRANCH_NAME"
|
||||
|
||||
# Push the new branch first so it exists remotely
|
||||
git push origin "$BRANCH_NAME"
|
||||
|
||||
- name: Update prowler dependency in api/pyproject.toml
|
||||
if: ${{ env.PATCH_VERSION == '0' }}
|
||||
run: |
|
||||
CURRENT_PROWLER_REF=$(grep 'prowler @ git+https://github.com/prowler-cloud/prowler.git@' api/pyproject.toml | sed -E 's/.*@([^"]+)".*/\1/' | tr -d '[:space:]')
|
||||
BRANCH_NAME_TRIMMED=$(echo "$BRANCH_NAME" | tr -d '[:space:]')
|
||||
|
||||
# Minor release: update the dependency to use the new branch
|
||||
echo "Minor release detected - updating prowler dependency from '$CURRENT_PROWLER_REF' to '$BRANCH_NAME_TRIMMED'"
|
||||
sed -i "s|prowler @ git+https://github.com/prowler-cloud/prowler.git@[^\"]*\"|prowler @ git+https://github.com/prowler-cloud/prowler.git@$BRANCH_NAME_TRIMMED\"|" api/pyproject.toml
|
||||
|
||||
# Verify the change was made
|
||||
UPDATED_PROWLER_REF=$(grep 'prowler @ git+https://github.com/prowler-cloud/prowler.git@' api/pyproject.toml | sed -E 's/.*@([^"]+)".*/\1/' | tr -d '[:space:]')
|
||||
if [ "$UPDATED_PROWLER_REF" != "$BRANCH_NAME_TRIMMED" ]; then
|
||||
echo "ERROR: Failed to update prowler dependency in api/pyproject.toml"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Update poetry lock file
|
||||
echo "Updating poetry.lock file..."
|
||||
cd api
|
||||
poetry lock
|
||||
cd ..
|
||||
|
||||
# Commit and push the changes
|
||||
git add api/pyproject.toml api/poetry.lock
|
||||
git commit -m "chore(api): update prowler dependency to $BRANCH_NAME_TRIMMED for release $PROWLER_VERSION"
|
||||
git push origin "$BRANCH_NAME"
|
||||
|
||||
echo "✓ api/pyproject.toml prowler dependency updated to: $UPDATED_PROWLER_REF"
|
||||
|
||||
- name: Extract changelog entries
|
||||
run: |
|
||||
|
||||
@@ -55,29 +55,20 @@ jobs:
|
||||
comment-author: 'github-actions[bot]'
|
||||
body-includes: '<!-- changelog-check -->'
|
||||
|
||||
- name: Comment on PR if changelog is missing
|
||||
if: github.event.pull_request.head.repo.full_name == github.repository && steps.check_folders.outputs.missing_changelogs != ''
|
||||
- name: Update PR comment with changelog status
|
||||
if: github.event.pull_request.head.repo.full_name == github.repository
|
||||
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0
|
||||
with:
|
||||
issue-number: ${{ github.event.pull_request.number }}
|
||||
comment-id: ${{ steps.find_comment.outputs.comment-id }}
|
||||
edit-mode: replace
|
||||
body: |
|
||||
<!-- changelog-check -->
|
||||
⚠️ **Changes detected in the following folders without a corresponding update to the `CHANGELOG.md`:**
|
||||
${{ steps.check_folders.outputs.missing_changelogs != '' && format('⚠️ **Changes detected in the following folders without a corresponding update to the `CHANGELOG.md`:**
|
||||
|
||||
${{ steps.check_folders.outputs.missing_changelogs }}
|
||||
{0}
|
||||
|
||||
Please add an entry to the corresponding `CHANGELOG.md` file to maintain a clear history of changes.
|
||||
|
||||
- name: Comment on PR if all changelogs are present
|
||||
if: github.event.pull_request.head.repo.full_name == github.repository && steps.check_folders.outputs.missing_changelogs == ''
|
||||
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0
|
||||
with:
|
||||
issue-number: ${{ github.event.pull_request.number }}
|
||||
comment-id: ${{ steps.find_comment.outputs.comment-id }}
|
||||
body: |
|
||||
<!-- changelog-check -->
|
||||
✅ All necessary `CHANGELOG.md` files have been updated. Great job! 🎉
|
||||
Please add an entry to the corresponding `CHANGELOG.md` file to maintain a clear history of changes.', steps.check_folders.outputs.missing_changelogs) || '✅ All necessary `CHANGELOG.md` files have been updated. Great job! 🎉' }}
|
||||
|
||||
- name: Fail if changelog is missing
|
||||
if: steps.check_folders.outputs.missing_changelogs != ''
|
||||
|
||||
17
.github/workflows/pull-request-merged.yml
vendored
@@ -27,11 +27,12 @@ jobs:
|
||||
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
|
||||
repository: ${{ secrets.CLOUD_DISPATCH }}
|
||||
event-type: prowler-pull-request-merged
|
||||
client-payload: '{
|
||||
"PROWLER_COMMIT_SHA": "${{ github.event.pull_request.merge_commit_sha }}",
|
||||
"PROWLER_COMMIT_SHORT_SHA": "${{ env.SHORT_SHA }}",
|
||||
"PROWLER_PR_TITLE": "${{ github.event.pull_request.title }}",
|
||||
"PROWLER_PR_LABELS": ${{ toJson(github.event.pull_request.labels.*.name) }},
|
||||
"PROWLER_PR_BODY": ${{ toJson(github.event.pull_request.body) }},
|
||||
"PROWLER_PR_URL":${{ toJson(github.event.pull_request.html_url) }}
|
||||
}'
|
||||
client-payload: |
|
||||
{
|
||||
"PROWLER_COMMIT_SHA": "${{ github.event.pull_request.merge_commit_sha }}",
|
||||
"PROWLER_COMMIT_SHORT_SHA": "${{ env.SHORT_SHA }}",
|
||||
"PROWLER_PR_TITLE": ${{ toJson(github.event.pull_request.title) }},
|
||||
"PROWLER_PR_LABELS": ${{ toJson(github.event.pull_request.labels.*.name) }},
|
||||
"PROWLER_PR_BODY": ${{ toJson(github.event.pull_request.body) }},
|
||||
"PROWLER_PR_URL": ${{ toJson(github.event.pull_request.html_url) }}
|
||||
}
|
||||
|
||||
1
.github/workflows/sdk-bump-version.yml
vendored
@@ -12,7 +12,6 @@ env:
|
||||
jobs:
|
||||
bump-version:
|
||||
name: Bump Version
|
||||
if: github.repository == 'prowler-cloud/prowler'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
4
.github/workflows/sdk-codeql.yml
vendored
@@ -56,12 +56,12 @@ jobs:
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
|
||||
uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
config-file: ./.github/codeql/sdk-codeql-config.yml
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
|
||||
uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5
|
||||
with:
|
||||
category: "/language:${{matrix.language}}"
|
||||
|
||||
4
.github/workflows/ui-codeql.yml
vendored
@@ -48,12 +48,12 @@ jobs:
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
|
||||
uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
config-file: ./.github/codeql/ui-codeql-config.yml
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
|
||||
uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5
|
||||
with:
|
||||
category: "/language:${{matrix.language}}"
|
||||
|
||||
98
.github/workflows/ui-e2e-tests.yml
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
name: UI - E2E Tests
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
- "v5.*"
|
||||
paths:
|
||||
- '.github/workflows/ui-e2e-tests.yml'
|
||||
- 'ui/**'
|
||||
|
||||
jobs:
|
||||
e2e-tests:
|
||||
if: github.repository == 'prowler-cloud/prowler'
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
AUTH_SECRET: 'fallback-ci-secret-for-testing'
|
||||
AUTH_TRUST_HOST: true
|
||||
NEXTAUTH_URL: 'http://localhost:3000'
|
||||
NEXT_PUBLIC_API_BASE_URL: 'http://localhost:8080/api/v1'
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- name: Start API services
|
||||
run: |
|
||||
# Override docker-compose image tag to use latest instead of stable
|
||||
# This overrides any PROWLER_API_VERSION set in .env file
|
||||
export PROWLER_API_VERSION=latest
|
||||
echo "Using PROWLER_API_VERSION=${PROWLER_API_VERSION}"
|
||||
docker compose up -d api worker worker-beat
|
||||
- name: Wait for API to be ready
|
||||
run: |
|
||||
echo "Waiting for prowler-api..."
|
||||
timeout=150 # 5 minutes max
|
||||
elapsed=0
|
||||
while [ $elapsed -lt $timeout ]; do
|
||||
if curl -s ${NEXT_PUBLIC_API_BASE_URL}/docs >/dev/null 2>&1; then
|
||||
echo "Prowler API is ready!"
|
||||
exit 0
|
||||
fi
|
||||
echo "Waiting for prowler-api... (${elapsed}s elapsed)"
|
||||
sleep 5
|
||||
elapsed=$((elapsed + 5))
|
||||
done
|
||||
echo "Timeout waiting for prowler-api to start"
|
||||
exit 1
|
||||
- name: Load database fixtures for E2E tests
|
||||
run: |
|
||||
docker compose exec -T api sh -c '
|
||||
echo "Loading all fixtures from api/fixtures/dev/..."
|
||||
for fixture in api/fixtures/dev/*.json; do
|
||||
if [ -f "$fixture" ]; then
|
||||
echo "Loading $fixture"
|
||||
poetry run python manage.py loaddata "$fixture" --database admin
|
||||
fi
|
||||
done
|
||||
echo "All database fixtures loaded successfully!"
|
||||
'
|
||||
- name: Setup Node.js environment
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
|
||||
with:
|
||||
node-version: '20.x'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: './ui/package-lock.json'
|
||||
- name: Install UI dependencies
|
||||
working-directory: ./ui
|
||||
run: npm ci
|
||||
- name: Build UI application
|
||||
working-directory: ./ui
|
||||
run: npm run build
|
||||
- name: Cache Playwright browsers
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
id: playwright-cache
|
||||
with:
|
||||
path: ~/.cache/ms-playwright
|
||||
key: ${{ runner.os }}-playwright-${{ hashFiles('ui/package-lock.json') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-playwright-
|
||||
- name: Install Playwright browsers
|
||||
working-directory: ./ui
|
||||
if: steps.playwright-cache.outputs.cache-hit != 'true'
|
||||
run: npm run test:e2e:install
|
||||
- name: Run E2E tests
|
||||
working-directory: ./ui
|
||||
run: npm run test:e2e
|
||||
- name: Upload test reports
|
||||
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
|
||||
if: failure()
|
||||
with:
|
||||
name: playwright-report
|
||||
path: ui/playwright-report/
|
||||
retention-days: 30
|
||||
- name: Cleanup services
|
||||
if: always()
|
||||
run: |
|
||||
echo "Shutting down services..."
|
||||
docker compose down -v || true
|
||||
echo "Cleanup completed"
|
||||
46
.github/workflows/ui-pull-request.yml
vendored
@@ -46,52 +46,6 @@ jobs:
|
||||
working-directory: ./ui
|
||||
run: npm run build
|
||||
|
||||
e2e-tests:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
AUTH_SECRET: 'fallback-ci-secret-for-testing'
|
||||
AUTH_TRUST_HOST: true
|
||||
NEXTAUTH_URL: http://localhost:3000
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
|
||||
with:
|
||||
node-version: '20.x'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: './ui/package-lock.json'
|
||||
- name: Install dependencies
|
||||
working-directory: ./ui
|
||||
run: npm ci
|
||||
- name: Cache Playwright browsers
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
id: playwright-cache
|
||||
with:
|
||||
path: ~/.cache/ms-playwright
|
||||
key: ${{ runner.os }}-playwright-${{ hashFiles('ui/package-lock.json') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-playwright-
|
||||
- name: Install Playwright browsers
|
||||
working-directory: ./ui
|
||||
if: steps.playwright-cache.outputs.cache-hit != 'true'
|
||||
run: npm run test:e2e:install
|
||||
- name: Build the application
|
||||
working-directory: ./ui
|
||||
run: npm run build
|
||||
- name: Run Playwright tests
|
||||
working-directory: ./ui
|
||||
run: npm run test:e2e
|
||||
- name: Upload Playwright report
|
||||
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
|
||||
if: failure()
|
||||
with:
|
||||
name: playwright-report
|
||||
path: ui/playwright-report/
|
||||
retention-days: 30
|
||||
|
||||
test-container-build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
@@ -115,7 +115,8 @@ repos:
|
||||
- id: safety
|
||||
name: safety
|
||||
description: "Safety is a tool that checks your installed dependencies for known security vulnerabilities"
|
||||
entry: bash -c 'safety check --ignore 70612,66963,74429,76352,76353'
|
||||
# TODO: Botocore needs urllib3 1.X so we need to ignore these vulnerabilities 77744,77745. Remove this once we upgrade to urllib3 2.X
|
||||
entry: bash -c 'safety check --ignore 70612,66963,74429,76352,76353,77744,77745'
|
||||
language: system
|
||||
|
||||
- id: vulture
|
||||
|
||||
@@ -88,7 +88,7 @@ prowler dashboard
|
||||
|---|---|---|---|---|
|
||||
| AWS | 567 | 82 | 36 | 10 |
|
||||
| GCP | 79 | 13 | 10 | 3 |
|
||||
| Azure | 142 | 18 | 10 | 3 |
|
||||
| Azure | 142 | 18 | 11 | 3 |
|
||||
| Kubernetes | 83 | 7 | 5 | 7 |
|
||||
| GitHub | 16 | 2 | 1 | 0 |
|
||||
| M365 | 69 | 7 | 3 | 2 |
|
||||
|
||||
@@ -2,6 +2,24 @@
|
||||
|
||||
All notable changes to the **Prowler API** are documented in this file.
|
||||
|
||||
## [1.11.0] (Prowler 5.10.0)
|
||||
|
||||
### Added
|
||||
- Github provider support [(#8271)](https://github.com/prowler-cloud/prowler/pull/8271)
|
||||
- Integration with Amazon S3, enabling storage and retrieval of scan data via S3 buckets [(#8056)](https://github.com/prowler-cloud/prowler/pull/8056)
|
||||
|
||||
### Fixed
|
||||
- Avoid sending errors to Sentry in M365 provider when user authentication fails [(#8420)](https://github.com/prowler-cloud/prowler/pull/8420)
|
||||
|
||||
---
|
||||
|
||||
## [1.10.2] (Prowler v5.9.2)
|
||||
|
||||
### Changed
|
||||
- Optimized queries for resources views [(#8336)](https://github.com/prowler-cloud/prowler/pull/8336)
|
||||
|
||||
---
|
||||
|
||||
## [v1.10.1] (Prowler v5.9.1)
|
||||
|
||||
### Fixed
|
||||
|
||||
@@ -44,6 +44,9 @@ USER prowler
|
||||
|
||||
WORKDIR /home/prowler
|
||||
|
||||
# Ensure output directory exists
|
||||
RUN mkdir -p /tmp/prowler_api_output
|
||||
|
||||
COPY pyproject.toml ./
|
||||
|
||||
RUN pip install --no-cache-dir --upgrade pip && \
|
||||
|
||||
@@ -32,7 +32,7 @@ start_prod_server() {
|
||||
|
||||
start_worker() {
|
||||
echo "Starting the worker..."
|
||||
poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans,scan-reports,deletion,backfill,overview -E --max-tasks-per-child 1
|
||||
poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans,scan-reports,deletion,backfill,overview,integrations -E --max-tasks-per-child 1
|
||||
}
|
||||
|
||||
start_worker_beat() {
|
||||
|
||||
1186
api/poetry.lock
generated
@@ -24,7 +24,7 @@ dependencies = [
|
||||
"drf-spectacular-jsonapi==0.5.1",
|
||||
"gunicorn==23.0.0",
|
||||
"lxml==5.3.2",
|
||||
"prowler @ git+https://github.com/prowler-cloud/prowler.git@v5.9",
|
||||
"prowler @ git+https://github.com/prowler-cloud/prowler.git@v5.10",
|
||||
"psycopg2-binary==2.9.9",
|
||||
"pytest-celery[redis] (>=1.0.1,<2.0.0)",
|
||||
"sentry-sdk[django] (>=2.20.0,<3.0.0)",
|
||||
@@ -38,7 +38,7 @@ name = "prowler-api"
|
||||
package-mode = false
|
||||
# Needed for the SDK compatibility
|
||||
requires-python = ">=3.11,<3.13"
|
||||
version = "1.10.1"
|
||||
version = "1.11.1"
|
||||
|
||||
[project.scripts]
|
||||
celery = "src.backend.config.settings.celery"
|
||||
|
||||
@@ -24,5 +24,18 @@
|
||||
"is_active": true,
|
||||
"date_joined": "2024-09-18T09:04:20.850Z"
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "api.user",
|
||||
"pk": "6d4f8a91-3c2e-4b5a-8f7d-1e9c5b2a4d6f",
|
||||
"fields": {
|
||||
"password": "pbkdf2_sha256$870000$Z63pGJ7nre48hfcGbk5S0O$rQpKczAmijs96xa+gPVJifpT3Fetb8DOusl5Eq6gxac=",
|
||||
"last_login": null,
|
||||
"name": "E2E Test User",
|
||||
"email": "e2e@prowler.com",
|
||||
"company_name": "Prowler E2E Tests",
|
||||
"is_active": true,
|
||||
"date_joined": "2024-01-01T00:00:00.850Z"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
@@ -46,5 +46,24 @@
|
||||
"role": "member",
|
||||
"date_joined": "2024-09-19T11:03:59.712Z"
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "api.tenant",
|
||||
"pk": "7c8f94a3-e2d1-4b3a-9f87-2c4d5e6f1a2b",
|
||||
"fields": {
|
||||
"inserted_at": "2024-01-01T00:00:00Z",
|
||||
"updated_at": "2024-01-01T00:00:00Z",
|
||||
"name": "E2E Test Tenant"
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "api.membership",
|
||||
"pk": "9b1a2c3d-4e5f-6789-abc1-23456789def0",
|
||||
"fields": {
|
||||
"user": "6d4f8a91-3c2e-4b5a-8f7d-1e9c5b2a4d6f",
|
||||
"tenant": "7c8f94a3-e2d1-4b3a-9f87-2c4d5e6f1a2b",
|
||||
"role": "owner",
|
||||
"date_joined": "2024-01-01T00:00:00.000Z"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
@@ -149,5 +149,32 @@
|
||||
"user": "8b38e2eb-6689-4f1e-a4ba-95b275130200",
|
||||
"inserted_at": "2024-11-20T15:36:14.302Z"
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "api.role",
|
||||
"pk": "a5b6c7d8-9e0f-1234-5678-90abcdef1234",
|
||||
"fields": {
|
||||
"tenant": "7c8f94a3-e2d1-4b3a-9f87-2c4d5e6f1a2b",
|
||||
"name": "e2e_admin",
|
||||
"manage_users": true,
|
||||
"manage_account": true,
|
||||
"manage_billing": true,
|
||||
"manage_providers": true,
|
||||
"manage_integrations": true,
|
||||
"manage_scans": true,
|
||||
"unlimited_visibility": true,
|
||||
"inserted_at": "2024-01-01T00:00:00.000Z",
|
||||
"updated_at": "2024-01-01T00:00:00.000Z"
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "api.userrolerelationship",
|
||||
"pk": "f1e2d3c4-b5a6-9876-5432-10fedcba9876",
|
||||
"fields": {
|
||||
"tenant": "7c8f94a3-e2d1-4b3a-9f87-2c4d5e6f1a2b",
|
||||
"role": "a5b6c7d8-9e0f-1234-5678-90abcdef1234",
|
||||
"user": "6d4f8a91-3c2e-4b5a-8f7d-1e9c5b2a4d6f",
|
||||
"inserted_at": "2024-01-01T00:00:00.000Z"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
@@ -0,0 +1,30 @@
|
||||
from functools import partial
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
from api.db_utils import create_index_on_partitions, drop_index_on_partitions
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
atomic = False
|
||||
|
||||
dependencies = [
|
||||
("api", "0039_resource_resources_failed_findings_idx"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(
|
||||
partial(
|
||||
create_index_on_partitions,
|
||||
parent_table="resource_finding_mappings",
|
||||
index_name="rfm_tenant_resource_idx",
|
||||
columns="tenant_id, resource_id",
|
||||
method="BTREE",
|
||||
),
|
||||
reverse_code=partial(
|
||||
drop_index_on_partitions,
|
||||
parent_table="resource_finding_mappings",
|
||||
index_name="rfm_tenant_resource_idx",
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,17 @@
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("api", "0040_rfm_tenant_resource_index_partitions"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddIndex(
|
||||
model_name="resourcefindingmapping",
|
||||
index=models.Index(
|
||||
fields=["tenant_id", "resource_id"],
|
||||
name="rfm_tenant_resource_idx",
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,23 @@
|
||||
from django.contrib.postgres.operations import AddIndexConcurrently
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
atomic = False
|
||||
|
||||
dependencies = [
|
||||
("api", "0041_rfm_tenant_resource_parent_partitions"),
|
||||
("django_celery_beat", "0019_alter_periodictasks_options"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
AddIndexConcurrently(
|
||||
model_name="scan",
|
||||
index=models.Index(
|
||||
condition=models.Q(("state", "completed")),
|
||||
fields=["tenant_id", "provider_id", "-inserted_at"],
|
||||
include=("id",),
|
||||
name="scans_prov_ins_desc_idx",
|
||||
),
|
||||
),
|
||||
]
|
||||
33
api/src/backend/api/migrations/0043_github_provider.py
Normal file
@@ -0,0 +1,33 @@
|
||||
# Generated by Django 5.1.7 on 2025-07-09 14:44
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
import api.db_utils
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("api", "0042_scan_scans_prov_ins_desc_idx"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name="provider",
|
||||
name="provider",
|
||||
field=api.db_utils.ProviderEnumField(
|
||||
choices=[
|
||||
("aws", "AWS"),
|
||||
("azure", "Azure"),
|
||||
("gcp", "GCP"),
|
||||
("kubernetes", "Kubernetes"),
|
||||
("m365", "M365"),
|
||||
("github", "GitHub"),
|
||||
],
|
||||
default="aws",
|
||||
),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
"ALTER TYPE provider ADD VALUE IF NOT EXISTS 'github';",
|
||||
reverse_sql=migrations.RunSQL.noop,
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,19 @@
|
||||
# Generated by Django 5.1.10 on 2025-07-17 11:52
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("api", "0043_github_provider"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddConstraint(
|
||||
model_name="integration",
|
||||
constraint=models.UniqueConstraint(
|
||||
fields=("configuration", "tenant"),
|
||||
name="unique_configuration_per_tenant",
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,17 @@
|
||||
# Generated by Django 5.1.10 on 2025-07-21 16:08
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("api", "0044_integration_unique_configuration_per_tenant"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name="scan",
|
||||
name="output_location",
|
||||
field=models.CharField(blank=True, max_length=4096, null=True),
|
||||
),
|
||||
]
|
||||
@@ -205,6 +205,7 @@ class Provider(RowLevelSecurityProtectedModel):
|
||||
GCP = "gcp", _("GCP")
|
||||
KUBERNETES = "kubernetes", _("Kubernetes")
|
||||
M365 = "m365", _("M365")
|
||||
GITHUB = "github", _("GitHub")
|
||||
|
||||
@staticmethod
|
||||
def validate_aws_uid(value):
|
||||
@@ -265,6 +266,16 @@ class Provider(RowLevelSecurityProtectedModel):
|
||||
pointer="/data/attributes/uid",
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def validate_github_uid(value):
|
||||
if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9-]{0,38}$", value):
|
||||
raise ModelValidationError(
|
||||
detail="GitHub provider ID must be a valid GitHub username or organization name (1-39 characters, "
|
||||
"starting with alphanumeric, containing only alphanumeric characters and hyphens).",
|
||||
code="github-uid",
|
||||
pointer="/data/attributes/uid",
|
||||
)
|
||||
|
||||
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
|
||||
inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
|
||||
updated_at = models.DateTimeField(auto_now=True, editable=False)
|
||||
@@ -427,7 +438,7 @@ class Scan(RowLevelSecurityProtectedModel):
|
||||
scheduler_task = models.ForeignKey(
|
||||
PeriodicTask, on_delete=models.SET_NULL, null=True, blank=True
|
||||
)
|
||||
output_location = models.CharField(blank=True, null=True, max_length=200)
|
||||
output_location = models.CharField(blank=True, null=True, max_length=4096)
|
||||
provider = models.ForeignKey(
|
||||
Provider,
|
||||
on_delete=models.CASCADE,
|
||||
@@ -476,6 +487,13 @@ class Scan(RowLevelSecurityProtectedModel):
|
||||
condition=Q(state=StateChoices.COMPLETED),
|
||||
name="scans_prov_state_ins_desc_idx",
|
||||
),
|
||||
# TODO This might replace `scans_prov_state_ins_desc_idx` completely. Review usage
|
||||
models.Index(
|
||||
fields=["tenant_id", "provider_id", "-inserted_at"],
|
||||
condition=Q(state=StateChoices.COMPLETED),
|
||||
include=["id"],
|
||||
name="scans_prov_ins_desc_idx",
|
||||
),
|
||||
]
|
||||
|
||||
class JSONAPIMeta:
|
||||
@@ -860,6 +878,10 @@ class ResourceFindingMapping(PostgresPartitionedModel, RowLevelSecurityProtected
|
||||
fields=["tenant_id", "finding_id"],
|
||||
name="rfm_tenant_finding_idx",
|
||||
),
|
||||
models.Index(
|
||||
fields=["tenant_id", "resource_id"],
|
||||
name="rfm_tenant_resource_idx",
|
||||
),
|
||||
]
|
||||
constraints = [
|
||||
models.UniqueConstraint(
|
||||
@@ -1324,7 +1346,7 @@ class ScanSummary(RowLevelSecurityProtectedModel):
|
||||
|
||||
class Integration(RowLevelSecurityProtectedModel):
|
||||
class IntegrationChoices(models.TextChoices):
|
||||
S3 = "amazon_s3", _("Amazon S3")
|
||||
AMAZON_S3 = "amazon_s3", _("Amazon S3")
|
||||
AWS_SECURITY_HUB = "aws_security_hub", _("AWS Security Hub")
|
||||
JIRA = "jira", _("JIRA")
|
||||
SLACK = "slack", _("Slack")
|
||||
@@ -1350,6 +1372,10 @@ class Integration(RowLevelSecurityProtectedModel):
|
||||
db_table = "integrations"
|
||||
|
||||
constraints = [
|
||||
models.UniqueConstraint(
|
||||
fields=("configuration", "tenant"),
|
||||
name="unique_configuration_per_tenant",
|
||||
),
|
||||
RowLevelSecurityConstraint(
|
||||
field="tenant_id",
|
||||
name="rls_on_%(class)s",
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
openapi: 3.0.3
|
||||
info:
|
||||
title: Prowler API
|
||||
version: 1.10.1
|
||||
version: 1.11.0
|
||||
description: |-
|
||||
Prowler API specification.
|
||||
|
||||
@@ -544,6 +544,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -552,6 +553,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
- in: query
|
||||
name: filter[provider_type__in]
|
||||
schema:
|
||||
@@ -562,6 +564,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -572,6 +575,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
@@ -1061,6 +1065,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -1069,6 +1074,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
- in: query
|
||||
name: filter[provider_type__in]
|
||||
schema:
|
||||
@@ -1079,6 +1085,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -1089,6 +1096,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
@@ -1486,6 +1494,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -1494,6 +1503,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
- in: query
|
||||
name: filter[provider_type__in]
|
||||
schema:
|
||||
@@ -1504,6 +1514,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -1514,6 +1525,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
@@ -1909,6 +1921,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -1917,6 +1930,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
- in: query
|
||||
name: filter[provider_type__in]
|
||||
schema:
|
||||
@@ -1927,6 +1941,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -1937,6 +1952,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
@@ -2320,6 +2336,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -2328,6 +2345,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
- in: query
|
||||
name: filter[provider_type__in]
|
||||
schema:
|
||||
@@ -2338,6 +2356,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -2348,6 +2367,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
@@ -2851,6 +2871,30 @@ paths:
|
||||
responses:
|
||||
'204':
|
||||
description: No response body
|
||||
/api/v1/integrations/{id}/connection:
|
||||
post:
|
||||
operationId: integrations_connection_create
|
||||
description: Try to verify integration connection
|
||||
summary: Check integration connection
|
||||
parameters:
|
||||
- in: path
|
||||
name: id
|
||||
schema:
|
||||
type: string
|
||||
format: uuid
|
||||
description: A UUID string identifying this integration.
|
||||
required: true
|
||||
tags:
|
||||
- Integration
|
||||
security:
|
||||
- jwtAuth: []
|
||||
responses:
|
||||
'202':
|
||||
content:
|
||||
application/vnd.api+json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/OpenApiResponseResponse'
|
||||
description: ''
|
||||
/api/v1/invitations/accept:
|
||||
post:
|
||||
operationId: invitations_accept_create
|
||||
@@ -3121,6 +3165,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -3129,6 +3174,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
- in: query
|
||||
name: filter[provider_type__in]
|
||||
schema:
|
||||
@@ -3139,6 +3185,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -3149,6 +3196,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
@@ -3282,6 +3330,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -3290,6 +3339,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
- in: query
|
||||
name: filter[provider_type__in]
|
||||
schema:
|
||||
@@ -3300,6 +3350,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -3310,6 +3361,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
@@ -3459,6 +3511,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -3467,6 +3520,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
- in: query
|
||||
name: filter[provider_type__in]
|
||||
schema:
|
||||
@@ -3477,6 +3531,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -3487,6 +3542,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
@@ -4165,6 +4221,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -4173,6 +4230,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
- in: query
|
||||
name: filter[provider__in]
|
||||
schema:
|
||||
@@ -4746,6 +4804,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -4754,6 +4813,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
- in: query
|
||||
name: filter[provider_type__in]
|
||||
schema:
|
||||
@@ -4764,6 +4824,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -4774,6 +4835,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
@@ -5105,6 +5167,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -5113,6 +5176,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
- in: query
|
||||
name: filter[provider_type__in]
|
||||
schema:
|
||||
@@ -5123,6 +5187,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -5133,6 +5198,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
@@ -5365,6 +5431,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -5373,6 +5440,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
- in: query
|
||||
name: filter[provider_type__in]
|
||||
schema:
|
||||
@@ -5383,6 +5451,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -5393,6 +5462,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
@@ -5631,6 +5701,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -5639,6 +5710,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
- in: query
|
||||
name: filter[provider_type__in]
|
||||
schema:
|
||||
@@ -5649,6 +5721,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -5659,6 +5732,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
@@ -6457,6 +6531,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -6465,6 +6540,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
- in: query
|
||||
name: filter[provider_type__in]
|
||||
schema:
|
||||
@@ -6475,6 +6551,7 @@ paths:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- github
|
||||
- kubernetes
|
||||
- m365
|
||||
description: |-
|
||||
@@ -6485,6 +6562,7 @@ paths:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
@@ -8799,7 +8877,6 @@ components:
|
||||
readOnly: true
|
||||
enabled:
|
||||
type: boolean
|
||||
readOnly: true
|
||||
connected:
|
||||
type: boolean
|
||||
readOnly: true
|
||||
@@ -8831,11 +8908,16 @@ components:
|
||||
description: The name of the S3 bucket where files will be stored.
|
||||
output_directory:
|
||||
type: string
|
||||
description: The directory path within the bucket where files
|
||||
will be saved.
|
||||
description: 'The directory path within the bucket where files
|
||||
will be saved. Optional - defaults to "output" if not provided.
|
||||
Path will be normalized to remove excessive slashes and invalid
|
||||
characters are not allowed (< > : " | ? *). Maximum length is
|
||||
900 characters.'
|
||||
maxLength: 900
|
||||
pattern: ^[^<>:"|?*]+$
|
||||
default: output
|
||||
required:
|
||||
- bucket_name
|
||||
- output_directory
|
||||
credentials:
|
||||
oneOf:
|
||||
- type: object
|
||||
@@ -8940,7 +9022,6 @@ components:
|
||||
readOnly: true
|
||||
enabled:
|
||||
type: boolean
|
||||
readOnly: true
|
||||
connected:
|
||||
type: boolean
|
||||
readOnly: true
|
||||
@@ -8973,11 +9054,16 @@ components:
|
||||
stored.
|
||||
output_directory:
|
||||
type: string
|
||||
description: The directory path within the bucket where files
|
||||
will be saved.
|
||||
description: 'The directory path within the bucket where files
|
||||
will be saved. Optional - defaults to "output" if not provided.
|
||||
Path will be normalized to remove excessive slashes and
|
||||
invalid characters are not allowed (< > : " | ? *). Maximum
|
||||
length is 900 characters.'
|
||||
maxLength: 900
|
||||
pattern: ^[^<>:"|?*]+$
|
||||
default: output
|
||||
required:
|
||||
- bucket_name
|
||||
- output_directory
|
||||
credentials:
|
||||
oneOf:
|
||||
- type: object
|
||||
@@ -9129,11 +9215,16 @@ components:
|
||||
description: The name of the S3 bucket where files will be stored.
|
||||
output_directory:
|
||||
type: string
|
||||
description: The directory path within the bucket where files
|
||||
will be saved.
|
||||
description: 'The directory path within the bucket where files
|
||||
will be saved. Optional - defaults to "output" if not provided.
|
||||
Path will be normalized to remove excessive slashes and invalid
|
||||
characters are not allowed (< > : " | ? *). Maximum length is
|
||||
900 characters.'
|
||||
maxLength: 900
|
||||
pattern: ^[^<>:"|?*]+$
|
||||
default: output
|
||||
required:
|
||||
- bucket_name
|
||||
- output_directory
|
||||
credentials:
|
||||
oneOf:
|
||||
- type: object
|
||||
@@ -10483,11 +10574,16 @@ components:
|
||||
stored.
|
||||
output_directory:
|
||||
type: string
|
||||
description: The directory path within the bucket where files
|
||||
will be saved.
|
||||
description: 'The directory path within the bucket where files
|
||||
will be saved. Optional - defaults to "output" if not provided.
|
||||
Path will be normalized to remove excessive slashes and
|
||||
invalid characters are not allowed (< > : " | ? *). Maximum
|
||||
length is 900 characters.'
|
||||
maxLength: 900
|
||||
pattern: ^[^<>:"|?*]+$
|
||||
default: output
|
||||
required:
|
||||
- bucket_name
|
||||
- output_directory
|
||||
credentials:
|
||||
oneOf:
|
||||
- type: object
|
||||
@@ -11130,6 +11226,34 @@ components:
|
||||
encoded as a string.
|
||||
required:
|
||||
- kubeconfig_content
|
||||
- type: object
|
||||
title: GitHub Personal Access Token
|
||||
properties:
|
||||
personal_access_token:
|
||||
type: string
|
||||
description: GitHub personal access token for authentication.
|
||||
required:
|
||||
- personal_access_token
|
||||
- type: object
|
||||
title: GitHub OAuth App Token
|
||||
properties:
|
||||
oauth_app_token:
|
||||
type: string
|
||||
description: GitHub OAuth App token for authentication.
|
||||
required:
|
||||
- oauth_app_token
|
||||
- type: object
|
||||
title: GitHub App Credentials
|
||||
properties:
|
||||
github_app_id:
|
||||
type: integer
|
||||
description: GitHub App ID for authentication.
|
||||
github_app_key:
|
||||
type: string
|
||||
description: Path to the GitHub App private key file.
|
||||
required:
|
||||
- github_app_id
|
||||
- github_app_key
|
||||
writeOnly: true
|
||||
required:
|
||||
- secret
|
||||
@@ -12035,6 +12159,7 @@ components:
|
||||
- gcp
|
||||
- kubernetes
|
||||
- m365
|
||||
- github
|
||||
type: string
|
||||
description: |-
|
||||
* `aws` - AWS
|
||||
@@ -12042,6 +12167,7 @@ components:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
uid:
|
||||
type: string
|
||||
title: Unique identifier for the provider, set by the provider
|
||||
@@ -12149,6 +12275,7 @@ components:
|
||||
- gcp
|
||||
- kubernetes
|
||||
- m365
|
||||
- github
|
||||
type: string
|
||||
description: |-
|
||||
* `aws` - AWS
|
||||
@@ -12156,6 +12283,7 @@ components:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
uid:
|
||||
type: string
|
||||
title: Unique identifier for the provider, set by the provider
|
||||
@@ -12194,6 +12322,7 @@ components:
|
||||
- gcp
|
||||
- kubernetes
|
||||
- m365
|
||||
- github
|
||||
type: string
|
||||
description: |-
|
||||
* `aws` - AWS
|
||||
@@ -12201,6 +12330,7 @@ components:
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
* `m365` - M365
|
||||
* `github` - GitHub
|
||||
uid:
|
||||
type: string
|
||||
minLength: 3
|
||||
@@ -12852,6 +12982,34 @@ components:
|
||||
as a string.
|
||||
required:
|
||||
- kubeconfig_content
|
||||
- type: object
|
||||
title: GitHub Personal Access Token
|
||||
properties:
|
||||
personal_access_token:
|
||||
type: string
|
||||
description: GitHub personal access token for authentication.
|
||||
required:
|
||||
- personal_access_token
|
||||
- type: object
|
||||
title: GitHub OAuth App Token
|
||||
properties:
|
||||
oauth_app_token:
|
||||
type: string
|
||||
description: GitHub OAuth App token for authentication.
|
||||
required:
|
||||
- oauth_app_token
|
||||
- type: object
|
||||
title: GitHub App Credentials
|
||||
properties:
|
||||
github_app_id:
|
||||
type: integer
|
||||
description: GitHub App ID for authentication.
|
||||
github_app_key:
|
||||
type: string
|
||||
description: Path to the GitHub App private key file.
|
||||
required:
|
||||
- github_app_id
|
||||
- github_app_key
|
||||
writeOnly: true
|
||||
required:
|
||||
- secret_type
|
||||
@@ -13071,6 +13229,34 @@ components:
|
||||
encoded as a string.
|
||||
required:
|
||||
- kubeconfig_content
|
||||
- type: object
|
||||
title: GitHub Personal Access Token
|
||||
properties:
|
||||
personal_access_token:
|
||||
type: string
|
||||
description: GitHub personal access token for authentication.
|
||||
required:
|
||||
- personal_access_token
|
||||
- type: object
|
||||
title: GitHub OAuth App Token
|
||||
properties:
|
||||
oauth_app_token:
|
||||
type: string
|
||||
description: GitHub OAuth App token for authentication.
|
||||
required:
|
||||
- oauth_app_token
|
||||
- type: object
|
||||
title: GitHub App Credentials
|
||||
properties:
|
||||
github_app_id:
|
||||
type: integer
|
||||
description: GitHub App ID for authentication.
|
||||
github_app_key:
|
||||
type: string
|
||||
description: Path to the GitHub App private key file.
|
||||
required:
|
||||
- github_app_id
|
||||
- github_app_key
|
||||
writeOnly: true
|
||||
required:
|
||||
- secret_type
|
||||
@@ -13305,6 +13491,34 @@ components:
|
||||
as a string.
|
||||
required:
|
||||
- kubeconfig_content
|
||||
- type: object
|
||||
title: GitHub Personal Access Token
|
||||
properties:
|
||||
personal_access_token:
|
||||
type: string
|
||||
description: GitHub personal access token for authentication.
|
||||
required:
|
||||
- personal_access_token
|
||||
- type: object
|
||||
title: GitHub OAuth App Token
|
||||
properties:
|
||||
oauth_app_token:
|
||||
type: string
|
||||
description: GitHub OAuth App token for authentication.
|
||||
required:
|
||||
- oauth_app_token
|
||||
- type: object
|
||||
title: GitHub App Credentials
|
||||
properties:
|
||||
github_app_id:
|
||||
type: integer
|
||||
description: GitHub App ID for authentication.
|
||||
github_app_key:
|
||||
type: string
|
||||
description: Path to the GitHub App private key file.
|
||||
required:
|
||||
- github_app_id
|
||||
- github_app_key
|
||||
writeOnly: true
|
||||
required:
|
||||
- secret
|
||||
|
||||
100
api/src/backend/api/tests/test_serializers.py
Normal file
@@ -0,0 +1,100 @@
|
||||
import pytest
|
||||
from rest_framework.exceptions import ValidationError
|
||||
|
||||
from api.v1.serializer_utils.integrations import S3ConfigSerializer
|
||||
|
||||
|
||||
class TestS3ConfigSerializer:
|
||||
"""Test cases for S3ConfigSerializer validation."""
|
||||
|
||||
def test_validate_output_directory_valid_paths(self):
|
||||
"""Test that valid output directory paths are accepted."""
|
||||
serializer = S3ConfigSerializer()
|
||||
|
||||
# Test normal paths
|
||||
assert serializer.validate_output_directory("test") == "test"
|
||||
assert serializer.validate_output_directory("test/folder") == "test/folder"
|
||||
assert serializer.validate_output_directory("my-folder_123") == "my-folder_123"
|
||||
|
||||
# Test paths with leading slashes (should be normalized)
|
||||
assert serializer.validate_output_directory("/test") == "test"
|
||||
assert serializer.validate_output_directory("/test/folder") == "test/folder"
|
||||
|
||||
# Test paths with excessive slashes (should be normalized)
|
||||
assert serializer.validate_output_directory("///test") == "test"
|
||||
assert serializer.validate_output_directory("///////test") == "test"
|
||||
assert serializer.validate_output_directory("test//folder") == "test/folder"
|
||||
assert serializer.validate_output_directory("test///folder") == "test/folder"
|
||||
|
||||
def test_validate_output_directory_empty_values(self):
|
||||
"""Test that empty values raise validation errors."""
|
||||
serializer = S3ConfigSerializer()
|
||||
|
||||
with pytest.raises(
|
||||
ValidationError, match="Output directory cannot be empty or just"
|
||||
):
|
||||
serializer.validate_output_directory(".")
|
||||
|
||||
with pytest.raises(
|
||||
ValidationError, match="Output directory cannot be empty or just"
|
||||
):
|
||||
serializer.validate_output_directory("/")
|
||||
|
||||
def test_validate_output_directory_invalid_characters(self):
|
||||
"""Test that invalid characters are rejected."""
|
||||
serializer = S3ConfigSerializer()
|
||||
|
||||
invalid_chars = ["<", ">", ":", '"', "|", "?", "*"]
|
||||
|
||||
for char in invalid_chars:
|
||||
with pytest.raises(
|
||||
ValidationError, match="Output directory contains invalid characters"
|
||||
):
|
||||
serializer.validate_output_directory(f"test{char}folder")
|
||||
|
||||
def test_validate_output_directory_too_long(self):
|
||||
"""Test that paths that are too long are rejected."""
|
||||
serializer = S3ConfigSerializer()
|
||||
|
||||
# Create a path longer than 900 characters
|
||||
long_path = "a" * 901
|
||||
|
||||
with pytest.raises(ValidationError, match="Output directory path is too long"):
|
||||
serializer.validate_output_directory(long_path)
|
||||
|
||||
def test_validate_output_directory_edge_cases(self):
|
||||
"""Test edge cases for output directory validation."""
|
||||
serializer = S3ConfigSerializer()
|
||||
|
||||
# Test path at the limit (900 characters)
|
||||
path_at_limit = "a" * 900
|
||||
assert serializer.validate_output_directory(path_at_limit) == path_at_limit
|
||||
|
||||
# Test complex normalization
|
||||
assert serializer.validate_output_directory("//test/../folder//") == "folder"
|
||||
assert serializer.validate_output_directory("/test/./folder/") == "test/folder"
|
||||
|
||||
def test_s3_config_serializer_full_validation(self):
|
||||
"""Test the full S3ConfigSerializer with valid data."""
|
||||
data = {
|
||||
"bucket_name": "my-test-bucket",
|
||||
"output_directory": "///////test", # This should be normalized
|
||||
}
|
||||
|
||||
serializer = S3ConfigSerializer(data=data)
|
||||
assert serializer.is_valid()
|
||||
|
||||
validated_data = serializer.validated_data
|
||||
assert validated_data["bucket_name"] == "my-test-bucket"
|
||||
assert validated_data["output_directory"] == "test" # Normalized
|
||||
|
||||
def test_s3_config_serializer_invalid_data(self):
|
||||
"""Test the full S3ConfigSerializer with invalid data."""
|
||||
data = {
|
||||
"bucket_name": "my-test-bucket",
|
||||
"output_directory": "test<invalid", # Contains invalid character
|
||||
}
|
||||
|
||||
serializer = S3ConfigSerializer(data=data)
|
||||
assert not serializer.is_valid()
|
||||
assert "output_directory" in serializer.errors
|
||||
@@ -966,6 +966,31 @@ class TestProviderViewSet:
|
||||
"uid": "subdomain1.subdomain2.subdomain3.subdomain4.domain.net",
|
||||
"alias": "test",
|
||||
},
|
||||
{
|
||||
"provider": "github",
|
||||
"uid": "test-user",
|
||||
"alias": "test",
|
||||
},
|
||||
{
|
||||
"provider": "github",
|
||||
"uid": "test-organization",
|
||||
"alias": "GitHub Org",
|
||||
},
|
||||
{
|
||||
"provider": "github",
|
||||
"uid": "prowler-cloud",
|
||||
"alias": "Prowler",
|
||||
},
|
||||
{
|
||||
"provider": "github",
|
||||
"uid": "microsoft",
|
||||
"alias": "Microsoft",
|
||||
},
|
||||
{
|
||||
"provider": "github",
|
||||
"uid": "a12345678901234567890123456789012345678",
|
||||
"alias": "Long Username",
|
||||
},
|
||||
]
|
||||
),
|
||||
)
|
||||
@@ -1079,6 +1104,42 @@ class TestProviderViewSet:
|
||||
"m365-uid",
|
||||
"uid",
|
||||
),
|
||||
(
|
||||
{
|
||||
"provider": "github",
|
||||
"uid": "-invalid-start",
|
||||
"alias": "test",
|
||||
},
|
||||
"github-uid",
|
||||
"uid",
|
||||
),
|
||||
(
|
||||
{
|
||||
"provider": "github",
|
||||
"uid": "invalid@username",
|
||||
"alias": "test",
|
||||
},
|
||||
"github-uid",
|
||||
"uid",
|
||||
),
|
||||
(
|
||||
{
|
||||
"provider": "github",
|
||||
"uid": "invalid_username",
|
||||
"alias": "test",
|
||||
},
|
||||
"github-uid",
|
||||
"uid",
|
||||
),
|
||||
(
|
||||
{
|
||||
"provider": "github",
|
||||
"uid": "a" * 40,
|
||||
"alias": "test",
|
||||
},
|
||||
"github-uid",
|
||||
"uid",
|
||||
),
|
||||
]
|
||||
),
|
||||
)
|
||||
@@ -5580,7 +5641,7 @@ class TestIntegrationViewSet:
|
||||
[
|
||||
# Amazon S3 - AWS credentials
|
||||
(
|
||||
Integration.IntegrationChoices.S3,
|
||||
Integration.IntegrationChoices.AMAZON_S3,
|
||||
{
|
||||
"bucket_name": "bucket-name",
|
||||
"output_directory": "output-directory",
|
||||
@@ -5592,7 +5653,7 @@ class TestIntegrationViewSet:
|
||||
),
|
||||
# Amazon S3 - No credentials (AWS self-hosted)
|
||||
(
|
||||
Integration.IntegrationChoices.S3,
|
||||
Integration.IntegrationChoices.AMAZON_S3,
|
||||
{
|
||||
"bucket_name": "bucket-name",
|
||||
"output_directory": "output-directory",
|
||||
@@ -5618,6 +5679,7 @@ class TestIntegrationViewSet:
|
||||
"integration_type": integration_type,
|
||||
"configuration": configuration,
|
||||
"credentials": credentials,
|
||||
"enabled": True,
|
||||
},
|
||||
"relationships": {
|
||||
"providers": {
|
||||
@@ -5635,6 +5697,7 @@ class TestIntegrationViewSet:
|
||||
assert Integration.objects.count() == 1
|
||||
integration = Integration.objects.first()
|
||||
assert integration.configuration == data["data"]["attributes"]["configuration"]
|
||||
assert integration.enabled == data["data"]["attributes"]["enabled"]
|
||||
assert (
|
||||
integration.integration_type
|
||||
== data["data"]["attributes"]["integration_type"]
|
||||
@@ -5656,7 +5719,7 @@ class TestIntegrationViewSet:
|
||||
"data": {
|
||||
"type": "integrations",
|
||||
"attributes": {
|
||||
"integration_type": Integration.IntegrationChoices.S3,
|
||||
"integration_type": Integration.IntegrationChoices.AMAZON_S3,
|
||||
"configuration": {
|
||||
"bucket_name": "bucket-name",
|
||||
"output_directory": "output-directory",
|
||||
@@ -5891,11 +5954,11 @@ class TestIntegrationViewSet:
|
||||
("inserted_at", TODAY, 2),
|
||||
("inserted_at.gte", "2024-01-01", 2),
|
||||
("inserted_at.lte", "2024-01-01", 0),
|
||||
("integration_type", Integration.IntegrationChoices.S3, 2),
|
||||
("integration_type", Integration.IntegrationChoices.AMAZON_S3, 2),
|
||||
("integration_type", Integration.IntegrationChoices.SLACK, 0),
|
||||
(
|
||||
"integration_type__in",
|
||||
f"{Integration.IntegrationChoices.S3},{Integration.IntegrationChoices.SLACK}",
|
||||
f"{Integration.IntegrationChoices.AMAZON_S3},{Integration.IntegrationChoices.SLACK}",
|
||||
2,
|
||||
),
|
||||
]
|
||||
|
||||
@@ -7,12 +7,14 @@ from rest_framework.exceptions import NotFound, ValidationError
|
||||
|
||||
from api.db_router import MainRouter
|
||||
from api.exceptions import InvitationTokenExpiredException
|
||||
from api.models import Invitation, Processor, Provider, Resource
|
||||
from api.models import Integration, Invitation, Processor, Provider, Resource
|
||||
from api.v1.serializers import FindingMetadataSerializer
|
||||
from prowler.providers.aws.aws_provider import AwsProvider
|
||||
from prowler.providers.aws.lib.s3.s3 import S3
|
||||
from prowler.providers.azure.azure_provider import AzureProvider
|
||||
from prowler.providers.common.models import Connection
|
||||
from prowler.providers.gcp.gcp_provider import GcpProvider
|
||||
from prowler.providers.github.github_provider import GithubProvider
|
||||
from prowler.providers.kubernetes.kubernetes_provider import KubernetesProvider
|
||||
from prowler.providers.m365.m365_provider import M365Provider
|
||||
|
||||
@@ -55,14 +57,21 @@ def merge_dicts(default_dict: dict, replacement_dict: dict) -> dict:
|
||||
|
||||
def return_prowler_provider(
|
||||
provider: Provider,
|
||||
) -> [AwsProvider | AzureProvider | GcpProvider | KubernetesProvider | M365Provider]:
|
||||
) -> [
|
||||
AwsProvider
|
||||
| AzureProvider
|
||||
| GcpProvider
|
||||
| GithubProvider
|
||||
| KubernetesProvider
|
||||
| M365Provider
|
||||
]:
|
||||
"""Return the Prowler provider class based on the given provider type.
|
||||
|
||||
Args:
|
||||
provider (Provider): The provider object containing the provider type and associated secrets.
|
||||
|
||||
Returns:
|
||||
AwsProvider | AzureProvider | GcpProvider | KubernetesProvider | M365Provider: The corresponding provider class.
|
||||
AwsProvider | AzureProvider | GcpProvider | GithubProvider | KubernetesProvider | M365Provider: The corresponding provider class.
|
||||
|
||||
Raises:
|
||||
ValueError: If the provider type specified in `provider.provider` is not supported.
|
||||
@@ -78,6 +87,8 @@ def return_prowler_provider(
|
||||
prowler_provider = KubernetesProvider
|
||||
case Provider.ProviderChoices.M365.value:
|
||||
prowler_provider = M365Provider
|
||||
case Provider.ProviderChoices.GITHUB.value:
|
||||
prowler_provider = GithubProvider
|
||||
case _:
|
||||
raise ValueError(f"Provider type {provider.provider} not supported")
|
||||
return prowler_provider
|
||||
@@ -120,7 +131,14 @@ def get_prowler_provider_kwargs(
|
||||
def initialize_prowler_provider(
|
||||
provider: Provider,
|
||||
mutelist_processor: Processor | None = None,
|
||||
) -> AwsProvider | AzureProvider | GcpProvider | KubernetesProvider | M365Provider:
|
||||
) -> (
|
||||
AwsProvider
|
||||
| AzureProvider
|
||||
| GcpProvider
|
||||
| GithubProvider
|
||||
| KubernetesProvider
|
||||
| M365Provider
|
||||
):
|
||||
"""Initialize a Prowler provider instance based on the given provider type.
|
||||
|
||||
Args:
|
||||
@@ -128,8 +146,8 @@ def initialize_prowler_provider(
|
||||
mutelist_processor (Processor): The mutelist processor object containing the mutelist configuration.
|
||||
|
||||
Returns:
|
||||
AwsProvider | AzureProvider | GcpProvider | KubernetesProvider | M365Provider: An instance of the corresponding provider class
|
||||
(`AwsProvider`, `AzureProvider`, `GcpProvider`, `KubernetesProvider` or `M365Provider`) initialized with the
|
||||
AwsProvider | AzureProvider | GcpProvider | GithubProvider | KubernetesProvider | M365Provider: An instance of the corresponding provider class
|
||||
(`AwsProvider`, `AzureProvider`, `GcpProvider`, `GithubProvider`, `KubernetesProvider` or `M365Provider`) initialized with the
|
||||
provider's secrets.
|
||||
"""
|
||||
prowler_provider = return_prowler_provider(provider)
|
||||
@@ -158,6 +176,37 @@ def prowler_provider_connection_test(provider: Provider) -> Connection:
|
||||
)
|
||||
|
||||
|
||||
def prowler_integration_connection_test(integration: Integration) -> Connection:
|
||||
"""Test the connection to a Prowler integration based on the given integration type.
|
||||
|
||||
Args:
|
||||
integration (Integration): The integration object containing the integration type and associated credentials.
|
||||
|
||||
Returns:
|
||||
Connection: A connection object representing the result of the connection test for the specified integration.
|
||||
"""
|
||||
if integration.integration_type == Integration.IntegrationChoices.AMAZON_S3:
|
||||
return S3.test_connection(
|
||||
**integration.credentials,
|
||||
bucket_name=integration.configuration["bucket_name"],
|
||||
raise_on_exception=False,
|
||||
)
|
||||
# TODO: It is possible that we can unify the connection test for all integrations, but need refactoring
|
||||
# to avoid code duplication. Actually the AWS integrations are similar, so SecurityHub and S3 can be unified making some changes in the SDK.
|
||||
elif (
|
||||
integration.integration_type == Integration.IntegrationChoices.AWS_SECURITY_HUB
|
||||
):
|
||||
pass
|
||||
elif integration.integration_type == Integration.IntegrationChoices.JIRA:
|
||||
pass
|
||||
elif integration.integration_type == Integration.IntegrationChoices.SLACK:
|
||||
pass
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Integration type {integration.integration_type} not supported"
|
||||
)
|
||||
|
||||
|
||||
def validate_invitation(
|
||||
invitation_token: str, email: str, raise_not_found=False
|
||||
) -> Invitation:
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
import os
|
||||
import re
|
||||
|
||||
from drf_spectacular.utils import extend_schema_field
|
||||
from rest_framework_json_api import serializers
|
||||
|
||||
@@ -6,7 +9,44 @@ from api.v1.serializer_utils.base import BaseValidateSerializer
|
||||
|
||||
class S3ConfigSerializer(BaseValidateSerializer):
|
||||
bucket_name = serializers.CharField()
|
||||
output_directory = serializers.CharField()
|
||||
output_directory = serializers.CharField(allow_blank=True)
|
||||
|
||||
def validate_output_directory(self, value):
|
||||
"""
|
||||
Validate the output_directory field to ensure it's a properly formatted path.
|
||||
Prevents paths with excessive slashes like "///////test".
|
||||
If empty, sets a default value.
|
||||
"""
|
||||
# If empty or None, set default value
|
||||
if not value:
|
||||
return "output"
|
||||
|
||||
# Normalize the path to remove excessive slashes
|
||||
normalized_path = os.path.normpath(value)
|
||||
|
||||
# Remove leading slashes for S3 paths
|
||||
if normalized_path.startswith("/"):
|
||||
normalized_path = normalized_path.lstrip("/")
|
||||
|
||||
# Check for invalid characters or patterns
|
||||
if re.search(r'[<>:"|?*]', normalized_path):
|
||||
raise serializers.ValidationError(
|
||||
'Output directory contains invalid characters. Avoid: < > : " | ? *'
|
||||
)
|
||||
|
||||
# Check for empty path after normalization
|
||||
if not normalized_path or normalized_path == ".":
|
||||
raise serializers.ValidationError(
|
||||
"Output directory cannot be empty or just '.' or '/'."
|
||||
)
|
||||
|
||||
# Check for paths that are too long (S3 key limit is 1024 characters, leave some room for filename)
|
||||
if len(normalized_path) > 900:
|
||||
raise serializers.ValidationError(
|
||||
"Output directory path is too long (max 900 characters)."
|
||||
)
|
||||
|
||||
return normalized_path
|
||||
|
||||
class Meta:
|
||||
resource_name = "integrations"
|
||||
@@ -98,10 +138,13 @@ class IntegrationCredentialField(serializers.JSONField):
|
||||
},
|
||||
"output_directory": {
|
||||
"type": "string",
|
||||
"description": "The directory path within the bucket where files will be saved.",
|
||||
"description": 'The directory path within the bucket where files will be saved. Optional - defaults to "output" if not provided. Path will be normalized to remove excessive slashes and invalid characters are not allowed (< > : " | ? *). Maximum length is 900 characters.',
|
||||
"maxLength": 900,
|
||||
"pattern": '^[^<>:"|?*]+$',
|
||||
"default": "output",
|
||||
},
|
||||
},
|
||||
"required": ["bucket_name", "output_directory"],
|
||||
"required": ["bucket_name"],
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
@@ -176,6 +176,43 @@ from rest_framework_json_api import serializers
|
||||
},
|
||||
"required": ["kubeconfig_content"],
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"title": "GitHub Personal Access Token",
|
||||
"properties": {
|
||||
"personal_access_token": {
|
||||
"type": "string",
|
||||
"description": "GitHub personal access token for authentication.",
|
||||
}
|
||||
},
|
||||
"required": ["personal_access_token"],
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"title": "GitHub OAuth App Token",
|
||||
"properties": {
|
||||
"oauth_app_token": {
|
||||
"type": "string",
|
||||
"description": "GitHub OAuth App token for authentication.",
|
||||
}
|
||||
},
|
||||
"required": ["oauth_app_token"],
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"title": "GitHub App Credentials",
|
||||
"properties": {
|
||||
"github_app_id": {
|
||||
"type": "integer",
|
||||
"description": "GitHub App ID for authentication.",
|
||||
},
|
||||
"github_app_key": {
|
||||
"type": "string",
|
||||
"description": "Path to the GitHub App private key file.",
|
||||
},
|
||||
},
|
||||
"required": ["github_app_id", "github_app_key"],
|
||||
},
|
||||
]
|
||||
}
|
||||
)
|
||||
|
||||
@@ -1217,6 +1217,8 @@ class BaseWriteProviderSecretSerializer(BaseWriteSerializer):
|
||||
serializer = AzureProviderSecret(data=secret)
|
||||
elif provider_type == Provider.ProviderChoices.GCP.value:
|
||||
serializer = GCPProviderSecret(data=secret)
|
||||
elif provider_type == Provider.ProviderChoices.GITHUB.value:
|
||||
serializer = GithubProviderSecret(data=secret)
|
||||
elif provider_type == Provider.ProviderChoices.KUBERNETES.value:
|
||||
serializer = KubernetesProviderSecret(data=secret)
|
||||
elif provider_type == Provider.ProviderChoices.M365.value:
|
||||
@@ -1296,6 +1298,16 @@ class KubernetesProviderSecret(serializers.Serializer):
|
||||
resource_name = "provider-secrets"
|
||||
|
||||
|
||||
class GithubProviderSecret(serializers.Serializer):
|
||||
personal_access_token = serializers.CharField(required=False)
|
||||
oauth_app_token = serializers.CharField(required=False)
|
||||
github_app_id = serializers.IntegerField(required=False)
|
||||
github_app_key_content = serializers.CharField(required=False)
|
||||
|
||||
class Meta:
|
||||
resource_name = "provider-secrets"
|
||||
|
||||
|
||||
class AWSRoleAssumptionProviderSecret(serializers.Serializer):
|
||||
role_arn = serializers.CharField()
|
||||
external_id = serializers.CharField()
|
||||
@@ -1938,6 +1950,16 @@ class ScheduleDailyCreateSerializer(serializers.Serializer):
|
||||
|
||||
|
||||
class BaseWriteIntegrationSerializer(BaseWriteSerializer):
|
||||
def validate(self, attrs):
|
||||
if Integration.objects.filter(
|
||||
configuration=attrs.get("configuration")
|
||||
).exists():
|
||||
raise serializers.ValidationError(
|
||||
{"name": "This integration already exists."}
|
||||
)
|
||||
|
||||
return super().validate(attrs)
|
||||
|
||||
@staticmethod
|
||||
def validate_integration_data(
|
||||
integration_type: str,
|
||||
@@ -1945,7 +1967,7 @@ class BaseWriteIntegrationSerializer(BaseWriteSerializer):
|
||||
configuration: dict,
|
||||
credentials: dict,
|
||||
):
|
||||
if integration_type == Integration.IntegrationChoices.S3:
|
||||
if integration_type == Integration.IntegrationChoices.AMAZON_S3:
|
||||
config_serializer = S3ConfigSerializer
|
||||
credentials_serializers = [AWSCredentialSerializer]
|
||||
# TODO: This will be required for AWS Security Hub
|
||||
@@ -1963,7 +1985,11 @@ class BaseWriteIntegrationSerializer(BaseWriteSerializer):
|
||||
}
|
||||
)
|
||||
|
||||
config_serializer(data=configuration).is_valid(raise_exception=True)
|
||||
serializer_instance = config_serializer(data=configuration)
|
||||
serializer_instance.is_valid(raise_exception=True)
|
||||
|
||||
# Apply the validated (and potentially transformed) data back to configuration
|
||||
configuration.update(serializer_instance.validated_data)
|
||||
|
||||
for cred_serializer in credentials_serializers:
|
||||
try:
|
||||
@@ -2042,7 +2068,6 @@ class IntegrationCreateSerializer(BaseWriteIntegrationSerializer):
|
||||
"inserted_at": {"read_only": True},
|
||||
"updated_at": {"read_only": True},
|
||||
"connected": {"read_only": True},
|
||||
"enabled": {"read_only": True},
|
||||
"connection_last_checked_at": {"read_only": True},
|
||||
}
|
||||
|
||||
@@ -2052,10 +2077,10 @@ class IntegrationCreateSerializer(BaseWriteIntegrationSerializer):
|
||||
configuration = attrs.get("configuration")
|
||||
credentials = attrs.get("credentials")
|
||||
|
||||
validated_attrs = super().validate(attrs)
|
||||
self.validate_integration_data(
|
||||
integration_type, providers, configuration, credentials
|
||||
)
|
||||
validated_attrs = super().validate(attrs)
|
||||
return validated_attrs
|
||||
|
||||
def create(self, validated_data):
|
||||
@@ -2106,6 +2131,7 @@ class IntegrationUpdateSerializer(BaseWriteIntegrationSerializer):
|
||||
}
|
||||
|
||||
def validate(self, attrs):
|
||||
super().validate(attrs)
|
||||
integration_type = self.instance.integration_type
|
||||
providers = attrs.get("providers")
|
||||
configuration = attrs.get("configuration") or self.instance.configuration
|
||||
|
||||
@@ -22,7 +22,7 @@ from django.conf import settings as django_settings
|
||||
from django.contrib.postgres.aggregates import ArrayAgg
|
||||
from django.contrib.postgres.search import SearchQuery
|
||||
from django.db import transaction
|
||||
from django.db.models import Count, F, Prefetch, Q, Sum
|
||||
from django.db.models import Count, F, Prefetch, Q, Subquery, Sum
|
||||
from django.db.models.functions import Coalesce
|
||||
from django.http import HttpResponse
|
||||
from django.shortcuts import redirect
|
||||
@@ -57,6 +57,7 @@ from tasks.beat import schedule_provider_scan
|
||||
from tasks.jobs.export import get_s3_client
|
||||
from tasks.tasks import (
|
||||
backfill_scan_resource_summaries_task,
|
||||
check_integration_connection_task,
|
||||
check_lighthouse_connection_task,
|
||||
check_provider_connection_task,
|
||||
delete_provider_task,
|
||||
@@ -292,7 +293,7 @@ class SchemaView(SpectacularAPIView):
|
||||
|
||||
def get(self, request, *args, **kwargs):
|
||||
spectacular_settings.TITLE = "Prowler API"
|
||||
spectacular_settings.VERSION = "1.10.1"
|
||||
spectacular_settings.VERSION = "1.11.1"
|
||||
spectacular_settings.DESCRIPTION = (
|
||||
"Prowler API specification.\n\nThis file is auto-generated."
|
||||
)
|
||||
@@ -1994,6 +1995,21 @@ class ResourceViewSet(PaginateByPkMixin, BaseRLSViewSet):
|
||||
)
|
||||
)
|
||||
|
||||
def _should_prefetch_findings(self) -> bool:
|
||||
fields_param = self.request.query_params.get("fields[resources]", "")
|
||||
include_param = self.request.query_params.get("include", "")
|
||||
return (
|
||||
fields_param == ""
|
||||
or "findings" in fields_param.split(",")
|
||||
or "findings" in include_param.split(",")
|
||||
)
|
||||
|
||||
def _get_findings_prefetch(self):
|
||||
findings_queryset = Finding.all_objects.defer("scan", "resources").filter(
|
||||
tenant_id=self.request.tenant_id
|
||||
)
|
||||
return [Prefetch("findings", queryset=findings_queryset)]
|
||||
|
||||
def get_serializer_class(self):
|
||||
if self.action in ["metadata", "metadata_latest"]:
|
||||
return ResourceMetadataSerializer
|
||||
@@ -2017,7 +2033,11 @@ class ResourceViewSet(PaginateByPkMixin, BaseRLSViewSet):
|
||||
filtered_queryset,
|
||||
manager=Resource.all_objects,
|
||||
select_related=["provider"],
|
||||
prefetch_related=["findings"],
|
||||
prefetch_related=(
|
||||
self._get_findings_prefetch()
|
||||
if self._should_prefetch_findings()
|
||||
else []
|
||||
),
|
||||
)
|
||||
|
||||
def retrieve(self, request, *args, **kwargs):
|
||||
@@ -2042,14 +2062,18 @@ class ResourceViewSet(PaginateByPkMixin, BaseRLSViewSet):
|
||||
tenant_id = request.tenant_id
|
||||
filtered_queryset = self.filter_queryset(self.get_queryset())
|
||||
|
||||
latest_scan_ids = (
|
||||
Scan.all_objects.filter(tenant_id=tenant_id, state=StateChoices.COMPLETED)
|
||||
latest_scans = (
|
||||
Scan.all_objects.filter(
|
||||
tenant_id=tenant_id,
|
||||
state=StateChoices.COMPLETED,
|
||||
)
|
||||
.order_by("provider_id", "-inserted_at")
|
||||
.distinct("provider_id")
|
||||
.values_list("id", flat=True)
|
||||
.values("provider_id")
|
||||
)
|
||||
|
||||
filtered_queryset = filtered_queryset.filter(
|
||||
tenant_id=tenant_id, provider__scan__in=latest_scan_ids
|
||||
provider_id__in=Subquery(latest_scans)
|
||||
)
|
||||
|
||||
return self.paginate_by_pk(
|
||||
@@ -2057,7 +2081,11 @@ class ResourceViewSet(PaginateByPkMixin, BaseRLSViewSet):
|
||||
filtered_queryset,
|
||||
manager=Resource.all_objects,
|
||||
select_related=["provider"],
|
||||
prefetch_related=["findings"],
|
||||
prefetch_related=(
|
||||
self._get_findings_prefetch()
|
||||
if self._should_prefetch_findings()
|
||||
else []
|
||||
),
|
||||
)
|
||||
|
||||
@action(detail=False, methods=["get"], url_name="metadata")
|
||||
@@ -3811,6 +3839,32 @@ class IntegrationViewSet(BaseRLSViewSet):
|
||||
context["allowed_providers"] = self.allowed_providers
|
||||
return context
|
||||
|
||||
@extend_schema(
|
||||
tags=["Integration"],
|
||||
summary="Check integration connection",
|
||||
description="Try to verify integration connection",
|
||||
request=None,
|
||||
responses={202: OpenApiResponse(response=TaskSerializer)},
|
||||
)
|
||||
@action(detail=True, methods=["post"], url_name="connection")
|
||||
def connection(self, request, pk=None):
|
||||
get_object_or_404(Integration, pk=pk)
|
||||
with transaction.atomic():
|
||||
task = check_integration_connection_task.delay(
|
||||
integration_id=pk, tenant_id=self.request.tenant_id
|
||||
)
|
||||
prowler_task = Task.objects.get(id=task.id)
|
||||
serializer = TaskSerializer(prowler_task)
|
||||
return Response(
|
||||
data=serializer.data,
|
||||
status=status.HTTP_202_ACCEPTED,
|
||||
headers={
|
||||
"Content-Location": reverse(
|
||||
"task-detail", kwargs={"pk": prowler_task.id}
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@extend_schema_view(
|
||||
list=extend_schema(
|
||||
|
||||
@@ -69,6 +69,9 @@ IGNORED_EXCEPTIONS = [
|
||||
"AzureClientIdAndClientSecretNotBelongingToTenantIdError",
|
||||
"AzureHTTPResponseError",
|
||||
"Error with credentials provided",
|
||||
# PowerShell Errors in User Authentication
|
||||
"Microsoft Teams User Auth connection failed: Please check your permissions and try again.",
|
||||
"Exchange Online User Auth connection failed: Please check your permissions and try again.",
|
||||
]
|
||||
|
||||
|
||||
|
||||
@@ -1065,7 +1065,7 @@ def integrations_fixture(providers_fixture):
|
||||
enabled=True,
|
||||
connected=True,
|
||||
integration_type="amazon_s3",
|
||||
configuration={"key": "value"},
|
||||
configuration={"key": "value1"},
|
||||
credentials={"psswd": "1234"},
|
||||
)
|
||||
IntegrationProviderRelationship.objects.create(
|
||||
|
||||
@@ -3,8 +3,11 @@ from datetime import datetime, timezone
|
||||
import openai
|
||||
from celery.utils.log import get_task_logger
|
||||
|
||||
from api.models import LighthouseConfiguration, Provider
|
||||
from api.utils import prowler_provider_connection_test
|
||||
from api.models import Integration, LighthouseConfiguration, Provider
|
||||
from api.utils import (
|
||||
prowler_integration_connection_test,
|
||||
prowler_provider_connection_test,
|
||||
)
|
||||
|
||||
logger = get_task_logger(__name__)
|
||||
|
||||
@@ -83,3 +86,35 @@ def check_lighthouse_connection(lighthouse_config_id: str):
|
||||
lighthouse_config.is_active = False
|
||||
lighthouse_config.save()
|
||||
return {"connected": False, "error": str(e), "available_models": []}
|
||||
|
||||
|
||||
def check_integration_connection(integration_id: str):
|
||||
"""
|
||||
Business logic to check the connection status of an integration.
|
||||
|
||||
Args:
|
||||
integration_id (str): The primary key of the Integration instance to check.
|
||||
"""
|
||||
integration = Integration.objects.filter(pk=integration_id, enabled=True).first()
|
||||
|
||||
if not integration:
|
||||
logger.info(f"Integration {integration_id} is not enabled")
|
||||
return {"connected": False, "error": "Integration is not enabled"}
|
||||
|
||||
try:
|
||||
result = prowler_integration_connection_test(integration)
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"Unexpected exception checking {integration.integration_type} integration connection: {str(e)}"
|
||||
)
|
||||
raise e
|
||||
|
||||
# Update integration connection status
|
||||
integration.connected = result.is_connected
|
||||
integration.connection_last_checked_at = datetime.now(tz=timezone.utc)
|
||||
integration.save()
|
||||
|
||||
return {
|
||||
"connected": result.is_connected,
|
||||
"error": str(result.error) if result.error else None,
|
||||
}
|
||||
|
||||
@@ -8,11 +8,12 @@ from botocore.exceptions import ClientError, NoCredentialsError, ParamValidation
|
||||
from celery.utils.log import get_task_logger
|
||||
from django.conf import settings
|
||||
|
||||
from api.db_utils import rls_transaction
|
||||
from api.models import Scan
|
||||
from prowler.config.config import (
|
||||
csv_file_suffix,
|
||||
html_file_suffix,
|
||||
json_ocsf_file_suffix,
|
||||
output_file_timestamp,
|
||||
)
|
||||
from prowler.lib.outputs.compliance.aws_well_architected.aws_well_architected import (
|
||||
AWSWellArchitected,
|
||||
@@ -20,6 +21,7 @@ from prowler.lib.outputs.compliance.aws_well_architected.aws_well_architected im
|
||||
from prowler.lib.outputs.compliance.cis.cis_aws import AWSCIS
|
||||
from prowler.lib.outputs.compliance.cis.cis_azure import AzureCIS
|
||||
from prowler.lib.outputs.compliance.cis.cis_gcp import GCPCIS
|
||||
from prowler.lib.outputs.compliance.cis.cis_github import GithubCIS
|
||||
from prowler.lib.outputs.compliance.cis.cis_kubernetes import KubernetesCIS
|
||||
from prowler.lib.outputs.compliance.cis.cis_m365 import M365CIS
|
||||
from prowler.lib.outputs.compliance.ens.ens_aws import AWSENS
|
||||
@@ -93,6 +95,9 @@ COMPLIANCE_CLASS_MAP = {
|
||||
(lambda name: name == "prowler_threatscore_m365", ProwlerThreatScoreM365),
|
||||
(lambda name: name.startswith("iso27001_"), M365ISO27001),
|
||||
],
|
||||
"github": [
|
||||
(lambda name: name.startswith("cis_"), GithubCIS),
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
@@ -167,7 +172,7 @@ def get_s3_client():
|
||||
return s3_client
|
||||
|
||||
|
||||
def _upload_to_s3(tenant_id: str, zip_path: str, scan_id: str) -> str:
|
||||
def _upload_to_s3(tenant_id: str, zip_path: str, scan_id: str) -> str | None:
|
||||
"""
|
||||
Upload the specified ZIP file to an S3 bucket.
|
||||
If the S3 bucket environment variables are not configured,
|
||||
@@ -184,7 +189,7 @@ def _upload_to_s3(tenant_id: str, zip_path: str, scan_id: str) -> str:
|
||||
"""
|
||||
bucket = base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET
|
||||
if not bucket:
|
||||
return None
|
||||
return
|
||||
|
||||
try:
|
||||
s3 = get_s3_client()
|
||||
@@ -244,15 +249,19 @@ def _generate_output_directory(
|
||||
# Sanitize the prowler provider name to ensure it is a valid directory name
|
||||
prowler_provider_sanitized = re.sub(r"[^\w\-]", "-", prowler_provider)
|
||||
|
||||
with rls_transaction(tenant_id):
|
||||
started_at = Scan.objects.get(id=scan_id).started_at
|
||||
|
||||
timestamp = started_at.strftime("%Y%m%d%H%M%S")
|
||||
path = (
|
||||
f"{output_directory}/{tenant_id}/{scan_id}/prowler-output-"
|
||||
f"{prowler_provider_sanitized}-{output_file_timestamp}"
|
||||
f"{prowler_provider_sanitized}-{timestamp}"
|
||||
)
|
||||
os.makedirs("/".join(path.split("/")[:-1]), exist_ok=True)
|
||||
|
||||
compliance_path = (
|
||||
f"{output_directory}/{tenant_id}/{scan_id}/compliance/prowler-output-"
|
||||
f"{prowler_provider_sanitized}-{output_file_timestamp}"
|
||||
f"{prowler_provider_sanitized}-{timestamp}"
|
||||
)
|
||||
os.makedirs("/".join(compliance_path.split("/")[:-1]), exist_ok=True)
|
||||
|
||||
|
||||
156
api/src/backend/tasks/jobs/integrations.py
Normal file
@@ -0,0 +1,156 @@
|
||||
import os
|
||||
from glob import glob
|
||||
|
||||
from celery.utils.log import get_task_logger
|
||||
|
||||
from api.db_utils import rls_transaction
|
||||
from api.models import Integration
|
||||
from prowler.lib.outputs.asff.asff import ASFF
|
||||
from prowler.lib.outputs.compliance.generic.generic import GenericCompliance
|
||||
from prowler.lib.outputs.csv.csv import CSV
|
||||
from prowler.lib.outputs.html.html import HTML
|
||||
from prowler.lib.outputs.ocsf.ocsf import OCSF
|
||||
from prowler.providers.aws.lib.s3.s3 import S3
|
||||
from prowler.providers.common.models import Connection
|
||||
|
||||
logger = get_task_logger(__name__)
|
||||
|
||||
|
||||
def get_s3_client_from_integration(
|
||||
integration: Integration,
|
||||
) -> tuple[bool, S3 | Connection]:
|
||||
"""
|
||||
Create and return a boto3 S3 client using AWS credentials from an integration.
|
||||
|
||||
Args:
|
||||
integration (Integration): The integration to get the S3 client from.
|
||||
|
||||
Returns:
|
||||
tuple[bool, S3 | Connection]: A tuple containing a boolean indicating if the connection was successful and the S3 client or connection object.
|
||||
"""
|
||||
s3 = S3(
|
||||
**integration.credentials,
|
||||
bucket_name=integration.configuration["bucket_name"],
|
||||
output_directory=integration.configuration["output_directory"],
|
||||
)
|
||||
|
||||
connection = s3.test_connection(
|
||||
**integration.credentials,
|
||||
bucket_name=integration.configuration["bucket_name"],
|
||||
)
|
||||
|
||||
if connection.is_connected:
|
||||
return True, s3
|
||||
|
||||
return False, connection
|
||||
|
||||
|
||||
def upload_s3_integration(
|
||||
tenant_id: str, provider_id: str, output_directory: str
|
||||
) -> bool:
|
||||
"""
|
||||
Upload the specified output files to an S3 bucket from an integration.
|
||||
Reconstructs output objects from files in the output directory instead of using serialized data.
|
||||
|
||||
Args:
|
||||
tenant_id (str): The tenant identifier, used as part of the S3 key prefix.
|
||||
provider_id (str): The provider identifier, used as part of the S3 key prefix.
|
||||
output_directory (str): Path to the directory containing output files.
|
||||
|
||||
Returns:
|
||||
bool: True if all integrations were executed, False otherwise.
|
||||
|
||||
Raises:
|
||||
botocore.exceptions.ClientError: If the upload attempt to S3 fails for any reason.
|
||||
"""
|
||||
logger.info(f"Processing S3 integrations for provider {provider_id}")
|
||||
|
||||
try:
|
||||
with rls_transaction(tenant_id):
|
||||
integrations = list(
|
||||
Integration.objects.filter(
|
||||
integrationproviderrelationship__provider_id=provider_id,
|
||||
integration_type=Integration.IntegrationChoices.AMAZON_S3,
|
||||
enabled=True,
|
||||
)
|
||||
)
|
||||
|
||||
if not integrations:
|
||||
logger.error(f"No S3 integrations found for provider {provider_id}")
|
||||
return False
|
||||
|
||||
integration_executions = 0
|
||||
for integration in integrations:
|
||||
try:
|
||||
connected, s3 = get_s3_client_from_integration(integration)
|
||||
except Exception as e:
|
||||
logger.info(
|
||||
f"S3 connection failed for integration {integration.id}: {e}"
|
||||
)
|
||||
integration.connected = False
|
||||
integration.save()
|
||||
continue
|
||||
|
||||
if connected:
|
||||
try:
|
||||
# Reconstruct generated_outputs from files in output directory
|
||||
# This approach scans the output directory for files and creates the appropriate
|
||||
# output objects based on file extensions and naming patterns.
|
||||
generated_outputs = {"regular": [], "compliance": []}
|
||||
|
||||
# Find and recreate regular outputs (CSV, HTML, OCSF)
|
||||
output_file_patterns = {
|
||||
".csv": CSV,
|
||||
".html": HTML,
|
||||
".ocsf.json": OCSF,
|
||||
".asff.json": ASFF,
|
||||
}
|
||||
|
||||
base_dir = os.path.dirname(output_directory)
|
||||
for extension, output_class in output_file_patterns.items():
|
||||
pattern = f"{output_directory}*{extension}"
|
||||
for file_path in glob(pattern):
|
||||
if os.path.exists(file_path):
|
||||
output = output_class(findings=[], file_path=file_path)
|
||||
output.create_file_descriptor(file_path)
|
||||
generated_outputs["regular"].append(output)
|
||||
|
||||
# Find and recreate compliance outputs
|
||||
compliance_pattern = os.path.join(base_dir, "compliance", "*.csv")
|
||||
for file_path in glob(compliance_pattern):
|
||||
if os.path.exists(file_path):
|
||||
output = GenericCompliance(
|
||||
findings=[],
|
||||
compliance=None,
|
||||
file_path=file_path,
|
||||
file_extension=".csv",
|
||||
)
|
||||
output.create_file_descriptor(file_path)
|
||||
generated_outputs["compliance"].append(output)
|
||||
|
||||
# Use send_to_bucket with recreated generated_outputs objects
|
||||
s3.send_to_bucket(generated_outputs)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"S3 upload failed for integration {integration.id}: {e}"
|
||||
)
|
||||
continue
|
||||
integration_executions += 1
|
||||
else:
|
||||
integration.connected = False
|
||||
integration.save()
|
||||
logger.error(
|
||||
f"S3 upload failed, connection failed for integration {integration.id}: {s3.error}"
|
||||
)
|
||||
|
||||
result = integration_executions == len(integrations)
|
||||
if result:
|
||||
logger.info(
|
||||
f"All the S3 integrations completed successfully for provider {provider_id}"
|
||||
)
|
||||
else:
|
||||
logger.info(f"Some S3 integrations failed for provider {provider_id}")
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.error(f"S3 integrations failed for provider {provider_id}: {str(e)}")
|
||||
return False
|
||||
@@ -2,13 +2,17 @@ from datetime import datetime, timedelta, timezone
|
||||
from pathlib import Path
|
||||
from shutil import rmtree
|
||||
|
||||
from celery import chain, shared_task
|
||||
from celery import chain, group, shared_task
|
||||
from celery.utils.log import get_task_logger
|
||||
from config.celery import RLSTask
|
||||
from config.django.base import DJANGO_FINDINGS_BATCH_SIZE, DJANGO_TMP_OUTPUT_DIRECTORY
|
||||
from django_celery_beat.models import PeriodicTask
|
||||
from tasks.jobs.backfill import backfill_resource_scan_summaries
|
||||
from tasks.jobs.connection import check_lighthouse_connection, check_provider_connection
|
||||
from tasks.jobs.connection import (
|
||||
check_integration_connection,
|
||||
check_lighthouse_connection,
|
||||
check_provider_connection,
|
||||
)
|
||||
from tasks.jobs.deletion import delete_provider, delete_tenant
|
||||
from tasks.jobs.export import (
|
||||
COMPLIANCE_CLASS_MAP,
|
||||
@@ -17,6 +21,7 @@ from tasks.jobs.export import (
|
||||
_generate_output_directory,
|
||||
_upload_to_s3,
|
||||
)
|
||||
from tasks.jobs.integrations import upload_s3_integration
|
||||
from tasks.jobs.scan import (
|
||||
aggregate_findings,
|
||||
create_compliance_requirements,
|
||||
@@ -27,7 +32,7 @@ from tasks.utils import batched, get_next_execution_datetime
|
||||
from api.compliance import get_compliance_frameworks
|
||||
from api.db_utils import rls_transaction
|
||||
from api.decorators import set_tenant
|
||||
from api.models import Finding, Provider, Scan, ScanSummary, StateChoices
|
||||
from api.models import Finding, Integration, Provider, Scan, ScanSummary, StateChoices
|
||||
from api.utils import initialize_prowler_provider
|
||||
from api.v1.serializers import ScanTaskSerializer
|
||||
from prowler.lib.check.compliance_models import Compliance
|
||||
@@ -54,6 +59,10 @@ def _perform_scan_complete_tasks(tenant_id: str, scan_id: str, provider_id: str)
|
||||
generate_outputs_task.si(
|
||||
scan_id=scan_id, provider_id=provider_id, tenant_id=tenant_id
|
||||
),
|
||||
check_integrations_task.si(
|
||||
tenant_id=tenant_id,
|
||||
provider_id=provider_id,
|
||||
),
|
||||
).apply_async()
|
||||
|
||||
|
||||
@@ -74,6 +83,18 @@ def check_provider_connection_task(provider_id: str):
|
||||
return check_provider_connection(provider_id=provider_id)
|
||||
|
||||
|
||||
@shared_task(base=RLSTask, name="integration-connection-check")
|
||||
@set_tenant
|
||||
def check_integration_connection_task(integration_id: str):
|
||||
"""
|
||||
Task to check the connection status of an integration.
|
||||
|
||||
Args:
|
||||
integration_id (str): The primary key of the Integration instance to check.
|
||||
"""
|
||||
return check_integration_connection(integration_id=integration_id)
|
||||
|
||||
|
||||
@shared_task(
|
||||
base=RLSTask, name="provider-deletion", queue="deletion", autoretry_for=(Exception,)
|
||||
)
|
||||
@@ -361,7 +382,34 @@ def generate_outputs_task(scan_id: str, provider_id: str, tenant_id: str):
|
||||
compressed = _compress_output_files(out_dir)
|
||||
upload_uri = _upload_to_s3(tenant_id, compressed, scan_id)
|
||||
|
||||
# S3 integrations (need output_directory)
|
||||
with rls_transaction(tenant_id):
|
||||
s3_integrations = Integration.objects.filter(
|
||||
integrationproviderrelationship__provider_id=provider_id,
|
||||
integration_type=Integration.IntegrationChoices.AMAZON_S3,
|
||||
enabled=True,
|
||||
)
|
||||
|
||||
if s3_integrations:
|
||||
# Pass the output directory path to S3 integration task to reconstruct objects from files
|
||||
s3_integration_task.apply_async(
|
||||
kwargs={
|
||||
"tenant_id": tenant_id,
|
||||
"provider_id": provider_id,
|
||||
"output_directory": out_dir,
|
||||
}
|
||||
).get(
|
||||
disable_sync_subtasks=False
|
||||
) # TODO: This synchronous execution is NOT recommended
|
||||
# We're forced to do this because we need the files to exist before deletion occurs.
|
||||
# Once we have the periodic file cleanup task implemented, we should:
|
||||
# 1. Remove this .get() call and make it fully async
|
||||
# 2. For Cloud deployments, develop a secondary approach where outputs are stored
|
||||
# directly in S3 and read from there, eliminating local file dependencies
|
||||
|
||||
if upload_uri:
|
||||
# TODO: We need to create a new periodic task to delete the output files
|
||||
# This task shouldn't be responsible for deleting the output files
|
||||
try:
|
||||
rmtree(Path(compressed).parent, ignore_errors=True)
|
||||
except Exception as e:
|
||||
@@ -372,7 +420,10 @@ def generate_outputs_task(scan_id: str, provider_id: str, tenant_id: str):
|
||||
|
||||
Scan.all_objects.filter(id=scan_id).update(output_location=final_location)
|
||||
logger.info(f"Scan outputs at {final_location}")
|
||||
return {"upload": did_upload}
|
||||
|
||||
return {
|
||||
"upload": did_upload,
|
||||
}
|
||||
|
||||
|
||||
@shared_task(name="backfill-scan-resource-summaries", queue="backfill")
|
||||
@@ -420,3 +471,73 @@ def check_lighthouse_connection_task(lighthouse_config_id: str, tenant_id: str =
|
||||
- 'available_models' (list): List of available models if connection is successful.
|
||||
"""
|
||||
return check_lighthouse_connection(lighthouse_config_id=lighthouse_config_id)
|
||||
|
||||
|
||||
@shared_task(name="integration-check")
|
||||
def check_integrations_task(tenant_id: str, provider_id: str):
|
||||
"""
|
||||
Check and execute all configured integrations for a provider.
|
||||
|
||||
Args:
|
||||
tenant_id (str): The tenant identifier
|
||||
provider_id (str): The provider identifier
|
||||
"""
|
||||
logger.info(f"Checking integrations for provider {provider_id}")
|
||||
|
||||
try:
|
||||
with rls_transaction(tenant_id):
|
||||
integrations = Integration.objects.filter(
|
||||
integrationproviderrelationship__provider_id=provider_id,
|
||||
enabled=True,
|
||||
)
|
||||
|
||||
if not integrations.exists():
|
||||
logger.info(f"No integrations configured for provider {provider_id}")
|
||||
return {"integrations_processed": 0}
|
||||
|
||||
integration_tasks = []
|
||||
|
||||
# TODO: Add other integration types here
|
||||
# slack_integrations = integrations.filter(
|
||||
# integration_type=Integration.IntegrationChoices.SLACK
|
||||
# )
|
||||
# if slack_integrations.exists():
|
||||
# integration_tasks.append(
|
||||
# slack_integration_task.s(
|
||||
# tenant_id=tenant_id,
|
||||
# provider_id=provider_id,
|
||||
# )
|
||||
# )
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Integration check failed for provider {provider_id}: {str(e)}")
|
||||
return {"integrations_processed": 0, "error": str(e)}
|
||||
|
||||
# Execute all integration tasks in parallel if any were found
|
||||
if integration_tasks:
|
||||
job = group(integration_tasks)
|
||||
job.apply_async()
|
||||
logger.info(f"Launched {len(integration_tasks)} integration task(s)")
|
||||
|
||||
return {"integrations_processed": len(integration_tasks)}
|
||||
|
||||
|
||||
@shared_task(
|
||||
base=RLSTask,
|
||||
name="integration-s3",
|
||||
queue="integrations",
|
||||
)
|
||||
def s3_integration_task(
|
||||
tenant_id: str,
|
||||
provider_id: str,
|
||||
output_directory: str,
|
||||
):
|
||||
"""
|
||||
Process S3 integrations for a provider.
|
||||
|
||||
Args:
|
||||
tenant_id (str): The tenant identifier
|
||||
provider_id (str): The provider identifier
|
||||
output_directory (str): Path to the directory containing output files
|
||||
"""
|
||||
return upload_s3_integration(tenant_id, provider_id, output_directory)
|
||||
|
||||
@@ -1,10 +1,15 @@
|
||||
import uuid
|
||||
from datetime import datetime, timezone
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
from tasks.jobs.connection import check_lighthouse_connection, check_provider_connection
|
||||
from tasks.jobs.connection import (
|
||||
check_integration_connection,
|
||||
check_lighthouse_connection,
|
||||
check_provider_connection,
|
||||
)
|
||||
|
||||
from api.models import LighthouseConfiguration, Provider
|
||||
from api.models import Integration, LighthouseConfiguration, Provider
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -127,3 +132,128 @@ def test_check_lighthouse_connection_missing_api_key(mock_lighthouse_get):
|
||||
assert result["available_models"] == []
|
||||
assert mock_lighthouse_instance.is_active is False
|
||||
mock_lighthouse_instance.save.assert_called_once()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestCheckIntegrationConnection:
|
||||
def setup_method(self):
|
||||
self.integration_id = str(uuid.uuid4())
|
||||
|
||||
@patch("tasks.jobs.connection.Integration.objects.filter")
|
||||
@patch("tasks.jobs.connection.prowler_integration_connection_test")
|
||||
def test_check_integration_connection_success(
|
||||
self, mock_prowler_test, mock_integration_filter
|
||||
):
|
||||
"""Test successful integration connection check with enabled=True filter."""
|
||||
mock_integration = MagicMock()
|
||||
mock_integration.id = self.integration_id
|
||||
mock_integration.integration_type = Integration.IntegrationChoices.AMAZON_S3
|
||||
|
||||
mock_queryset = MagicMock()
|
||||
mock_queryset.first.return_value = mock_integration
|
||||
mock_integration_filter.return_value = mock_queryset
|
||||
|
||||
mock_connection_result = MagicMock()
|
||||
mock_connection_result.is_connected = True
|
||||
mock_connection_result.error = None
|
||||
mock_prowler_test.return_value = mock_connection_result
|
||||
|
||||
result = check_integration_connection(integration_id=self.integration_id)
|
||||
|
||||
# Verify that Integration.objects.filter was called with enabled=True filter
|
||||
mock_integration_filter.assert_called_once_with(
|
||||
pk=self.integration_id, enabled=True
|
||||
)
|
||||
mock_queryset.first.assert_called_once()
|
||||
mock_prowler_test.assert_called_once_with(mock_integration)
|
||||
|
||||
# Verify the integration properties were updated
|
||||
assert mock_integration.connected is True
|
||||
assert mock_integration.connection_last_checked_at is not None
|
||||
mock_integration.save.assert_called_once()
|
||||
|
||||
# Verify the return value
|
||||
assert result["connected"] is True
|
||||
assert result["error"] is None
|
||||
|
||||
@patch("tasks.jobs.connection.Integration.objects.filter")
|
||||
@patch("tasks.jobs.connection.prowler_integration_connection_test")
|
||||
def test_check_integration_connection_failure(
|
||||
self, mock_prowler_test, mock_integration_filter
|
||||
):
|
||||
"""Test failed integration connection check."""
|
||||
mock_integration = MagicMock()
|
||||
mock_integration.id = self.integration_id
|
||||
|
||||
mock_queryset = MagicMock()
|
||||
mock_queryset.first.return_value = mock_integration
|
||||
mock_integration_filter.return_value = mock_queryset
|
||||
|
||||
test_error = Exception("Connection failed")
|
||||
mock_connection_result = MagicMock()
|
||||
mock_connection_result.is_connected = False
|
||||
mock_connection_result.error = test_error
|
||||
mock_prowler_test.return_value = mock_connection_result
|
||||
|
||||
result = check_integration_connection(integration_id=self.integration_id)
|
||||
|
||||
# Verify that Integration.objects.filter was called with enabled=True filter
|
||||
mock_integration_filter.assert_called_once_with(
|
||||
pk=self.integration_id, enabled=True
|
||||
)
|
||||
mock_queryset.first.assert_called_once()
|
||||
|
||||
# Verify the integration properties were updated
|
||||
assert mock_integration.connected is False
|
||||
assert mock_integration.connection_last_checked_at is not None
|
||||
mock_integration.save.assert_called_once()
|
||||
|
||||
# Verify the return value
|
||||
assert result["connected"] is False
|
||||
assert result["error"] == str(test_error)
|
||||
|
||||
@patch("tasks.jobs.connection.Integration.objects.filter")
|
||||
def test_check_integration_connection_not_enabled(self, mock_integration_filter):
|
||||
"""Test that disabled integrations return proper error response."""
|
||||
# Mock that no enabled integration is found
|
||||
mock_queryset = MagicMock()
|
||||
mock_queryset.first.return_value = None
|
||||
mock_integration_filter.return_value = mock_queryset
|
||||
|
||||
result = check_integration_connection(integration_id=self.integration_id)
|
||||
|
||||
# Verify the filter was called with enabled=True
|
||||
mock_integration_filter.assert_called_once_with(
|
||||
pk=self.integration_id, enabled=True
|
||||
)
|
||||
mock_queryset.first.assert_called_once()
|
||||
|
||||
# Verify the return value matches the expected error response
|
||||
assert result["connected"] is False
|
||||
assert result["error"] == "Integration is not enabled"
|
||||
|
||||
@patch("tasks.jobs.connection.Integration.objects.filter")
|
||||
@patch("tasks.jobs.connection.prowler_integration_connection_test")
|
||||
def test_check_integration_connection_exception(
|
||||
self, mock_prowler_test, mock_integration_filter
|
||||
):
|
||||
"""Test integration connection check when prowler test raises exception."""
|
||||
mock_integration = MagicMock()
|
||||
mock_integration.id = self.integration_id
|
||||
|
||||
mock_queryset = MagicMock()
|
||||
mock_queryset.first.return_value = mock_integration
|
||||
mock_integration_filter.return_value = mock_queryset
|
||||
|
||||
test_exception = Exception("Unexpected error during connection test")
|
||||
mock_prowler_test.side_effect = test_exception
|
||||
|
||||
with pytest.raises(Exception, match="Unexpected error during connection test"):
|
||||
check_integration_connection(integration_id=self.integration_id)
|
||||
|
||||
# Verify that Integration.objects.filter was called with enabled=True filter
|
||||
mock_integration_filter.assert_called_once_with(
|
||||
pk=self.integration_id, enabled=True
|
||||
)
|
||||
mock_queryset.first.assert_called_once()
|
||||
mock_prowler_test.assert_called_once_with(mock_integration)
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
import os
|
||||
import uuid
|
||||
import zipfile
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
@@ -127,14 +129,26 @@ class TestOutputs:
|
||||
_upload_to_s3("tenant", str(zip_path), "scan")
|
||||
mock_logger.assert_called()
|
||||
|
||||
def test_generate_output_directory_creates_paths(self, tmpdir):
|
||||
from prowler.config.config import output_file_timestamp
|
||||
@patch("tasks.jobs.export.rls_transaction")
|
||||
@patch("tasks.jobs.export.Scan")
|
||||
def test_generate_output_directory_creates_paths(
|
||||
self, mock_scan, mock_rls_transaction, tmpdir
|
||||
):
|
||||
# Mock the scan object with a started_at timestamp
|
||||
mock_scan_instance = MagicMock()
|
||||
mock_scan_instance.started_at = datetime(2023, 6, 15, 10, 30, 45)
|
||||
mock_scan.objects.get.return_value = mock_scan_instance
|
||||
|
||||
# Mock rls_transaction as a context manager
|
||||
mock_rls_transaction.return_value.__enter__ = MagicMock()
|
||||
mock_rls_transaction.return_value.__exit__ = MagicMock(return_value=False)
|
||||
|
||||
base_tmp = Path(str(tmpdir.mkdir("generate_output")))
|
||||
base_dir = str(base_tmp)
|
||||
tenant_id = "t1"
|
||||
scan_id = "s1"
|
||||
tenant_id = str(uuid.uuid4())
|
||||
scan_id = str(uuid.uuid4())
|
||||
provider = "aws"
|
||||
expected_timestamp = "20230615103045"
|
||||
|
||||
path, compliance = _generate_output_directory(
|
||||
base_dir, provider, tenant_id, scan_id
|
||||
@@ -143,17 +157,29 @@ class TestOutputs:
|
||||
assert os.path.isdir(os.path.dirname(path))
|
||||
assert os.path.isdir(os.path.dirname(compliance))
|
||||
|
||||
assert path.endswith(f"{provider}-{output_file_timestamp}")
|
||||
assert compliance.endswith(f"{provider}-{output_file_timestamp}")
|
||||
assert path.endswith(f"{provider}-{expected_timestamp}")
|
||||
assert compliance.endswith(f"{provider}-{expected_timestamp}")
|
||||
|
||||
def test_generate_output_directory_invalid_character(self, tmpdir):
|
||||
from prowler.config.config import output_file_timestamp
|
||||
@patch("tasks.jobs.export.rls_transaction")
|
||||
@patch("tasks.jobs.export.Scan")
|
||||
def test_generate_output_directory_invalid_character(
|
||||
self, mock_scan, mock_rls_transaction, tmpdir
|
||||
):
|
||||
# Mock the scan object with a started_at timestamp
|
||||
mock_scan_instance = MagicMock()
|
||||
mock_scan_instance.started_at = datetime(2023, 6, 15, 10, 30, 45)
|
||||
mock_scan.objects.get.return_value = mock_scan_instance
|
||||
|
||||
# Mock rls_transaction as a context manager
|
||||
mock_rls_transaction.return_value.__enter__ = MagicMock()
|
||||
mock_rls_transaction.return_value.__exit__ = MagicMock(return_value=False)
|
||||
|
||||
base_tmp = Path(str(tmpdir.mkdir("generate_output")))
|
||||
base_dir = str(base_tmp)
|
||||
tenant_id = "t1"
|
||||
scan_id = "s1"
|
||||
tenant_id = str(uuid.uuid4())
|
||||
scan_id = str(uuid.uuid4())
|
||||
provider = "aws/test@check"
|
||||
expected_timestamp = "20230615103045"
|
||||
|
||||
path, compliance = _generate_output_directory(
|
||||
base_dir, provider, tenant_id, scan_id
|
||||
@@ -162,5 +188,5 @@ class TestOutputs:
|
||||
assert os.path.isdir(os.path.dirname(path))
|
||||
assert os.path.isdir(os.path.dirname(compliance))
|
||||
|
||||
assert path.endswith(f"aws-test-check-{output_file_timestamp}")
|
||||
assert compliance.endswith(f"aws-test-check-{output_file_timestamp}")
|
||||
assert path.endswith(f"aws-test-check-{expected_timestamp}")
|
||||
assert compliance.endswith(f"aws-test-check-{expected_timestamp}")
|
||||
|
||||
475
api/src/backend/tasks/tests/test_integrations.py
Normal file
@@ -0,0 +1,475 @@
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
from tasks.jobs.integrations import (
|
||||
get_s3_client_from_integration,
|
||||
upload_s3_integration,
|
||||
)
|
||||
|
||||
from api.models import Integration
|
||||
from api.utils import prowler_integration_connection_test
|
||||
from prowler.providers.common.models import Connection
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestS3IntegrationUploads:
|
||||
@patch("tasks.jobs.integrations.S3")
|
||||
def test_get_s3_client_from_integration_success(self, mock_s3_class):
|
||||
mock_integration = MagicMock()
|
||||
mock_integration.credentials = {
|
||||
"aws_access_key_id": "AKIA...",
|
||||
"aws_secret_access_key": "SECRET",
|
||||
}
|
||||
mock_integration.configuration = {
|
||||
"bucket_name": "test-bucket",
|
||||
"output_directory": "test-prefix",
|
||||
}
|
||||
|
||||
mock_s3 = MagicMock()
|
||||
mock_connection = MagicMock()
|
||||
mock_connection.is_connected = True
|
||||
mock_s3.test_connection.return_value = mock_connection
|
||||
mock_s3_class.return_value = mock_s3
|
||||
|
||||
connected, s3 = get_s3_client_from_integration(mock_integration)
|
||||
|
||||
assert connected is True
|
||||
assert s3 == mock_s3
|
||||
mock_s3_class.assert_called_once_with(
|
||||
**mock_integration.credentials,
|
||||
bucket_name="test-bucket",
|
||||
output_directory="test-prefix",
|
||||
)
|
||||
mock_s3.test_connection.assert_called_once_with(
|
||||
**mock_integration.credentials,
|
||||
bucket_name="test-bucket",
|
||||
)
|
||||
|
||||
@patch("tasks.jobs.integrations.S3")
|
||||
def test_get_s3_client_from_integration_failure(self, mock_s3_class):
|
||||
mock_integration = MagicMock()
|
||||
mock_integration.credentials = {}
|
||||
mock_integration.configuration = {
|
||||
"bucket_name": "test-bucket",
|
||||
"output_directory": "test-prefix",
|
||||
}
|
||||
|
||||
from prowler.providers.common.models import Connection
|
||||
|
||||
mock_connection = Connection()
|
||||
mock_connection.is_connected = False
|
||||
mock_connection.error = Exception("test error")
|
||||
|
||||
mock_s3 = MagicMock()
|
||||
mock_s3.test_connection.return_value = mock_connection
|
||||
mock_s3_class.return_value = mock_s3
|
||||
|
||||
connected, connection = get_s3_client_from_integration(mock_integration)
|
||||
|
||||
assert connected is False
|
||||
assert isinstance(connection, Connection)
|
||||
assert str(connection.error) == "test error"
|
||||
|
||||
@patch("tasks.jobs.integrations.GenericCompliance")
|
||||
@patch("tasks.jobs.integrations.ASFF")
|
||||
@patch("tasks.jobs.integrations.OCSF")
|
||||
@patch("tasks.jobs.integrations.HTML")
|
||||
@patch("tasks.jobs.integrations.CSV")
|
||||
@patch("tasks.jobs.integrations.glob")
|
||||
@patch("tasks.jobs.integrations.get_s3_client_from_integration")
|
||||
@patch("tasks.jobs.integrations.rls_transaction")
|
||||
@patch("tasks.jobs.integrations.Integration")
|
||||
def test_upload_s3_integration_uploads_serialized_outputs(
|
||||
self,
|
||||
mock_integration_model,
|
||||
mock_rls,
|
||||
mock_get_s3,
|
||||
mock_glob,
|
||||
mock_csv,
|
||||
mock_html,
|
||||
mock_ocsf,
|
||||
mock_asff,
|
||||
mock_compliance,
|
||||
):
|
||||
tenant_id = "tenant-id"
|
||||
provider_id = "provider-id"
|
||||
|
||||
integration = MagicMock()
|
||||
integration.id = "i-1"
|
||||
integration.configuration = {
|
||||
"bucket_name": "bucket",
|
||||
"output_directory": "prefix",
|
||||
}
|
||||
mock_integration_model.objects.filter.return_value = [integration]
|
||||
|
||||
mock_s3 = MagicMock()
|
||||
mock_get_s3.return_value = (True, mock_s3)
|
||||
|
||||
# Mock the output classes to return mock instances
|
||||
mock_csv_instance = MagicMock()
|
||||
mock_html_instance = MagicMock()
|
||||
mock_ocsf_instance = MagicMock()
|
||||
mock_asff_instance = MagicMock()
|
||||
mock_compliance_instance = MagicMock()
|
||||
|
||||
mock_csv.return_value = mock_csv_instance
|
||||
mock_html.return_value = mock_html_instance
|
||||
mock_ocsf.return_value = mock_ocsf_instance
|
||||
mock_asff.return_value = mock_asff_instance
|
||||
mock_compliance.return_value = mock_compliance_instance
|
||||
|
||||
# Mock glob to return test files
|
||||
output_directory = "/tmp/prowler_output/scan123"
|
||||
mock_glob.side_effect = [
|
||||
["/tmp/prowler_output/scan123.csv"],
|
||||
["/tmp/prowler_output/scan123.html"],
|
||||
["/tmp/prowler_output/scan123.ocsf.json"],
|
||||
["/tmp/prowler_output/scan123.asff.json"],
|
||||
["/tmp/prowler_output/compliance/compliance.csv"],
|
||||
]
|
||||
|
||||
with patch("os.path.exists", return_value=True):
|
||||
with patch("os.getenv", return_value="/tmp/prowler_api_output"):
|
||||
result = upload_s3_integration(tenant_id, provider_id, output_directory)
|
||||
|
||||
assert result is True
|
||||
mock_s3.send_to_bucket.assert_called_once()
|
||||
|
||||
@patch("tasks.jobs.integrations.get_s3_client_from_integration")
|
||||
@patch("tasks.jobs.integrations.rls_transaction")
|
||||
@patch("tasks.jobs.integrations.Integration")
|
||||
@patch("tasks.jobs.integrations.logger")
|
||||
def test_upload_s3_integration_fails_connection_logs_error(
|
||||
self, mock_logger, mock_integration_model, mock_rls, mock_get_s3
|
||||
):
|
||||
tenant_id = "tenant-id"
|
||||
provider_id = "provider-id"
|
||||
|
||||
integration = MagicMock()
|
||||
integration.id = "i-1"
|
||||
integration.connected = True
|
||||
mock_s3_client = MagicMock()
|
||||
mock_s3_client.error = "Connection failed"
|
||||
|
||||
mock_integration_model.objects.filter.return_value = [integration]
|
||||
mock_get_s3.return_value = (False, mock_s3_client)
|
||||
|
||||
output_directory = "/tmp/prowler_output/scan123"
|
||||
result = upload_s3_integration(tenant_id, provider_id, output_directory)
|
||||
|
||||
assert result is False
|
||||
integration.save.assert_called_once()
|
||||
assert integration.connected is False
|
||||
mock_logger.error.assert_any_call(
|
||||
"S3 upload failed, connection failed for integration i-1: Connection failed"
|
||||
)
|
||||
|
||||
@patch("tasks.jobs.integrations.rls_transaction")
|
||||
@patch("tasks.jobs.integrations.Integration")
|
||||
@patch("tasks.jobs.integrations.logger")
|
||||
def test_upload_s3_integration_logs_if_no_integrations(
|
||||
self, mock_logger, mock_integration_model, mock_rls
|
||||
):
|
||||
mock_integration_model.objects.filter.return_value = []
|
||||
output_directory = "/tmp/prowler_output/scan123"
|
||||
result = upload_s3_integration("tenant", "provider", output_directory)
|
||||
|
||||
assert result is False
|
||||
mock_logger.error.assert_called_once_with(
|
||||
"No S3 integrations found for provider provider"
|
||||
)
|
||||
|
||||
@patch(
|
||||
"tasks.jobs.integrations.get_s3_client_from_integration",
|
||||
side_effect=Exception("failed"),
|
||||
)
|
||||
@patch("tasks.jobs.integrations.rls_transaction")
|
||||
@patch("tasks.jobs.integrations.Integration")
|
||||
@patch("tasks.jobs.integrations.logger")
|
||||
def test_upload_s3_integration_logs_connection_exception_and_continues(
|
||||
self, mock_logger, mock_integration_model, mock_rls, mock_get_s3
|
||||
):
|
||||
tenant_id = "tenant-id"
|
||||
provider_id = "provider-id"
|
||||
|
||||
integration = MagicMock()
|
||||
integration.id = "i-1"
|
||||
integration.configuration = {
|
||||
"bucket_name": "bucket",
|
||||
"output_directory": "prefix",
|
||||
}
|
||||
mock_integration_model.objects.filter.return_value = [integration]
|
||||
|
||||
output_directory = "/tmp/prowler_output/scan123"
|
||||
result = upload_s3_integration(tenant_id, provider_id, output_directory)
|
||||
|
||||
assert result is False
|
||||
mock_logger.info.assert_any_call(
|
||||
"S3 connection failed for integration i-1: failed"
|
||||
)
|
||||
|
||||
@patch("tasks.jobs.integrations.rls_transaction")
|
||||
@patch("tasks.jobs.integrations.Integration.objects.filter")
|
||||
def test_upload_s3_integration_filters_enabled_only(
|
||||
self, mock_integration_filter, mock_rls
|
||||
):
|
||||
"""Test that upload_s3_integration only processes enabled integrations."""
|
||||
tenant_id = "tenant-id"
|
||||
provider_id = "provider-id"
|
||||
output_directory = "/tmp/prowler_output/scan123"
|
||||
|
||||
# Mock that no enabled integrations are found
|
||||
mock_integration_filter.return_value = []
|
||||
mock_rls.return_value.__enter__.return_value = None
|
||||
|
||||
result = upload_s3_integration(tenant_id, provider_id, output_directory)
|
||||
|
||||
assert result is False
|
||||
# Verify the filter includes the correct parameters including enabled=True
|
||||
mock_integration_filter.assert_called_once_with(
|
||||
integrationproviderrelationship__provider_id=provider_id,
|
||||
integration_type=Integration.IntegrationChoices.AMAZON_S3,
|
||||
enabled=True,
|
||||
)
|
||||
|
||||
def test_s3_integration_validates_and_normalizes_output_directory(self):
|
||||
"""Test that S3 integration validation normalizes output_directory paths."""
|
||||
from api.models import Integration
|
||||
from api.v1.serializers import BaseWriteIntegrationSerializer
|
||||
|
||||
integration_type = Integration.IntegrationChoices.AMAZON_S3
|
||||
providers = []
|
||||
configuration = {
|
||||
"bucket_name": "test-bucket",
|
||||
"output_directory": "///////test", # This should be normalized
|
||||
}
|
||||
credentials = {
|
||||
"aws_access_key_id": "AKIATEST",
|
||||
"aws_secret_access_key": "secret123",
|
||||
}
|
||||
|
||||
# Should not raise an exception and should normalize the path
|
||||
BaseWriteIntegrationSerializer.validate_integration_data(
|
||||
integration_type, providers, configuration, credentials
|
||||
)
|
||||
|
||||
# Verify that the path was normalized
|
||||
assert configuration["output_directory"] == "test"
|
||||
|
||||
def test_s3_integration_rejects_invalid_output_directory_characters(self):
|
||||
"""Test that S3 integration validation rejects invalid characters."""
|
||||
from rest_framework.exceptions import ValidationError
|
||||
|
||||
from api.models import Integration
|
||||
from api.v1.serializers import BaseWriteIntegrationSerializer
|
||||
|
||||
integration_type = Integration.IntegrationChoices.AMAZON_S3
|
||||
providers = []
|
||||
configuration = {
|
||||
"bucket_name": "test-bucket",
|
||||
"output_directory": "test<invalid", # Contains invalid character
|
||||
}
|
||||
credentials = {
|
||||
"aws_access_key_id": "AKIATEST",
|
||||
"aws_secret_access_key": "secret123",
|
||||
}
|
||||
|
||||
with pytest.raises(ValidationError) as exc_info:
|
||||
BaseWriteIntegrationSerializer.validate_integration_data(
|
||||
integration_type, providers, configuration, credentials
|
||||
)
|
||||
|
||||
# Should contain validation error about invalid characters
|
||||
assert "Output directory contains invalid characters" in str(exc_info.value)
|
||||
|
||||
def test_s3_integration_rejects_empty_output_directory(self):
|
||||
"""Test that S3 integration validation rejects empty directories."""
|
||||
from rest_framework.exceptions import ValidationError
|
||||
|
||||
from api.models import Integration
|
||||
from api.v1.serializers import BaseWriteIntegrationSerializer
|
||||
|
||||
integration_type = Integration.IntegrationChoices.AMAZON_S3
|
||||
providers = []
|
||||
configuration = {
|
||||
"bucket_name": "test-bucket",
|
||||
"output_directory": "/////", # This becomes empty after normalization
|
||||
}
|
||||
credentials = {
|
||||
"aws_access_key_id": "AKIATEST",
|
||||
"aws_secret_access_key": "secret123",
|
||||
}
|
||||
|
||||
with pytest.raises(ValidationError) as exc_info:
|
||||
BaseWriteIntegrationSerializer.validate_integration_data(
|
||||
integration_type, providers, configuration, credentials
|
||||
)
|
||||
|
||||
# Should contain validation error about empty directory
|
||||
assert "Output directory cannot be empty" in str(exc_info.value)
|
||||
|
||||
def test_s3_integration_normalizes_complex_paths(self):
|
||||
"""Test that S3 integration validation handles complex path normalization."""
|
||||
from api.models import Integration
|
||||
from api.v1.serializers import BaseWriteIntegrationSerializer
|
||||
|
||||
integration_type = Integration.IntegrationChoices.AMAZON_S3
|
||||
providers = []
|
||||
configuration = {
|
||||
"bucket_name": "test-bucket",
|
||||
"output_directory": "//test//folder///subfolder//",
|
||||
}
|
||||
credentials = {
|
||||
"aws_access_key_id": "AKIATEST",
|
||||
"aws_secret_access_key": "secret123",
|
||||
}
|
||||
|
||||
BaseWriteIntegrationSerializer.validate_integration_data(
|
||||
integration_type, providers, configuration, credentials
|
||||
)
|
||||
|
||||
# Verify complex path normalization
|
||||
assert configuration["output_directory"] == "test/folder/subfolder"
|
||||
|
||||
@patch("tasks.jobs.integrations.S3")
|
||||
def test_s3_client_uses_output_directory_in_object_paths(self, mock_s3_class):
|
||||
"""Test that S3 client uses output_directory correctly when generating object paths."""
|
||||
mock_integration = MagicMock()
|
||||
mock_integration.credentials = {
|
||||
"aws_access_key_id": "AKIA...",
|
||||
"aws_secret_access_key": "SECRET",
|
||||
}
|
||||
mock_integration.configuration = {
|
||||
"bucket_name": "test-bucket",
|
||||
"output_directory": "my-custom-prefix/scan-results",
|
||||
}
|
||||
|
||||
mock_s3_instance = MagicMock()
|
||||
mock_connection = MagicMock()
|
||||
mock_connection.is_connected = True
|
||||
mock_s3_instance.test_connection.return_value = mock_connection
|
||||
mock_s3_class.return_value = mock_s3_instance
|
||||
|
||||
connected, s3 = get_s3_client_from_integration(mock_integration)
|
||||
|
||||
assert connected is True
|
||||
# Verify S3 was initialized with the correct output_directory
|
||||
mock_s3_class.assert_called_once_with(
|
||||
**mock_integration.credentials,
|
||||
bucket_name="test-bucket",
|
||||
output_directory="my-custom-prefix/scan-results",
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestProwlerIntegrationConnectionTest:
|
||||
@patch("api.utils.S3")
|
||||
def test_s3_integration_connection_success(self, mock_s3_class):
|
||||
"""Test successful S3 integration connection."""
|
||||
integration = MagicMock()
|
||||
integration.integration_type = Integration.IntegrationChoices.AMAZON_S3
|
||||
integration.credentials = {
|
||||
"aws_access_key_id": "AKIA...",
|
||||
"aws_secret_access_key": "SECRET",
|
||||
}
|
||||
integration.configuration = {"bucket_name": "test-bucket"}
|
||||
|
||||
mock_connection = Connection(is_connected=True)
|
||||
mock_s3_class.test_connection.return_value = mock_connection
|
||||
|
||||
result = prowler_integration_connection_test(integration)
|
||||
|
||||
assert result.is_connected is True
|
||||
mock_s3_class.test_connection.assert_called_once_with(
|
||||
**integration.credentials,
|
||||
bucket_name="test-bucket",
|
||||
raise_on_exception=False,
|
||||
)
|
||||
|
||||
@patch("api.utils.S3")
|
||||
def test_aws_provider_exception_handling(self, mock_s3_class):
|
||||
"""Test S3 connection exception is properly caught and returned."""
|
||||
integration = MagicMock()
|
||||
integration.integration_type = Integration.IntegrationChoices.AMAZON_S3
|
||||
integration.credentials = {
|
||||
"aws_access_key_id": "invalid",
|
||||
"aws_secret_access_key": "credentials",
|
||||
}
|
||||
integration.configuration = {"bucket_name": "test-bucket"}
|
||||
|
||||
test_exception = Exception("Invalid credentials")
|
||||
mock_connection = Connection(is_connected=False, error=test_exception)
|
||||
mock_s3_class.test_connection.return_value = mock_connection
|
||||
|
||||
result = prowler_integration_connection_test(integration)
|
||||
|
||||
assert result.is_connected is False
|
||||
assert result.error == test_exception
|
||||
mock_s3_class.test_connection.assert_called_once_with(
|
||||
aws_access_key_id="invalid",
|
||||
aws_secret_access_key="credentials",
|
||||
bucket_name="test-bucket",
|
||||
raise_on_exception=False,
|
||||
)
|
||||
|
||||
@patch("api.utils.AwsProvider")
|
||||
@patch("api.utils.S3")
|
||||
def test_s3_integration_connection_failure(self, mock_s3_class, mock_aws_provider):
|
||||
"""Test S3 integration connection failure."""
|
||||
integration = MagicMock()
|
||||
integration.integration_type = Integration.IntegrationChoices.AMAZON_S3
|
||||
integration.credentials = {
|
||||
"aws_access_key_id": "AKIA...",
|
||||
"aws_secret_access_key": "SECRET",
|
||||
}
|
||||
integration.configuration = {"bucket_name": "test-bucket"}
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_aws_provider.return_value.session.current_session = mock_session
|
||||
|
||||
mock_connection = Connection(
|
||||
is_connected=False, error=Exception("Bucket not found")
|
||||
)
|
||||
mock_s3_class.test_connection.return_value = mock_connection
|
||||
|
||||
result = prowler_integration_connection_test(integration)
|
||||
|
||||
assert result.is_connected is False
|
||||
assert str(result.error) == "Bucket not found"
|
||||
|
||||
@patch("api.utils.AwsProvider")
|
||||
@patch("api.utils.S3")
|
||||
def test_aws_security_hub_integration_connection(
|
||||
self, mock_s3_class, mock_aws_provider
|
||||
):
|
||||
"""Test AWS Security Hub integration only validates AWS session."""
|
||||
integration = MagicMock()
|
||||
integration.integration_type = Integration.IntegrationChoices.AWS_SECURITY_HUB
|
||||
integration.credentials = {
|
||||
"aws_access_key_id": "AKIA...",
|
||||
"aws_secret_access_key": "SECRET",
|
||||
}
|
||||
integration.configuration = {"region": "us-east-1"}
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_aws_provider.return_value.session.current_session = mock_session
|
||||
|
||||
# For AWS Security Hub, the function should return early after AWS session validation
|
||||
result = prowler_integration_connection_test(integration)
|
||||
|
||||
# The function should not reach S3 test_connection for AWS_SECURITY_HUB
|
||||
mock_s3_class.test_connection.assert_not_called()
|
||||
# Since no exception was raised during AWS session creation, return None (success)
|
||||
assert result is None
|
||||
|
||||
def test_unsupported_integration_type(self):
|
||||
"""Test unsupported integration type raises ValueError."""
|
||||
integration = MagicMock()
|
||||
integration.integration_type = "UNSUPPORTED_TYPE"
|
||||
integration.credentials = {}
|
||||
integration.configuration = {}
|
||||
|
||||
with pytest.raises(
|
||||
ValueError, match="Integration type UNSUPPORTED_TYPE not supported"
|
||||
):
|
||||
prowler_integration_connection_test(integration)
|
||||
@@ -1,9 +1,15 @@
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
from tasks.tasks import _perform_scan_complete_tasks, generate_outputs_task
|
||||
from tasks.tasks import (
|
||||
_perform_scan_complete_tasks,
|
||||
check_integrations_task,
|
||||
generate_outputs_task,
|
||||
s3_integration_task,
|
||||
)
|
||||
|
||||
from api.models import Integration
|
||||
|
||||
|
||||
# TODO Move this to outputs/reports jobs
|
||||
@@ -27,7 +33,6 @@ class TestGenerateOutputs:
|
||||
assert result == {"upload": False}
|
||||
mock_filter.assert_called_once_with(scan_id=self.scan_id)
|
||||
|
||||
@patch("tasks.tasks.rmtree")
|
||||
@patch("tasks.tasks._upload_to_s3")
|
||||
@patch("tasks.tasks._compress_output_files")
|
||||
@patch("tasks.tasks.get_compliance_frameworks")
|
||||
@@ -46,7 +51,6 @@ class TestGenerateOutputs:
|
||||
mock_get_available_frameworks,
|
||||
mock_compress,
|
||||
mock_upload,
|
||||
mock_rmtree,
|
||||
):
|
||||
mock_scan_summary_filter.return_value.exists.return_value = True
|
||||
|
||||
@@ -96,6 +100,7 @@ class TestGenerateOutputs:
|
||||
return_value=("out-dir", "comp-dir"),
|
||||
),
|
||||
patch("tasks.tasks.Scan.all_objects.filter") as mock_scan_update,
|
||||
patch("tasks.tasks.rmtree"),
|
||||
):
|
||||
mock_compress.return_value = "/tmp/zipped.zip"
|
||||
mock_upload.return_value = "s3://bucket/zipped.zip"
|
||||
@@ -110,9 +115,6 @@ class TestGenerateOutputs:
|
||||
mock_scan_update.return_value.update.assert_called_once_with(
|
||||
output_location="s3://bucket/zipped.zip"
|
||||
)
|
||||
mock_rmtree.assert_called_once_with(
|
||||
Path("/tmp/zipped.zip").parent, ignore_errors=True
|
||||
)
|
||||
|
||||
def test_generate_outputs_fails_upload(self):
|
||||
with (
|
||||
@@ -144,6 +146,7 @@ class TestGenerateOutputs:
|
||||
patch("tasks.tasks._compress_output_files", return_value="/tmp/compressed"),
|
||||
patch("tasks.tasks._upload_to_s3", return_value=None),
|
||||
patch("tasks.tasks.Scan.all_objects.filter") as mock_scan_update,
|
||||
patch("tasks.tasks.rmtree"),
|
||||
):
|
||||
mock_filter.return_value.exists.return_value = True
|
||||
mock_findings.return_value.order_by.return_value.iterator.return_value = [
|
||||
@@ -153,7 +156,7 @@ class TestGenerateOutputs:
|
||||
|
||||
result = generate_outputs_task(
|
||||
scan_id="scan",
|
||||
provider_id="provider",
|
||||
provider_id=self.provider_id,
|
||||
tenant_id=self.tenant_id,
|
||||
)
|
||||
|
||||
@@ -185,6 +188,7 @@ class TestGenerateOutputs:
|
||||
patch("tasks.tasks._compress_output_files", return_value="/tmp/compressed"),
|
||||
patch("tasks.tasks._upload_to_s3", return_value="s3://bucket/f.zip"),
|
||||
patch("tasks.tasks.Scan.all_objects.filter"),
|
||||
patch("tasks.tasks.rmtree"),
|
||||
):
|
||||
mock_filter.return_value.exists.return_value = True
|
||||
mock_findings.return_value.order_by.return_value.iterator.return_value = [
|
||||
@@ -255,8 +259,8 @@ class TestGenerateOutputs:
|
||||
),
|
||||
patch("tasks.tasks._compress_output_files", return_value="outdir.zip"),
|
||||
patch("tasks.tasks._upload_to_s3", return_value="s3://bucket/outdir.zip"),
|
||||
patch("tasks.tasks.rmtree"),
|
||||
patch("tasks.tasks.Scan.all_objects.filter"),
|
||||
patch("tasks.tasks.rmtree"),
|
||||
patch(
|
||||
"tasks.tasks.batched",
|
||||
return_value=[
|
||||
@@ -333,13 +337,13 @@ class TestGenerateOutputs:
|
||||
),
|
||||
patch("tasks.tasks._compress_output_files", return_value="outdir.zip"),
|
||||
patch("tasks.tasks._upload_to_s3", return_value="s3://bucket/outdir.zip"),
|
||||
patch("tasks.tasks.rmtree"),
|
||||
patch(
|
||||
"tasks.tasks.Scan.all_objects.filter",
|
||||
return_value=MagicMock(update=lambda **kw: None),
|
||||
),
|
||||
patch("tasks.tasks.batched", return_value=two_batches),
|
||||
patch("tasks.tasks.OUTPUT_FORMATS_MAPPING", {}),
|
||||
patch("tasks.tasks.rmtree"),
|
||||
patch(
|
||||
"tasks.tasks.COMPLIANCE_CLASS_MAP",
|
||||
{"aws": [(lambda name: True, TrackingComplianceWriter)]},
|
||||
@@ -358,6 +362,7 @@ class TestGenerateOutputs:
|
||||
assert writer.transform_calls == [([raw2], compliance_obj, "cis")]
|
||||
assert result == {"upload": True}
|
||||
|
||||
# TODO: We need to add a periodic task to delete old output files
|
||||
def test_generate_outputs_logs_rmtree_exception(self, caplog):
|
||||
mock_finding_output = MagicMock()
|
||||
mock_finding_output.compliance = {"cis": ["requirement-1", "requirement-2"]}
|
||||
@@ -415,6 +420,56 @@ class TestGenerateOutputs:
|
||||
)
|
||||
assert "Error deleting output files" in caplog.text
|
||||
|
||||
@patch("tasks.tasks.rls_transaction")
|
||||
@patch("tasks.tasks.Integration.objects.filter")
|
||||
def test_generate_outputs_filters_enabled_s3_integrations(
|
||||
self, mock_integration_filter, mock_rls
|
||||
):
|
||||
"""Test that generate_outputs_task only processes enabled S3 integrations."""
|
||||
with (
|
||||
patch("tasks.tasks.ScanSummary.objects.filter") as mock_summary,
|
||||
patch("tasks.tasks.Provider.objects.get"),
|
||||
patch("tasks.tasks.initialize_prowler_provider"),
|
||||
patch("tasks.tasks.Compliance.get_bulk"),
|
||||
patch("tasks.tasks.get_compliance_frameworks", return_value=[]),
|
||||
patch("tasks.tasks.Finding.all_objects.filter") as mock_findings,
|
||||
patch(
|
||||
"tasks.tasks._generate_output_directory", return_value=("out", "comp")
|
||||
),
|
||||
patch("tasks.tasks.FindingOutput._transform_findings_stats"),
|
||||
patch("tasks.tasks.FindingOutput.transform_api_finding"),
|
||||
patch("tasks.tasks._compress_output_files", return_value="/tmp/compressed"),
|
||||
patch("tasks.tasks._upload_to_s3", return_value="s3://bucket/file.zip"),
|
||||
patch("tasks.tasks.Scan.all_objects.filter"),
|
||||
patch("tasks.tasks.rmtree"),
|
||||
patch("tasks.tasks.s3_integration_task.apply_async") as mock_s3_task,
|
||||
):
|
||||
mock_summary.return_value.exists.return_value = True
|
||||
mock_findings.return_value.order_by.return_value.iterator.return_value = [
|
||||
[MagicMock()],
|
||||
True,
|
||||
]
|
||||
mock_integration_filter.return_value = [MagicMock()]
|
||||
mock_rls.return_value.__enter__.return_value = None
|
||||
|
||||
with (
|
||||
patch("tasks.tasks.OUTPUT_FORMATS_MAPPING", {}),
|
||||
patch("tasks.tasks.COMPLIANCE_CLASS_MAP", {"aws": []}),
|
||||
):
|
||||
generate_outputs_task(
|
||||
scan_id=self.scan_id,
|
||||
provider_id=self.provider_id,
|
||||
tenant_id=self.tenant_id,
|
||||
)
|
||||
|
||||
# Verify the S3 integrations filters
|
||||
mock_integration_filter.assert_called_once_with(
|
||||
integrationproviderrelationship__provider_id=self.provider_id,
|
||||
integration_type=Integration.IntegrationChoices.AMAZON_S3,
|
||||
enabled=True,
|
||||
)
|
||||
mock_s3_task.assert_called_once()
|
||||
|
||||
|
||||
class TestScanCompleteTasks:
|
||||
@patch("tasks.tasks.create_compliance_requirements_task.apply_async")
|
||||
@@ -436,3 +491,110 @@ class TestScanCompleteTasks:
|
||||
provider_id="provider-id",
|
||||
tenant_id="tenant-id",
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestCheckIntegrationsTask:
|
||||
def setup_method(self):
|
||||
self.scan_id = str(uuid.uuid4())
|
||||
self.provider_id = str(uuid.uuid4())
|
||||
self.tenant_id = str(uuid.uuid4())
|
||||
self.output_directory = "/tmp/some-output-dir"
|
||||
|
||||
@patch("tasks.tasks.rls_transaction")
|
||||
@patch("tasks.tasks.Integration.objects.filter")
|
||||
def test_check_integrations_no_integrations(
|
||||
self, mock_integration_filter, mock_rls
|
||||
):
|
||||
mock_integration_filter.return_value.exists.return_value = False
|
||||
# Ensure rls_transaction is mocked
|
||||
mock_rls.return_value.__enter__.return_value = None
|
||||
|
||||
result = check_integrations_task(
|
||||
tenant_id=self.tenant_id,
|
||||
provider_id=self.provider_id,
|
||||
)
|
||||
|
||||
assert result == {"integrations_processed": 0}
|
||||
mock_integration_filter.assert_called_once_with(
|
||||
integrationproviderrelationship__provider_id=self.provider_id,
|
||||
enabled=True,
|
||||
)
|
||||
|
||||
@patch("tasks.tasks.group")
|
||||
@patch("tasks.tasks.rls_transaction")
|
||||
@patch("tasks.tasks.Integration.objects.filter")
|
||||
def test_check_integrations_s3_success(
|
||||
self, mock_integration_filter, mock_rls, mock_group
|
||||
):
|
||||
# Mock that we have some integrations
|
||||
mock_integration_filter.return_value.exists.return_value = True
|
||||
# Ensure rls_transaction is mocked
|
||||
mock_rls.return_value.__enter__.return_value = None
|
||||
|
||||
# Since the current implementation doesn't actually create tasks yet (TODO comment),
|
||||
# we test that no tasks are created but the function returns the correct count
|
||||
result = check_integrations_task(
|
||||
tenant_id=self.tenant_id,
|
||||
provider_id=self.provider_id,
|
||||
)
|
||||
|
||||
assert result == {"integrations_processed": 0}
|
||||
mock_integration_filter.assert_called_once_with(
|
||||
integrationproviderrelationship__provider_id=self.provider_id,
|
||||
enabled=True,
|
||||
)
|
||||
# group should not be called since no integration tasks are created yet
|
||||
mock_group.assert_not_called()
|
||||
|
||||
@patch("tasks.tasks.rls_transaction")
|
||||
@patch("tasks.tasks.Integration.objects.filter")
|
||||
def test_check_integrations_disabled_integrations_ignored(
|
||||
self, mock_integration_filter, mock_rls
|
||||
):
|
||||
"""Test that disabled integrations are not processed."""
|
||||
mock_integration_filter.return_value.exists.return_value = False
|
||||
mock_rls.return_value.__enter__.return_value = None
|
||||
|
||||
result = check_integrations_task(
|
||||
tenant_id=self.tenant_id,
|
||||
provider_id=self.provider_id,
|
||||
)
|
||||
|
||||
assert result == {"integrations_processed": 0}
|
||||
mock_integration_filter.assert_called_once_with(
|
||||
integrationproviderrelationship__provider_id=self.provider_id,
|
||||
enabled=True,
|
||||
)
|
||||
|
||||
@patch("tasks.tasks.upload_s3_integration")
|
||||
def test_s3_integration_task_success(self, mock_upload):
|
||||
mock_upload.return_value = True
|
||||
output_directory = "/tmp/prowler_api_output/test"
|
||||
|
||||
result = s3_integration_task(
|
||||
tenant_id=self.tenant_id,
|
||||
provider_id=self.provider_id,
|
||||
output_directory=output_directory,
|
||||
)
|
||||
|
||||
assert result is True
|
||||
mock_upload.assert_called_once_with(
|
||||
self.tenant_id, self.provider_id, output_directory
|
||||
)
|
||||
|
||||
@patch("tasks.tasks.upload_s3_integration")
|
||||
def test_s3_integration_task_failure(self, mock_upload):
|
||||
mock_upload.return_value = False
|
||||
output_directory = "/tmp/prowler_api_output/test"
|
||||
|
||||
result = s3_integration_task(
|
||||
tenant_id=self.tenant_id,
|
||||
provider_id=self.provider_id,
|
||||
output_directory=output_directory,
|
||||
)
|
||||
|
||||
assert result is False
|
||||
mock_upload.assert_called_once_with(
|
||||
self.tenant_id, self.provider_id, output_directory
|
||||
)
|
||||
|
||||
234
api/tests/performance/scenarios/resources.py
Normal file
@@ -0,0 +1,234 @@
|
||||
from locust import events, task
|
||||
from utils.config import (
|
||||
L_PROVIDER_NAME,
|
||||
M_PROVIDER_NAME,
|
||||
RESOURCES_UI_FIELDS,
|
||||
S_PROVIDER_NAME,
|
||||
TARGET_INSERTED_AT,
|
||||
)
|
||||
from utils.helpers import (
|
||||
APIUserBase,
|
||||
get_api_token,
|
||||
get_auth_headers,
|
||||
get_dynamic_filters_pairs,
|
||||
get_next_resource_filter,
|
||||
get_scan_id_from_provider_name,
|
||||
)
|
||||
|
||||
GLOBAL = {
|
||||
"token": None,
|
||||
"scan_ids": {},
|
||||
"resource_filters": None,
|
||||
"large_resource_filters": None,
|
||||
}
|
||||
|
||||
|
||||
@events.test_start.add_listener
|
||||
def on_test_start(environment, **kwargs):
|
||||
GLOBAL["token"] = get_api_token(environment.host)
|
||||
|
||||
GLOBAL["scan_ids"]["small"] = get_scan_id_from_provider_name(
|
||||
environment.host, GLOBAL["token"], S_PROVIDER_NAME
|
||||
)
|
||||
GLOBAL["scan_ids"]["medium"] = get_scan_id_from_provider_name(
|
||||
environment.host, GLOBAL["token"], M_PROVIDER_NAME
|
||||
)
|
||||
GLOBAL["scan_ids"]["large"] = get_scan_id_from_provider_name(
|
||||
environment.host, GLOBAL["token"], L_PROVIDER_NAME
|
||||
)
|
||||
|
||||
GLOBAL["resource_filters"] = get_dynamic_filters_pairs(
|
||||
environment.host, GLOBAL["token"], "resources"
|
||||
)
|
||||
GLOBAL["large_resource_filters"] = get_dynamic_filters_pairs(
|
||||
environment.host, GLOBAL["token"], "resources", GLOBAL["scan_ids"]["large"]
|
||||
)
|
||||
|
||||
|
||||
class APIUser(APIUserBase):
|
||||
def on_start(self):
|
||||
self.token = GLOBAL["token"]
|
||||
self.s_scan_id = GLOBAL["scan_ids"]["small"]
|
||||
self.m_scan_id = GLOBAL["scan_ids"]["medium"]
|
||||
self.l_scan_id = GLOBAL["scan_ids"]["large"]
|
||||
self.available_resource_filters = GLOBAL["resource_filters"]
|
||||
self.available_resource_filters_large_scan = GLOBAL["large_resource_filters"]
|
||||
|
||||
@task
|
||||
def resources_default(self):
|
||||
name = "/resources"
|
||||
page_number = self._next_page(name)
|
||||
endpoint = (
|
||||
f"/resources?page[number]={page_number}"
|
||||
f"&filter[updated_at]={TARGET_INSERTED_AT}"
|
||||
)
|
||||
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
|
||||
|
||||
@task(3)
|
||||
def resources_default_ui_fields(self):
|
||||
name = "/resources?fields"
|
||||
page_number = self._next_page(name)
|
||||
endpoint = (
|
||||
f"/resources?page[number]={page_number}"
|
||||
f"&fields[resources]={','.join(RESOURCES_UI_FIELDS)}"
|
||||
f"&filter[updated_at]={TARGET_INSERTED_AT}"
|
||||
)
|
||||
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
|
||||
|
||||
@task(3)
|
||||
def resources_default_include(self):
|
||||
name = "/resources?include"
|
||||
page = self._next_page(name)
|
||||
endpoint = (
|
||||
f"/resources?page[number]={page}"
|
||||
f"&filter[updated_at]={TARGET_INSERTED_AT}"
|
||||
f"&include=provider"
|
||||
)
|
||||
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
|
||||
|
||||
@task(3)
|
||||
def resources_metadata(self):
|
||||
name = "/resources/metadata"
|
||||
endpoint = f"/resources/metadata?filter[updated_at]={TARGET_INSERTED_AT}"
|
||||
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
|
||||
|
||||
@task
|
||||
def resources_scan_small(self):
|
||||
name = "/resources?filter[scan_id] - 50k"
|
||||
page_number = self._next_page(name)
|
||||
endpoint = (
|
||||
f"/resources?page[number]={page_number}" f"&filter[scan]={self.s_scan_id}"
|
||||
)
|
||||
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
|
||||
|
||||
@task
|
||||
def resources_metadata_scan_small(self):
|
||||
name = "/resources/metadata?filter[scan_id] - 50k"
|
||||
endpoint = f"/resources/metadata?&filter[scan]={self.s_scan_id}"
|
||||
self.client.get(
|
||||
endpoint,
|
||||
headers=get_auth_headers(self.token),
|
||||
name=name,
|
||||
)
|
||||
|
||||
@task(2)
|
||||
def resources_scan_medium(self):
|
||||
name = "/resources?filter[scan_id] - 250k"
|
||||
page_number = self._next_page(name)
|
||||
endpoint = (
|
||||
f"/resources?page[number]={page_number}" f"&filter[scan]={self.m_scan_id}"
|
||||
)
|
||||
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
|
||||
|
||||
@task
|
||||
def resources_metadata_scan_medium(self):
|
||||
name = "/resources/metadata?filter[scan_id] - 250k"
|
||||
endpoint = f"/resources/metadata?&filter[scan]={self.m_scan_id}"
|
||||
self.client.get(
|
||||
endpoint,
|
||||
headers=get_auth_headers(self.token),
|
||||
name=name,
|
||||
)
|
||||
|
||||
@task
|
||||
def resources_scan_large(self):
|
||||
name = "/resources?filter[scan_id] - 500k"
|
||||
page_number = self._next_page(name)
|
||||
endpoint = (
|
||||
f"/resources?page[number]={page_number}" f"&filter[scan]={self.l_scan_id}"
|
||||
)
|
||||
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
|
||||
|
||||
@task
|
||||
def resources_scan_large_include(self):
|
||||
name = "/resources?filter[scan_id]&include - 500k"
|
||||
page_number = self._next_page(name)
|
||||
endpoint = (
|
||||
f"/resources?page[number]={page_number}"
|
||||
f"&filter[scan]={self.l_scan_id}"
|
||||
f"&include=provider"
|
||||
)
|
||||
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
|
||||
|
||||
@task
|
||||
def resources_metadata_scan_large(self):
|
||||
endpoint = f"/resources/metadata?&filter[scan]={self.l_scan_id}"
|
||||
self.client.get(
|
||||
endpoint,
|
||||
headers=get_auth_headers(self.token),
|
||||
name="/resources/metadata?filter[scan_id] - 500k",
|
||||
)
|
||||
|
||||
@task(2)
|
||||
def resources_filters(self):
|
||||
name = "/resources?filter[resource_filter]&include"
|
||||
filter_name, filter_value = get_next_resource_filter(
|
||||
self.available_resource_filters
|
||||
)
|
||||
|
||||
endpoint = (
|
||||
f"/resources?filter[{filter_name}]={filter_value}"
|
||||
f"&filter[updated_at]={TARGET_INSERTED_AT}"
|
||||
f"&include=provider"
|
||||
)
|
||||
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
|
||||
|
||||
@task(3)
|
||||
def resources_metadata_filters(self):
|
||||
name = "/resources/metadata?filter[resource_filter]"
|
||||
filter_name, filter_value = get_next_resource_filter(
|
||||
self.available_resource_filters
|
||||
)
|
||||
|
||||
endpoint = (
|
||||
f"/resources/metadata?filter[{filter_name}]={filter_value}"
|
||||
f"&filter[updated_at]={TARGET_INSERTED_AT}"
|
||||
)
|
||||
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
|
||||
|
||||
@task(3)
|
||||
def resources_metadata_filters_scan_large(self):
|
||||
name = "/resources/metadata?filter[resource_filter]&filter[scan_id] - 500k"
|
||||
filter_name, filter_value = get_next_resource_filter(
|
||||
self.available_resource_filters
|
||||
)
|
||||
|
||||
endpoint = (
|
||||
f"/resources/metadata?filter[{filter_name}]={filter_value}"
|
||||
f"&filter[scan]={self.l_scan_id}"
|
||||
)
|
||||
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
|
||||
|
||||
@task(2)
|
||||
def resourcess_filter_large_scan_include(self):
|
||||
name = "/resources?filter[resource_filter][scan]&include - 500k"
|
||||
filter_name, filter_value = get_next_resource_filter(
|
||||
self.available_resource_filters
|
||||
)
|
||||
|
||||
endpoint = (
|
||||
f"/resources?filter[{filter_name}]={filter_value}"
|
||||
f"&filter[scan]={self.l_scan_id}"
|
||||
f"&include=provider"
|
||||
)
|
||||
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
|
||||
|
||||
@task(3)
|
||||
def resources_latest_default_ui_fields(self):
|
||||
name = "/resources/latest?fields"
|
||||
page_number = self._next_page(name)
|
||||
endpoint = (
|
||||
f"/resources/latest?page[number]={page_number}"
|
||||
f"&fields[resources]={','.join(RESOURCES_UI_FIELDS)}"
|
||||
)
|
||||
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
|
||||
|
||||
@task(3)
|
||||
def resources_latest_metadata_filters(self):
|
||||
name = "/resources/metadata/latest?filter[resource_filter]"
|
||||
filter_name, filter_value = get_next_resource_filter(
|
||||
self.available_resource_filters
|
||||
)
|
||||
|
||||
endpoint = f"/resources/metadata/latest?filter[{filter_name}]={filter_value}"
|
||||
self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
|
||||
@@ -13,6 +13,23 @@ FINDINGS_RESOURCE_METADATA = {
|
||||
"resource_types": "resource_type",
|
||||
"services": "service",
|
||||
}
|
||||
RESOURCE_METADATA = {
|
||||
"regions": "region",
|
||||
"types": "type",
|
||||
"services": "service",
|
||||
}
|
||||
|
||||
RESOURCES_UI_FIELDS = [
|
||||
"name",
|
||||
"failed_findings_count",
|
||||
"region",
|
||||
"service",
|
||||
"type",
|
||||
"provider",
|
||||
"inserted_at",
|
||||
"updated_at",
|
||||
"uid",
|
||||
]
|
||||
|
||||
S_PROVIDER_NAME = "provider-50k"
|
||||
M_PROVIDER_NAME = "provider-250k"
|
||||
|
||||
@@ -7,6 +7,7 @@ from locust import HttpUser, between
|
||||
from utils.config import (
|
||||
BASE_HEADERS,
|
||||
FINDINGS_RESOURCE_METADATA,
|
||||
RESOURCE_METADATA,
|
||||
TARGET_INSERTED_AT,
|
||||
USER_EMAIL,
|
||||
USER_PASSWORD,
|
||||
@@ -121,13 +122,16 @@ def get_scan_id_from_provider_name(host: str, token: str, provider_name: str) ->
|
||||
return response.json()["data"][0]["id"]
|
||||
|
||||
|
||||
def get_resource_filters_pairs(host: str, token: str, scan_id: str = "") -> dict:
|
||||
def get_dynamic_filters_pairs(
|
||||
host: str, token: str, endpoint: str, scan_id: str = ""
|
||||
) -> dict:
|
||||
"""
|
||||
Retrieves and maps resource metadata filter values from the findings endpoint.
|
||||
Retrieves and maps metadata filter values from a given endpoint.
|
||||
|
||||
Args:
|
||||
host (str): The host URL of the API.
|
||||
token (str): Bearer token for authentication.
|
||||
endpoint (str): The API endpoint to query for metadata.
|
||||
scan_id (str, optional): Optional scan ID to filter metadata. Defaults to using inserted_at timestamp.
|
||||
|
||||
Returns:
|
||||
@@ -136,22 +140,28 @@ def get_resource_filters_pairs(host: str, token: str, scan_id: str = "") -> dict
|
||||
Raises:
|
||||
AssertionError: If the request fails or does not return a 200 status code.
|
||||
"""
|
||||
metadata_mapping = (
|
||||
FINDINGS_RESOURCE_METADATA if endpoint == "findings" else RESOURCE_METADATA
|
||||
)
|
||||
date_filter = "inserted_at" if endpoint == "findings" else "updated_at"
|
||||
metadata_filters = (
|
||||
f"filter[scan]={scan_id}"
|
||||
if scan_id
|
||||
else f"filter[inserted_at]={TARGET_INSERTED_AT}"
|
||||
else f"filter[{date_filter}]={TARGET_INSERTED_AT}"
|
||||
)
|
||||
response = requests.get(
|
||||
f"{host}/findings/metadata?{metadata_filters}", headers=get_auth_headers(token)
|
||||
f"{host}/{endpoint}/metadata?{metadata_filters}",
|
||||
headers=get_auth_headers(token),
|
||||
)
|
||||
assert (
|
||||
response.status_code == 200
|
||||
), f"Failed to get resource filters values: {response.text}"
|
||||
attributes = response.json()["data"]["attributes"]
|
||||
|
||||
return {
|
||||
FINDINGS_RESOURCE_METADATA[key]: values
|
||||
metadata_mapping[key]: values
|
||||
for key, values in attributes.items()
|
||||
if key in FINDINGS_RESOURCE_METADATA.keys()
|
||||
if key in metadata_mapping.keys()
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -23,6 +23,7 @@ import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import signal
|
||||
import socket
|
||||
import subprocess
|
||||
@@ -145,11 +146,11 @@ def _get_script_arguments():
|
||||
|
||||
def _run_prowler(prowler_args):
|
||||
_debug("Running prowler with args: {0}".format(prowler_args), 1)
|
||||
_prowler_command = "{prowler}/prowler {args}".format(
|
||||
prowler=PATH_TO_PROWLER, args=prowler_args
|
||||
_prowler_command = shlex.split(
|
||||
"{prowler}/prowler {args}".format(prowler=PATH_TO_PROWLER, args=prowler_args)
|
||||
)
|
||||
_debug("Running command: {0}".format(_prowler_command), 2)
|
||||
_process = subprocess.Popen(_prowler_command, stdout=subprocess.PIPE, shell=True)
|
||||
_debug("Running command: {0}".format(" ".join(_prowler_command)), 2)
|
||||
_process = subprocess.Popen(_prowler_command, stdout=subprocess.PIPE)
|
||||
_output, _error = _process.communicate()
|
||||
_debug("Raw prowler output: {0}".format(_output), 3)
|
||||
_debug("Raw prowler error: {0}".format(_error), 3)
|
||||
|
||||
25
dashboard/compliance/cis_4_0_azure.py
Normal file
@@ -0,0 +1,25 @@
|
||||
import warnings
|
||||
|
||||
from dashboard.common_methods import get_section_containers_cis
|
||||
|
||||
warnings.filterwarnings("ignore")
|
||||
|
||||
|
||||
def get_table(data):
|
||||
|
||||
aux = data[
|
||||
[
|
||||
"REQUIREMENTS_ID",
|
||||
"REQUIREMENTS_DESCRIPTION",
|
||||
"REQUIREMENTS_ATTRIBUTES_SECTION",
|
||||
"CHECKID",
|
||||
"STATUS",
|
||||
"REGION",
|
||||
"ACCOUNTID",
|
||||
"RESOURCEID",
|
||||
]
|
||||
].copy()
|
||||
|
||||
return get_section_containers_cis(
|
||||
aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
|
||||
)
|
||||
@@ -16,7 +16,7 @@ services:
|
||||
volumes:
|
||||
- "./api/src/backend:/home/prowler/backend"
|
||||
- "./api/pyproject.toml:/home/prowler/pyproject.toml"
|
||||
- "/tmp/prowler_api_output:/tmp/prowler_api_output"
|
||||
- "outputs:/tmp/prowler_api_output"
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
@@ -87,7 +87,7 @@ services:
|
||||
- path: .env
|
||||
required: false
|
||||
volumes:
|
||||
- "/tmp/prowler_api_output:/tmp/prowler_api_output"
|
||||
- "outputs:/tmp/prowler_api_output"
|
||||
depends_on:
|
||||
valkey:
|
||||
condition: service_healthy
|
||||
@@ -115,3 +115,7 @@ services:
|
||||
entrypoint:
|
||||
- "../docker-entrypoint.sh"
|
||||
- "beat"
|
||||
|
||||
volumes:
|
||||
outputs:
|
||||
driver: local
|
||||
|
||||
@@ -8,7 +8,7 @@ services:
|
||||
ports:
|
||||
- "${DJANGO_PORT:-8080}:${DJANGO_PORT:-8080}"
|
||||
volumes:
|
||||
- "/tmp/prowler_api_output:/tmp/prowler_api_output"
|
||||
- "output:/tmp/prowler_api_output"
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
@@ -68,7 +68,7 @@ services:
|
||||
- path: .env
|
||||
required: false
|
||||
volumes:
|
||||
- "/tmp/prowler_api_output:/tmp/prowler_api_output"
|
||||
- "output:/tmp/prowler_api_output"
|
||||
depends_on:
|
||||
valkey:
|
||||
condition: service_healthy
|
||||
@@ -91,3 +91,7 @@ services:
|
||||
entrypoint:
|
||||
- "../docker-entrypoint.sh"
|
||||
- "beat"
|
||||
|
||||
volumes:
|
||||
output:
|
||||
driver: local
|
||||
|
||||
@@ -8,15 +8,20 @@ Checks are the core component of Prowler. A check is a piece of code designed to
|
||||
|
||||
### Creating a Check
|
||||
|
||||
To create a new check:
|
||||
The most common high level steps to create a new check are:
|
||||
|
||||
- Prerequisites: A Prowler provider and service must exist. Verify support and check for pre-existing checks via [Prowler Hub](https://hub.prowler.com). If the provider or service is not present, please refer to the [Provider](./provider.md) and [Service](./services.md) documentation for creation instructions.
|
||||
|
||||
- Navigate to the service directory. The path should be as follows: `prowler/providers/<provider>/services/<service>`.
|
||||
|
||||
- Create a check-specific folder. The path should follow this pattern: `prowler/providers/<provider>/services/<service>/<check_name>`. Adhere to the [Naming Format for Checks](#naming-format-for-checks).
|
||||
|
||||
- Populate the folder with files as specified in [File Creation](#file-creation).
|
||||
1. Prerequisites:
|
||||
- Verify the check does not already exist by searching [Prowler Hub](https://hub.prowler.com) or checking `prowler/providers/<provider>/services/<service>/<check_name_want_to_implement>/`.
|
||||
- Ensure required provider and service exist. If not, follow the [Provider](./provider.md) and [Service](./services.md) documentation to create them.
|
||||
- Confirm the service has implemented all required methods and attributes for the check (in most cases, you will need to add or modify some methods in the service to get the data you need for the check).
|
||||
2. Navigate to the service directory. The path should be as follows: `prowler/providers/<provider>/services/<service>`.
|
||||
3. Create a check-specific folder. The path should follow this pattern: `prowler/providers/<provider>/services/<service>/<check_name_want_to_implement>`. Adhere to the [Naming Format for Checks](#naming-format-for-checks).
|
||||
4. Populate the folder with files as specified in [File Creation](#file-creation).
|
||||
5. Run the check locally to ensure it works as expected. For checking you can use the CLI in the next way:
|
||||
- To ensure the check has been detected by Prowler: `poetry run python prowler-cli.py <provider> --list-checks | grep <check_name>`.
|
||||
- To run the check, to find possible issues: `poetry run python prowler-cli.py <provider> --log-level ERROR --verbose --check <check_name>`.
|
||||
6. Create comprehensive tests for the check that cover multiple scenarios including both PASS (compliant) and FAIL (non-compliant) cases. For detailed information about test structure and implementation guidelines, refer to the [Testing](./unit-testing.md) documentation.
|
||||
7. If the check and its corresponding tests are working as expected, you can submit a PR to Prowler.
|
||||
|
||||
### Naming Format for Checks
|
||||
|
||||
@@ -59,13 +64,19 @@ from prowler.providers.<provider>.services.<service>.<service>_client import <se
|
||||
# Each check must be implemented as a Python class with the same name as its corresponding file.
|
||||
# The class must inherit from the Check base class.
|
||||
class <check_name>(Check):
|
||||
"""Short description of what is being checked"""
|
||||
"""
|
||||
Ensure that <resource> meets <security_requirement>.
|
||||
|
||||
This check evaluates whether <specific_condition> to ensure <security_benefit>.
|
||||
- PASS: <description_of_compliant_state(s)>.
|
||||
- FAIL: <description_of_non_compliant_state(s)>.
|
||||
"""
|
||||
|
||||
def execute(self):
|
||||
"""Execute <check short description>
|
||||
"""Execute the check logic.
|
||||
|
||||
Returns:
|
||||
List[CheckReport<Provider>]: A list of reports containing the result of the check.
|
||||
A list of reports containing the result of the check.
|
||||
"""
|
||||
findings = []
|
||||
# Iterate over the target resources using the provider service client
|
||||
@@ -147,12 +158,10 @@ else:
|
||||
Each check **must** populate the report with an unique identifier for the audited resource. This identifier or identifiers are going to depend on the provider and the resource that is being audited. Here are the criteria for each provider:
|
||||
|
||||
- AWS
|
||||
|
||||
- Amazon Resource ID — `report.resource_id`.
|
||||
- The resource identifier. This is the name of the resource, the ID of the resource, or a resource path. Some resource identifiers include a parent resource (sub-resource-type/parent-resource/sub-resource) or a qualifier such as a version (resource-type:resource-name:qualifier).
|
||||
- If the resource ID cannot be retrieved directly from the audited resource, it can be extracted from the ARN. It is the last part of the ARN after the last slash (`/`) or colon (`:`).
|
||||
- If no actual resource to audit exists, this format can be used: `<resource_type>/unknown`
|
||||
|
||||
- Amazon Resource Name — `report.resource_arn`.
|
||||
- The [Amazon Resource Name (ARN)](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html) of the audited entity.
|
||||
- If the ARN cannot be retrieved directly from the audited resource, construct a valid ARN using the `resource_id` component as the audited entity. Examples:
|
||||
@@ -163,32 +172,24 @@ Each check **must** populate the report with an unique identifier for the audite
|
||||
- AWS Security Hub — `arn:<partition>:security-hub:<region>:<account-id>:hub/unknown`.
|
||||
- Access Analyzer — `arn:<partition>:access-analyzer:<region>:<account-id>:analyzer/unknown`.
|
||||
- GuardDuty — `arn:<partition>:guardduty:<region>:<account-id>:detector/unknown`.
|
||||
|
||||
- GCP
|
||||
|
||||
- Resource ID — `report.resource_id`.
|
||||
- Resource ID represents the full, [unambiguous path to a resource](https://google.aip.dev/122#full-resource-names), known as the full resource name. Typically, it follows the format: `//{api_service/resource_path}`.
|
||||
- If the resource ID cannot be retrieved directly from the audited resource, by default the resource name is used.
|
||||
- Resource Name — `report.resource_name`.
|
||||
- Resource Name usually refers to the name of a resource within its service.
|
||||
|
||||
- Azure
|
||||
|
||||
- Resource ID — `report.resource_id`.
|
||||
- Resource ID represents the full Azure Resource Manager path to a resource, which follows the format: `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}`.
|
||||
- Resource Name — `report.resource_name`.
|
||||
- Resource Name usually refers to the name of a resource within its service.
|
||||
- If the [resource name](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-name-rules) cannot be retrieved directly from the audited resource, the last part of the resource ID can be used.
|
||||
|
||||
- Kubernetes
|
||||
|
||||
- Resource ID — `report.resource_id`.
|
||||
- The UID of the Kubernetes object. This is a system-generated string that uniquely identifies the object within the cluster for its entire lifetime. See [Kubernetes Object Names and IDs - UIDs](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids).
|
||||
- Resource Name — `report.resource_name`.
|
||||
- The name of the Kubernetes object. This is a client-provided string that must be unique for the resource type within a namespace (for namespaced resources) or cluster (for cluster-scoped resources). Names typically follow DNS subdomain or label conventions. See [Kubernetes Object Names and IDs - Names](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names).
|
||||
|
||||
- M365
|
||||
|
||||
- Resource ID — `report.resource_id`.
|
||||
- If the audited resource has a globally unique identifier such as a `guid`, use it as the `resource_id`.
|
||||
- If no `guid` exists, use another unique and relevant identifier for the resource, such as the tenant domain, the internal policy ID, or a representative string following the format `<resource_type>/<name_or_id>`.
|
||||
@@ -204,9 +205,7 @@ Each check **must** populate the report with an unique identifier for the audite
|
||||
- For global configurations:
|
||||
- `resource_id`: Tenant domain or representative string (e.g., "userSettings")
|
||||
- `resource_name`: Description of the configuration (e.g., "SharePoint Settings")
|
||||
|
||||
- GitHub
|
||||
|
||||
- Resource ID — `report.resource_id`.
|
||||
- The ID of the Github resource. This is a system-generated integer that uniquely identifies the resource within the Github platform.
|
||||
- Resource Name — `report.resource_name`.
|
||||
@@ -260,44 +259,25 @@ Below is a generic example of a check metadata file. **Do not include comments i
|
||||
### Metadata Fields and Their Purpose
|
||||
|
||||
- **Provider** — The Prowler provider related to the check. The name **must** be lowercase and match the provider folder name. For supported providers refer to [Prowler Hub](https://hub.prowler.com/check) or directly to [Prowler Code](https://github.com/prowler-cloud/prowler/tree/master/prowler/providers).
|
||||
|
||||
- **CheckID** — The unique identifier for the check inside the provider, this field **must** match the check's folder and python file and json metadata file name. For more information about the naming refer to the [Naming Format for Checks](#naming-format-for-checks) section.
|
||||
|
||||
- **CheckTitle** — A concise, descriptive title for the check.
|
||||
|
||||
- **CheckType** — *For now this field is only standardized for the AWS provider*.
|
||||
- For AWS this field must follow the [AWS Security Hub Types](https://docs.aws.amazon.com/securityhub/latest/userguide/asff-required-attributes.html#Types) format. So the common pattern to follow is `namespace/category/classifier`, refer to the attached documentation for the valid values for these fields.
|
||||
|
||||
- **ServiceName** — The name of the provider service being audited. This field **must** be in lowercase and match with the service folder name. For supported services refer to [Prowler Hub](https://hub.prowler.com/check) or directly to [Prowler Code](https://github.com/prowler-cloud/prowler/tree/master/prowler/providers).
|
||||
|
||||
- **SubServiceName** — The subservice or resource within the service, if applicable. For more information refer to the [Naming Format for Checks](#naming-format-for-checks) section.
|
||||
|
||||
- **ResourceIdTemplate** — A template for the unique resource identifier. For more information refer to the [Prowler's Resource Identification](#prowlers-resource-identification) section.
|
||||
|
||||
- **Severity** — The severity of the finding if the check fails. Must be one of: `critical`, `high`, `medium`, `low`, or `informational`, this field **must** be in lowercase. To get more information about the severity levels refer to the [Prowler's Check Severity Levels](#prowlers-check-severity-levels) section.
|
||||
|
||||
- **ResourceType** — The type of resource being audited. *For now this field is only standardized for the AWS provider*.
|
||||
|
||||
- For AWS use the [Security Hub resource types](https://docs.aws.amazon.com/securityhub/latest/userguide/asff-resources.html) or, if not available, the PascalCase version of the [CloudFormation type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) (e.g., `AwsEc2Instance`). Use "Other" if no match exists.
|
||||
|
||||
- **Description** — A short description of what the check does.
|
||||
|
||||
- **Risk** — The risk or impact if the check fails, explaining why the finding matters.
|
||||
|
||||
- **RelatedUrl** — A URL to official documentation or further reading about the check's purpose. If no official documentation is available, use the risk and recommendation text from trusted third-party sources.
|
||||
|
||||
- **Remediation** — Guidance for fixing a failed check, including:
|
||||
|
||||
- **Code** — Remediation commands or code snippets for CLI, Terraform, native IaC, or other tools like the Web Console.
|
||||
|
||||
- **Recommendation** — A textual human readable recommendation. Here it is not necessary to include actual steps, but rather a general recommendation about what to do to fix the check.
|
||||
|
||||
- **Categories** — One or more categories for grouping checks in execution (e.g., `internet-exposed`). For the current list of categories, refer to the [Prowler Hub](https://hub.prowler.com/check).
|
||||
|
||||
- **DependsOn** — Currently not used.
|
||||
|
||||
- **RelatedTo** — Currently not used.
|
||||
|
||||
- **Notes** — Any additional information not covered by other fields.
|
||||
|
||||
### Remediation Code Guidelines
|
||||
@@ -312,3 +292,28 @@ When providing remediation steps, reference the following sources:
|
||||
### Python Model Reference
|
||||
|
||||
The metadata structure is enforced in code using a Pydantic model. For reference, see the [`CheckMetadata`](https://github.com/prowler-cloud/prowler/blob/master/prowler/lib/check/models.py).
|
||||
|
||||
## Generic Check Patterns and Best Practices
|
||||
|
||||
### Common Patterns
|
||||
|
||||
- Every check is implemented as a class inheriting from `Check` (from `prowler.lib.check.models`).
|
||||
- The main logic is implemented in the `execute()` method (**only method that must be implemented**), which always returns a list of provider-specific report objects (e.g., `CheckReport<Provider>`)—one per finding/resource. If there are no findings/resources, return an empty list.
|
||||
- **Never** use the provider's client directly; instead, use the service client (e.g., `<service>_client`) and iterate over its resources.
|
||||
- For each resource, create a provider-specific report object, populate it with metadata, resource details, status (`PASS`, `FAIL`, etc.), and a human-readable `status_extended` message.
|
||||
- Use the `metadata()` method to attach check metadata to each report.
|
||||
- Checks are designed to be idempotent and stateless: they do not modify resources, only report on their state.
|
||||
|
||||
### Best Practices
|
||||
|
||||
- Use clear, actionable, and user-friendly language in `status_extended` to explain the result. Always provide information to identify the resource.
|
||||
- Use helper functions/utilities for repeated logic to avoid code duplication. Save them in the `lib` folder of the service.
|
||||
- Handle exceptions gracefully: catch errors per resource, log them, and continue processing other resources.
|
||||
- Document the check with a class and function level docstring explaining what it does, what it checks, and any caveats or provider-specific behaviors.
|
||||
- Use type hints for the `execute()` method (e.g., `-> list[CheckReport<Provider>]`) for clarity and static analysis.
|
||||
- Ensure checks are efficient; avoid excessive nested loops. If the complexity is high, consider refactoring the check.
|
||||
- Keep the check logic focused: one check = one control/requirement. Avoid combining unrelated logic in a single check.
|
||||
|
||||
## Specific Check Patterns
|
||||
|
||||
Details for specific providers can be found in documentation pages named using the pattern `<provider_name>-details`.
|
||||
|
||||
@@ -50,6 +50,49 @@ The GCP provider implementation follows the general [Provider structure](./provi
|
||||
- **Location:** [`prowler/providers/gcp/lib/`](https://github.com/prowler-cloud/prowler/blob/master/prowler/providers/gcp/lib/)
|
||||
- **Purpose:** Helpers for argument parsing, mutelist management, and other cross-cutting concerns.
|
||||
|
||||
## Retry Configuration
|
||||
|
||||
GCP services implement automatic retry functionality for rate limiting errors (HTTP 429). This is configured centrally and must be included in all API calls:
|
||||
|
||||
### Required Implementation
|
||||
|
||||
```python
|
||||
from prowler.providers.gcp.config import DEFAULT_RETRY_ATTEMPTS
|
||||
|
||||
# In discovery.build()
|
||||
client = discovery.build(
|
||||
service, version, credentials=credentials,
|
||||
num_retries=DEFAULT_RETRY_ATTEMPTS
|
||||
)
|
||||
|
||||
# In request.execute()
|
||||
response = request.execute(num_retries=DEFAULT_RETRY_ATTEMPTS)
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
- **Default Value**: 3 attempts (configurable in `prowler/providers/gcp/config.py`)
|
||||
- **Command Line Flag**: `--gcp-retries-max-attempts` for runtime configuration
|
||||
- **Error Types**: HTTP 429 and quota exceeded errors
|
||||
- **Backoff Strategy**: Exponential backoff with randomization
|
||||
|
||||
### Example Service Implementation
|
||||
|
||||
```python
|
||||
def _get_instances(self):
|
||||
for project_id in self.project_ids:
|
||||
try:
|
||||
client = discovery.build(
|
||||
"compute", "v1", credentials=self.credentials,
|
||||
num_retries=DEFAULT_RETRY_ATTEMPTS
|
||||
)
|
||||
request = client.instances().list(project=project_id)
|
||||
response = request.execute(num_retries=DEFAULT_RETRY_ATTEMPTS)
|
||||
# Process response...
|
||||
except Exception as error:
|
||||
logger.error(f"{error.__class__.__name__}: {error}")
|
||||
```
|
||||
|
||||
## Specific Patterns in GCP Services
|
||||
|
||||
The generic service pattern is described in [service page](./services.md#service-structure-and-initialisation). You can find all the currently implemented services in the following locations:
|
||||
@@ -69,6 +112,7 @@ The best reference to understand how to implement a new service is following the
|
||||
- Resource discovery and attribute collection can be parallelized using `self.__threading_call__`, typically by region/zone or resource.
|
||||
- All GCP resources are represented as Pydantic `BaseModel` classes, providing type safety and structured access to resource attributes.
|
||||
- All GCP API calls are wrapped in try/except blocks, and errors are always logged.
|
||||
- **Retry Configuration**: All `request.execute()` calls must include `num_retries=DEFAULT_RETRY_ATTEMPTS` for automatic retry on rate limiting errors (HTTP 429).
|
||||
- Tags and additional attributes that cannot be retrieved from the default call should be collected and stored for each resource using dedicated methods and threading.
|
||||
|
||||
## Specific Patterns in GCP Checks
|
||||
|
||||
@@ -21,6 +21,8 @@ Within this folder the following files are also to be created:
|
||||
- `<new_service_name>_service.py` – Contains all the logic and API calls of the service.
|
||||
- `<new_service_name>_client.py` – Contains the initialization of the freshly created service's class so that the checks can use it.
|
||||
|
||||
Once the files are created, you can check that the service has been created by running the following command: `poetry run python prowler-cli.py <provider> --list-services | grep <new_service_name>`.
|
||||
|
||||
## Service Structure and Initialisation
|
||||
|
||||
The Prowler's service structure is as outlined below. To initialise it, just import the service client in a check.
|
||||
@@ -75,7 +77,7 @@ class <Service>(ServiceParentClass):
|
||||
# String in case the provider's API service name is different.
|
||||
super().__init__(__class__.__name__, provider)
|
||||
|
||||
# Create an empty dictionary of items to be gathered, using the unique ID as the dictionary’s key, e.g., instances.
|
||||
# Create an empty dictionary of items to be gathered, using the unique ID as the dictionary's key, e.g., instances.
|
||||
self.<items> = {}
|
||||
|
||||
# If parallelization can be carried out by regions or locations, the function __threading_call__ to be used must be implemented in the Service Parent Class.
|
||||
@@ -160,11 +162,9 @@ class <Service>(ServiceParentClass):
|
||||
???+note
|
||||
To prevent false findings, when Prowler fails to retrieve items due to Access Denied or similar errors, the affected item's value is set to `None`.
|
||||
|
||||
#### Service Models
|
||||
#### Resource Models
|
||||
|
||||
Service models define structured classes used within services to store and process data extracted from API calls.
|
||||
|
||||
Using Pydantic for Data Validation
|
||||
Resource models define structured classes used within services to store and process data extracted from API calls. They are defined in the same file as the service class, but outside of the class, usually at the bottom of the file.
|
||||
|
||||
Prowler leverages Pydantic's [BaseModel](https://docs.pydantic.dev/latest/api/base_model/#pydantic.BaseModel) to enforce data validation.
|
||||
|
||||
@@ -227,7 +227,7 @@ from prowler.providers.<provider>.services.<new_service_name>.<new_service_name>
|
||||
|
||||
## Provider Permissions in Prowler
|
||||
|
||||
Before implementing a new service, verify that Prowler’s existing permissions for each provider are sufficient. If additional permissions are required, refer to the relevant documentation and update accordingly.
|
||||
Before implementing a new service, verify that Prowler's existing permissions for each provider are sufficient. If additional permissions are required, refer to the relevant documentation and update accordingly.
|
||||
|
||||
Provider-Specific Permissions Documentation:
|
||||
|
||||
@@ -235,3 +235,16 @@ Provider-Specific Permissions Documentation:
|
||||
- [Azure](../getting-started/requirements.md#needed-permissions)
|
||||
- [GCP](../getting-started/requirements.md#needed-permissions_1)
|
||||
- [M365](../getting-started/requirements.md#needed-permissions_2)
|
||||
- [GitHub](../getting-started/requirements.md#authentication_2)
|
||||
|
||||
## Best Practices
|
||||
|
||||
- When available in the provider, use threading or parallelization utilities for all methods that can be parallelized to maximize performance and reduce scan time.
|
||||
- Define a Pydantic `BaseModel` for every resource you manage, and use these models for all resource data handling.
|
||||
- Log every major step (start, success, error) in resource discovery and attribute collection for traceability and debugging; include as much context as possible.
|
||||
- Catch and log all exceptions, providing detailed context (region, subscription, resource, error type, line number) to aid troubleshooting.
|
||||
- Use consistent naming for resource containers, unique identifiers, and model attributes to improve code readability and maintainability.
|
||||
- Add docstrings to every method and comments to explain any service-specific logic, especially where provider APIs behave differently or have quirks.
|
||||
- Collect and store resource tags and additional attributes to support richer checks and reporting.
|
||||
- Leverage shared utility helpers for session setup, identifier parsing, and other cross-cutting concerns to avoid code duplication. This kind of code is typically stored in a `lib` folder in the service folder.
|
||||
- Keep code modular, maintainable, and well-documented for ease of extension and troubleshooting.
|
||||
|
||||
BIN
docs/img/mutelist-ui-1.png
Normal file
|
After Width: | Height: | Size: 321 KiB |
BIN
docs/img/mutelist-ui-2.png
Normal file
|
After Width: | Height: | Size: 276 KiB |
BIN
docs/img/mutelist-ui-3.png
Normal file
|
After Width: | Height: | Size: 326 KiB |
BIN
docs/img/mutelist-ui-4.png
Normal file
|
After Width: | Height: | Size: 260 KiB |
BIN
docs/img/mutelist-ui-5.png
Normal file
|
After Width: | Height: | Size: 269 KiB |
BIN
docs/img/mutelist-ui-6.png
Normal file
|
After Width: | Height: | Size: 234 KiB |
BIN
docs/img/mutelist-ui-7.png
Normal file
|
After Width: | Height: | Size: 273 KiB |
BIN
docs/img/mutelist-ui-8.png
Normal file
|
After Width: | Height: | Size: 243 KiB |
BIN
docs/img/mutelist-ui-9.png
Normal file
|
After Width: | Height: | Size: 649 KiB |
|
Before Width: | Height: | Size: 328 KiB After Width: | Height: | Size: 289 KiB |
346
docs/index.md
@@ -1,12 +1,33 @@
|
||||
**Prowler** is an Open Source security tool to perform AWS, Azure, Google Cloud and Kubernetes security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness, and also remediations! We have Prowler CLI (Command Line Interface) that we call Prowler Open Source and a service on top of it that we call <a href="https://prowler.com">Prowler Cloud</a>.
|
||||
**Prowler** is the open source cloud security platform trusted by thousands to **automate security and compliance** in any cloud environment. With hundreds of ready-to-use checks and compliance frameworks, Prowler delivers real-time, customizable monitoring and seamless integrations, making cloud security simple, scalable, and cost-effective for organizations of any size.
|
||||
|
||||
The official supported providers right now are:
|
||||
|
||||
- **AWS**
|
||||
- **Azure**
|
||||
- **Google Cloud**
|
||||
- **Kubernetes**
|
||||
- **M365**
|
||||
- **Github**
|
||||
|
||||
Prowler supports **auditing, incident response, continuous monitoring, hardening, forensic readiness, and remediation**.
|
||||
|
||||
### Prowler Components
|
||||
|
||||
- **Prowler CLI** (Command Line Interface) – Known as **Prowler Open Source**.
|
||||
- **Prowler Cloud** – A managed service built on top of Prowler CLI.
|
||||
More information: [Prowler Cloud](https://prowler.com)
|
||||
|
||||
## Prowler App
|
||||
|
||||

|
||||
|
||||
Prowler App is a web application that allows you to run Prowler in a simple way. It provides a user-friendly interface to configure and run scans, view results, and manage your security findings.
|
||||
Prowler App is a web application that simplifies running Prowler. It provides:
|
||||
|
||||
See how to install the Prowler App in the [Quick Start](#prowler-app-installation) section.
|
||||
- A **user-friendly interface** for configuring and executing scans.
|
||||
- A dashboard to **view results** and manage **security findings**.
|
||||
|
||||
### Installation Guide
|
||||
Refer to the [Quick Start](#prowler-app-installation) section for installation steps.
|
||||
|
||||
## Prowler CLI
|
||||
|
||||
@@ -22,14 +43,37 @@ prowler dashboard
|
||||
```
|
||||

|
||||
|
||||
It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, FedRAMP, PCI-DSS, GDPR, HIPAA, FFIEC, SOC2, GXP, AWS Well-Architected Framework Security Pillar, AWS Foundational Technical Review (FTR), ENS (Spanish National Security Scheme) and your custom security frameworks.
|
||||
Prowler includes hundreds of security controls aligned with widely recognized industry frameworks and standards, including:
|
||||
|
||||
- CIS Benchmarks (AWS, Azure, Microsoft 365, Kubernetes, GitHub)
|
||||
- NIST SP 800-53 (rev. 4 and 5) and NIST SP 800-171
|
||||
- NIST Cybersecurity Framework (CSF)
|
||||
- CISA Guidelines
|
||||
- FedRAMP Low & Moderate
|
||||
- PCI DSS v3.2.1 and v4.0
|
||||
- ISO/IEC 27001:2013 and 2022
|
||||
- SOC 2
|
||||
- GDPR (General Data Protection Regulation)
|
||||
- HIPAA (Health Insurance Portability and Accountability Act)
|
||||
- FFIEC (Federal Financial Institutions Examination Council)
|
||||
- ENS RD2022 (Spanish National Security Framework)
|
||||
- GxP 21 CFR Part 11 and EU Annex 11
|
||||
- RBI Cybersecurity Framework (Reserve Bank of India)
|
||||
- KISA ISMS-P (Korean Information Security Management System)
|
||||
- MITRE ATT&CK
|
||||
- AWS Well-Architected Framework (Security & Reliability Pillars)
|
||||
- AWS Foundational Technical Review (FTR)
|
||||
- Microsoft NIS2 Directive (EU)
|
||||
- Custom threat scoring frameworks (prowler_threatscore)
|
||||
- Custom security frameworks for enterprise needs
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Prowler App Installation
|
||||
|
||||
Prowler App can be installed in different ways, depending on your environment:
|
||||
Prowler App supports multiple installation methods based on your environment.
|
||||
|
||||
> See how to use Prowler App in the [Prowler App Tutorial](tutorials/prowler-app.md) section.
|
||||
Refer to the [Prowler App Tutorial](tutorials/prowler-app.md) for detailed usage instructions.
|
||||
|
||||
=== "Docker Compose"
|
||||
|
||||
@@ -136,7 +180,7 @@ Prowler App can be installed in different ways, depending on your environment:
|
||||
|
||||
### Prowler CLI Installation
|
||||
|
||||
Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/), thus can be installed as Python package with `Python >= 3.9, <= 3.12`:
|
||||
Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/). Consequently, it can be installed as Python package with `Python >= 3.9, <= 3.12`:
|
||||
|
||||
=== "pipx"
|
||||
|
||||
@@ -274,7 +318,7 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/),
|
||||
|
||||
=== "AWS CloudShell"
|
||||
|
||||
After the migration of AWS CloudShell from Amazon Linux 2 to Amazon Linux 2023 [[1]](https://aws.amazon.com/about-aws/whats-new/2023/12/aws-cloudshell-migrated-al2023/) [[2]](https://docs.aws.amazon.com/cloudshell/latest/userguide/cloudshell-AL2023-migration.html), there is no longer a need to manually compile Python 3.9 as it's already included in AL2023. Prowler can thus be easily installed following the Generic method of installation via pip. Follow the steps below to successfully execute Prowler v4 in AWS CloudShell:
|
||||
After the migration of AWS CloudShell from Amazon Linux 2 to Amazon Linux 2023 [[1]](https://aws.amazon.com/about-aws/whats-new/2023/12/aws-cloudshell-migrated-al2023/) [[2]](https://docs.aws.amazon.com/cloudshell/latest/userguide/cloudshell-AL2023-migration.html), there is no longer a need to manually compile Python 3.9 as it is already included in AL2023. Prowler can thus be easily installed following the generic method of installation via pip. Follow the steps below to successfully execute Prowler v4 in AWS CloudShell:
|
||||
|
||||
_Requirements_:
|
||||
|
||||
@@ -312,13 +356,58 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/),
|
||||
prowler azure --az-cli-auth
|
||||
```
|
||||
|
||||
### Prowler App Update
|
||||
|
||||
You have two options to upgrade your Prowler App installation:
|
||||
|
||||
#### Option 1: Change env file with the following values
|
||||
|
||||
Edit your `.env` file and change the version values:
|
||||
|
||||
```env
|
||||
PROWLER_UI_VERSION="5.9.0"
|
||||
PROWLER_API_VERSION="5.9.0"
|
||||
```
|
||||
|
||||
#### Option 2: Run the following command
|
||||
|
||||
```bash
|
||||
docker compose pull --policy always
|
||||
```
|
||||
|
||||
The `--policy always` flag ensures that Docker pulls the latest images even if they already exist locally.
|
||||
|
||||
|
||||
???+ note "What Gets Preserved During Upgrade"
|
||||
|
||||
Everything is preserved, nothing will be deleted after the update.
|
||||
|
||||
#### Troubleshooting
|
||||
|
||||
If containers don't start, check logs for errors:
|
||||
|
||||
```bash
|
||||
# Check logs for errors
|
||||
docker compose logs
|
||||
|
||||
# Verify image versions
|
||||
docker images | grep prowler
|
||||
```
|
||||
|
||||
If you encounter issues, you can rollback to the previous version by changing the `.env` file back to your previous version and running:
|
||||
|
||||
```bash
|
||||
docker compose pull
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
## Prowler container versions
|
||||
|
||||
The available versions of Prowler CLI are the following:
|
||||
|
||||
- `latest`: in sync with `master` branch (bear in mind that it is not a stable version)
|
||||
- `v4-latest`: in sync with `v4` branch (bear in mind that it is not a stable version)
|
||||
- `v3-latest`: in sync with `v3` branch (bear in mind that it is not a stable version)
|
||||
- `latest`: in sync with `master` branch (please note that it is not a stable version)
|
||||
- `v4-latest`: in sync with `v4` branch (please note that it is not a stable version)
|
||||
- `v3-latest`: in sync with `v3` branch (please note that it is not a stable version)
|
||||
- `<x.y.z>` (release): you can find the releases [here](https://github.com/prowler-cloud/prowler/releases), those are stable releases.
|
||||
- `stable`: this tag always points to the latest release.
|
||||
- `v4-stable`: this tag always points to the latest release for v4.
|
||||
@@ -348,7 +437,7 @@ The **Prowler App** consists of three main components:
|
||||
|
||||
- **Prowler UI**: A user-friendly web interface for running Prowler and viewing results, powered by Next.js.
|
||||
- **Prowler API**: The backend API that executes Prowler scans and stores the results, built with Django REST Framework.
|
||||
- **Prowler SDK**: A Python SDK that integrates with the Prowler CLI for advanced functionality.
|
||||
- **Prowler SDK**: A Python SDK that integrates with Prowler CLI for advanced functionality.
|
||||
|
||||
The app leverages the following supporting infrastructure:
|
||||
|
||||
@@ -360,24 +449,29 @@ The app leverages the following supporting infrastructure:
|
||||
|
||||
## Deprecations from v3
|
||||
|
||||
### General
|
||||
- `Allowlist` now is called `Mutelist`.
|
||||
- The `--quiet` option has been deprecated, now use the `--status` flag to select the finding's status you want to get from PASS, FAIL or MANUAL.
|
||||
- All `INFO` finding's status has changed to `MANUAL`.
|
||||
- The CSV output format is common for all the providers.
|
||||
The following are the deprecations carried out from v3.
|
||||
|
||||
We have deprecated some of our outputs formats:
|
||||
### General
|
||||
|
||||
- `Allowlist` now is called `Mutelist`.
|
||||
- The `--quiet` option has been deprecated. From now on use the `--status` flag to select the finding's status you want to get: PASS, FAIL or MANUAL.
|
||||
- All `INFO` finding's status has changed to `MANUAL`.
|
||||
- The CSV output format is common for all providers.
|
||||
|
||||
Some output formats are now deprecated:
|
||||
|
||||
- The native JSON is replaced for the JSON [OCSF](https://schema.ocsf.io/) v1.1.0, common for all the providers.
|
||||
|
||||
### AWS
|
||||
- Deprecate the AWS flag --sts-endpoint-region since we use AWS STS regional tokens.
|
||||
- To send only FAILS to AWS Security Hub, now use either `--send-sh-only-fails` or `--security-hub --status FAIL`.
|
||||
- Deprecate the AWS flag `--sts-endpoint-region` since AWS STS regional tokens are used.
|
||||
- To send only FAILS to AWS Security Hub, now you must use either `--send-sh-only-fails` or `--security-hub --status FAIL`.
|
||||
|
||||
## Basic Usage
|
||||
|
||||
### Prowler App
|
||||
|
||||
#### **Access the App**
|
||||
|
||||
Go to [http://localhost:3000](http://localhost:3000) after installing the app (see [Quick Start](#prowler-app-installation)). Sign up with your email and password.
|
||||
|
||||
<img src="img/sign-up-button.png" alt="Sign Up Button" width="320"/>
|
||||
@@ -391,42 +485,61 @@ Go to [http://localhost:3000](http://localhost:3000) after installing the app (s
|
||||
|
||||
- A new tenant is automatically created.
|
||||
- The new user is assigned to this tenant.
|
||||
- A set of **RBAC admin permissions** is generated and assigned to the user for the newly created tenant.
|
||||
- A set of **RBAC admin permissions** is generated and assigned to the user for the newly-created tenant.
|
||||
|
||||
- **With an invitation**: The user is added to the specified tenant with the permissions defined in the invitation.
|
||||
|
||||
This mechanism ensures that the first user in a newly created tenant has administrative permissions within that tenant.
|
||||
|
||||
#### **Log In**
|
||||
Log in with your email and password to start using the Prowler App.
|
||||
#### Log In
|
||||
|
||||
Log in using your **email and password** to access the Prowler App.
|
||||
|
||||
<img src="img/log-in.png" alt="Log In" width="285"/>
|
||||
|
||||
#### **Add a Provider**
|
||||
- Go to `Settings > Cloud Providers` and click `Add Account`.
|
||||
- Select the provider you want to scan (AWS, GCP, Azure, Kubernetes).
|
||||
- Enter the provider's ID (AWS Account ID, GCP Project ID, Azure Subscription ID, Kubernetes Cluster) and optional alias.
|
||||
- Follow the instructions to add your credentials.
|
||||
#### Add a Cloud Provider
|
||||
|
||||
#### **Start a Scan**
|
||||
After successfully adding and testing your credentials, Prowler will start scanning your cloud environment, click on the `Go to Scans` button to see the progress.
|
||||
To configure a cloud provider for scanning:
|
||||
|
||||
#### **View Results**
|
||||
While the scan is running, start exploring the findings in these sections:
|
||||
1. Navigate to `Settings > Cloud Providers` and click `Add Account`.
|
||||
2. Select the cloud provider you wish to scan (**AWS, GCP, Azure, Kubernetes**).
|
||||
3. Enter the provider's identifier (Optional: Add an alias):
|
||||
- **AWS**: Account ID
|
||||
- **GCP**: Project ID
|
||||
- **Azure**: Subscription ID
|
||||
- **Kubernetes**: Cluster ID
|
||||
   - **M365**: Domain ID
|
||||
4. Follow the guided instructions to add and authenticate your credentials.
|
||||
|
||||
- **Overview**: High-level summary of the scans. <img src="img/overview.png" alt="Overview" width="700"/>
|
||||
- **Compliance**: Insights into compliance status. <img src="img/compliance.png" alt="Compliance" width="700"/>
|
||||
#### Start a Scan
|
||||
|
||||
> See more details about the Prowler App usage in the [Prowler App](tutorials/prowler-app.md) section.
|
||||
Once credentials are successfully added and validated, Prowler initiates a scan of your cloud environment.
|
||||
|
||||
Click `Go to Scans` to monitor progress.
|
||||
|
||||
#### View Results
|
||||
|
||||
While the scan is running, you can review findings in the following sections:
|
||||
|
||||
- **Overview** – Provides a high-level summary of your scans.
|
||||
<img src="img/overview.png" alt="Overview" width="700"/>
|
||||
|
||||
- **Compliance** – Displays compliance insights based on security frameworks.
|
||||
<img src="img/compliance.png" alt="Compliance" width="700"/>
|
||||
|
||||
> For detailed usage instructions, refer to the [Prowler App Guide](tutorials/prowler-app.md).
|
||||
|
||||
???+ note
|
||||
Prowler will automatically scan all configured providers every **24 hours**, ensuring your cloud environment stays continuously monitored.
|
||||
|
||||
### Prowler CLI
|
||||
|
||||
To run Prowler, you will need to specify the provider (e.g `aws`, `gcp`, `azure`, `m365` or `kubernetes`):
|
||||
#### Running Prowler
|
||||
|
||||
To run Prowler, you will need to specify the provider (e.g `aws`, `gcp`, `azure`, `m365`, `github` or `kubernetes`):
|
||||
|
||||
???+ note
|
||||
If no provider specified, AWS will be used for backward compatibility with most of v2 options.
|
||||
If no provider is specified, AWS is used by default for backward compatibility with Prowler v2.
|
||||
|
||||
```console
|
||||
prowler <provider>
|
||||
@@ -434,27 +547,34 @@ prowler <provider>
|
||||

|
||||
|
||||
???+ note
|
||||
Running the `prowler` command without options will use your environment variable credentials, see [Requirements](./getting-started/requirements.md) section to review the credentials settings.
|
||||
Running the `prowler` command without options uses environment variable credentials. Refer to the [Requirements](./getting-started/requirements.md) section for credential configuration details.
|
||||
|
||||
If you miss the former output you can use `--verbose` but Prowler v4 is smoking fast, so you won't see much ;
|
||||
#### Verbose Output
|
||||
|
||||
By default, Prowler generates CSV, JSON-OCSF and HTML reports. However, you can generate a JSON-ASFF report (used by AWS Security Hub) with `-M` or `--output-modes`:
|
||||
If you prefer the former verbose output, use `--verbose`. This allows seeing more info while Prowler is running; by default, minimal output is displayed unless verbosity is enabled.
|
||||
|
||||
#### Report Generation
|
||||
|
||||
By default, Prowler generates CSV, JSON-OCSF, and HTML reports. To generate a JSON-ASFF report (used by AWS Security Hub), specify `-M` or `--output-modes`:
|
||||
|
||||
```console
|
||||
prowler <provider> -M csv json-asff json-ocsf html
|
||||
```
|
||||
The html report will be located in the output directory as the other files and it will look like:
|
||||
The HTML report is saved in the output directory, alongside other reports. It will look like this:
|
||||
|
||||

|
||||
|
||||
You can use `-l`/`--list-checks` or `--list-services` to list all available checks or services within the provider.
|
||||
#### Listing Available Checks and Services
|
||||
|
||||
To view all available checks or services within a provider, use `-l`/`--list-checks` or `--list-services`.
|
||||
|
||||
```console
|
||||
prowler <provider> --list-checks
|
||||
prowler <provider> --list-services
|
||||
```
|
||||
#### Running Specific Checks or Services
|
||||
|
||||
For executing specific checks or services you can use options `-c`/`checks` or `-s`/`services`:
|
||||
Execute specific checks or services using `-c`/`checks` or `-s`/`services`:
|
||||
|
||||
```console
|
||||
prowler azure --checks storage_blob_public_access_level_is_disabled
|
||||
@@ -462,8 +582,9 @@ prowler aws --services s3 ec2
|
||||
prowler gcp --services iam compute
|
||||
prowler kubernetes --services etcd apiserver
|
||||
```
|
||||
#### Excluding Checks and Services
|
||||
|
||||
Also, checks and services can be excluded with options `-e`/`--excluded-checks` or `--excluded-services`:
|
||||
Checks and services can be excluded with `-e`/`--excluded-checks` or `--excluded-services`:
|
||||
|
||||
```console
|
||||
prowler aws --excluded-checks s3_bucket_public_access
|
||||
@@ -471,10 +592,11 @@ prowler azure --excluded-services defender iam
|
||||
prowler gcp --excluded-services kms
|
||||
prowler kubernetes --excluded-services controllermanager
|
||||
```
|
||||
#### Additional Options
|
||||
|
||||
More options and executions methods that will save your time in [Miscellaneous](tutorials/misc.md).
|
||||
Explore more advanced time-saving execution methods in the [Miscellaneous](tutorials/misc.md) section.
|
||||
|
||||
You can always use `-h`/`--help` to access to the usage information and all the possible options:
|
||||
To access the help menu and view all available options, use `-h`/`--help`:
|
||||
|
||||
```console
|
||||
prowler --help
|
||||
@@ -482,7 +604,7 @@ prowler --help
|
||||
|
||||
#### AWS
|
||||
|
||||
Use a custom AWS profile with `-p`/`--profile` and/or AWS regions which you want to audit with `-f`/`--filter-region`:
|
||||
Use a custom AWS profile with `-p`/`--profile` and/or the AWS regions you want to audit with `-f`/`--filter-region`:
|
||||
|
||||
```console
|
||||
prowler aws --profile custom-profile -f us-east-1 eu-south-2
|
||||
@@ -491,11 +613,11 @@ prowler aws --profile custom-profile -f us-east-1 eu-south-2
|
||||
???+ note
|
||||
By default, `prowler` will scan all AWS regions.
|
||||
|
||||
See more details about AWS Authentication in [Requirements](getting-started/requirements.md#aws)
|
||||
See more details about AWS Authentication in the [Requirements](getting-started/requirements.md#aws) section.
|
||||
|
||||
#### Azure
|
||||
|
||||
With Azure you need to specify which auth method is going to be used:
|
||||
Azure requires specifying the auth method:
|
||||
|
||||
```console
|
||||
# To use service principal authentication
|
||||
@@ -513,62 +635,83 @@ prowler azure --managed-identity-auth
|
||||
|
||||
See more details about Azure Authentication in [Requirements](getting-started/requirements.md#azure)
|
||||
|
||||
Prowler by default scans all the subscriptions that is allowed to scan, if you want to scan a single subscription or various specific subscriptions you can use the following flag (using az cli auth as example):
|
||||
By default, Prowler scans all the subscriptions for which it has permissions. To scan one or more specific subscriptions, use the following flag (using Azure CLI authentication as an example):
|
||||
|
||||
```console
|
||||
prowler azure --az-cli-auth --subscription-ids <subscription ID 1> <subscription ID 2> ... <subscription ID N>
|
||||
```
|
||||
|
||||
#### Google Cloud
|
||||
|
||||
Prowler will use by default your User Account credentials, you can configure it using:
|
||||
- **User Account Credentials**
|
||||
|
||||
- `gcloud init` to use a new account
|
||||
- `gcloud config set account <account>` to use an existing account
|
||||
By default, Prowler uses **User Account credentials**. You can configure your account using:
|
||||
|
||||
Then, obtain your access credentials using: `gcloud auth application-default login`
|
||||
- `gcloud init` – Set up a new account.
|
||||
- `gcloud config set account <account>` – Switch to an existing account.
|
||||
|
||||
Otherwise, you can generate and download Service Account keys in JSON format (refer to https://cloud.google.com/iam/docs/creating-managing-service-account-keys) and provide the location of the file with the following argument:
|
||||
Once configured, obtain access credentials using: `gcloud auth application-default login`.
|
||||
|
||||
```console
|
||||
prowler gcp --credentials-file path
|
||||
```
|
||||
- **Service Account Authentication**
|
||||
|
||||
Prowler by default scans all the GCP Projects that is allowed to scan, if you want to scan a single project or various specific projects you can use the following flag:
|
||||
```console
|
||||
prowler gcp --project-ids <Project ID 1> <Project ID 2> ... <Project ID N>
|
||||
```
|
||||
Alternatively, you can use Service Account credentials:
|
||||
|
||||
See more details about GCP Authentication in [Requirements](getting-started/requirements.md#google-cloud)
|
||||
Generate and download Service Account keys in JSON format. Refer to [Google IAM documentation](https://cloud.google.com/iam/docs/creating-managing-service-account-keys) for details.
|
||||
|
||||
Provide the key file location using this argument:
|
||||
|
||||
```console
|
||||
prowler gcp --credentials-file path
|
||||
```
|
||||
|
||||
- **Scanning Specific GCP Projects**
|
||||
|
||||
By default, Prowler scans all accessible GCP projects. To scan specific projects, use the `--project-ids` flag:
|
||||
|
||||
```console
|
||||
prowler gcp --project-ids <Project ID 1> <Project ID 2> ... <Project ID N>
|
||||
```
|
||||
|
||||
- **GCP Retry Configuration**
|
||||
|
||||
To configure the maximum number of retry attempts for Google Cloud SDK API calls, use the `--gcp-retries-max-attempts` flag:
|
||||
|
||||
```console
|
||||
prowler gcp --gcp-retries-max-attempts 5
|
||||
```
|
||||
|
||||
This is useful when experiencing quota exceeded errors (HTTP 429) to increase the number of automatic retry attempts.
|
||||
|
||||
#### Kubernetes
|
||||
|
||||
Prowler allows you to scan your Kubernetes Cluster either from within the cluster or from outside the cluster.
|
||||
Prowler enables security scanning of Kubernetes clusters, supporting both **in-cluster** and **external** execution.
|
||||
|
||||
For non in-cluster execution, you can provide the location of the KubeConfig file with the following argument:
|
||||
- **Non In-Cluster Execution**
|
||||
|
||||
```console
|
||||
prowler kubernetes --kubeconfig-file path
|
||||
```
|
||||
???+ note
|
||||
If no `--kubeconfig-file` is provided, Prowler will use the default KubeConfig file location (`~/.kube/config`).
|
||||
```console
|
||||
prowler kubernetes --kubeconfig-file path
|
||||
```
|
||||
???+ note
|
||||
If no `--kubeconfig-file` is provided, Prowler will use the default KubeConfig file location (`~/.kube/config`).
|
||||
|
||||
For in-cluster execution, you can use the supplied yaml to run Prowler as a job within a new Prowler namespace:
|
||||
```console
|
||||
kubectl apply -f kubernetes/prowler-sa.yaml
|
||||
kubectl apply -f kubernetes/job.yaml
|
||||
kubectl apply -f kubernetes/prowler-role.yaml
|
||||
kubectl apply -f kubernetes/prowler-rolebinding.yaml
|
||||
kubectl get pods --namespace prowler-ns --> prowler-XXXXX
|
||||
kubectl logs prowler-XXXXX --namespace prowler-ns
|
||||
```
|
||||
- **In-Cluster Execution**
|
||||
|
||||
???+ note
|
||||
By default, `prowler` will scan all namespaces in your active Kubernetes context. Use the flag `--context` to specify the context to be scanned and `--namespaces` to specify the namespaces to be scanned.
|
||||
To run Prowler inside the cluster, apply the provided YAML configuration to deploy a job in a new namespace:
|
||||
|
||||
```console
|
||||
kubectl apply -f kubernetes/prowler-sa.yaml
|
||||
kubectl apply -f kubernetes/job.yaml
|
||||
kubectl apply -f kubernetes/prowler-role.yaml
|
||||
kubectl apply -f kubernetes/prowler-rolebinding.yaml
|
||||
kubectl get pods --namespace prowler-ns --> prowler-XXXXX
|
||||
kubectl logs prowler-XXXXX --namespace prowler-ns
|
||||
```
|
||||
|
||||
???+ note
|
||||
By default, Prowler scans all namespaces in the active Kubernetes context. Use the `--context` flag to specify the context to be scanned and `--namespaces` to restrict scanning to specific namespaces.
|
||||
|
||||
#### Microsoft 365
|
||||
|
||||
With M365 you need to specify which auth method is going to be used:
|
||||
Microsoft 365 requires specifying the auth method:
|
||||
|
||||
```console
|
||||
|
||||
@@ -586,31 +729,33 @@ prowler m365 --browser-auth --tenant-id "XXXXXXXX"
|
||||
|
||||
```
|
||||
|
||||
See more details about M365 Authentication in [Requirements](getting-started/requirements.md#microsoft-365)
|
||||
See more details about M365 Authentication in the [Requirements](getting-started/requirements.md#microsoft-365) section.
|
||||
|
||||
#### GitHub
|
||||
|
||||
Prowler allows you to scan your GitHub account, including your repositories, organizations or applications.
|
||||
Prowler enables security scanning of your **GitHub account**, including **Repositories**, **Organizations** and **Applications**.
|
||||
|
||||
There are several supported login methods:
|
||||
- **Supported Authentication Methods**
|
||||
|
||||
```console
|
||||
# Personal Access Token (PAT):
|
||||
prowler github --personal-access-token pat
|
||||
Authenticate using one of the following methods:
|
||||
|
||||
# OAuth App Token:
|
||||
prowler github --oauth-app-token oauth_token
|
||||
```console
|
||||
# Personal Access Token (PAT):
|
||||
prowler github --personal-access-token pat
|
||||
|
||||
# GitHub App Credentials:
|
||||
prowler github --github-app-id app_id --github-app-key app_key
|
||||
```
|
||||
# OAuth App Token:
|
||||
prowler github --oauth-app-token oauth_token
|
||||
|
||||
???+ note
|
||||
If no login method is explicitly provided, Prowler will automatically attempt to authenticate using environment variables in the following order of precedence:
|
||||
# GitHub App Credentials:
|
||||
prowler github --github-app-id app_id --github-app-key app_key
|
||||
```
|
||||
|
||||
1. `GITHUB_PERSONAL_ACCESS_TOKEN`
|
||||
2. `OAUTH_APP_TOKEN`
|
||||
3. `GITHUB_APP_ID` and `GITHUB_APP_KEY`
|
||||
???+ note
|
||||
If no login method is explicitly provided, Prowler will automatically attempt to authenticate using environment variables in the following order of precedence:
|
||||
|
||||
1. `GITHUB_PERSONAL_ACCESS_TOKEN`
|
||||
2. `OAUTH_APP_TOKEN`
|
||||
3. `GITHUB_APP_ID` and `GITHUB_APP_KEY`
|
||||
|
||||
#### Infrastructure as Code (IaC)
|
||||
|
||||
@@ -648,4 +793,5 @@ prowler iac --scan-path ./my-iac-directory --exclude-path ./my-iac-directory/tes
|
||||
See more details about IaC scanning in the [IaC Tutorial](tutorials/iac/getting-started-iac.md) section.
|
||||
|
||||
## Prowler v2 Documentation
|
||||
For **Prowler v2 Documentation**, please check it out [here](https://github.com/prowler-cloud/prowler/blob/8818f47333a0c1c1a457453c87af0ea5b89a385f/README.md).
|
||||
|
||||
For **Prowler v2 Documentation**, refer to the [official repository](https://github.com/prowler-cloud/prowler/blob/8818f47333a0c1c1a457453c87af0ea5b89a385f/README.md).
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
# AWS Authentication
|
||||
# AWS Authentication in Prowler
|
||||
|
||||
Make sure you have properly configured your AWS-CLI with a valid Access Key and Region or declare AWS variables properly (or instance profile/role):
|
||||
Proper authentication is required for Prowler to perform security checks across AWS resources. Ensure that AWS-CLI is correctly configured or manually declare AWS credentials before running scans.
|
||||
|
||||
## Configure AWS Credentials
|
||||
|
||||
Use one of the following methods to authenticate:
|
||||
|
||||
```console
|
||||
aws configure
|
||||
@@ -14,25 +18,32 @@ export AWS_SECRET_ACCESS_KEY="XXXXXXXXX"
|
||||
export AWS_SESSION_TOKEN="XXXXXXXXX"
|
||||
```
|
||||
|
||||
Those credentials must be associated to a user or role with proper permissions to do all checks. To make sure, add the following AWS managed policies to the user or role being used:
|
||||
These credentials must be associated with a user or role with the necessary permissions to perform security checks.
|
||||
|
||||
- `arn:aws:iam::aws:policy/SecurityAudit`
|
||||
- `arn:aws:iam::aws:policy/job-function/ViewOnlyAccess`
|
||||
## Assign Required AWS Permissions
|
||||
To ensure full functionality, attach the following AWS managed policies to the designated user or role:
|
||||
|
||||
- `arn:aws:iam::aws:policy/SecurityAudit`
|
||||
- `arn:aws:iam::aws:policy/job-function/ViewOnlyAccess`
|
||||
|
||||
???+ note
|
||||
Moreover, some read-only additional permissions are needed for several checks, make sure you attach also the custom policy [prowler-additions-policy.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-additions-policy.json) to the role you are using. If you want Prowler to send findings to [AWS Security Hub](https://aws.amazon.com/security-hub), make sure you also attach the custom policy [prowler-security-hub.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-security-hub.json).
|
||||
Some security checks require additional read-only permissions. Attach the following custom policy to the role: [prowler-additions-policy.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-additions-policy.json). If you want Prowler to send findings to [AWS Security Hub](https://aws.amazon.com/security-hub), make sure to also attach the custom policy: [prowler-security-hub.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-security-hub.json).
|
||||
|
||||
## AWS Profiles and Service Scanning in Prowler
|
||||
|
||||
## Profiles
|
||||
Prowler supports authentication and security assessments using custom AWS profiles and can optionally scan unused services.
|
||||
|
||||
**Using Custom AWS Profiles**
|
||||
|
||||
Prowler allows you to specify a custom AWS profile using the following command:
|
||||
|
||||
Prowler can use your custom AWS Profile with:
|
||||
```console
|
||||
prowler aws -p/--profile <profile_name>
|
||||
```
|
||||
|
||||
## Multi-Factor Authentication
|
||||
## Multi-Factor Authentication (MFA)
|
||||
|
||||
If your IAM entity enforces MFA you can use `--mfa` and Prowler will ask you to input the following values to get a new session:
|
||||
If MFA enforcement is required for your IAM entity, you can use `--mfa`. Prowler will prompt you to enter the following in order to get a new session:
|
||||
|
||||
- ARN of your MFA device
|
||||
- TOTP (Time-Based One-Time Password)
|
||||
|
||||
@@ -1,45 +1,52 @@
|
||||
# Boto3 Retrier Configuration
|
||||
# Boto3 Retrier Configuration in Prowler
|
||||
|
||||
Prowler's AWS Provider uses the Boto3 [Standard](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html) retry mode to assist in retrying client calls to AWS services when these kinds of errors or exceptions are experienced. This mode includes the following behaviours:
|
||||
Prowler's AWS Provider leverages Boto3's [Standard](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html) retry mode to automatically retry client calls to AWS services when encountering errors or exceptions.
|
||||
|
||||
- A default value of 3 for maximum retry attempts. This can be overwritten with the `--aws-retries-max-attempts 5` argument.
|
||||
## Retry Behavior Overview
|
||||
|
||||
- Retry attempts for an expanded list of errors/exceptions:
|
||||
```
|
||||
# Transient errors/exceptions
|
||||
RequestTimeout
|
||||
RequestTimeoutException
|
||||
PriorRequestNotComplete
|
||||
ConnectionError
|
||||
HTTPClientError
|
||||
Boto3's Standard retry mode includes the following mechanisms:
|
||||
|
||||
# Service-side throttling/limit errors and exceptions
|
||||
Throttling
|
||||
ThrottlingException
|
||||
ThrottledException
|
||||
RequestThrottledException
|
||||
TooManyRequestsException
|
||||
ProvisionedThroughputExceededException
|
||||
TransactionInProgressException
|
||||
RequestLimitExceeded
|
||||
BandwidthLimitExceeded
|
||||
LimitExceededException
|
||||
RequestThrottled
|
||||
SlowDown
|
||||
EC2ThrottledException
|
||||
```
|
||||
- Maximum Retry Attempts: Default value set to 3, configurable via the `--aws-retries-max-attempts 5` argument.
|
||||
|
||||
- Retry attempts on nondescriptive, transient error codes. Specifically, these HTTP status codes: 500, 502, 503, 504.
|
||||
- Expanded Error Handling: Retries occur for a comprehensive set of errors.
|
||||
|
||||
- Any retry attempt will include an exponential backoff by a base factor of 2 for a maximum backoff time of 20 seconds.
|
||||
```
|
||||
# *Transient Errors/Exceptions*
|
||||
The retrier handles various temporary failures:
|
||||
RequestTimeout
|
||||
RequestTimeoutException
|
||||
PriorRequestNotComplete
|
||||
ConnectionError
|
||||
HTTPClientError
|
||||
|
||||
## Notes for validating retry attempts
|
||||
# *Service-Side Throttling and Limit Errors*
|
||||
Retries occur for service-imposed rate limits and resource constraints:
|
||||
Throttling
|
||||
ThrottlingException
|
||||
ThrottledException
|
||||
RequestThrottledException
|
||||
TooManyRequestsException
|
||||
ProvisionedThroughputExceededException
|
||||
TransactionInProgressException
|
||||
RequestLimitExceeded
|
||||
BandwidthLimitExceeded
|
||||
LimitExceededException
|
||||
RequestThrottled
|
||||
SlowDown
|
||||
EC2ThrottledException
|
||||
```
|
||||
|
||||
If you are making changes to Prowler, and want to validate if requests are being retried or given up on, you can take the following approach
|
||||
- Nondescriptive Transient Error Codes: The retrier applies retry logic to standard HTTP status codes signaling transient errors: 500, 502, 503, 504.
|
||||
|
||||
- Exponential Backoff Strategy: Each retry attempt follows exponential backoff with a base factor of 2, ensuring progressive delay between retries. Maximum backoff time: 20 seconds
|
||||
|
||||
## Validating Retry Attempts
|
||||
|
||||
For testing or modifying Prowler's behavior, use the following steps to confirm whether requests are being retried or abandoned:
|
||||
|
||||
* Run prowler with `--log-level DEBUG` and `--log-file debuglogs.txt`
|
||||
* Search for retry attempts using `grep -i 'Retry needed' debuglogs.txt`
|
||||
|
||||
This is based off of the [AWS documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html#checking-retry-attempts-in-your-client-logs), which states that if a retry is performed, you will see a message starting with "Retry needed".
|
||||
This approach follows the [AWS documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html#checking-retry-attempts-in-your-client-logs), which states that if a retry is performed, a message starting with "Retry needed" will appear in the logs.
|
||||
|
||||
You can determine the total number of calls made using `grep -i 'Sending http request' debuglogs.txt | wc -l`
|
||||
It is possible to determine the total number of calls made using `grep -i 'Sending http request' debuglogs.txt | wc -l`
|
||||
|
||||
@@ -1,7 +1,11 @@
|
||||
# AWS CloudShell
|
||||
# Installing Prowler in AWS CloudShell
|
||||
|
||||
## Following the migration of AWS CloudShell from Amazon Linux 2 to Amazon Linux 2023
|
||||
|
||||
AWS CloudShell has migrated from Amazon Linux 2 to Amazon Linux 2023 [[1]](https://aws.amazon.com/about-aws/whats-new/2023/12/aws-cloudshell-migrated-al2023/) [[2]](https://docs.aws.amazon.com/cloudshell/latest/userguide/cloudshell-AL2023-migration.html). With this transition, Python 3.9 is now included by default in AL2023, eliminating the need for manual compilation.
|
||||
|
||||
To install Prowler v4 in AWS CloudShell, follow the standard installation method using pip:
|
||||
|
||||
## Installation
|
||||
After the migration of AWS CloudShell from Amazon Linux 2 to Amazon Linux 2023 [[1]](https://aws.amazon.com/about-aws/whats-new/2023/12/aws-cloudshell-migrated-al2023/) [[2]](https://docs.aws.amazon.com/cloudshell/latest/userguide/cloudshell-AL2023-migration.html), there is no longer a need to manually compile Python 3.9 as it's already included in AL2023. Prowler can thus be easily installed following the Generic method of installation via pip. Follow the steps below to successfully execute Prowler v4 in AWS CloudShell:
|
||||
```shell
|
||||
sudo bash
|
||||
adduser prowler
|
||||
@@ -11,13 +15,20 @@ cd /tmp
|
||||
prowler aws
|
||||
```
|
||||
|
||||
## Download Files
|
||||
## Downloading Files from AWS CloudShell
|
||||
|
||||
To download the results from AWS CloudShell, select Actions -> Download File and add the full path of each file. For the CSV file it will be something like `/home/cloudshell-user/output/prowler-output-123456789012-20221220191331.csv`
|
||||
To download results from AWS CloudShell:
|
||||
|
||||
## Clone Prowler from Github
|
||||
- Select Actions → Download File.
|
||||
|
||||
- Specify the full file path of each file you wish to download. For example, downloading a CSV file would require providing its complete path, as in: `/home/cloudshell-user/output/prowler-output-123456789012-20221220191331.csv`
|
||||
|
||||
## Cloning Prowler from GitHub
|
||||
|
||||
Due to the limited storage in AWS CloudShell's home directory, installing Poetry dependencies for running Prowler from GitHub can be problematic.
|
||||
|
||||
The following workaround ensures successful installation:
|
||||
|
||||
The limited storage that AWS CloudShell provides for the user's home directory causes issues when installing the poetry dependencies to run Prowler from GitHub. Here is a workaround:
|
||||
```shell
|
||||
sudo bash
|
||||
adduser prowler
|
||||
@@ -31,8 +42,8 @@ eval $(poetry env activate)
|
||||
poetry install
|
||||
python prowler-cli.py -v
|
||||
```
|
||||
> [!IMPORTANT]
|
||||
> Starting from Poetry v2.0.0, `poetry shell` has been deprecated in favor of `poetry env activate`.
|
||||
>
|
||||
> If your poetry version is below 2.0.0 you must keep using `poetry shell` to activate your environment.
|
||||
> In case you have any doubts, consult the Poetry environment activation guide: https://python-poetry.org/docs/managing-environments/#activating-the-environment
|
||||
|
||||
???+ important
|
||||
Starting from Poetry v2.0.0, `poetry shell` has been deprecated in favor of `poetry env activate`.
|
||||
|
||||
If your Poetry version is below v2.0.0, continue using `poetry shell` to activate your environment. For further guidance, refer to the Poetry Environment Activation Guide https://python-poetry.org/docs/managing-environments/#activating-the-environment.
|
||||
|
||||
@@ -1,34 +1,39 @@
|
||||
# Scan Multiple AWS Accounts
|
||||
# Scanning Multiple AWS Accounts with Prowler
|
||||
|
||||
Prowler can scan multiple accounts when it is executed from one account that can assume a role in those given accounts to scan using [Assume Role feature](role-assumption.md) and [AWS Organizations integration feature](organizations.md).
|
||||
Prowler enables security scanning across multiple AWS accounts by utilizing the [Assume Role feature](role-assumption.md) and [integration with AWS Organizations feature](organizations.md).
|
||||
|
||||
This approach allows execution from a single account with permissions to assume roles in the target accounts.
|
||||
|
||||
## Scan multiple specific accounts sequentially
|
||||
## Scanning Multiple Accounts Sequentially
|
||||
|
||||
- Declare a variable with all the accounts to scan:
|
||||
To scan specific accounts one at a time:
|
||||
|
||||
- Define a variable containing the AWS account IDs to be scanned:
|
||||
|
||||
```
|
||||
ACCOUNTS_LIST='11111111111 2222222222 333333333'
|
||||
```
|
||||
|
||||
- Then run Prowler to assume a role (change `<role_name>` below to yours, that must be the same in all accounts):
|
||||
- Run Prowler with an IAM role that exists in all target accounts (replace `<role_name>` with your role name, which must be the same in all accounts):
|
||||
|
||||
```
|
||||
ROLE_TO_ASSUME=<role_name>
|
||||
for accountId in $ACCOUNTS_LIST; do
|
||||
prowler aws --role arn:aws:iam::$accountId:role/$ROLE_TO_ASSUME
|
||||
for accountId in $ACCOUNTS_LIST; do
|
||||
prowler aws --role arn:aws:iam::$accountId:role/$ROLE_TO_ASSUME
|
||||
done
|
||||
```
|
||||
|
||||
## Scan multiple specific accounts in parallel
|
||||
## Scanning Multiple Accounts in Parallel
|
||||
|
||||
- Declare a variable with all the accounts to scan:
|
||||
- To scan multiple accounts simultaneously:
|
||||
|
||||
Define the AWS accounts to be scanned with a variable:
|
||||
|
||||
```
|
||||
ACCOUNTS_LIST='11111111111 2222222222 333333333'
|
||||
```
|
||||
|
||||
- Then run Prowler to assume a role (change `<role_name>` below to yours, that must be the same in all accounts), in this example it will scan 3 accounts in parallel:
|
||||
- Run Prowler with an IAM role that exists in all target accounts (replace `<role_name>` with your role name, which must be the same in all accounts). The following example executes scanning across three accounts in parallel:
|
||||
|
||||
```
|
||||
ROLE_TO_ASSUME=<role_name>
|
||||
@@ -41,25 +46,35 @@ for accountId in $ACCOUNTS_LIST; do
|
||||
done
|
||||
```
|
||||
|
||||
## Scan multiple accounts from AWS Organizations in parallel
|
||||
## Scanning Multiple AWS Organization Accounts in Parallel
|
||||
|
||||
- Declare a variable with all the accounts to scan. To do so, get the list of your AWS accounts in your AWS Organization by running the following command (will create a variable with all your ACTIVE accounts). Remember to run that command with the permissions needed to get that information in your AWS Organizations Management account.
|
||||
Prowler enables parallel security scans across multiple AWS accounts within an AWS Organization.
|
||||
|
||||
### Retrieve Active AWS Accounts
|
||||
|
||||
To efficiently scan multiple accounts within an AWS Organization, follow these steps:
|
||||
|
||||
- Step 1: Retrieve a List of Active Accounts
|
||||
|
||||
First, declare a variable containing all active accounts in your AWS Organization. Run the following command in your AWS Organizations Management account, ensuring that you have the necessary permissions:
|
||||
|
||||
```
|
||||
ACCOUNTS_IN_ORG=$(aws organizations list-accounts --query Accounts[?Status==`ACTIVE`].Id --output text)
|
||||
```
|
||||
|
||||
- Then run Prowler to assume a role (change `<role_name>` that must be the same in all accounts and `<management_organizations_account_id>` that must be your AWS Organizations management account ID):
|
||||
- Step 2: Run Prowler with Assumed Roles
|
||||
|
||||
Use Prowler to assume roles across accounts in parallel. Modify `<role_name>` to match the role that exists in all accounts and `<management_organizations_account_id>` to your AWS Organizations Management account ID.
|
||||
|
||||
```
|
||||
ROLE_TO_ASSUME=<role_name>
|
||||
MGMT_ACCOUNT_ID=<management_organizations_account_id>
|
||||
PARALLEL_ACCOUNTS="3"
|
||||
for accountId in $ACCOUNTS_IN_ORG; do
|
||||
test "$(jobs | wc -l)" -ge $PARALLEL_ACCOUNTS && wait || true
|
||||
{
|
||||
prowler aws --role arn:aws:iam::$accountId:role/$ROLE_TO_ASSUME \
|
||||
--organizations-role arn:aws:iam::$MGMT_ACCOUNT_ID:role/$ROLE_TO_ASSUME
|
||||
} &
|
||||
test "$(jobs | wc -l)" -ge $PARALLEL_ACCOUNTS && wait || true
|
||||
{
|
||||
prowler aws --role arn:aws:iam::$accountId:role/$ROLE_TO_ASSUME \
|
||||
--organizations-role arn:aws:iam::$MGMT_ACCOUNT_ID:role/$ROLE_TO_ASSUME
|
||||
} &
|
||||
done
|
||||
```
|
||||
|
||||
@@ -1,28 +1,39 @@
|
||||
# AWS Organizations
|
||||
# AWS Organizations in Prowler
|
||||
|
||||
## Get AWS Account details from your AWS Organization
|
||||
## Retrieving AWS Account Details
|
||||
|
||||
Prowler allows you to get additional information of the scanned account from AWS Organizations.
|
||||
If AWS Organizations is enabled, Prowler can fetch detailed account information during scans, including:
|
||||
|
||||
If you have AWS Organizations enabled, Prowler can get your account details like account name, email, ARN, organization id and tags and you will have them next to every finding's output.
|
||||
- Account Name
|
||||
- Email Address
|
||||
- ARN
|
||||
- Organization ID
|
||||
- Tags
|
||||
|
||||
In order to do that you can use the argument `-O`/`--organizations-role <organizations_role_arn>`. If this argument is not present Prowler will try to fetch that information automatically if the AWS account is a delegated administrator for the AWS Organization.
|
||||
These details will be included alongside each security finding in the output.
|
||||
|
||||
### Enabling AWS Organizations Data Retrieval
|
||||
|
||||
To retrieve AWS Organizations account details, use the `-O`/`--organizations-role <organizations_role_arn>` argument. If this argument is not provided, Prowler will attempt to fetch the data automatically—provided the AWS account is a delegated administrator for the AWS Organization.
|
||||
|
||||
???+ note
|
||||
Refer [here](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_delegate_policies.html) for more information about AWS Organizations delegated administrator.
|
||||
For more information on AWS Organizations delegated administrator, refer to the official documentation [here](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_delegate_policies.html).
|
||||
|
||||
See the following sample command:
|
||||
The following command is an example:
|
||||
|
||||
```shell
|
||||
prowler aws \
|
||||
-O arn:aws:iam::<management_organizations_account_id>:role/<role_name>
|
||||
```
|
||||
|
||||
???+ note
|
||||
Make sure the role in your AWS Organizations management account has the permissions `organizations:DescribeAccount` and `organizations:ListTagsForResource`.
|
||||
Ensure the IAM role used in your AWS Organizations management account has the following permissions: `organizations:DescribeAccount` and `organizations:ListTagsForResource`.
|
||||
|
||||
Prowler will scan the AWS account and get the account details from AWS Organizations.
|
||||
|
||||
In the JSON output below you can see tags coded in base64 to prevent breaking CSV or JSON due to its format:
|
||||
### Handling JSON Output
|
||||
|
||||
In Prowler’s JSON output, tags are encoded in Base64 to prevent formatting errors in CSV or JSON outputs. This ensures compatibility when exporting findings.
|
||||
|
||||
```json
|
||||
"Account Email": "my-prod-account@domain.com",
|
||||
@@ -34,17 +45,17 @@ In the JSON output below you can see tags coded in base64 to prevent breaking CS
|
||||
|
||||
The additional fields in CSV header output are as follows:
|
||||
|
||||
- ACCOUNT_DETAILS_EMAIL
|
||||
- ACCOUNT_DETAILS_NAME
|
||||
- ACCOUNT_DETAILS_ARN
|
||||
- ACCOUNT_DETAILS_ORG
|
||||
- ACCOUNT_DETAILS_TAGS
|
||||
- ACCOUNT\_DETAILS\_EMAIL
|
||||
- ACCOUNT\_DETAILS\_NAME
|
||||
- ACCOUNT\_DETAILS\_ARN
|
||||
- ACCOUNT\_DETAILS\_ORG
|
||||
- ACCOUNT\_DETAILS\_TAGS
|
||||
|
||||
## Extra: Run Prowler across all accounts in AWS Organizations by assuming roles
|
||||
|
||||
If you want to run Prowler across all accounts of AWS Organizations you can do this:
|
||||
### Running Prowler Across All AWS Organization Accounts
|
||||
|
||||
1. First get a list of accounts that are not suspended:
|
||||
1. To run Prowler across all accounts in AWS Organizations, first retrieve a list of accounts that are not suspended:
|
||||
|
||||
```shell
|
||||
ACCOUNTS_IN_ORGS=$(aws organizations list-accounts \
|
||||
@@ -65,5 +76,4 @@ If you want to run Prowler across all accounts of AWS Organizations you can do t
|
||||
```
|
||||
|
||||
???+ note
|
||||
Using the same for loop it can be scanned a list of accounts with a variable like:
|
||||
</br>`ACCOUNTS_LIST='11111111111 2222222222 333333333'`
|
||||
This same loop structure can be adapted to scan a predefined list of accounts using a variable like the following: </br>`ACCOUNTS_LIST='11111111111 2222222222 333333333'`
|
||||
|
||||
@@ -7,62 +7,81 @@ By default Prowler is able to scan the following AWS partitions:
|
||||
- GovCloud (US): `aws-us-gov`
|
||||
|
||||
???+ note
|
||||
To check the available regions for each partition and service please refer to the following document [aws_regions_by_service.json](https://github.com/prowler-cloud/prowler/blob/master/prowler/providers/aws/aws_regions_by_service.json)
|
||||
To check the available regions for each partition and service, refer to: [aws\_regions\_by\_service.json](https://github.com/prowler-cloud/prowler/blob/master/prowler/providers/aws/aws_regions_by_service.json)
|
||||
|
||||
It is important to take into consideration that to scan the China (`aws-cn`) or GovCloud (`aws-us-gov`) partitions it is either required to have a valid region for that partition in your AWS credentials or to specify the regions you want to audit for that partition using the `-f/--region` flag.
|
||||
## Scanning AWS China and GovCloud Partitions in Prowler
|
||||
|
||||
When scanning the China (`aws-cn`) or GovCloud (`aws-us-gov`) partitions, ensure one of the following:
|
||||
|
||||
- Your AWS credentials include a valid region within the desired partition.
|
||||
|
||||
- Specify the regions to audit within that partition using the `-f/--region` flag.
|
||||
|
||||
???+ note
|
||||
Please, refer to https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#configuring-credentials for more information about the AWS credentials configuration.
|
||||
Refer to: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#configuring-credentials for more information about the AWS credential configuration.
|
||||
|
||||
### Scanning Specific Regions
|
||||
|
||||
To scan a particular AWS region with Prowler, use:
|
||||
|
||||
Prowler can scan specific region(s) with:
|
||||
```console
|
||||
prowler aws -f/--region eu-west-1 us-east-1
|
||||
```
|
||||
|
||||
You can get more information about the available partitions and regions in the following [Botocore](https://github.com/boto/botocore) [file](https://github.com/boto/botocore/blob/22a19ea7c4c2c4dd7df4ab8c32733cba0c7597a4/botocore/data/partitions.json).
|
||||
### Available Partitions and Regions
|
||||
|
||||
For more information about the available partitions and regions, refer to the following [Botocore](https://github.com/boto/botocore) [file](https://github.com/boto/botocore/blob/22a19ea7c4c2c4dd7df4ab8c32733cba0c7597a4/botocore/data/partitions.json).
|
||||
|
||||
## AWS China
|
||||
## Scanning AWS Partitions in Prowler
|
||||
|
||||
To scan your AWS account in the China partition (`aws-cn`):
|
||||
### AWS China
|
||||
|
||||
To scan an account in the AWS China partition (`aws-cn`):
|
||||
|
||||
- By using the `-f/--region` flag:
|
||||
|
||||
```
|
||||
prowler aws --region cn-north-1 cn-northwest-1
|
||||
```
|
||||
|
||||
- By using the region configured in your AWS profile at `~/.aws/credentials` or `~/.aws/config`:
|
||||
|
||||
```
|
||||
[default]
|
||||
aws_access_key_id = XXXXXXXXXXXXXXXXXXX
|
||||
aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
|
||||
region = cn-north-1
|
||||
```
|
||||
|
||||
- Using the `-f/--region` flag:
|
||||
```
|
||||
prowler aws --region cn-north-1 cn-northwest-1
|
||||
```
|
||||
- Using the region configured in your AWS profile at `~/.aws/credentials` or `~/.aws/config`:
|
||||
```
|
||||
[default]
|
||||
aws_access_key_id = XXXXXXXXXXXXXXXXXXX
|
||||
aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
|
||||
region = cn-north-1
|
||||
```
|
||||
???+ note
|
||||
With this option all the partition regions will be scanned without the need of use the `-f/--region` flag
|
||||
With this configuration, all partition regions will be scanned without needing the `-f/--region` flag
|
||||
|
||||
### AWS GovCloud (US)
|
||||
|
||||
## AWS GovCloud (US)
|
||||
To scan an account in the AWS GovCloud (US) partition (`aws-us-gov`):
|
||||
|
||||
To scan your AWS account in the GovCloud (US) partition (`aws-us-gov`):
|
||||
- By using the `-f/--region` flag:
|
||||
|
||||
```
|
||||
prowler aws --region us-gov-east-1 us-gov-west-1
|
||||
```
|
||||
|
||||
- By using the region configured in your AWS profile at `~/.aws/credentials` or `~/.aws/config`:
|
||||
|
||||
```
|
||||
[default]
|
||||
aws_access_key_id = XXXXXXXXXXXXXXXXXXX
|
||||
aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
|
||||
region = us-gov-east-1
|
||||
```
|
||||
|
||||
- Using the `-f/--region` flag:
|
||||
```
|
||||
prowler aws --region us-gov-east-1 us-gov-west-1
|
||||
```
|
||||
- Using the region configured in your AWS profile at `~/.aws/credentials` or `~/.aws/config`:
|
||||
```
|
||||
[default]
|
||||
aws_access_key_id = XXXXXXXXXXXXXXXXXXX
|
||||
aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
|
||||
region = us-gov-east-1
|
||||
```
|
||||
???+ note
|
||||
With this option all the partition regions will be scanned without the need of use the `-f/--region` flag
|
||||
With this configuration, all partition regions will be scanned without needing the `-f/--region` flag
|
||||
|
||||
### AWS ISO (US \& Europe)
|
||||
|
||||
## AWS ISO (US & Europe)
|
||||
The AWS ISO partitions—commonly referred to as "secret partitions"—are air-gapped from the Internet, and Prowler does not have a built-in way to scan them. To audit an AWS ISO partition, manually update [aws\_regions\_by\_service.json](https://github.com/prowler-cloud/prowler/blob/master/prowler/providers/aws/aws_regions_by_service.json) to include the partition, region, and services. For example:
|
||||
|
||||
For the AWS ISO partitions, which are known as "secret partitions" and are air-gapped from the Internet, there is no builtin way to scan it. If you want to audit an AWS account in one of the AWS ISO partitions you should manually update the [aws_regions_by_service.json](https://github.com/prowler-cloud/prowler/blob/master/prowler/providers/aws/aws_regions_by_service.json) and include the partition, region and services, e.g.:
|
||||
```json
|
||||
"iam": {
|
||||
"regions": {
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
# Resource ARNs based Scan
|
||||
# Resource ARN-based Scanning
|
||||
|
||||
Prowler allows you to scan only the resources with specific AWS Resource ARNs. This can be done with the flag `--resource-arn` followed by one or more [Amazon Resource Names (ARNs)](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) separated by space:
|
||||
Prowler enables scanning of resources based on specific AWS Resource ARNs.
|
||||
|
||||
## Resource ARN-Based Scanning
|
||||
|
||||
Prowler enables scanning of resources based on specific [Amazon Resource Names (ARNs)](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). To perform this scan, use the designated flag `--resource-arn` followed by one or more ARNs, separated by spaces.
|
||||
|
||||
```
|
||||
prowler aws --resource-arn arn:aws:iam::012345678910:user/test arn:aws:ec2:us-east-1:123456789012:vpc/vpc-12345678
|
||||
```
|
||||
|
||||
This example will only scan the two resources with those ARNs.
|
||||
Example: This configuration scans only the specified two resources using their ARNs.
|
||||
|
||||
@@ -1,50 +1,74 @@
|
||||
# AWS Assume Role
|
||||
# AWS Assume Role in Prowler
|
||||
|
||||
Prowler uses the AWS SDK (Boto3) underneath so it uses the same authentication methods.
|
||||
## Authentication Overview
|
||||
|
||||
However, there are few ways to run Prowler against multiple accounts using IAM Assume Role feature depending on each use case:
|
||||
Prowler leverages the AWS SDK (Boto3) for authentication, following standard AWS authentication methods.
|
||||
|
||||
1. You can just set up your custom profile inside `~/.aws/config` with all needed information about the role to assume then call it with `prowler aws -p/--profile your-custom-profile`.
|
||||
- An example profile that performs role-chaining is given below. The `credential_source` can either be set to `Environment`, `Ec2InstanceMetadata`, or `EcsContainer`.
|
||||
- Alternatively, you could use the `source_profile` instead of `credential_source` to specify a separate named profile that contains IAM user credentials with permission to assume the target the role. More information can be found [here](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html).
|
||||
```
|
||||
[profile crossaccountrole]
|
||||
role_arn = arn:aws:iam::234567890123:role/SomeRole
|
||||
credential_source = EcsContainer
|
||||
```
|
||||
### Running Prowler Against Multiple Accounts
|
||||
|
||||
2. You can use `-R`/`--role <role_arn>` and Prowler will get those temporary credentials using `Boto3` and run against that given account.
|
||||
```sh
|
||||
prowler aws -R arn:aws:iam::<account_id>:role/<role_name>
|
||||
```
|
||||
- Optionally, the session duration (in seconds, by default 3600) and the external ID of this role assumption can be defined:
|
||||
To execute Prowler across multiple AWS accounts using IAM Assume Role, choose one of the following approaches:
|
||||
|
||||
```sh
|
||||
prowler aws -T/--session-duration <seconds> -I/--external-id <external_id> -R arn:aws:iam::<account_id>:role/<role_name>
|
||||
```
|
||||
1. Custom Profile Configuration
|
||||
|
||||
## Custom Role Session Name
|
||||
Set up a custom profile inside `~/.aws/config` with the necessary role information.
|
||||
|
||||
Then call the profile using `prowler aws -p/--profile your-custom-profile`.
|
||||
|
||||
- Role-Chaining Example Profile The `credential_source` parameter can be set to `Environment`, `Ec2InstanceMetadata`, or `EcsContainer`.
|
||||
|
||||
- Using an Alternative Named Profile
|
||||
|
||||
Instead of the `credential_source` parameter, `source_profile` can be used to specify a separate named profile.
|
||||
|
||||
This profile must contain IAM user credentials with permissions to assume the target role. For additional details, refer to the AWS Assume Role documentation: [here](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html).
|
||||
|
||||
```
|
||||
[profile crossaccountrole]
|
||||
role_arn = arn:aws:iam::234567890123:role/SomeRole
|
||||
credential_source = EcsContainer
|
||||
```
|
||||
|
||||
2. Using IAM Role Assumption in Prowler
|
||||
|
||||
To allow Prowler to retrieve temporary credentials by using `Boto3` and run assessments on the specified account, use the `-R`/`--role <role_arn>` flag.
|
||||
|
||||
```sh
|
||||
prowler aws -R arn:aws:iam::<account_id>:role/<role_name>
|
||||
```
|
||||
|
||||
**Defining Session Duration and External ID**
|
||||
|
||||
Optionally, specify the session duration (in seconds, default: 3600) and the external ID for role assumption:
|
||||
|
||||
```sh
|
||||
prowler aws -T/--session-duration <seconds> -I/--external-id <external_id> -R arn:aws:iam::<account_id>:role/<role_name>
|
||||
```
|
||||
|
||||
## Custom Role Session Name in Prowler
|
||||
|
||||
### Setting a Custom Session Name
|
||||
|
||||
Prowler allows you to specify a custom Role Session name using the following flag:
|
||||
|
||||
Prowler can use your custom Role Session name with:
|
||||
```console
|
||||
prowler aws --role-session-name <role_session_name>
|
||||
```
|
||||
|
||||
???+ note
|
||||
It defaults to `ProwlerAssessmentSession`.
|
||||
If not specified, it defaults to `ProwlerAssessmentSession`.
|
||||
|
||||
## Role MFA
|
||||
## Role MFA Authentication
|
||||
|
||||
If your IAM Role has MFA configured you can use `--mfa` along with `-R`/`--role <role_arn>` and Prowler will ask you to input the following values to get a new temporary session for the IAM Role provided:
|
||||
If your IAM Role is configured with Multi-Factor Authentication (MFA), use `--mfa` along with `-R`/`--role <role_arn>`. Prowler will prompt you to input the following values to obtain a temporary session for the IAM Role provided:
|
||||
|
||||
- ARN of your MFA device
|
||||
- TOTP (Time-Based One-Time Password)
|
||||
|
||||
## Create Role
|
||||
## Creating a Role for One or Multiple Accounts
|
||||
|
||||
To create a role to be assumed in one or multiple accounts you can use either as CloudFormation Stack or StackSet the following [template](https://github.com/prowler-cloud/prowler/blob/master/permissions/create_role_to_assume_cfn.yaml) and adapt it.
|
||||
To create an IAM role that can be assumed in one or multiple AWS accounts, use either a CloudFormation Stack or StackSet and adapt the provided [template](https://github.com/prowler-cloud/prowler/blob/master/permissions/create_role_to_assume_cfn.yaml).
|
||||
|
||||
???+ note "About Session Duration"
|
||||
Depending on the amount of checks you run and the size of your infrastructure, Prowler may require more than 1 hour to finish. Use option `-T <seconds>` to allow up to 12h (43200 seconds). To allow more than 1h you need to modify _"Maximum CLI/API session duration"_ for that particular role, read more [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session).
|
||||
???+ note
|
||||
**Session Duration Considerations**: Depending on the number of checks performed and the size of your infrastructure, Prowler may require more than 1 hour to complete. Use the `-T <seconds>` option to allow up to 12 hours (43,200 seconds). If you need more than 1 hour, modify the _“Maximum CLI/API session duration”_ setting for the role. Learn more [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session).
|
||||
|
||||
Bear in mind that if you are using roles assumed by role chaining there is a hard limit of 1 hour so consider not using role chaining if possible, read more about that, in foot note 1 below the table [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html).
|
||||
⚠️ Important: If assuming roles via role chaining, there is a hard limit of 1 hour. Whenever possible, avoid role chaining to prevent session expiration issues. More details are available in footnote 1 below the table in the [AWS IAM guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html).
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
# Send report to AWS S3 Bucket
|
||||
# Sending Reports to an AWS S3 Bucket
|
||||
|
||||
To save your report in an S3 bucket, use `-B`/`--output-bucket`.
|
||||
To save reports directly in an S3 bucket, use: `-B`/`--output-bucket`.
|
||||
|
||||
```sh
|
||||
prowler aws -B my-bucket
|
||||
```
|
||||
|
||||
If you can use a custom folder and/or filename, use `-o`/`--output-directory` and/or `-F`/`--output-filename`.
|
||||
### Custom Folder and Filename
|
||||
|
||||
For a custom folder and/or filename, specify: `-o`/`--output-directory` and/or `-F`/`--output-filename`.
|
||||
|
||||
```sh
|
||||
prowler aws \
|
||||
@@ -15,14 +17,16 @@ prowler aws \
|
||||
--output-filename output-filename
|
||||
```
|
||||
|
||||
By default Prowler sends HTML, JSON and CSV output formats, if you want to send a custom output format or a single one of the defaults you can specify it with the `-M`/`--output-modes` flag.
|
||||
### Custom Output Formats
|
||||
|
||||
By default, Prowler sends HTML, JSON, and CSV output formats. To specify a single output format, use the `-M`/`--output-modes` flag.
|
||||
|
||||
```sh
|
||||
prowler aws -M csv -B my-bucket
|
||||
```
|
||||
|
||||
|
||||
???+ note
|
||||
In the case you do not want to use the assumed role credentials but the initial credentials to put the reports into the S3 bucket, use `-D`/`--output-bucket-no-assume` instead of `-B`/`--output-bucket`.
|
||||
If you prefer using the initial credentials instead of the assumed role credentials for uploading reports, use `-D`/`--output-bucket-no-assume` instead of `-B`/`--output-bucket`.
|
||||
|
||||
???+ warning
|
||||
Make sure that the used credentials have `s3:PutObject` permissions in the S3 path where the reports are going to be uploaded.
|
||||
Ensure the credentials used have the `s3:PutObject` permission on the S3 path where the reports will be uploaded.
|
||||
|
||||
@@ -1,84 +1,92 @@
|
||||
# AWS Security Hub Integration
|
||||
# AWS Security Hub Integration with Prowler
|
||||
|
||||
Prowler supports natively and as **official integration** sending findings to [AWS Security Hub](https://aws.amazon.com/security-hub). This integration allows **Prowler** to import its findings to AWS Security Hub.
|
||||
Prowler natively supports **official integration** with [AWS Security Hub](https://aws.amazon.com/security-hub), allowing security findings to be sent directly. This integration enables **Prowler** to import its findings into AWS Security Hub.
|
||||
|
||||
To activate the integration, follow these steps in at least one AWS region within your AWS account:
|
||||
|
||||
Before sending findings, you will need to enable AWS Security Hub and the **Prowler** integration.
|
||||
## Enabling AWS Security Hub for Prowler Integration
|
||||
|
||||
## Enable AWS Security Hub
|
||||
To enable the integration, follow these steps in **at least** one AWS region within your AWS account.
|
||||
|
||||
To enable the integration you have to perform the following steps, in _at least_ one AWS region of a given AWS account, to enable **AWS Security Hub** and **Prowler** as a partner integration.
|
||||
Since **AWS Security Hub** is a region-based service, it must be activated in each region where security findings need to be collected.
|
||||
|
||||
Since **AWS Security Hub** is a region based service, you will need to enable it in the region or regions you require. You can configure it using the AWS Management Console or the AWS CLI.
|
||||
**Configuration Options**
|
||||
|
||||
AWS Security Hub can be enabled using either of the following methods:
|
||||
|
||||
???+ note
|
||||
Take into account that enabling this integration will incur in costs in AWS Security Hub, please refer to its pricing [here](https://aws.amazon.com/security-hub/pricing/) for more information.
|
||||
Enabling this integration incurs costs in AWS Security Hub. Refer to [this information](https://aws.amazon.com/security-hub/pricing/) for details.
|
||||
|
||||
### Using the AWS Management Console
|
||||
|
||||
#### Enable AWS Security Hub
|
||||
#### Enabling AWS Security Hub for Prowler Integration
|
||||
|
||||
If you have currently AWS Security Hub enabled you can skip to the [next section](#enable-prowler-integration).
|
||||
If AWS Security Hub is already enabled, you can proceed to the [next section](#enable-prowler-integration).
|
||||
|
||||
1. Open the **AWS Security Hub** console at https://console.aws.amazon.com/securityhub/.
|
||||
1. Enable AWS Security Hub via Console: Open the **AWS Security Hub** console: https://console.aws.amazon.com/securityhub/.
|
||||
|
||||
2. When you open the Security Hub console for the first time make sure that you are in the region you want to enable, then choose **Go to Security Hub**.
|
||||

|
||||
2. Ensure you are in the correct AWS region, then select “**Go to Security Hub**”. 
|
||||
|
||||
3. On the next page, the Security standards section lists the security standards that Security Hub supports. Select the check box for a standard to enable it, and clear the check box to disable it.
|
||||
3. In the “Security Standards” section, review the supported security standards. Select the checkbox for each standard you want to enable, or clear it to disable a standard.
|
||||
|
||||
4. Choose **Enable Security Hub**.
|
||||

|
||||
4. Choose “**Enable Security Hub**”. 
|
||||
|
||||
#### Enable Prowler Integration
|
||||
#### Enabling Prowler Integration in AWS Security Hub
|
||||
|
||||
If you have currently the Prowler integration enabled in AWS Security Hub you can skip to the [next section](#send-findings) and start sending findings.
|
||||
If the Prowler integration is already enabled in AWS Security Hub, you can proceed to the [next section](#send-findings) and begin sending findings.
|
||||
|
||||
Once **AWS Security Hub** is enabled you will need to enable **Prowler** as partner integration to allow **Prowler** to send findings to your **AWS Security Hub**.
|
||||
Once **AWS Security Hub** is activated, **Prowler** must be enabled as partner integration to allow security findings to be sent to it.
|
||||
|
||||
1. Open the **AWS Security Hub** console at https://console.aws.amazon.com/securityhub/.
|
||||
1. Accessing the AWS Security Hub Console
|
||||
Open the **AWS Security Hub** console: https://console.aws.amazon.com/securityhub/.
|
||||
|
||||
2. Select the **Integrations** tab in the right-side menu bar.
|
||||

|
||||
2. Select the “**Integrations**” tab from the right-side menu bar. 
|
||||
|
||||
3. Search for _Prowler_ in the text search box and the **Prowler** integration will appear.
|
||||
3. Search for “_Prowler_” in the text search box and the **Prowler** integration will appear.
|
||||
|
||||
4. Once there, click on **Accept Findings** to allow **AWS Security Hub** to receive findings from **Prowler**.
|
||||

|
||||
4. Click “**Accept Findings**” to authorize **AWS Security Hub** to receive findings from **Prowler**. 
|
||||
|
||||
5. A new modal will appear to confirm that you are enabling the **Prowler** integration.
|
||||

|
||||
5. A new modal will appear to confirm that the integration with **Prowler** is being enabled. 
|
||||
|
||||
6. Right after click on **Accept Findings**, you will see that the integration is enabled in **AWS Security Hub**.
|
||||

|
||||
6. After clicking “**Accept Findings**”, the integration will appear as enabled in **AWS Security Hub**. 
|
||||
|
||||
### Using the AWS CLI
|
||||
### Using AWS CLI
|
||||
|
||||
To enable **AWS Security Hub** and the **Prowler** integration you have to run the following commands using the AWS CLI:
|
||||
To enable **AWS Security Hub** and integrate **Prowler**, execute the following AWS CLI commands:
|
||||
|
||||
**Step 1: Enable AWS Security Hub**
|
||||
|
||||
Run the following command to activate AWS Security Hub in the desired region:
|
||||
|
||||
```shell
|
||||
aws securityhub enable-security-hub --region <region>
|
||||
```
|
||||
???+ note
|
||||
For this command to work you will need the `securityhub:EnableSecurityHub` permission. You will need to set the AWS region where you want to enable AWS Security Hub.
|
||||
|
||||
Once **AWS Security Hub** is enabled you will need to enable **Prowler** as partner integration to allow **Prowler** to send findings to your AWS Security Hub. You have to run the following commands using the AWS CLI:
|
||||
???+ note
|
||||
This command requires the `securityhub:EnableSecurityHub` permission. Ensure you set the correct AWS region where you want to enable AWS Security Hub.
|
||||
|
||||
**Step 2: Enable Prowler Integration**
|
||||
|
||||
Once **AWS Security Hub** is activated, **Prowler** must be enabled as partner integration to allow security findings to be sent to it. Run the following AWS CLI commands:
|
||||
|
||||
```shell
|
||||
aws securityhub enable-import-findings-for-product --region eu-west-1 --product-arn arn:aws:securityhub:<region>::product/prowler/prowler
|
||||
```
|
||||
|
||||
???+ note
|
||||
You will need to set the AWS region where you want to enable the integration and also set the AWS region within the ARN. For this command to work you will need the `securityhub:EnableImportFindingsForProduct` permission.
|
||||
Specify the AWS region where you want to enable the integration. Ensure the region is correctly set within the ARN value. This command requires the `securityhub:EnableImportFindingsForProduct` permission.
|
||||
|
||||
## Sending Findings to AWS Security Hub
|
||||
|
||||
## Send Findings
|
||||
Once it is enabled, it is as simple as running the command below (for all regions):
|
||||
Once AWS Security Hub is enabled, findings can be sent using the following commands:
|
||||
|
||||
For all regions:
|
||||
|
||||
```sh
|
||||
prowler aws --security-hub
|
||||
```
|
||||
|
||||
or for only one filtered region like eu-west-1:
|
||||
For a specific region (e.g., eu-west-1):
|
||||
|
||||
```sh
|
||||
prowler --security-hub --region eu-west-1
|
||||
@@ -91,52 +99,60 @@ prowler --security-hub --region eu-west-1
|
||||
|
||||
To keep findings up to date in Security Hub, run Prowler periodically — for example, once a day or every few hours.
|
||||
|
||||
### See you Prowler findings in AWS Security Hub
|
||||
### Viewing Prowler Findings in AWS Security Hub
|
||||
|
||||
Once configured the **AWS Security Hub** in your next scan you will receive the **Prowler** findings in the AWS regions configured. To review those findings in **AWS Security Hub**:
|
||||
After enabling **AWS Security Hub**, findings from **Prowler** will be available in the configured AWS regions. To review those findings in **AWS Security Hub**:
|
||||
|
||||
1. Open the **AWS Security Hub** console at https://console.aws.amazon.com/securityhub/.
|
||||
1. Accessing the AWS Security Hub Console
|
||||
|
||||
2. Select the **Findings** tab in the right-side menu bar.
|
||||

|
||||
Open the **AWS Security Hub** console: https://console.aws.amazon.com/securityhub/.
|
||||
|
||||
3. Use the search box filters and use the **Product Name** filter with the value _Prowler_ to see the findings sent from **Prowler**.
|
||||
2. Select the “**Findings**” tab from the right-side menu bar. 
|
||||
|
||||
4. Then, you can click on the check **Title** to see the details and the history of a finding.
|
||||

|
||||
3. Use the search box filters and apply the “**Product Name**” filter with the value _Prowler_ to display findings sent by **Prowler**.
|
||||
|
||||
As you can see in the related requirements section, in the detailed view of the findings, **Prowler** also sends compliance information related to every finding.
|
||||
4. Click the check “**Title**” to access its detailed view, including its history and status. 
|
||||
|
||||
## Send findings to Security Hub assuming an IAM Role
|
||||
#### Compliance Information
|
||||
|
||||
When you are auditing a multi-account AWS environment, you can send findings to a Security Hub of another account by assuming an IAM role from that account using the `-R` flag in the Prowler command:
|
||||
As outlined in the Requirements section, the detailed view includes compliance details for each finding reported by **Prowler**.
|
||||
|
||||
## Sending Findings to Security Hub with IAM Role Assumption
|
||||
|
||||
### Multi-Account AWS Auditing
|
||||
|
||||
When auditing a multi-account AWS environment, Prowler allows you to send findings to a Security Hub in another account by assuming an IAM role from that target account.
|
||||
|
||||
#### Using an IAM Role to Send Findings
|
||||
|
||||
To send findings to Security Hub, use the `-R` flag in the Prowler command:
|
||||
|
||||
```sh
|
||||
prowler --security-hub --role arn:aws:iam::123456789012:role/ProwlerExecutionRole
|
||||
```
|
||||
|
||||
???+ note
|
||||
Remember that the used role needs to have permissions to send findings to Security Hub. To get more information about the permissions required, please refer to the following IAM policy [prowler-security-hub.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-security-hub.json)
|
||||
The specified IAM role must have the necessary permissions to send findings to Security Hub. For details on the required permissions, refer to the IAM policy: [prowler-security-hub.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-security-hub.json)
|
||||
|
||||
## Sending Only Failed Findings to AWS Security Hub
|
||||
|
||||
## Send only failed findings to Security Hub
|
||||
|
||||
When using the **AWS Security Hub** integration you can send only the `FAIL` findings generated by **Prowler**. Therefore, the **AWS Security Hub** usage costs eventually would be lower. To follow that recommendation you could add the `--status FAIL` flag to the Prowler command:
|
||||
When using **AWS Security Hub** integration, **Prowler** allows sending only failed findings (`FAIL`), helping reduce **AWS Security Hub** usage costs. To enable this, add the `--status FAIL` flag to the Prowler command:
|
||||
|
||||
```sh
|
||||
prowler --security-hub --status FAIL
|
||||
```
|
||||
|
||||
You can use, instead of the `--status FAIL` argument, the `--send-sh-only-fails` argument to save all the findings in the Prowler outputs but just to send FAIL findings to AWS Security Hub:
|
||||
**Configuring Findings Output**
|
||||
|
||||
Instead of using `--status FAIL`, use the `--send-sh-only-fails` argument to store all findings in the Prowler outputs while sending only FAIL findings to AWS Security Hub:
|
||||
|
||||
```sh
|
||||
prowler --security-hub --send-sh-only-fails
|
||||
```
|
||||
|
||||
## Skip sending updates of findings to Security Hub
|
||||
## Skipping Updates for Findings in Security Hub
|
||||
|
||||
By default, Prowler archives all its findings in Security Hub that have not appeared in the last scan.
|
||||
You can skip this logic by using the option `--skip-sh-update` so Prowler will not archive older findings:
|
||||
By default, Prowler archives any findings in Security Hub that were not detected in the latest scan. To prevent older findings from being archived, use the `--skip-sh-update` option:
|
||||
|
||||
```sh
|
||||
prowler --security-hub --skip-sh-update
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
# Tags-based Scan
|
||||
# Tag-based scan
|
||||
|
||||
Prowler allows you to scan only the resources that contain specific tags. This can be done with the flag `--resource-tags` followed by the tags `Key=Value` separated by space:
|
||||
Prowler provides the capability to scan only resources containing specific tags. To execute this, use the designated flag `--resource-tags` followed by the tags `Key=Value`, separated by spaces.
|
||||
|
||||
```
|
||||
prowler aws --resource-tags Environment=dev Project=prowler
|
||||
```
|
||||
|
||||
This example will only scan the resources that contains both tags.
|
||||
This configuration scans only resources that contain both specified tags.
|
||||
|
||||
@@ -1,28 +1,32 @@
|
||||
# Threat Detection
|
||||
# Threat Detection in AWS with Prowler
|
||||
|
||||
Prowler enables threat detection in AWS by analyzing CloudTrail log records. To execute threat detection checks, use the following command:
|
||||
|
||||
Prowler allows you to do threat detection in AWS based on the CloudTrail log records. To run checks related with threat detection use:
|
||||
```
|
||||
prowler aws --category threat-detection
|
||||
```
|
||||
This command will run these checks:
|
||||
|
||||
* `cloudtrail_threat_detection_privilege_escalation` -> Detects privilege escalation attacks.
|
||||
* `cloudtrail_threat_detection_enumeration` -> Detects enumeration attacks.
|
||||
* `cloudtrail_threat_detection_llm_jacking` -> Detects LLM Jacking attacks.
|
||||
This command runs checks to detect:
|
||||
|
||||
* `cloudtrail_threat_detection_privilege_escalation`: Privilege escalation attacks
|
||||
* `cloudtrail_threat_detection_enumeration`: Enumeration attacks
|
||||
* `cloudtrail_threat_detection_llm_jacking`: LLM Jacking attacks
|
||||
|
||||
???+ note
|
||||
Threat Detection checks will be only executed using `--category threat-detection` flag due to performance.
|
||||
Threat detection checks are executed only when the `--category threat-detection` flag is used, due to performance considerations.
|
||||
|
||||
## Config File
|
||||
## Config File for Threat Detection
|
||||
|
||||
If you want to manage the behavior of the Threat Detection checks you can edit `config.yaml` file from `/prowler/config`. In this file you can edit the following attributes related with Threat Detection:
|
||||
To manage the behavior of threat detection checks, edit the configuration file located in `config.yaml` file from `/prowler/config`. The following attributes can be modified, all related to threat detection:
|
||||
|
||||
* `threat_detection_privilege_escalation_threshold`: determines the percentage of actions found to decide if it is a privilege escalation attack event, by default is 0.2 (20%)
|
||||
* `threat_detection_privilege_escalation_minutes`: it is the past minutes to search from now for privilege_escalation attacks, by default is 1440 minutes (24 hours)
|
||||
* `threat_detection_privilege_escalation_actions`: these are the default actions related with privilege escalation.
|
||||
* `threat_detection_enumeration_threshold`: determines the percentage of actions found to decide if it is an enumeration attack event, by default is 0.3 (30%)
|
||||
* `threat_detection_enumeration_minutes`: it is the past minutes to search from now for enumeration attacks, by default is 1440 minutes (24 hours)
|
||||
* `threat_detection_enumeration_actions`: these are the default actions related with enumeration attacks.
|
||||
* `threat_detection_llm_jacking_threshold`: determines the percentage of actions found to decide if it is an LLM Jacking attack event, by default is 0.4 (40%)
|
||||
* `threat_detection_llm_jacking_minutes`: it is the past minutes to search from now for LLM Jacking attacks, by default is 1440 minutes (24 hours)
|
||||
* `threat_detection_llm_jacking_actions`: these are the default actions related with LLM Jacking attacks.
|
||||
* `threat_detection_privilege_escalation_threshold`: Defines the percentage of actions required to classify an event as a privilege escalation attack. Default: 0.2 (20%)
|
||||
* `threat_detection_privilege_escalation_minutes`: Specifies the time window (in minutes) to search for privilege escalation attack patterns. Default: 1440 minutes (24 hours).
|
||||
* `threat_detection_privilege_escalation_actions`: Lists the default actions associated with privilege escalation attacks.
|
||||
* `threat_detection_enumeration_threshold`: Defines the percentage of actions required to classify an event as an enumeration attack. Default: 0.3 (30%)
|
||||
* `threat_detection_enumeration_minutes`: Specifies the time window (in minutes) to search for enumeration attack patterns. Default: 1440 minutes (24 hours).
|
||||
* `threat_detection_enumeration_actions`: Lists the default actions associated with enumeration attacks.
|
||||
* `threat_detection_llm_jacking_threshold`: Defines the percentage of actions required to classify an event as LLM jacking attack. Default: 0.4 (40%)
|
||||
* `threat_detection_llm_jacking_minutes`: Specifies the time window (in minutes) to search for LLM jacking attack patterns. Default: 1440 minutes (24 hours).
|
||||
* `threat_detection_llm_jacking_actions`: Lists the default actions associated with LLM jacking attacks.
|
||||
|
||||
Modify these attributes in the configuration file to fine-tune threat detection checks based on your security requirements.
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Check mapping between Prowler v4/v3 and v2
|
||||
# Check Mapping Prowler v4/v3 to v2
|
||||
|
||||
Prowler v3 and v4 comes with different identifiers but we maintained the same checks that were implemented in v2. The reason for this change is because in previous versions of Prowler, check names were mostly based on CIS Benchmark for AWS. In v4 and v3 all checks are independent from any security framework and they have its own name and ID.
|
||||
Prowler v3 and v4 introduce distinct identifiers while preserving the checks originally implemented in v2. This change was made because, in previous versions, check names were primarily derived from the CIS Benchmark for AWS. Starting with v3 and v4, all checks are independent of any security framework and have unique names and IDs.
|
||||
|
||||
If you need more information about how new compliance implementation works in Prowler v4 and v3 see [Compliance](../compliance.md) section.
|
||||
For more details on the updated compliance implementation in Prowler v4 and v3, refer to the [Compliance](../compliance.md) section.
|
||||
|
||||
```
|
||||
checks_v4_v3_to_v2_mapping = {
|
||||
@@ -17,7 +17,7 @@ checks_v4_v3_to_v2_mapping = {
|
||||
"apigateway_restapi_public": "extra745",
|
||||
"apigateway_restapi_logging_enabled": "extra722",
|
||||
"apigateway_restapi_waf_acl_attached": "extra744",
|
||||
"apigatewayv2_api_access_logging_enabled": "extra7156",
|
||||
"apigatewayv2_api_access_logging_enabled": "extra7156",
|
||||
"apigatewayv2_api_authorizers_enabled": "extra7157",
|
||||
"appstream_fleet_default_internet_access_disabled": "extra7193",
|
||||
"appstream_fleet_maximum_session_duration": "extra7190",
|
||||
|
||||
@@ -1,27 +1,28 @@
|
||||
# Azure authentication
|
||||
# Azure Authentication in Prowler
|
||||
|
||||
By default Prowler uses Azure Python SDK identity package authentication methods using the classes `DefaultAzureCredential` and `InteractiveBrowserCredential`.
|
||||
This allows Prowler to authenticate against azure using the following methods:
|
||||
By default, Prowler utilizes the Azure Python SDK identity package for authentication, leveraging the classes `DefaultAzureCredential` and `InteractiveBrowserCredential`. This enables authentication against Azure using the following approaches:
|
||||
|
||||
- Service principal authentication by environment variables (Enterprise Application)
|
||||
- Current AZ CLI credentials stored
|
||||
- Service principal authentication via environment variables (Enterprise Application)
|
||||
- Currently stored AZ CLI credentials
|
||||
- Interactive browser authentication
|
||||
- Managed identity authentication
|
||||
|
||||
To launch the tool it is required to specify which method is used through the following flags:
|
||||
Before launching the tool, specify the desired method using the following flags:
|
||||
|
||||
```console
|
||||
# To use service principal authentication
|
||||
# Service principal authentication:
|
||||
prowler azure --sp-env-auth
|
||||
|
||||
# To use az cli authentication
|
||||
# AZ CLI authentication
|
||||
prowler azure --az-cli-auth
|
||||
|
||||
# To use browser authentication
|
||||
# Browser authentication
|
||||
prowler azure --browser-auth --tenant-id "XXXXXXXX"
|
||||
|
||||
# To use managed identity auth
|
||||
# Managed identity authentication
|
||||
prowler azure --managed-identity-auth
|
||||
```
|
||||
|
||||
To use Prowler you also need to set up the permissions required to access your resources in your Azure account; for more details refer to [Requirements](../../getting-started/requirements.md)
|
||||
## Permission Configuration
|
||||
|
||||
To ensure Prowler can access the required resources within your Azure account, proper permissions must be configured. Refer to the [Requirements](../../getting-started/requirements.md) section for details on setting up necessary privileges.
|
||||
|
||||
@@ -1,79 +1,100 @@
|
||||
# How to create Prowler Service Principal Application
|
||||
# Creating a Prowler Service Principal Application
|
||||
|
||||
To allow Prowler to assume an identity to start the scan with the required privileges, it is necessary to create a Service Principal. This Service Principal is going to be used to authenticate against Azure and retrieve the metadata needed to perform the checks.
|
||||
To enable Prowler to assume an identity for scanning with the required privileges, a Service Principal must be created. This Service Principal authenticates against Azure and retrieves necessary metadata for checks.
|
||||
|
||||
To create a Service Principal Application you can use the Azure Portal or the Azure CLI.
|
||||
### Methods for Creating a Service Principal
|
||||
|
||||
## From Azure Portal / Entra Admin Center
|
||||
Service Principal Applications can be created using either the Azure Portal or the Azure CLI.
|
||||
|
||||
1. Access to Microsoft Entra ID
|
||||
2. In the left menu bar, go to "App registrations"
|
||||
3. Once there, in the menu bar click on "+ New registration" to register a new application
|
||||
4. Fill the "Name", select the "Supported account types" and click on "Register". You will be redirected to the applications page.
|
||||
5. Once in the application page, in the left menu bar, select "Certificates & secrets"
|
||||
6. In the "Certificates & secrets" view, click on "+ New client secret"
|
||||
7. Fill the "Description" and "Expires" fields and click on "Add"
|
||||
8. Copy the value of the secret, it is going to be used as `AZURE_CLIENT_SECRET` environment variable.
|
||||
## Creating a Service Principal via Azure Portal / Entra Admin Center
|
||||
|
||||

|
||||
1. Access Microsoft Entra ID.
|
||||
2. In the left menu bar, navigate to **"App registrations"**.
|
||||
3. Click **"+ New registration"** in the menu bar to register a new application
|
||||
4. Fill the **"Name"**, select the **"Supported account types"** and click **"Register"**. You will be redirected to the applications page.
|
||||
5. In the left menu bar, select **"Certificates & secrets"**.
|
||||
6. Under the **"Certificates & secrets"** view, click **"+ New client secret"**.
|
||||
7. Fill the **"Description"** and **"Expires"** fields, then click **"Add"**.
|
||||
8. Copy the secret value, as it will be used as `AZURE_CLIENT_SECRET` environment variable.
|
||||
|
||||

|
||||
|
||||
## From Azure CLI
|
||||
|
||||
To create a Service Principal using the Azure CLI, follow the next steps:
|
||||
### Creating a Service Principal
|
||||
|
||||
1. Open a terminal and execute the following command to create a new Service Principal application:
|
||||
```console
|
||||
az ad sp create-for-rbac --name "ProwlerApp"
|
||||
```
|
||||
2. The output of the command is going to be similar to the following:
|
||||
```json
|
||||
{
|
||||
"appId": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"displayName": "ProwlerApp",
|
||||
"password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
|
||||
"tenant": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
|
||||
}
|
||||
```
|
||||
3. Save the values of `appId`, `password` and `tenant` to be used as credentials in Prowler.
|
||||
To create a Service Principal using the Azure CLI, follow these steps:
|
||||
|
||||
# Assigning the proper permissions
|
||||
1. Open a terminal and execute the following command:
|
||||
|
||||
To allow Prowler to retrieve metadata from the identity assumed and run specific Entra checks, it is needed to assign the following permissions:
|
||||
```console
|
||||
az ad sp create-for-rbac --name "ProwlerApp"
|
||||
```
|
||||
|
||||
- `Domain.Read.All`
|
||||
2. The output will be similar to:
|
||||
|
||||
```json
|
||||
{
|
||||
"appId": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"displayName": "ProwlerApp",
|
||||
"password": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
|
||||
"tenant": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
|
||||
}
|
||||
```
|
||||
|
||||
3. Save the values of `appId`, `password` and `tenant`, as they will be used as credentials in Prowler.
|
||||
|
||||
## Assigning Proper Permissions
|
||||
|
||||
To allow Prowler to retrieve metadata from the assumed identity and run Entra checks, assign the following permissions:
|
||||
|
||||
- `Directory.Read.All`
|
||||
- `Policy.Read.All`
|
||||
- `UserAuthenticationMethod.Read.All` (used only for the Entra checks related with multifactor authentication)
|
||||
|
||||
To assign the permissions you can make it from the Azure Portal or using the Azure CLI.
|
||||
Permissions can be assigned via the Azure Portal or the Azure CLI.
|
||||
|
||||
???+ note
|
||||
Once you have created and assigned the proper Entra permissions to the application, you can go to this [tutorial](../azure/subscriptions.md) to add the subscription permissions to the application and start scanning your resources.
|
||||
After creating and assigning the necessary Entra permissions, follow this [tutorial](../azure/subscriptions.md) to add subscription permissions to the application and start scanning your resources.
|
||||
|
||||
## From Azure Portal
|
||||
### Assigning the Reader Role in Azure Portal
|
||||
|
||||
1. Access Microsoft Entra ID.
|
||||
|
||||
2. In the left menu bar, navigate to “App registrations”.
|
||||
|
||||
3. Select the created application.
|
||||
|
||||
4. In the left menu bar, select “API permissions”.
|
||||
|
||||
5. Click “+ Add a permission” and select “Microsoft Graph”.
|
||||
|
||||
6. In the “Microsoft Graph” view, select “Application permissions”.
|
||||
|
||||
1. Access to Microsoft Entra ID
|
||||
2. In the left menu bar, go to "App registrations"
|
||||
3. Once there, select the application that you have created
|
||||
4. In the left menu bar, select "API permissions"
|
||||
5. Then click on "+ Add a permission" and select "Microsoft Graph"
|
||||
6. Once in the "Microsoft Graph" view, select "Application permissions"
|
||||
7. Finally, search for "Directory", "Policy" and "UserAuthenticationMethod" and select the following permissions:
|
||||
|
||||
- `Domain.Read.All`
|
||||
|
||||
- `Policy.Read.All`
|
||||
|
||||
- `UserAuthenticationMethod.Read.All`
|
||||
8. Click on "Add permissions" to apply the new permissions.
|
||||
9. Finally, an admin should click on "Grant admin consent for [your tenant]" to apply the permissions.
|
||||
|
||||
8. Click “Add permissions” to apply the new permissions.
|
||||
|
||||

|
||||
9. Finally, an admin must click “Grant admin consent for \[your tenant]” to apply the permissions.
|
||||
|
||||
## From Azure CLI
|
||||

|
||||
|
||||
1. Open a terminal and execute the following command to assign the permissions to the Service Principal:
|
||||
```console
|
||||
az ad app permission add --id {appId} --api 00000003-0000-0000-c000-000000000000 --api-permissions 7ab1d382-f21e-4acd-a863-ba3e13f7da61=Role 246dd0d5-5bd0-4def-940b-0421030a5b68=Role 38d9df27-64da-44fd-b7c5-a6fbac20248f=Role
|
||||
```
|
||||
2. The admin consent is needed to apply the permissions, an admin should execute the following command:
|
||||
```console
|
||||
az ad app permission admin-consent --id {appId}
|
||||
```
|
||||
### From Azure CLI
|
||||
|
||||
1. To grant permissions to a Service Principal, execute the following command in a terminal:
|
||||
|
||||
```console
|
||||
az ad app permission add --id {appId} --api 00000003-0000-0000-c000-000000000000 --api-permissions 7ab1d382-f21e-4acd-a863-ba3e13f7da61=Role 246dd0d5-5bd0-4def-940b-0421030a5b68=Role 38d9df27-64da-44fd-b7c5-a6fbac20248f=Role
|
||||
```
|
||||
|
||||
2. Once the permissions are assigned, admin consent is required to finalize the changes. An administrator should run:
|
||||
|
||||
```console
|
||||
az ad app permission admin-consent --id {appId}
|
||||
```
|
||||
|
||||
@@ -144,8 +144,8 @@ Assign the following Microsoft Graph permissions:
|
||||
|
||||
6. Return to `Access control (IAM)` > `+ Add` > `Add role assignment`
|
||||
|
||||
- Assign the `Reader` role
|
||||
- Then repeat and assign the custom `ProwlerRole`
|
||||
- Assign the `Reader` role to the Application created in the previous step
|
||||
- Then repeat the same process assigning the custom `ProwlerRole`
|
||||
|
||||

|
||||
|
||||
|
||||
@@ -1,134 +1,159 @@
|
||||
# Azure subscriptions scope
|
||||
# Azure Subscription Scope
|
||||
|
||||
The main target for performing the scans in Azure is the subscription scope. Prowler needs to have the proper permissions to access the subscription and retrieve the metadata needed to perform the checks.
|
||||
Prowler performs security scans within the subscription scope in Azure. To execute checks, it requires appropriate permissions to access the subscription and retrieve necessary metadata.
|
||||
|
||||
By default, Prowler is multi-subscription, which means that it is going to scan all the subscriptions it is able to list. If you only assign permissions to one subscription, it is going to scan a single one.
|
||||
Prowler also has the ability to limit the subscriptions to scan to a set passed as input argument, to do so:
|
||||
By default, Prowler operates multi-subscription, scanning all subscriptions it has permission to list. If permissions are granted for only a single subscription, Prowler will limit scans to that subscription.
|
||||
|
||||
## Configuring Specific Subscription Scans in Prowler
|
||||
|
||||
Additionally, Prowler supports restricting scans to specific subscriptions by passing a set of subscription IDs as an input argument. To configure this limitation, use the appropriate command options:
|
||||
|
||||
```console
|
||||
prowler azure --az-cli-auth --subscription-ids <subscription ID 1> <subscription ID 2> ... <subscription ID N>
|
||||
```
|
||||
|
||||
Where you can pass from 1 up to N subscriptions to be scanned.
|
||||
Prowler allows you to specify one or more subscriptions for scanning (up to N), enabling flexible audit configurations.
|
||||
|
||||
???+ warning
|
||||
The multi-subscription feature is only available for the CLI, in the case of Prowler App is only possible to scan one subscription per scan.
|
||||
The multi-subscription feature is available only in the CLI. In Prowler App, each scan is limited to a single subscription.
|
||||
|
||||
## Assign the appropriate permissions to the identity that is going to be assumed by Prowler
|
||||
## Assigning Permissions for Subscription Scans
|
||||
|
||||
To perform scans, ensure that the identity assumed by Prowler has the appropriate permissions.
|
||||
|
||||
Regarding the subscription scope, Prowler, by default, scans all subscriptions it can access. Therefore, it is necessary to add a `Reader` role assignment for each subscription you want to audit. To make it easier and less repetitive to assign roles in environments with multiple subscriptions check the [following section](#recommendation-for-multiple-subscriptions).
|
||||
By default, Prowler scans all accessible subscriptions. If you need to audit specific subscriptions, you must assign the necessary role `Reader` for each one. For streamlined and less repetitive role assignments in multi-subscription environments, refer to the [following section](#recommendation-for-multiple-subscriptions).
|
||||
|
||||
### From Azure Portal
|
||||
### Assigning the Reader Role in Azure Portal
|
||||
|
||||
1. Access to the subscription you want to scan with Prowler.
|
||||
2. Select "Access control (IAM)" in the left menu.
|
||||
3. Click on "+ Add" and select "Add role assignment".
|
||||
4. In the search bar, type `Reader`, select it and click on "Next".
|
||||
5. In the Members tab, click on "+ Select members" and add the members you want to assign this role.
|
||||
6. Click on "Review + assign" to apply the new role.
|
||||
1. To grant Prowler access to scan a specific Azure subscription, follow these steps in Azure Portal:
|
||||
Navigate to the subscription you want to audit with Prowler.
|
||||
|
||||

|
||||
2. In the left menu, select “Access control (IAM)”.
|
||||
|
||||
3. Click “+ Add” and select “Add role assignment”.
|
||||
|
||||
4. In the search bar, enter `Reader`, select it and click “Next”.
|
||||
|
||||
5. In the “Members” tab, click “+ Select members”, then add the accounts to assign this role.
|
||||
|
||||
6. Click “Review + assign” to finalize and apply the role assignment.
|
||||
|
||||

|
||||
|
||||
### From Azure CLI
|
||||
|
||||
1. Open a terminal and execute the following command to assign the `Reader` role to the identity that is going to be assumed by Prowler:
|
||||
```console
|
||||
az role assignment create --role "Reader" --assignee <user, group, or service principal> --scope /subscriptions/<subscription-id>
|
||||
```
|
||||
|
||||
```console
|
||||
az role assignment create --role "Reader" --assignee <user, group, or service principal> --scope /subscriptions/<subscription-id>
|
||||
```
|
||||
|
||||
2. If the command is executed successfully, the output is going to be similar to the following:
|
||||
```json
|
||||
{
|
||||
"condition": null,
|
||||
"conditionVersion": null,
|
||||
"createdBy": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"createdOn": "YYYY-MM-DDTHH:MM:SS.SSSSSS+00:00",
|
||||
"delegatedManagedIdentityResourceId": null,
|
||||
"description": null,
|
||||
"id": "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/providers/Microsoft.Authorization/roleAssignments/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"name": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"principalId": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"principalName": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"principalType": "ServicePrincipal",
|
||||
"roleDefinitionId": "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/providers/Microsoft.Authorization/roleDefinitions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"roleDefinitionName": "Reader",
|
||||
"scope": "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"type": "Microsoft.Authorization/roleAssignments",
|
||||
"updatedBy": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"updatedOn": "YYYY-MM-DDTHH:MM:SS.SSSSSS+00:00"
|
||||
}
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"condition": null,
|
||||
"conditionVersion": null,
|
||||
"createdBy": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"createdOn": "YYYY-MM-DDTHH:MM:SS.SSSSSS+00:00",
|
||||
"delegatedManagedIdentityResourceId": null,
|
||||
"description": null,
|
||||
"id": "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/providers/Microsoft.Authorization/roleAssignments/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"name": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"principalId": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"principalName": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"principalType": "ServicePrincipal",
|
||||
"roleDefinitionId": "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/providers/Microsoft.Authorization/roleDefinitions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"roleDefinitionName": "Reader",
|
||||
"scope": "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"type": "Microsoft.Authorization/roleAssignments",
|
||||
"updatedBy": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"updatedOn": "YYYY-MM-DDTHH:MM:SS.SSSSSS+00:00"
|
||||
}
|
||||
```
|
||||
|
||||
### Prowler Custom Role
|
||||
|
||||
Moreover, some additional read-only permissions not included in the built-in reader role are needed for some checks, for this kind of checks we use a custom role. This role is defined in [prowler-azure-custom-role](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-azure-custom-role.json). Once the custom role is created you can assign it in the same way as the `Reader` role.
|
||||
Some read-only permissions required for specific security checks are not included in the built-in Reader role. To support these checks, Prowler utilizes a custom role, defined in [prowler-azure-custom-role](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-azure-custom-role.json). Once created, this role can be assigned following the same process as the `Reader` role.
|
||||
|
||||
The checks that need the `ProwlerRole` can be consulted in the [requirements section](../../getting-started/requirements.md#checks-that-require-prowlerrole).
|
||||
The checks requiring this `ProwlerRole` can be found in the [requirements section](../../getting-started/requirements.md#checks-that-require-prowlerrole).
|
||||
|
||||
#### Create ProwlerRole from Azure Portal
|
||||
#### Create ProwlerRole via Azure Portal
|
||||
|
||||
1. Download the [prowler-azure-custom-role](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-azure-custom-role.json) file and modify the `assignableScopes` field to be the subscription ID where the role assignment is going to be made, it should be something like `/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX`.
|
||||
2. Access your subscription.
|
||||
3. Select "Access control (IAM)".
|
||||
4. Click on "+ Add" and select "Add custom role".
|
||||
5. In the "Baseline permissions" select "Start from JSON" and upload the file downloaded and modified in the step 1.
|
||||
7. Click on "Review + create" to create the new role.
|
||||
1. Download the [prowler-azure-custom-role](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-azure-custom-role.json) file and modify the `assignableScopes` field to match the target subscription. Example format: `/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX`.
|
||||
|
||||
#### Create ProwlerRole from Azure CLI
|
||||
2. Access your Azure subscription.
|
||||
|
||||
1. Open a terminal and execute the following command to create a new custom role:
|
||||
```console
|
||||
az role definition create --role-definition '{
|
||||
"Name": "ProwlerRole",
|
||||
"IsCustom": true,
|
||||
"Description": "Role used for checks that require read-only access to Azure resources and are not covered by the Reader role.",
|
||||
"AssignableScopes": [
|
||||
"/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" // USE YOUR SUBSCRIPTION ID
|
||||
],
|
||||
"Actions": [
|
||||
"Microsoft.Web/sites/host/listkeys/action",
|
||||
"Microsoft.Web/sites/config/list/Action"
|
||||
]
|
||||
}'
|
||||
```
|
||||
3. If the command is executed successfully, the output is going to be similar to the following:
|
||||
```json
|
||||
{
|
||||
"assignableScopes": [
|
||||
"/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
|
||||
],
|
||||
"createdBy": null,
|
||||
"createdOn": "YYYY-MM-DDTHH:MM:SS.SSSSSS+00:00",
|
||||
"description": "Role used for checks that require read-only access to Azure resources and are not covered by the Reader role.",
|
||||
"id": "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/providers/Microsoft.Authorization/roleDefinitions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"name": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"permissions": [
|
||||
{
|
||||
"actions": [
|
||||
"Microsoft.Web/sites/host/listkeys/action",
|
||||
"Microsoft.Web/sites/config/list/Action"
|
||||
],
|
||||
"condition": null,
|
||||
"conditionVersion": null,
|
||||
"dataActions": [],
|
||||
"notActions": [],
|
||||
"notDataActions": []
|
||||
}
|
||||
],
|
||||
"roleName": "ProwlerRole",
|
||||
"roleType": "CustomRole",
|
||||
"type": "Microsoft.Authorization/roleDefinitions",
|
||||
"updatedBy": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"updatedOn": "YYYY-MM-DDTHH:MM:SS.SSSSSS+00:00"
|
||||
}
|
||||
```
|
||||
3. Select “Access control (IAM)”.
|
||||
|
||||
## Recommendation for multiple subscriptions
|
||||
4. Click “+ Add” and select “Add custom role”.
|
||||
|
||||
Scanning multiple subscriptions can be tedious due to the need to create and assign roles for each one. To simplify this process, we recommend using management groups to organize and audit subscriptions collectively with Prowler.
|
||||
5. Under “Baseline permissions”, select “Start from JSON” and upload the modified role file.
|
||||
|
||||
6. Click “Review + create” to finalize the role creation.
|
||||
|
||||
#### Create ProwlerRole via Azure CLI
|
||||
|
||||
1. To create a new custom role, open a terminal and execute the following command:
|
||||
|
||||
```console
|
||||
az role definition create --role-definition '{
|
||||
"Name": "ProwlerRole",
|
||||
"IsCustom": true,
|
||||
"Description": "Role used for checks that require read-only access to Azure resources and are not covered by the Reader role.",
|
||||
"AssignableScopes": [
|
||||
"/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" // USE YOUR SUBSCRIPTION ID
|
||||
],
|
||||
"Actions": [
|
||||
"Microsoft.Web/sites/host/listkeys/action",
|
||||
"Microsoft.Web/sites/config/list/Action"
|
||||
]
|
||||
}'
|
||||
```
|
||||
|
||||
2. If the command is executed successfully, the output is going to be similar to the following:
|
||||
|
||||
```json
|
||||
{
|
||||
"assignableScopes": [
|
||||
"/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
|
||||
],
|
||||
"createdBy": null,
|
||||
"createdOn": "YYYY-MM-DDTHH:MM:SS.SSSSSS+00:00",
|
||||
"description": "Role used for checks that require read-only access to Azure resources and are not covered by the Reader role.",
|
||||
"id": "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/providers/Microsoft.Authorization/roleDefinitions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"name": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"permissions": [
|
||||
{
|
||||
"actions": [
|
||||
"Microsoft.Web/sites/host/listkeys/action",
|
||||
"Microsoft.Web/sites/config/list/Action"
|
||||
],
|
||||
"condition": null,
|
||||
"conditionVersion": null,
|
||||
"dataActions": [],
|
||||
"notActions": [],
|
||||
"notDataActions": []
|
||||
}
|
||||
],
|
||||
"roleName": "ProwlerRole",
|
||||
"roleType": "CustomRole",
|
||||
"type": "Microsoft.Authorization/roleDefinitions",
|
||||
"updatedBy": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"updatedOn": "YYYY-MM-DDTHH:MM:SS.SSSSSS+00:00"
|
||||
}
|
||||
```
|
||||
|
||||
## Recommendation for Managing Multiple Subscriptions
|
||||
|
||||
Scanning multiple subscriptions requires creating and assigning roles for each, which can be a time-consuming process. To streamline subscription management and auditing, use management groups in Azure. This approach allows Prowler to efficiently organize and audit multiple subscriptions collectively.
|
||||
|
||||
1. **Create a Management Group**: Follow the [official guide](https://learn.microsoft.com/en-us/azure/governance/management-groups/create-management-group-portal) to create a new management group.
|
||||

|
||||
2. **Add all roles**: Assign roles to the new management group as in the [past section](#assign-the-appropriate-permissions-to-the-identity-that-is-going-to-be-assumed-by-prowler), but at the management group level instead of the subscription level.
|
||||
3. **Add subscriptions**: Add all the subscriptions you want to audit to the management group.
|
||||

|
||||
|
||||

|
||||
|
||||
2. **Assign Roles**: Assign necessary roles to the management group, similar to the [role assignment process](#assign-the-appropriate-permissions-to-the-identity-that-is-going-to-be-assumed-by-prowler).
|
||||
|
||||
Role assignment should be done at the management group level instead of per subscription.
|
||||
|
||||
3. **Add Subscriptions**: Add all subscriptions you want to audit to the newly created management group. 
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
# Use non default Azure regions
|
||||
# Using Non-Default Azure Regions
|
||||
|
||||
Microsoft provides clouds for compliance with regional laws, which are available for your use.
|
||||
By default, Prowler uses `AzureCloud` cloud which is the comercial one. (you can list all the available with `az cloud list --output table`).
|
||||
Microsoft offers cloud environments that comply with regional regulations. These clouds are available for use based on your requirements. By default, Prowler utilizes the commercial `AzureCloud` environment. (To list all available Azure clouds, use `az cloud list --output table`).
|
||||
|
||||
As of this documentation's publication, the following Azure clouds are available:
|
||||
|
||||
At the time of writing this documentation the available Azure Clouds from different regions are the following:
|
||||
- AzureCloud
|
||||
- AzureChinaCloud
|
||||
- AzureUSGovernment
|
||||
|
||||
If you want to change the default one you must include the flag `--azure-region`, i.e.:
|
||||
To change the default cloud, include the flag `--azure-region`. For example:
|
||||
|
||||
```console
|
||||
prowler azure --az-cli-auth --azure-region AzureChinaCloud
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# Check Aliases
|
||||
|
||||
Prowler allows you to use aliases for the checks. You only have to add the `CheckAliases` key to the check's metadata with a list of the aliases:
|
||||
|
||||
```json title="check.metadata.json"
|
||||
"Provider": "<provider>",
|
||||
"CheckID": "<check_id>",
|
||||
@@ -12,7 +13,9 @@ Prowler allows you to use aliases for the checks. You only have to add the `Chec
|
||||
],
|
||||
...
|
||||
```
|
||||
Then, you can execute the check either with its check ID or with one of the previous aliases:
|
||||
|
||||
Then you can execute the check either with its check ID or with one of the previous aliases:
|
||||
|
||||
```shell
|
||||
prowler <provider> -c/--checks <check_alias_1>
|
||||
|
||||
|
||||
@@ -1,37 +1,126 @@
|
||||
# Compliance
|
||||
|
||||
Prowler allows you to execute checks based on requirements defined in compliance frameworks. By default, it will execute and give you an overview of the status of each compliance framework:
|
||||
|
||||
<img src="../img/compliance/compliance.png"/>
|
||||
|
||||
> You can find CSVs containing detailed compliance results inside the compliance folder within Prowler's output folder.
|
||||
You can find CSVs containing detailed compliance results in the compliance folder within Prowler's output folder.
|
||||
|
||||
## Execute Prowler based on Compliance Frameworks
|
||||
|
||||
Prowler can analyze your environment based on a specific compliance framework and provide more details. To do so, use the `--compliance` option:
|
||||
|
||||
```sh
|
||||
prowler <provider> --compliance <compliance_framework>
|
||||
```
|
||||
|
||||
Standard results will be shown and additionally the framework information as the sample below for CIS AWS 2.0. For details a CSV file has been generated as well.
|
||||
|
||||
<img src="../img/compliance/compliance-cis-sample1.png"/>
|
||||
|
||||
???+ note
|
||||
**If Prowler can't find a resource related with a check from a compliance requirement, this requirement won't appear on the output**
|
||||
**If Prowler can't find a resource related with a check from a compliance requirement, this requirement won't appear on the output**
|
||||
|
||||
## List Available Compliance Frameworks
|
||||
In order to see which compliance frameworks are cover by Prowler, you can use option `--list-compliance`:
|
||||
|
||||
In order to see which compliance frameworks are covered by Prowler, you can use option `--list-compliance`:
|
||||
|
||||
```sh
|
||||
prowler <provider> --list-compliance
|
||||
```
|
||||
|
||||
The full and updated list of supported compliance frameworks for each provider is available at [Prowler Hub](https://hub.prowler.com/compliance).
|
||||
### AWS (36 frameworks)
|
||||
|
||||
- `aws_account_security_onboarding_aws`
|
||||
- `aws_audit_manager_control_tower_guardrails_aws`
|
||||
- `aws_foundational_security_best_practices_aws`
|
||||
- `aws_foundational_technical_review_aws`
|
||||
- `aws_well_architected_framework_reliability_pillar_aws`
|
||||
- `aws_well_architected_framework_security_pillar_aws`
|
||||
- `cis_1.4_aws`
|
||||
- `cis_1.5_aws`
|
||||
- `cis_2.0_aws`
|
||||
- `cis_3.0_aws`
|
||||
- `cis_4.0_aws`
|
||||
- `cis_5.0_aws`
|
||||
- `cisa_aws`
|
||||
- `ens_rd2022_aws`
|
||||
- `fedramp_low_revision_4_aws`
|
||||
- `fedramp_moderate_revision_4_aws`
|
||||
- `ffiec_aws`
|
||||
- `gdpr_aws`
|
||||
- `gxp_21_cfr_part_11_aws`
|
||||
- `gxp_eu_annex_11_aws`
|
||||
- `hipaa_aws`
|
||||
- `iso27001_2013_aws`
|
||||
- `iso27001_2022_aws`
|
||||
- `kisa_isms_p_2023_aws`
|
||||
- `kisa_isms_p_2023_korean_aws`
|
||||
- `mitre_attack_aws`
|
||||
- `nis2_aws`
|
||||
- `nist_800_171_revision_2_aws`
|
||||
- `nist_800_53_revision_4_aws`
|
||||
- `nist_800_53_revision_5_aws`
|
||||
- `nist_csf_1.1_aws`
|
||||
- `pci_3.2.1_aws`
|
||||
- `pci_4.0_aws`
|
||||
- `prowler_threatscore_aws`
|
||||
- `rbi_cyber_security_framework_aws`
|
||||
- `soc2_aws`
|
||||
|
||||
### Azure (10 frameworks)
|
||||
|
||||
- `cis_2.0_azure`
|
||||
- `cis_2.1_azure`
|
||||
- `cis_3.0_azure`
|
||||
- `ens_rd2022_azure`
|
||||
- `iso27001_2022_azure`
|
||||
- `mitre_attack_azure`
|
||||
- `nis2_azure`
|
||||
- `pci_4.0_azure`
|
||||
- `prowler_threatscore_azure`
|
||||
- `soc2_azure`
|
||||
|
||||
### GCP (10 frameworks)
|
||||
|
||||
- `cis_2.0_gcp`
|
||||
- `cis_3.0_gcp`
|
||||
- `cis_4.0_gcp`
|
||||
- `ens_rd2022_gcp`
|
||||
- `iso27001_2022_gcp`
|
||||
- `mitre_attack_gcp`
|
||||
- `nis2_gcp`
|
||||
- `pci_4.0_gcp`
|
||||
- `prowler_threatscore_gcp`
|
||||
- `soc2_gcp`
|
||||
|
||||
### Kubernetes (5 frameworks)
|
||||
|
||||
- `cis_1.10_kubernetes`
|
||||
- `cis_1.11_kubernetes`
|
||||
- `cis_1.8_kubernetes`
|
||||
- `iso27001_2022_kubernetes`
|
||||
- `pci_4.0_kubernetes`
|
||||
|
||||
### M365 (3 frameworks)
|
||||
|
||||
- `cis_4.0_m365`
|
||||
- `iso27001_2022_m365`
|
||||
- `prowler_threatscore_m365`
|
||||
|
||||
### GitHub (1 framework)
|
||||
|
||||
- `cis_1.0_github`
|
||||
|
||||
## List Requirements of Compliance Frameworks
|
||||
For each compliance framework, you can use option `--list-compliance-requirements` to list its requirements:
|
||||
For each compliance framework, you can use the `--list-compliance-requirements` option to list its requirements:
|
||||
|
||||
```sh
|
||||
prowler <provider> --list-compliance-requirements <compliance_framework(s)>
|
||||
```
|
||||
|
||||
Example for the first requirements of CIS 1.5 for AWS:
|
||||
|
||||
```
|
||||
Listing CIS 1.5 AWS Compliance Requirements:
|
||||
|
||||
@@ -66,4 +155,4 @@ Requirement Id: 1.5
|
||||
|
||||
## Create and contribute adding other Security Frameworks
|
||||
|
||||
This information is part of the Developer Guide and can be found here: https://docs.prowler.cloud/en/latest/tutorials/developer-guide/.
|
||||
This information is part of the Developer Guide and can be found [here](../developer-guide/security-compliance-framework.md).
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
# Configuration File
|
||||
|
||||
Several of Prowler's checks have user-configurable variables that can be modified in a common **configuration file**. This file can be found in the following [path](https://github.com/prowler-cloud/prowler/blob/master/prowler/config/config.yaml):
|
||||
|
||||
```
|
||||
prowler/config/config.yaml
|
||||
```
|
||||
|
||||
Also you can input a custom configuration file using the `--config-file` argument.
|
||||
Additionally, you can input a custom configuration file using the `--config-file` argument.
|
||||
|
||||
## AWS
|
||||
|
||||
@@ -78,7 +80,8 @@ The following list includes all the Azure checks with configurable variables tha
|
||||
| `app_ensure_python_version_is_latest` | `python_latest_version` | String |
|
||||
| `app_ensure_java_version_is_latest` | `java_latest_version` | String |
|
||||
| `sqlserver_recommended_minimal_tls_version` | `recommended_minimal_tls_versions` | List of Strings |
|
||||
| `defender_attack_path_notifications_properly_configured` | `defender_attack_path_minimal_risk_level` | String |
|
||||
| `vm_desired_sku_size` | `desired_vm_sku_sizes` | List of Strings |
|
||||
| `defender_attack_path_notifications_properly_configured` | `defender_attack_path_minimal_risk_level` | String |
|
||||
|
||||
|
||||
## GCP
|
||||
@@ -481,6 +484,16 @@ azure:
|
||||
"1.3"
|
||||
]
|
||||
|
||||
# Azure Virtual Machines
|
||||
# azure.vm_desired_sku_size
|
||||
# List of desired VM SKU sizes that are allowed in the organization
|
||||
desired_vm_sku_sizes:
|
||||
[
|
||||
"Standard_A8_v2",
|
||||
"Standard_DS3_v2",
|
||||
"Standard_D4s_v3",
|
||||
]
|
||||
|
||||
# GCP Configuration
|
||||
gcp:
|
||||
# GCP Compute Configuration
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Custom Checks Metadata
|
||||
|
||||
In certain organizations, the severity of specific checks might differ from the default values defined in the check's metadata. For instance, while `s3_bucket_level_public_access_block` could be deemed `critical` for some organizations, others might assign a different severity level.
|
||||
In certain organizations, the severity of specific checks might differ from the default values defined in the check's metadata. For instance, while `s3_bucket_level_public_access_block` could be deemed `critical` for some organizations, others might assign a different severity level to it.
|
||||
|
||||
The custom metadata option offers a means to override default metadata set by Prowler
|
||||
|
||||
@@ -15,18 +15,20 @@ The list of supported check's metadata fields that can be override are listed as
|
||||
- Risk
|
||||
- RelatedUrl
|
||||
- Remediation
|
||||
- Code
|
||||
- CLI
|
||||
- NativeIaC
|
||||
- Other
|
||||
- Terraform
|
||||
- Recommendation
|
||||
- Text
|
||||
- Url
|
||||
- Code
|
||||
- CLI
|
||||
- NativeIaC
|
||||
- Other
|
||||
- Terraform
|
||||
- Recommendation
|
||||
- Text
|
||||
- Url
|
||||
|
||||
|
||||
## File Syntax
|
||||
|
||||
This feature is available for all the providers supported in Prowler since the metadata format is common between all the providers. The following is the YAML format for the custom checks metadata file:
|
||||
This feature is available for all the providers supported in Prowler since the metadata format is common between all the providers. The YAML format for the custom checks metadata file is as follows:
|
||||
|
||||
```yaml title="custom_checks_metadata.yaml"
|
||||
CustomChecksMetadata:
|
||||
aws:
|
||||
@@ -117,6 +119,7 @@ CustomChecksMetadata:
|
||||
## Usage
|
||||
|
||||
Executing the following command will assess all checks and generate a report while overriding the metadata for those checks:
|
||||
|
||||
```sh
|
||||
prowler <provider> --custom-checks-metadata-file <path/to/custom/metadata>
|
||||
```
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
# Dashboard
|
||||
|
||||
Prowler allows you to run your own local dashboards using the csv outputs provided by Prowler
|
||||
|
||||
```sh
|
||||
prowler dashboard
|
||||
```
|
||||
|
||||
???+ note
|
||||
You can expose the `dashboard` server in another address using the `HOST` environment variable.
|
||||
|
||||
@@ -16,10 +18,9 @@ docker run -v /your/local/dir/prowler-output:/home/prowler/output --env HOST=0.0
|
||||
Make sure you update the `/your/local/dir/prowler-output` to match the path that contains your prowler output.
|
||||
|
||||
???+ note
|
||||
**Remember that the `dashboard` server is not authenticated, if you expose it to the internet, you are running it at your own risk.**
|
||||
**Remember that the `dashboard` server is not authenticated. If you expose it to the Internet, do it at your own risk.**
|
||||
|
||||
The banner and additional info about the dashboard will be shown on your console:
|
||||
<img src="../img/dashboard/dashboard-banner.png">
|
||||
The banner and additional info about the dashboard will be shown on your console: <img src="../img/dashboard/dashboard-banner.png">
|
||||
|
||||
## Overview Page
|
||||
|
||||
@@ -27,32 +28,39 @@ The overview page provides a full impression of your findings obtained from Prow
|
||||
|
||||
<img src="../img/dashboard/dashboard-overview.png">
|
||||
|
||||
In this page you can do multiple functions:
|
||||
This page allows for multiple functions:
|
||||
|
||||
* Apply filters:
|
||||
|
||||
* Assessment Date
|
||||
* Account
|
||||
* Region
|
||||
* Severity
|
||||
* Service
|
||||
* Status
|
||||
* See which files has been scanned to generate the dashboard placing your mouse on the `?` icon:
|
||||
<img src="../img/dashboard/dashboard-files-scanned.png">
|
||||
* Download the `Top Findings by Severity` table using the button `DOWNLOAD THIS TABLE AS CSV` or `DOWNLOAD THIS TABLE AS XLSX`
|
||||
* Click on the provider cards to filter by provider.
|
||||
* On the dropdowns under `Top Findings by Severity` you can apply multiple sorts to see the information, also you will get a detailed view of each finding using the dropdowns:
|
||||
<img src="../img/dashboard/dropdown.png">
|
||||
|
||||
* See which files have been scanned to generate the dashboard by placing your mouse on the `?` icon:
|
||||
|
||||
<img src="../img/dashboard/dashboard-files-scanned.png">
|
||||
|
||||
* Download the `Top Findings by Severity` table using the button `DOWNLOAD THIS TABLE AS CSV` or `DOWNLOAD THIS TABLE AS XLSX`
|
||||
|
||||
* Click the provider cards to filter by provider.
|
||||
|
||||
* On the dropdowns under `Top Findings by Severity` you can apply multiple sorts to see the information, also you will get a detailed view of each finding using the dropdowns:
|
||||
|
||||
<img src="../img/dashboard/dropdown.png">
|
||||
|
||||
## Compliance Page
|
||||
|
||||
This page shows all the info related to the compliance selected, you can apply multiple filters depending on your preferences.
|
||||
This page shows all the info related to the compliance selected. Multiple filters can be selected as per your preferences.
|
||||
|
||||
<img src="../img/dashboard/dashboard-compliance.png">
|
||||
|
||||
To add your own compliance to compliance page, add a file with the compliance name (using `_` instead of `.`) to the path `/dashboard/compliance`.
|
||||
|
||||
In this file use the format present in the other compliance files to create the table. Example for CIS 2.0:
|
||||
|
||||
```python
|
||||
import warnings
|
||||
|
||||
@@ -83,29 +91,31 @@ def get_table(data):
|
||||
|
||||
## S3 Integration
|
||||
|
||||
If you are using Prowler SaaS with the S3 integration or that integration from Prowler Open Source and you want to use your data from your S3 bucket, you can run:
|
||||
If you are using Prowler SaaS with the S3 integration or that integration from Prowler Open Source and you want to use your data from your S3 bucket, you can run the following command in order to load the dashboard with the new files:
|
||||
|
||||
```sh
|
||||
aws s3 cp s3://<your-bucket>/output/csv ./output --recursive
|
||||
```
|
||||
to load the dashboard with the new files.
|
||||
|
||||
## Output Path
|
||||
|
||||
Prowler will use the outputs from the folder `/output` (for common prowler outputs) and `/output/compliance` (for prowler compliance outputs) to generate the dashboard.
|
||||
|
||||
To change the path modify the values `folder_path_overview` or `folder_path_compliance` from `/dashboard/config.py`
|
||||
To change the path, modify the values `folder_path_overview` or `folder_path_compliance` from `/dashboard/config.py`
|
||||
|
||||
???+ note
|
||||
If you have any issue related to dashboards, check that the output path where the dashboard is getting the outputs is correct.
|
||||
|
||||
|
||||
## Output Support
|
||||
|
||||
Prowler dashboard supports the detailed outputs:
|
||||
|
||||
| Provider | V3 | V4 | COMPLIANCE-V3 | COMPLIANCE-V4|
|
||||
|---|---|---|---|---|
|
||||
| AWS | ✅ | ✅ | ✅ | ✅ |
|
||||
| Azure | ❌ | ✅ | ❌ | ✅ |
|
||||
| Kubernetes | ❌ | ✅ | ❌ | ✅ |
|
||||
| GCP | ❌ | ✅ | ❌ | ✅ |
|
||||
| Provider| V3| V4| COMPLIANCE-V3| COMPLIANCE-V4
|
||||
|----------|----------|----------|----------|----------
|
||||
| AWS| ✅| ✅| ✅| ✅
|
||||
| Azure| ❌| ✅| ❌| ✅
|
||||
| Kubernetes| ❌| ✅| ❌| ✅
|
||||
| GCP| ❌| ✅| ❌| ✅
|
||||
| M365| ❌| ✅| ❌| ✅
|
||||
| GitHub| ❌| ✅| ❌| ✅
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
# Prowler Fixer (remediation)
|
||||
|
||||
Prowler allows you to fix some of the failed findings it identifies. You can use the `--fixer` flag to run the fixes that are available for the checks that failed.
|
||||
|
||||
```sh
|
||||
@@ -8,16 +9,20 @@ prowler <provider> -c <check_to_fix_1> <check_to_fix_2> ... --fixer
|
||||
<img src="../img/fixer.png">
|
||||
|
||||
???+ note
|
||||
You can see all the available fixes for each provider with the `--list-remediations` or `--list-fixers flag.
|
||||
You can see all the available fixes for each provider with the `--list-remediations` or `--list-fixers` flag.
|
||||
|
||||
```sh
|
||||
prowler <provider> --list-fixers
|
||||
```
|
||||
|
||||
It's important to note that using the fixers for `Access Analyzer`, `GuardDuty`, and `SecurityHub` may incur additional costs. These AWS services might trigger actions or deploy resources that can lead to charges on your AWS account.
|
||||
|
||||
## Writing a Fixer
|
||||
|
||||
To write a fixer, you need to create a file called `<check_id>_fixer.py` inside the check folder, with a function called `fixer` that receives either the region or the resource to be fixed as a parameter, and returns a boolean value indicating if the fix was successful or not.
|
||||
|
||||
For example, the regional fixer for the `ec2_ebs_default_encryption` check, which enables EBS encryption by default in a region, would look like this:
|
||||
|
||||
```python
|
||||
from prowler.lib.logger import logger
|
||||
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
|
||||
@@ -25,8 +30,8 @@ from prowler.providers.aws.services.ec2.ec2_client import ec2_client
|
||||
|
||||
def fixer(region):
|
||||
"""
|
||||
Enable EBS encryption by default in a region. NOTE: Custom KMS keys for EBS Default Encryption may be overwritten.
|
||||
Requires the ec2:EnableEbsEncryptionByDefault permission:
|
||||
Enable EBS encryption by default in a region. ???+ note Custom KMS keys for EBS Default Encryption may be overwritten. Requires the ec2:EnableEbsEncryptionByDefault permission.
|
||||
It can be set as follows:
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
@@ -53,7 +58,9 @@ def fixer(region):
|
||||
)
|
||||
return False
|
||||
```
|
||||
|
||||
On the other hand, the fixer for the `s3_account_level_public_access_blocks` check, which enables the account-level public access blocks for S3, would look like this:
|
||||
|
||||
```python
|
||||
from prowler.lib.logger import logger
|
||||
from prowler.providers.aws.services.s3.s3control_client import s3control_client
|
||||
@@ -61,7 +68,7 @@ from prowler.providers.aws.services.s3.s3control_client import s3control_client
|
||||
|
||||
def fixer(resource_id: str) -> bool:
|
||||
"""
|
||||
Enable S3 Block Public Access for the account. NOTE: By blocking all S3 public access you may break public S3 buckets.
|
||||
Enable S3 Block Public Access for the account. ???+ note By blocking all S3 public access you may break public S3 buckets.
|
||||
Requires the s3:PutAccountPublicAccessBlock permission:
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
@@ -96,7 +103,9 @@ def fixer(resource_id: str) -> bool:
|
||||
```
|
||||
|
||||
## Fixer Config file
|
||||
|
||||
For some fixers, you can have configurable parameters depending on your use case. You can either use the default config file in `prowler/config/fixer_config.yaml` or create a custom config file and pass it to the fixer with the `--fixer-config` flag. The config file should be a YAML file with the following structure:
|
||||
|
||||
```yaml
|
||||
# Fixer configuration file
|
||||
aws:
|
||||
|
||||
@@ -1,18 +1,17 @@
|
||||
# GCP authentication
|
||||
# GCP Authentication in Prowler
|
||||
|
||||
Prowler will use by default your User Account credentials, you can configure it using:
|
||||
## Default Authentication
|
||||
|
||||
- `gcloud init` to use a new account
|
||||
- `gcloud config set account <account>` to use an existing account
|
||||
- `gcloud auth application-default login`
|
||||
By default, Prowler uses your User Account credentials. You can configure authentication as follows:
|
||||
|
||||
This will generate Application Default Credentials (ADC) that Prowler will use automatically.
|
||||
- `gcloud init` to use a new account, or
|
||||
- `gcloud config set account <account>` to use an existing account.
|
||||
|
||||
---
|
||||
Then, obtain your access credentials using: `gcloud auth application-default login`.
|
||||
|
||||
## Using a Service Account key file
|
||||
## Using Service Account Keys
|
||||
|
||||
Otherwise, you can generate and download Service Account keys in JSON format (refer to https://cloud.google.com/iam/docs/creating-managing-service-account-keys) and provide the location of the file with the following argument:
|
||||
Alternatively, Service Account keys can be generated and downloaded in JSON format. Follow the steps in the Google Cloud IAM guide (https://cloud.google.com/iam/docs/creating-managing-service-account-keys) to create and manage service account keys. Provide the path to the key file using:
|
||||
|
||||
```console
|
||||
prowler gcp --credentials-file path
|
||||
@@ -21,8 +20,6 @@ prowler gcp --credentials-file path
|
||||
???+ note
|
||||
`prowler` will scan the GCP project associated with the credentials.
|
||||
|
||||
---
|
||||
|
||||
## Using an access token
|
||||
|
||||
If you already have an access token (e.g., generated with `gcloud auth print-access-token`), you can run Prowler with:
|
||||
@@ -38,11 +35,9 @@ prowler gcp --project-ids <project-id>
|
||||
export GOOGLE_CLOUD_PROJECT=<project-id>
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Credentials lookup order
|
||||
|
||||
Prowler follows the same search order as [Google authentication libraries](https://cloud.google.com/docs/authentication/application-default-credentials#search_order):
|
||||
Prowler follows the same credential search process as [Google authentication libraries](https://cloud.google.com/docs/authentication/application-default-credentials#search_order), checking credentials in this order:
|
||||
|
||||
1. [`GOOGLE_APPLICATION_CREDENTIALS` environment variable](https://cloud.google.com/docs/authentication/application-default-credentials#GAC)
|
||||
2. [`CLOUDSDK_AUTH_ACCESS_TOKEN` + optional `GOOGLE_CLOUD_PROJECT`](https://cloud.google.com/sdk/gcloud/reference/auth/print-access-token)
|
||||
@@ -51,25 +46,22 @@ Prowler follows the same search order as [Google authentication libraries](https
|
||||
|
||||
???+ note
|
||||
The credentials must belong to a user or service account with the necessary permissions.
|
||||
To ensure full access, assign the roles/viewer IAM role to the identity being used.
|
||||
To ensure full access, assign the roles/reader IAM role to the identity being used.
|
||||
|
||||
???+ note
|
||||
Prowler will use the enabled Google Cloud APIs to get the information needed to perform the checks.
|
||||
|
||||
---
|
||||
|
||||
## Required Permissions
|
||||
|
||||
## Needed permissions
|
||||
To ensure full functionality, Prowler for Google Cloud needs the following permissions to be set:
|
||||
|
||||
Prowler for Google Cloud needs the following permissions to be set:
|
||||
|
||||
- **Viewer (`roles/viewer`) IAM role**: granted at the project / folder / org level in order to scan the target projects
|
||||
- **Reader (`roles/reader`) IAM role**: granted at the project / folder / org level in order to scan the target projects
|
||||
|
||||
- **Project level settings**: you need to have at least one project with the below settings:
|
||||
- Identity and Access Management (IAM) API (`iam.googleapis.com`) enabled by either using the
|
||||
[Google Cloud API UI](https://console.cloud.google.com/apis/api/iam.googleapis.com/metrics) or
|
||||
by using the gcloud CLI `gcloud services enable iam.googleapis.com --project <your-project-id>` command
|
||||
- Service Usage Consumer (`roles/serviceusage.serviceUsageConsumer`) IAM role
|
||||
- Set the quota project to be this project by either running `gcloud auth application-default set-quota-project <project-id>` or by setting an environment variable:
|
||||
`export GOOGLE_CLOUD_QUOTA_PROJECT=<project-id>`
|
||||
|
||||
@@ -79,12 +71,12 @@ The above settings must be associated to a user or service account.
|
||||
???+ note
|
||||
Prowler will use the enabled Google Cloud APIs to get the information needed to perform the checks.
|
||||
|
||||
## Impersonate Service Account
|
||||
## Impersonating a GCP Service Account in Prowler
|
||||
|
||||
If you want to impersonate a GCP service account, you can use the `--impersonate-service-account` argument:
|
||||
To impersonate a GCP service account, use the `--impersonate-service-account` argument followed by the service account email:
|
||||
|
||||
```console
|
||||
prowler gcp --impersonate-service-account <service-account-email>
|
||||
```
|
||||
|
||||
This argument will use the default credentials to impersonate the service account provided.
|
||||
This command leverages the default credentials to impersonate the specified service account.
|
||||
|
||||
@@ -1,21 +1,22 @@
|
||||
# GCP Organization
|
||||
# Scanning a Specific GCP Organization
|
||||
|
||||
By default, Prowler scans all Google Cloud projects accessible to the authenticated user.
|
||||
|
||||
To limit the scan to projects within a specific Google Cloud organization, use the `--organization-id` option with the GCP organization ID:
|
||||
To limit the scan to projects within a specific Google Cloud organization, use the `--organization-id` option with the GCP organization’s ID:
|
||||
|
||||
```console
|
||||
prowler gcp --organization-id organization-id
|
||||
```
|
||||
|
||||
???+ warning
|
||||
Make sure that the used credentials have the role Cloud Asset Viewer (`roles/cloudasset.viewer`) or Cloud Asset Owner (`roles/cloudasset.owner`) on the organization level.
|
||||
Ensure the credentials used have one of the following roles at the organization level:
|
||||
Cloud Asset Viewer (`roles/cloudasset.viewer`), or Cloud Asset Owner (`roles/cloudasset.owner`).
|
||||
|
||||
???+ note
|
||||
With this option, Prowler retrieves all projects within the specified organization, including those organized in folders and nested subfolders. This ensures that every project under the organization’s hierarchy is scanned, providing full visibility across the entire organization.
|
||||
With this option, Prowler retrieves all projects under the specified Google Cloud organization, including those organized within folders and nested subfolders. This ensures full visibility across the entire organization’s hierarchy.
|
||||
|
||||
???+ note
|
||||
To find the organization ID, use the following command:
|
||||
To obtain the Google Cloud organization ID, use:
|
||||
|
||||
```console
|
||||
gcloud organizations list
|
||||
|
||||
@@ -1,28 +1,37 @@
|
||||
# GCP Projects
|
||||
# GCP Project Scanning in Prowler
|
||||
|
||||
By default, Prowler is multi-project, which means that is going to scan all the Google Cloud projects that the authenticated user has access to. If you want to scan a specific project(s), you can use the `--project-ids` argument.
|
||||
By default, Prowler operates in a multi-project mode, scanning all Google Cloud projects accessible to the authenticated user.
|
||||
|
||||
## Specifying Projects
|
||||
|
||||
To limit the scan to specific projects, use the `--project-ids` argument followed by the desired project ID(s).
|
||||
|
||||
```console
|
||||
prowler gcp --project-ids project-id1 project-id2
|
||||
```
|
||||
|
||||
???+ note
|
||||
You can use asterisk `*` to scan projects that match a pattern. For example, `prowler gcp --project-ids "prowler*"` will scan all the projects that start with `prowler`.
|
||||
### Pattern-Based Project Selection
|
||||
|
||||
???+ note
|
||||
If you want to know the projects that you have access to, you can use the following command:
|
||||
Use an asterisk `*` to scan projects that match a pattern. For example, `prowler gcp --project-ids "prowler*"` will scan all the projects that start with `prowler`.
|
||||
|
||||
```console
|
||||
prowler gcp --list-project-ids
|
||||
```
|
||||
### Listing Accessible Projects
|
||||
|
||||
### Exclude Projects
|
||||
To view a list of projects the user has access to, run:
|
||||
|
||||
If you want to exclude some projects from the scan, you can use the `--excluded-project-ids` argument.
|
||||
```console
|
||||
prowler gcp --list-project-ids
|
||||
```
|
||||
|
||||
### Excluding Projects in Prowler
|
||||
|
||||
#### Project Exclusion
|
||||
|
||||
To exclude specific Google Cloud projects from the scan, use the `--excluded-project-ids` argument followed by the project ID(s):
|
||||
|
||||
```console
|
||||
prowler gcp --excluded-project-ids project-id1 project-id2
|
||||
```
|
||||
|
||||
???+ note
|
||||
You can use asterisk `*` to exclude projects that match a pattern. For example, `prowler gcp --excluded-project-ids "sys*"` will exclude all the projects that start with `sys`.
|
||||
#### Pattern-Based Project Exclusion
|
||||
|
||||
Use an asterisk `*` to exclude projects that match a pattern. For example, `prowler gcp --excluded-project-ids "sys*"` will exclude all the projects that start with `sys`.
|
||||
|
||||
95
docs/tutorials/gcp/retry-configuration.md
Normal file
@@ -0,0 +1,95 @@
|
||||
# GCP Retry Configuration in Prowler
|
||||
|
||||
Prowler's GCP Provider uses Google Cloud Python SDK's integrated retry mechanism to automatically retry API calls when encountering rate limiting errors (HTTP 429).
|
||||
|
||||
## Quick Configuration
|
||||
|
||||
### Using Command Line Flag (Recommended)
|
||||
```bash
|
||||
prowler gcp --gcp-retries-max-attempts 5
|
||||
```
|
||||
|
||||
### Using Configuration File
|
||||
Modify `prowler/providers/gcp/config.py`:
|
||||
```python
|
||||
DEFAULT_RETRY_ATTEMPTS = 5 # Default: 3
|
||||
```
|
||||
|
||||
## How It Works
|
||||
|
||||
- **Automatic Detection**: Handles HTTP 429 and quota exceeded errors
|
||||
- **Exponential Backoff**: Each retry uses randomized exponential backoff
|
||||
- **Centralized Config**: All GCP services use the same retry configuration
|
||||
- **Transparent**: No additional code needed in services
|
||||
|
||||
## Error Examples Handled
|
||||
|
||||
```
|
||||
HttpError 429 when requesting https://cloudresourcemanager.googleapis.com/v1/projects/vms-uat-eiger:getIamPolicy?alt=json returned "Quota exceeded for quota metric 'Read requests' and limit 'Read requests per minute'"
|
||||
```
|
||||
|
||||
## Implementation
|
||||
|
||||
### Client-Level Configuration
|
||||
```python
|
||||
from prowler.providers.gcp.config import DEFAULT_RETRY_ATTEMPTS
|
||||
|
||||
client = discovery.build(
|
||||
service, version, credentials=credentials,
|
||||
num_retries=DEFAULT_RETRY_ATTEMPTS
|
||||
)
|
||||
```
|
||||
|
||||
### Request-Level Configuration
|
||||
```python
|
||||
response = request.execute(num_retries=DEFAULT_RETRY_ATTEMPTS)
|
||||
```
|
||||
|
||||
## Services with Retry Support
|
||||
|
||||
All major GCP services are covered:
|
||||
- Cloud Resource Manager, Compute Engine, IAM
|
||||
- BigQuery, KMS, Cloud Storage, Monitoring
|
||||
- DNS, Logging, Cloud SQL, GKE, API Keys, DataProc
|
||||
|
||||
## Validation
|
||||
|
||||
### Debug Logging
|
||||
```bash
|
||||
prowler gcp --log-level DEBUG --log-file debuglogs.txt --project-id your-project-id
|
||||
```
|
||||
|
||||
### Check for Retry Messages
|
||||
```bash
|
||||
grep -i "sleeping\|retry\|quota exceeded" debuglogs.txt
|
||||
```
|
||||
|
||||
### Expected Output
|
||||
```
|
||||
"Sleeping 1.52 seconds before retry 1 of 3"
|
||||
"Sleeping 3.23 seconds before retry 2 of 3"
|
||||
```
|
||||
|
||||
## Testing in Real Environment
|
||||
|
||||
1. **Reduce API Quotas** in GCP Console:
|
||||
- APIs & Services > Quotas
|
||||
- Reduce "Read requests per minute" for Compute Engine API
|
||||
- Reduce "Policy Read Requests per minute" for IAM API
|
||||
|
||||
2. **Run Prowler** with debug logging
|
||||
3. **Monitor logs** for retry messages
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you are experiencing rate limiting:
|
||||
1. Use `--gcp-retries-max-attempts` flag to increase attempts
|
||||
2. Request quota increases from Google Cloud support
|
||||
3. Optimize scanning to reduce simultaneous API calls
|
||||
4. Verify retry functionality with debug logging
|
||||
|
||||
## Official References
|
||||
|
||||
- [Google Cloud Python Client Libraries](https://cloud.google.com/python/docs)
|
||||
- [Google Cloud Quotas](https://cloud.google.com/docs/quotas)
|
||||
- [Google API Core Retry](https://googleapis.dev/python/google-api-core/latest/retry.html)
|
||||
@@ -13,6 +13,7 @@ This flexibility allows you to scan and analyze your GitHub account, including r
|
||||
Here are the available login methods and their respective flags:
|
||||
|
||||
### Personal Access Token (PAT)
|
||||
|
||||
Use this method by providing your personal access token directly.
|
||||
|
||||
```console
|
||||
@@ -20,6 +21,7 @@ prowler github --personal-access-token pat
|
||||
```
|
||||
|
||||
### OAuth App Token
|
||||
|
||||
Authenticate using an OAuth app token.
|
||||
|
||||
```console
|
||||
@@ -34,6 +36,7 @@ prowler github --github-app-id app_id --github-app-key-path app_key_path
|
||||
```
|
||||
|
||||
### Automatic Login Method Detection
|
||||
|
||||
If no login method is explicitly provided, Prowler will automatically attempt to authenticate using environment variables in the following order of precedence:
|
||||
|
||||
1. `GITHUB_PERSONAL_ACCESS_TOKEN`
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Getting Started with GitHub Authentication
|
||||
# Getting Started with GitHub
|
||||
|
||||
This guide explains how to set up authentication with GitHub for Prowler. The documentation covers credential retrieval processes for each supported authentication method.
|
||||
|
||||
|
||||