Compare commits

...

1120 Commits

Author SHA1 Message Date
github-actions
158d1c4e27 chore(release): 3.13.0 2024-02-08 13:33:28 +00:00
Sergio Garcia
8ed97810a8 feat(cis): add new CIS AWS v3.0.0 (#3379)
Co-authored-by: pedrooot <pedromarting3@gmail.com>
2024-02-08 13:31:12 +01:00
Sergio Garcia
c5af9605ee fix(alias): allow multiple check aliases (#3378) 2024-02-08 12:21:42 +01:00
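
The alias fix above lets a single check declare more than one alias. As a minimal sketch of how alias resolution can work (the metadata table and helper below are hypothetical, not Prowler's actual code):

```python
# Hypothetical metadata: each check may declare several aliases
# (illustrative data, not Prowler's real metadata).
CHECKS_METADATA = {
    "ec2_instance_imdsv2_enabled": {"aliases": ["check_ec2_imdsv2", "imdsv2"]},
}

def resolve_check(name: str) -> str:
    """Return the canonical check id for a name or any of its aliases."""
    for check_id, metadata in CHECKS_METADATA.items():
        if name == check_id or name in metadata.get("aliases", []):
            return check_id
    return name

# e.g. resolve_check("imdsv2") -> "ec2_instance_imdsv2_enabled"
```
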
Iain Wallace
f5a18dce56 fix(cis): update CIS AWS v2.0 Section 2.1 refs (#3375)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2024-02-08 12:09:49 +01:00
Sergio Garcia
d14d8f5e02 chore(regions_update): Changes in regions for AWS services. (#3377) 2024-02-08 10:42:19 +01:00
Pepe Fagoaga
eadc66f53b fix(allowlist): Handle tags and resources (#3376) 2024-02-08 10:06:02 +01:00
Sergio Garcia
5f946d08cb chore(regions_update): Changes in regions for AWS services. (#3370) 2024-02-07 17:57:29 +01:00
Rubén De la Torre Vico
3f7c37abb9 feat(defender): New Terraform URL for metadata checks (#3374) 2024-02-07 16:02:56 +01:00
Pedro Martín
b60b48b948 feat(Azure): Add 4 new checks related to SQLServer and Vulnerability Assessment (#3372) 2024-02-07 16:01:52 +01:00
Sergio Garcia
68ecf939d9 feat(python): support Python 3.12 (#3371) 2024-02-07 15:16:02 +01:00
Rubén De la Torre Vico
a50d093679 fix(defender): Manage 404 exception for "default" security contacts (#3373) 2024-02-07 13:38:20 +01:00
Rubén De la Torre Vico
740e829e4f feat(azure): Defender check defender_ensure_iot_hub_defender_is_on (#3367) 2024-02-07 12:46:02 +01:00
Pedro Martín
f7051351ec fix(azure): Fix check sqlserver_auditing_retention_90_days (#3365) 2024-02-06 17:17:10 +01:00
dependabot[bot]
a1018ad683 build(deps): bump aiohttp from 3.9.1 to 3.9.2 (#3366)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-02-06 13:52:05 +01:00
dependabot[bot]
a912189e51 build(deps): bump msgraph-core from 0.2.2 to 1.0.0 (#3309)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2024-02-06 13:35:22 +01:00
Sergio Garcia
7298f64e5c fix(s3): add s3:Get* case to s3_bucket_policy_public_write_access (#3364) 2024-02-06 13:04:55 +01:00
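
A bucket-policy check like the one above has to treat wildcard IAM actions such as `s3:Get*` as matching concrete actions like `s3:GetObject`. A minimal sketch of case-insensitive wildcard matching (an illustrative helper, not the check's actual implementation):

```python
import fnmatch

def action_matches(pattern: str, action: str) -> bool:
    """Case-insensitively match an IAM action against a policy pattern
    such as 's3:*' or 's3:Get*' (illustrative helper)."""
    return fnmatch.fnmatch(action.lower(), pattern.lower())

# e.g. action_matches("s3:Get*", "s3:GetObject") -> True
```
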
Rubén De la Torre Vico
fcf902eb1f feat(azure): Defender checks related to defender settings (#3347)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2024-02-06 12:23:36 +01:00
Sergio Garcia
89c71a068b chore(pre-commit): remove pytest from pre-commit (#3363) 2024-02-06 11:22:00 +01:00
dependabot[bot]
8946145070 build(deps-dev): bump coverage from 7.4.0 to 7.4.1 (#3357)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-02-06 10:50:20 +01:00
Sergio Garcia
db15c0de9e fix(rds): verify SGs in rds_instance_no_public_access (#3341)
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
2024-02-06 10:49:58 +01:00
dependabot[bot]
643a918034 build(deps-dev): bump moto from 5.0.0 to 5.0.1 (#3358)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-02-06 10:33:51 +01:00
Sergio Garcia
f21dcd8122 chore(inspector): refactor inspector2_findings_exist check into two (#3338) 2024-02-06 10:32:19 +01:00
dependabot[bot]
ac44d4a27b build(deps-dev): bump black from 22.12.0 to 24.1.1 (#3356)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
2024-02-06 10:17:01 +01:00
dependabot[bot]
9c898c34f6 build(deps): bump cryptography from 41.0.6 to 42.0.0 (#3362)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-02-06 10:05:34 +01:00
dependabot[bot]
c0e0ddbc1c build(deps): bump trufflesecurity/trufflehog from 3.66.1 to 3.67.2 (#3361)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-02-06 10:05:14 +01:00
dependabot[bot]
6c756ea52f build(deps): bump codecov/codecov-action from 3 to 4 (#3360)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-02-06 10:04:56 +01:00
dependabot[bot]
0a413b6fd2 build(deps): bump peter-evans/create-pull-request from 5 to 6 (#3359)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-02-06 09:58:55 +01:00
dependabot[bot]
7ac7d9c9a8 build(deps): bump google-api-python-client from 2.113.0 to 2.116.0 (#3355)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-02-06 09:58:28 +01:00
Toni de la Fuente
7322d0bd30 chore(docs): Update README.md (#3353)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2024-02-05 17:52:46 +01:00
Pedro Martín
469cc749d8 feat(readme): Update readme with new numbers for Prowler checks (#3354) 2024-02-05 17:49:43 +01:00
Toni de la Fuente
e91a694b46 chore(docs): update CODE_OF_CONDUCT.md (#3352) 2024-02-05 17:27:12 +01:00
Pedro Martín
4587a9f651 refactor(azure): Change class names from azure services and fix typing error (#3350) 2024-02-05 15:43:04 +01:00
Rubén De la Torre Vico
8c51094df1 fix(storage): Manage None type for key_expiration_period_in_days (#3351) 2024-02-05 15:42:03 +01:00
Rubén De la Torre Vico
c795d76fe9 feat(azure): Defender checks related to security contacts and notifications (#3344) 2024-02-05 13:51:56 +01:00
Pepe Fagoaga
c6e8a0b6d3 fix(organizations): Handle non existent policy (#3319) 2024-02-05 12:37:08 +01:00
dependabot[bot]
b23be4164f build(deps-dev): bump moto from 4.2.13 to 5.0.0 (#3329)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2024-02-05 12:06:16 +01:00
Pedro Martín
de77f3ff13 feat(azure): new check sqlserver_vulnerability_assessment_enabled (#3349) 2024-02-05 11:39:05 +01:00
Pedro Martín
7c0ff1ff6a feat(azure): New Azure SQLServer related check sqlserver_auditing_retention_90_days (#3345) 2024-02-05 10:58:44 +01:00
Sergio Garcia
888cb92987 chore(regions_update): Changes in regions for AWS services. (#3342)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
2024-02-05 09:37:02 +01:00
Sergio Garcia
9a038f7bed chore(regions_update): Changes in regions for AWS services. (#3348)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
2024-02-05 09:36:48 +01:00
Sergio Garcia
b98f245bf2 chore(regions_update): Changes in regions for AWS services. (#3339)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2024-02-05 09:20:26 +01:00
Sergio Garcia
e59b5caaf9 chore(regions_update): Changes in regions for AWS services. (#3333)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2024-02-05 09:20:09 +01:00
Sergio Garcia
5a602d7adb chore(regions_update): Changes in regions for AWS services. (#3325)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2024-02-05 09:18:49 +01:00
Pedro Martín
14aa7a3f67 feat(azure): SQLServer checks related to TDE encryption (#3343) 2024-02-02 11:35:18 +01:00
Pedro Martín
6e991107e7 feat(azure): New check storage_ensure_soft_delete_is_enabled (#3334) 2024-01-31 13:29:20 +01:00
Rubén De la Torre Vico
622bce9c52 feat(azure): Add check defender_ensure_system_updates_are_applied and defender_auto_provisioning_vulnerabilty_assessments_machines_on (#3327) 2024-01-31 12:29:45 +01:00
Pedro Martín
48587bd034 feat(compliance): account security onboarding compliance framework (#3286)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2024-01-31 10:18:31 +01:00
Rubén De la Torre Vico
19d6352950 fix(GuardDuty): fix class name (#3337) 2024-01-30 14:43:55 +01:00
dependabot[bot]
2c4b5c99ce build(deps): bump mkdocs-material from 9.5.4 to 9.5.6 (#3330)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-30 10:59:15 +01:00
dependabot[bot]
15a194c9b0 build(deps-dev): bump pytest from 7.4.4 to 8.0.0 (#3331)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-30 10:15:07 +01:00
dependabot[bot]
e94e3cead9 build(deps): bump trufflesecurity/trufflehog from 3.63.11 to 3.66.1 (#3332)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-30 10:14:51 +01:00
dependabot[bot]
ee2ed92fb5 build(deps-dev): bump vulture from 2.10 to 2.11 (#3328)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-30 09:46:17 +01:00
Pedro Martín
db4579435a feat(azure): add new check storage_ensure_private_endpoints_in_storage_accounts (#3326)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2024-01-29 13:55:19 +01:00
Pedro Martín
ae1ab1d957 feat(azure): Add new check storage_key_rotation_90_days (#3323) 2024-01-29 12:57:19 +01:00
Rubén De la Torre Vico
a8edd03e65 feat(azure): Add check defender_auto_provisioning_log_analytics_agent_vms_on (#3322)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2024-01-29 11:02:49 +01:00
Pepe Fagoaga
8768b4cc31 chore(actions): Add AWS tag to the update regions bot (#3321) 2024-01-29 10:15:16 +01:00
Pedro Martín
cd9c192208 chore(azure): Remove all unnecessary init methods in @dataclass (#3324) 2024-01-26 13:15:42 +01:00
Sergio Garcia
dcd97e7d26 chore(regions_update): Changes in regions for AWS services. (#3320)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2024-01-26 10:50:14 +01:00
Pedro Martín
8a6ae68b9a feat(azure): Add new check "iam_custom_role_permits_administering_resource_locks" (#3317)
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
2024-01-25 14:29:29 +01:00
Sergio Garcia
dff3e72e7d chore(regions_update): Changes in regions for AWS services. (#3318)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2024-01-25 14:14:27 +01:00
Sergio Garcia
f0ac440146 chore(regions_update): Changes in regions for AWS services. (#3316)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2024-01-24 11:57:11 +01:00
dependabot[bot]
7d7e5f4e1d build(deps): bump azure-mgmt-security from 5.0.0 to 6.0.0 (#3312)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-23 13:55:28 +01:00
Antoine Ansari
a21dd4a2ed feat(quick-inventory): custom output file in quick inventory (#3306)
Co-authored-by: antoinea <antoinea@padok.fr>
2024-01-23 10:05:45 +01:00
dependabot[bot]
7f4e5bf435 build(deps-dev): bump safety from 2.3.5 to 3.0.1 (#3313)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-23 09:13:19 +01:00
dependabot[bot]
dad590f070 build(deps): bump pydantic from 1.10.13 to 1.10.14 (#3311)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-23 08:57:26 +01:00
dependabot[bot]
f22b81fe3b build(deps): bump trufflesecurity/trufflehog from 3.63.9 to 3.63.11 (#3307)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-23 08:40:55 +01:00
dependabot[bot]
68c1acbc7a build(deps): bump tj-actions/changed-files from 41 to 42 (#3308)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-23 08:40:37 +01:00
dependabot[bot]
e5412404ca build(deps): bump jsonschema from 4.20.0 to 4.21.1 (#3310)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-23 08:40:13 +01:00
Sergio Garcia
5e733f6217 chore(regions_update): Changes in regions for AWS services. (#3303)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2024-01-22 09:23:39 +01:00
Pepe Fagoaga
c830e4e399 docs(security-hub): Add integration steps and images (#3304) 2024-01-22 09:13:24 +01:00
Pepe Fagoaga
c3ecd2b3e5 docs(security-hub): improve documentation and clarify steps (#3301) 2024-01-18 13:55:07 +01:00
Sergio Garcia
fd4d2db467 fix(BadRequest): add BadRequest exception to WellArchitected (#3300) 2024-01-18 10:42:27 +01:00
Sergio Garcia
49b76ab050 chore(docs): update documentation (#3297) 2024-01-18 10:40:06 +01:00
Sergio Garcia
c53f931d09 fix(NoSuchEntity): add NoSuchEntity exception to IAM (#3299) 2024-01-18 10:39:09 +01:00
Sergio Garcia
f344dbbc07 chore(regions_update): Changes in regions for AWS services. (#3298)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2024-01-18 10:35:23 +01:00
Esteban Mendoza
c617c10ffa fix(acm): adding more details on remaining expiration days (#3293)
Co-authored-by: Esteban <mendoza@versprite.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2024-01-17 09:42:19 +01:00
Sergio Garcia
4a15625bf9 chore(compliance): make SocType attribute general (#3287) 2024-01-16 13:41:08 +01:00
dependabot[bot]
c5def6d736 build(deps): bump mkdocs-material from 9.5.3 to 9.5.4 (#3285)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-16 08:07:11 +01:00
dependabot[bot]
b232b675a7 build(deps): bump actions/checkout from 3 to 4 (#3284)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-16 08:06:54 +01:00
dependabot[bot]
6c03683c20 build(deps): bump peter-evans/create-pull-request from 4 to 5 (#3283)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-16 08:06:37 +01:00
dependabot[bot]
2da57db5a8 build(deps): bump docker/login-action from 2 to 3 (#3282)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-16 08:05:59 +01:00
dependabot[bot]
c7b794c1c4 build(deps): bump docker/build-push-action from 2 to 5 (#3281)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-16 08:05:44 +01:00
dependabot[bot]
5154cec7d2 build(deps): bump slack-sdk from 3.26.1 to 3.26.2 (#3280)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-15 11:44:57 +01:00
dependabot[bot]
e4cbb3c90e build(deps): bump actions/setup-python from 2 to 5 (#3277)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-15 11:36:01 +01:00
dependabot[bot]
17f5cbeac2 build(deps): bump docker/setup-buildx-action from 2 to 3 (#3276)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-15 11:30:25 +01:00
dependabot[bot]
90a4924508 build(deps): bump github/codeql-action from 2 to 3 (#3279)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-15 11:29:58 +01:00
dependabot[bot]
d499053016 build(deps): bump aws-actions/configure-aws-credentials from 1 to 4 (#3278)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-15 11:29:39 +01:00
dependabot[bot]
d343a67d6a build(deps): bump trufflesecurity/trufflehog from 3.4.4 to 3.63.9 (#3275)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-15 11:29:30 +01:00
Pepe Fagoaga
8435ab48b0 chore(dependabot): Run for GHA (#3274) 2024-01-15 11:19:44 +01:00
Sergio Garcia
27edf0f55a chore(regions_update): Changes in regions for AWS services. (#3273)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2024-01-15 10:53:55 +01:00
Sergio Garcia
3d00554332 chore(README): update syntax of supported Python versions (#3271) 2024-01-12 12:59:56 +01:00
Toni de la Fuente
2631709abf docs(README): Update Kubernetes development status and Python supported versions (#3270) 2024-01-12 12:17:06 +01:00
Sergio Garcia
4b0102b309 chore(release): update Prowler Version to 3.12.1 (#3269)
Co-authored-by: github-actions <noreply@github.com>
2024-01-12 11:52:02 +01:00
Nacho Rivera
b9a24e0338 fix(fms): handle list compliance status error (#3259) 2024-01-12 11:00:07 +01:00
Sergio Garcia
f127d4a8b1 chore(regions_update): Changes in regions for AWS services. (#3268)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2024-01-12 10:15:16 +01:00
Pepe Fagoaga
73780682a1 fix(allowlist): Handle empty exceptions (#3266) 2024-01-12 09:54:03 +01:00
dependabot[bot]
9a1c034a51 build(deps): bump jinja2 from 3.1.2 to 3.1.3 (#3267)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-12 08:31:37 +01:00
Sergio Garcia
94179f27ec chore(readme): remove deprecated library name (#3251) 2024-01-11 17:55:44 +01:00
Pepe Fagoaga
6797b5a93d fix(apigatewayv2_api_access_logging_enabled): Finding ID should be unique (#3263) 2024-01-11 15:15:48 +01:00
Nacho Rivera
874a131ec9 chore(precommit): set trufflehog as command (#3262) 2024-01-11 11:47:19 +01:00
Nacho Rivera
641727ee0e fix(rds): handle api call error response (#3258) 2024-01-11 09:50:44 +01:00
dependabot[bot]
f50075257c build(deps-dev): bump gitpython from 3.1.37 to 3.1.41 (#3257) 2024-01-11 09:50:16 +01:00
Sergio Garcia
4d1de8f75c chore(regions_update): Changes in regions for AWS services. (#3256)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2024-01-10 10:20:50 +01:00
Pepe Fagoaga
b76d0153eb chore(s3): Update log not to duplicate it (#3255) 2024-01-10 10:00:02 +01:00
Sergio Garcia
f82789b99f chore(regions_update): Changes in regions for AWS services. (#3249)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2024-01-09 10:31:05 +01:00
dependabot[bot]
89c789ce10 build(deps-dev): bump flake8 from 6.1.0 to 7.0.0 (#3246)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-09 10:02:54 +01:00
Pepe Fagoaga
6dba54b028 docs: Add Codecov badge (#3248) 2024-01-09 09:54:30 +01:00
dependabot[bot]
d852cb4ed6 build(deps): bump google-api-python-client from 2.111.0 to 2.113.0 (#3245)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-09 09:44:47 +01:00
dependabot[bot]
4c666fa1fe build(deps-dev): bump moto from 4.2.12 to 4.2.13 (#3244)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-09 09:01:42 +01:00
Sergio Garcia
98adc1872d chore(release): update Prowler Version to 3.12.0 (#3242)
Co-authored-by: github-actions <noreply@github.com>
2024-01-08 15:05:17 +01:00
Sergio Garcia
1df84ef6e4 chore(role arguments): enhance role arguments validation (#3240) 2024-01-08 14:41:52 +01:00
Sergio Garcia
80b88a9365 chore(exception): handle error in describing regions (#3241) 2024-01-08 14:16:27 +01:00
Fennerr
558b7a54c7 feat(aws): Added AWS role session name parameter (#3234)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2024-01-08 12:49:13 +01:00
Sergio Garcia
9522d0c733 fix(organizations_scp_check_deny_regions): enhance check logic (#3239) 2024-01-08 12:20:39 +01:00
dependabot[bot]
396d6e5c0e build(deps-dev): bump coverage from 7.3.4 to 7.4.0 (#3233)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-03 18:21:12 +01:00
Sergio Garcia
a69d7471b3 chore(regions_update): Changes in regions for AWS services. (#3236)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2024-01-03 13:34:14 +01:00
dependabot[bot]
eb56e1417c build(deps-dev): bump pytest from 7.4.3 to 7.4.4 (#3232)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-03 13:33:48 +01:00
dependabot[bot]
3d032a8efe build(deps): bump tj-actions/changed-files from 39 to 41 in /.github/workflows (#3235)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-03 13:30:21 +01:00
Sergio Garcia
d712470047 chore(regions_update): Changes in regions for AWS services. (#3231) 2023-12-29 10:56:24 +01:00
Pepe Fagoaga
423f96b95f fix(fms): Handle PolicyComplianceStatusList key error (#3230)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-12-28 18:25:21 +01:00
Sergio Garcia
d1bd097079 chore(regions_update): Changes in regions for AWS services. (#3228)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-12-28 10:24:10 +01:00
Evgenii
ceabe8ecba chore: changed concatenation of strings to f-strings to improve readability (#3227) 2023-12-28 08:51:00 +01:00
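
The refactor above swaps string concatenation for f-strings. A before/after sketch of the pattern:

```python
region = "eu-west-1"
count = 3

# Before: concatenation, with explicit str() casts
message = "Found " + str(count) + " findings in " + region

# After: an f-string, shorter and easier to read
message = f"Found {count} findings in {region}"
```
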
Pepe Fagoaga
0fff0568fa fix(allowlist): Analyse single and multi account allowlist if present (#3210)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-12-27 11:02:31 +01:00
dependabot[bot]
10e822238e build(deps): bump google-api-python-client from 2.110.0 to 2.111.0 (#3224)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-26 10:26:13 +01:00
dependabot[bot]
1cf1c827f1 build(deps-dev): bump freezegun from 1.3.1 to 1.4.0 (#3222)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-26 09:33:12 +01:00
dependabot[bot]
5bada440fa build(deps-dev): bump coverage from 7.3.3 to 7.3.4 (#3223)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-26 09:09:29 +01:00
Sergio Garcia
04bb95e044 chore(ENS): add missing ENS mappings (#3218) 2023-12-26 09:08:54 +01:00
dependabot[bot]
819140bc59 build(deps): bump shodan from 1.30.1 to 1.31.0 (#3221)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-26 08:54:01 +01:00
Sergio Garcia
d490bcc955 chore(regions_update): Changes in regions for AWS services. (#3219)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-12-26 08:49:41 +01:00
dependabot[bot]
cb94960178 build(deps): bump mkdocs-material from 9.5.2 to 9.5.3 (#3220)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-26 08:39:31 +01:00
Sergio Garcia
7361c10cb9 fix(s3): handle NoSuchBucketPolicy error (#3217) 2023-12-22 10:57:55 +01:00
Sergio Garcia
b47408e94e fix(trustedadvisor): solve trustedadvisor check metadata (#3216) 2023-12-22 10:56:21 +01:00
Sergio Garcia
806a3590aa chore(regions_update): Changes in regions for AWS services. (#3215)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-12-22 10:49:04 +01:00
Sergio Garcia
e953fe021d chore(regions_update): Changes in regions for AWS services. (#3214) 2023-12-21 11:34:33 +01:00
Sergio Garcia
e570d94a6e chore(regions_update): Changes in regions for AWS services. (#3213)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-12-20 14:08:52 +01:00
Nacho Rivera
78505cb0a8 chore(sqs_...not_publicly_accessible): less restrictive condition test (#3211)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-12-19 16:53:19 +01:00
dependabot[bot]
f8d77d9a30 build(deps): bump google-auth-httplib2 from 0.1.1 to 0.2.0 (#3207)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-19 13:05:30 +01:00
Sergio Garcia
1a4887f028 chore(regions_update): Changes in regions for AWS services. (#3209)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-12-19 12:39:19 +01:00
dependabot[bot]
71042b5919 build(deps): bump mkdocs-material from 9.4.14 to 9.5.2 (#3206)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-19 12:39:10 +01:00
dependabot[bot]
435976800a build(deps-dev): bump moto from 4.2.11 to 4.2.12 (#3205)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-19 10:14:04 +01:00
dependabot[bot]
18f4c7205b build(deps-dev): bump coverage from 7.3.2 to 7.3.3 (#3204)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-19 08:55:14 +01:00
dependabot[bot]
06eeefb8bf build(deps-dev): bump pylint from 3.0.2 to 3.0.3 (#3203)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-19 08:30:45 +01:00
Sergio Garcia
1737d7cf42 fix(gcp): fix UnknownApiNameOrVersion error (#3202) 2023-12-18 14:32:33 +01:00
dependabot[bot]
cd03fa6d46 build(deps): bump jsonschema from 4.18.0 to 4.20.0 (#3057)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-12-18 13:00:43 +01:00
Sergio Garcia
a10a73962e chore(regions_update): Changes in regions for AWS services. (#3200) 2023-12-18 07:21:18 +01:00
Pepe Fagoaga
99d6fee7a0 fix(iam): Handle NoSuchEntity in list_group_policies (#3197) 2023-12-15 14:04:59 +01:00
Nacho Rivera
c8831f0f50 chore(s3 bucket input validation): validates input bucket (#3198) 2023-12-15 13:37:41 +01:00
Pepe Fagoaga
fdeb523581 feat(securityhub): Send only FAILs but storing all in the output files (#3195) 2023-12-15 13:31:55 +01:00
Sergio Garcia
9a868464ee chore(regions_update): Changes in regions for AWS services. (#3196)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-12-15 10:15:54 +01:00
Alexandros Gidarakos
051ec75e01 docs(cloudshell): Update AWS CloudShell installation steps (#3192)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-12-14 08:35:23 +01:00
Alexandros Gidarakos
fc3909491a docs(cloudshell): Add missing steps to workaround (#3191) 2023-12-14 08:18:24 +01:00
Pepe Fagoaga
2437fe270c docs(cloudshell): Add workaround to clone from github (#3190) 2023-12-13 17:19:30 +01:00
Nacho Rivera
c937b193d0 fix(apigw_restapi_auth check): add method auth testing (#3183) 2023-12-13 16:20:09 +01:00
Fennerr
8b5c995486 fix(lambda): memory leakage with lambda function code (#3167)
Co-authored-by: Justin Moorcroft <justin.moorcroft@mwrcybersec.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-12-13 15:15:13 +01:00
Sergio Garcia
4410f2a582 chore(regions_update): Changes in regions for AWS services. (#3189)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-12-13 10:32:10 +01:00
Fennerr
bbb816868e docs(aws): Added debug information to inspect retries in API calls (#3186)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-12-12 14:07:33 +01:00
Fennerr
2441cca810 fix(threading): Improved threading for the AWS Service (#3175)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-12-12 12:50:26 +01:00
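
The threading improvement above targets how the AWS service classes fan work out across regions. A minimal sketch of the common thread-pool pattern (the function and region list are illustrative assumptions, not the PR's code):

```python
from concurrent.futures import ThreadPoolExecutor

def describe_in_region(region: str) -> list:
    # Hypothetical per-region call; real code would use a boto3 client
    # scoped to `region` and return the resources it finds there.
    return []

regions = ["us-east-1", "eu-west-1", "eu-central-1"]

# Fan one task out per region and gather the results.
with ThreadPoolExecutor(max_workers=len(regions)) as executor:
    results = list(executor.map(describe_in_region, regions))
```
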
Sergio Garcia
3c3dfb380b fix(gcp): improve logging messages (#3185) 2023-12-12 12:38:50 +01:00
Nacho Rivera
0f165f0bf0 chore(actions): add prowler 4.0 branch to actions (#3184) 2023-12-12 11:40:01 +01:00
Sergio Garcia
7fcff548eb chore(regions_update): Changes in regions for AWS services. (#3182)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-12-12 10:28:01 +01:00
dependabot[bot]
8fa7b9ba00 build(deps-dev): bump docker from 6.1.3 to 7.0.0 (#3180)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-12 10:27:49 +01:00
dependabot[bot]
b101e15985 build(deps-dev): bump bandit from 1.7.5 to 1.7.6 (#3179)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-12 09:53:03 +01:00
dependabot[bot]
b4e412a37f build(deps-dev): bump pylint from 3.0.2 to 3.0.3 (#3181)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-12 09:33:27 +01:00
dependabot[bot]
ac0e2bbdb2 build(deps): bump google-api-python-client from 2.109.0 to 2.110.0 (#3178)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-12 08:07:30 +01:00
Sergio Garcia
ba16330e20 feat(cognito): add Amazon Cognito service (#3060) 2023-12-11 14:35:00 +01:00
Pepe Fagoaga
c9cb9774c6 fix(aws_regions): Get enabled regions (#3095) 2023-12-11 14:09:39 +01:00
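
Getting the enabled regions maps naturally onto EC2's `DescribeRegions` API, which returns only the regions enabled for the account when `AllRegions` is false. A sketch with boto3 (illustrative, not necessarily the exact call the fix uses):

```python
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")
# With AllRegions=False, DescribeRegions returns only regions that are
# enabled for the account (default regions plus opted-in ones).
enabled_regions = [
    region["RegionName"]
    for region in ec2.describe_regions(AllRegions=False)["Regions"]
]
```
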
Pepe Fagoaga
7b5b14dbd0 refactor(cloudwatch): simplify logic (#3172) 2023-12-11 11:23:24 +01:00
Fennerr
bd13973cf5 docs(parallel-execution): Combining the output files (#3096) 2023-12-11 11:11:53 +01:00
Fennerr
a7f8656e89 chore(elb): Improve status in elbv2_insecure_ssl_ciphers (#3169) 2023-12-11 11:04:37 +01:00
Sergio Garcia
1be52fab06 chore(ens): do not apply recommendation type to score (#3058) 2023-12-11 10:53:26 +01:00
Pepe Fagoaga
c9baff1a7f fix(generate_regional_clients): Global is not needed anymore (#3162) 2023-12-11 10:50:15 +01:00
Pepe Fagoaga
d1bc68086d fix(access-analyzer): Handle ValidationException (#3165) 2023-12-11 09:40:12 +01:00
Pepe Fagoaga
44a4c0670b fix(cloudtrail): Handle UnsupportedOperationException (#3166) 2023-12-11 09:38:23 +01:00
Pepe Fagoaga
4785056740 fix(elasticache): Handle CacheClusterNotFound (#3174) 2023-12-11 09:37:01 +01:00
Pepe Fagoaga
694aa448a4 fix(s3): Handle NoSuchBucket in the service (#3173) 2023-12-11 09:36:26 +01:00
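
The four fixes above all harden service classes against service-specific API errors. With boto3 these surface as `botocore.exceptions.ClientError`, and the usual pattern is to branch on the error code. A general sketch using the CloudTrail case (`trail_name` and the fallback behavior are assumptions, not the project's exact code):

```python
import boto3
from botocore.exceptions import ClientError

cloudtrail_client = boto3.client("cloudtrail", region_name="us-east-1")
trail_name = "my-trail"  # hypothetical trail name

try:
    insights = cloudtrail_client.get_insight_selectors(TrailName=trail_name)
except ClientError as error:
    # Service-specific codes such as "UnsupportedOperationException"
    # or "NoSuchBucket" travel in the error response payload.
    if error.response["Error"]["Code"] == "UnsupportedOperationException":
        insights = None  # treat as "no insight selectors configured"
    else:
        raise
```
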
Sergio Garcia
ee215b1ced chore(regions_update): Changes in regions for AWS services. (#3168) 2023-12-11 08:04:48 +01:00
Nacho Rivera
018e87884c test(audit_info): missing workspace test (#3164) 2023-12-05 16:05:39 +01:00
Nacho Rivera
a81cbbc325 test(audit_info): refactor iam (#3163) 2023-12-05 15:59:53 +01:00
Pepe Fagoaga
3962c9d816 test(audit_info): refactor acm, account and access analyzer (#3097) 2023-12-05 15:09:14 +01:00
Pepe Fagoaga
e187875da5 test(audit_info): refactor guardduty (#3160) 2023-12-05 15:00:46 +01:00
Pepe Fagoaga
f0d1a799a2 test(audit_info): refactor cloudtrail (#3111) 2023-12-05 14:59:42 +01:00
Pepe Fagoaga
5452d535d7 test(audit_info): refactor ec2 (#3132) 2023-12-05 14:58:58 +01:00
Pepe Fagoaga
7a776532a8 test(aws_account_id): refactor (#3161) 2023-12-05 14:58:42 +01:00
Nacho Rivera
e704d57957 test(audit_info): refactor inspector2 (#3159) 2023-12-05 14:19:40 +01:00
Pepe Fagoaga
c9a6eb5a1a test(audit_info): refactor globalaccelerator (#3154) 2023-12-05 14:13:02 +01:00
Pepe Fagoaga
c071812160 test(audit_info): refactor glue (#3158) 2023-12-05 14:12:44 +01:00
Pepe Fagoaga
3f95ad9ada test(audit_info): refactor glacier (#3153) 2023-12-05 14:09:04 +01:00
Nacho Rivera
250f59c9f5 test(audit_info): refactor kms (#3157) 2023-12-05 14:05:56 +01:00
Nacho Rivera
c17bbea2c7 test(audit_info): refactor macie (#3156) 2023-12-05 13:59:08 +01:00
Nacho Rivera
0262f8757a test(audit_info): refactor neptune (#3155) 2023-12-05 13:48:32 +01:00
Nacho Rivera
dbc2c481dc test(audit_info): refactor networkfirewall (#3152) 2023-12-05 13:20:52 +01:00
Pepe Fagoaga
e432c39eec test(audit_info): refactor fms (#3151) 2023-12-05 13:18:28 +01:00
Pepe Fagoaga
7383ae4f9c test(audit_info): refactor elbv2 (#3148) 2023-12-05 13:18:06 +01:00
Pepe Fagoaga
d217e33678 test(audit_info): refactor emr (#3149) 2023-12-05 13:17:42 +01:00
Nacho Rivera
d1daceff91 test(audit_info): refactor opensearch (#3150) 2023-12-05 13:17:28 +01:00
Nacho Rivera
dbbd556830 test(audit_info): refactor organizations (#3147) 2023-12-05 12:59:22 +01:00
Nacho Rivera
d483f1d90f test(audit_info): refactor rds (#3146) 2023-12-05 12:51:22 +01:00
Nacho Rivera
80684a998f test(audit_info): refactor redshift (#3144) 2023-12-05 12:42:08 +01:00
Pepe Fagoaga
0c4f0fde48 test(audit_info): refactor elb (#3145) 2023-12-05 12:41:37 +01:00
Pepe Fagoaga
071115cd52 test(audit_info): refactor elasticache (#3142) 2023-12-05 12:41:11 +01:00
Nacho Rivera
9136a755fe test(audit_info): refactor resourceexplorer2 (#3143) 2023-12-05 12:28:38 +01:00
Nacho Rivera
6ff864fc04 test(audit_info): refactor route53 (#3141) 2023-12-05 12:28:12 +01:00
Nacho Rivera
828a6f4696 test(audit_info): refactor s3 (#3140) 2023-12-05 12:13:21 +01:00
Pepe Fagoaga
417aa550a6 test(audit_info): refactor eks (#3139) 2023-12-05 12:07:41 +01:00
Pepe Fagoaga
78ffc2e238 test(audit_info): refactor efs (#3138) 2023-12-05 12:07:21 +01:00
Pepe Fagoaga
c9f22db1b5 test(audit_info): refactor ecs (#3137) 2023-12-05 12:07:01 +01:00
Pepe Fagoaga
41da560b64 test(audit_info): refactor ecr (#3136) 2023-12-05 12:06:42 +01:00
Nacho Rivera
b49e0b95f7 test(audit_info): refactor shield (#3131) 2023-12-05 11:40:42 +01:00
Nacho Rivera
50ef2729e6 test(audit_info): refactor sagemaker (#3135) 2023-12-05 11:40:19 +01:00
Nacho Rivera
6a901bb7de test(audit_info): refactor secretsmanager (#3134) 2023-12-05 11:33:54 +01:00
Nacho Rivera
f0da63c850 test(audit_info): refactor shub (#3133) 2023-12-05 11:33:34 +01:00
Nacho Rivera
b861c1dd3c test(audit_info): refactor sns (#3128) 2023-12-05 11:05:27 +01:00
Nacho Rivera
45faa2e9e8 test(audit_info): refactor sqs (#3130) 2023-12-05 11:05:05 +01:00
Pepe Fagoaga
b2e1eed684 test(audit_info): refactor dynamodb (#3129) 2023-12-05 10:59:26 +01:00
Pepe Fagoaga
4018221da6 test(audit_info): refactor drs (#3127) 2023-12-05 10:59:09 +01:00
Pepe Fagoaga
28ec3886f9 test(audit_info): refactor documentdb (#3126) 2023-12-05 10:58:48 +01:00
Pepe Fagoaga
ed323f4602 test(audit_info): refactor dlm (#3124) 2023-12-05 10:58:31 +01:00
Pepe Fagoaga
f72d360384 test(audit_info): refactor directoryservice (#3123) 2023-12-05 10:58:09 +01:00
Nacho Rivera
682bba452b test(audit_info): refactor ssm (#3125) 2023-12-05 10:45:15 +01:00
Nacho Rivera
e2ce5ae2af test(audit_info): refactor ssmincidents (#3122) 2023-12-05 10:38:09 +01:00
Nacho Rivera
039a0da69e tests(audit_info): refactor trustedadvisor (#3120) 2023-12-05 10:30:54 +01:00
Pepe Fagoaga
c9ad12b87e test(audit_info): refactor config (#3121) 2023-12-05 10:30:13 +01:00
Pepe Fagoaga
094be2e2e6 test(audit_info): refactor codeartifact (#3117) 2023-12-05 10:17:08 +01:00
Pepe Fagoaga
1b3029d833 test(audit_info): refactor codebuild (#3118) 2023-12-05 10:17:02 +01:00
Nacho Rivera
d00d5e863b tests(audit_info): refactor vpc (#3119) 2023-12-05 10:16:51 +01:00
Pepe Fagoaga
3d19e89710 test(audit_info): refactor cloudwatch (#3116) 2023-12-05 10:04:45 +01:00
Pepe Fagoaga
247cd6fc44 test(audit_info): refactor cloudfront (#3110) 2023-12-05 10:04:07 +01:00
Pepe Fagoaga
ba244c887f test(audit_info): refactor cloudformation (#3105) 2023-12-05 10:03:50 +01:00
Pepe Fagoaga
f77d92492a test(audit_info): refactor backup (#3104) 2023-12-05 10:03:32 +01:00
Pepe Fagoaga
1b85af95c0 test(audit_info): refactor athena (#3101) 2023-12-05 10:03:11 +01:00
Pepe Fagoaga
9236f5d058 test(audit_info): refactor autoscaling (#3102) 2023-12-05 10:02:54 +01:00
dependabot[bot]
39ba8cd230 build(deps-dev): bump freezegun from 1.2.2 to 1.3.1 (#3109)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-05 09:51:57 +01:00
Nacho Rivera
e67328945f test(audit_info): refactor waf (#3115) 2023-12-05 09:51:37 +01:00
Nacho Rivera
bcee2b0b6d test(audit_info): refactor wafv2 (#3114) 2023-12-05 09:51:20 +01:00
Nacho Rivera
be9a1b2f9a test(audit_info): refactor wellarchitected (#3113) 2023-12-05 09:40:31 +01:00
dependabot[bot]
4f9c2aadc2 build(deps-dev): bump moto from 4.2.10 to 4.2.11 (#3108)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-05 09:34:13 +01:00
Pepe Fagoaga
25d419ac7f test(audit_info): refactor appstream (#3100) 2023-12-05 09:33:53 +01:00
Pepe Fagoaga
57cfb508f1 test(audit_info): refactor apigateway (#3098) 2023-12-05 09:33:20 +01:00
Pepe Fagoaga
c88445f90d test(audit_info): refactor apigatewayv2 (#3099) 2023-12-05 09:32:31 +01:00
Nacho Rivera
9b6d6c3a42 test(audit_info): refactor workspaces (#3112) 2023-12-05 09:32:13 +01:00
Pepe Fagoaga
d26c1405ce test(audit_info): refactor awslambda (#3103) 2023-12-05 09:18:23 +01:00
dependabot[bot]
4bb35ab92d build(deps): bump slack-sdk from 3.26.0 to 3.26.1 (#3107)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-05 08:39:26 +01:00
dependabot[bot]
cdd983aa04 build(deps): bump google-api-python-client from 2.108.0 to 2.109.0 (#3106)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-05 08:12:57 +01:00
Nacho Rivera
e83ce86eb3 fix(docs): typo in reporting/csv (#3094) 2023-12-04 10:20:57 +01:00
Nacho Rivera
bcc590a3ee chore(actions): not launch linters for mkdocs.yml (#3093) 2023-12-04 09:57:18 +01:00
Fennerr
5fdffb93d1 docs(parallel-execution): How to execute it in parallel (#3091)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-12-04 09:48:46 +01:00
Nacho Rivera
db20b2c04f fix(docs): csv fields (#3092)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-12-04 09:46:20 +01:00
Nacho Rivera
4e037c0f43 fix(send_to_s3_bucket): don't kill exec when fail (#3088) 2023-12-01 13:25:59 +01:00
Nacho Rivera
fdcc2ac5cb revert(clean local dirs): delete clean local dirs output feature (#3087) 2023-12-01 12:26:59 +01:00
William
9099bd79f8 fix(vpc_different_regions): Handle if there are no VPCs (#3081)
Co-authored-by: William Brady <will@crofton.cloud>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-12-01 11:44:23 +01:00
Pepe Fagoaga
a01683d8f6 refactor(severities): Define it in one place (#3086) 2023-12-01 11:39:35 +01:00
Pepe Fagoaga
6d2b2a9a93 refactor(load_checks_to_execute): Refactor function and add tests (#3066) 2023-11-30 17:41:14 +01:00
Sergio Garcia
de4166bf0d chore(regions_update): Changes in regions for AWS services. (#3079) 2023-11-29 11:21:06 +01:00
dependabot[bot]
1cbef30788 build(deps): bump cryptography from 41.0.4 to 41.0.6 (#3078)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-29 08:17:34 +01:00
Nacho Rivera
89c6e27489 fix(trustedadvisor): handle missing checks dict key (#3075) 2023-11-28 10:37:24 +01:00
Sergio Garcia
f74ffc530d chore(regions_update): Changes in regions for AWS services. (#3074)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-11-28 10:22:29 +01:00
dependabot[bot]
441d4d6a38 build(deps-dev): bump moto from 4.2.9 to 4.2.10 (#3073)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-28 09:57:56 +01:00
dependabot[bot]
3c6b9d63a6 build(deps): bump slack-sdk from 3.24.0 to 3.26.0 (#3072) 2023-11-28 09:21:46 +01:00
dependabot[bot]
254d8616b7 build(deps-dev): bump pytest-xdist from 3.4.0 to 3.5.0 (#3071) 2023-11-28 09:06:23 +01:00
dependabot[bot]
d3bc6fda74 build(deps): bump mkdocs-material from 9.4.10 to 9.4.14 (#3070)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-28 08:46:49 +01:00
Nacho Rivera
e4a5d9376f fix(clean local output dirs): change function description (#3068)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-11-27 14:55:34 +01:00
Nacho Rivera
523605e3e7 fix(set_azure_audit_info): assign correct logging when no auth (#3063) 2023-11-27 11:00:22 +01:00
Nacho Rivera
ed33fac337 fix(gcp provider): move generate_client for consistency (#3064) 2023-11-27 10:31:40 +01:00
Sergio Garcia
bf0e62aca5 chore(regions_update): Changes in regions for AWS services. (#3065)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-11-27 10:30:12 +01:00
Nacho Rivera
60c0b79b10 fix(outputs): initialize_file_descriptor is called dynamically (#3050) 2023-11-21 16:05:26 +01:00
Sergio Garcia
f9d2e7aa93 chore(regions_update): Changes in regions for AWS services. (#3059)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-11-21 11:07:08 +01:00
dependabot[bot]
0646748e24 build(deps): bump google-api-python-client from 2.107.0 to 2.108.0 (#3056)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-21 09:31:25 +01:00
dependabot[bot]
f6408e9df7 build(deps-dev): bump moto from 4.2.8 to 4.2.9 (#3055)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-21 08:14:00 +01:00
dependabot[bot]
5769bc815c build(deps): bump mkdocs-material from 9.4.8 to 9.4.10 (#3054)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-21 07:51:27 +01:00
dependabot[bot]
5a3e3e9b1f build(deps): bump slack-sdk from 3.23.0 to 3.24.0 (#3053) 2023-11-21 07:31:15 +01:00
Pepe Fagoaga
26cbafa204 fix(deps): Add missing jsonschema (#3052) 2023-11-20 18:41:39 +01:00
Sergio Garcia
d14541d1de fix(json-ocsf): add profile only for AWS provider (#3051) 2023-11-20 17:00:36 +01:00
Sergio Garcia
3955ebd56c chore(python): update python version constraint <3.12 (#3047) 2023-11-20 14:49:09 +01:00
Ignacio Dominguez
e212645cf0 fix(codeartifact): solve dependency confusion check (#2999)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-11-20 14:48:46 +01:00
Sergio Garcia
db9c1c24d3 chore(moto): install all moto dependencies (#3048) 2023-11-20 13:44:53 +01:00
Vajrala Venkateswarlu
0a305c281f feat(custom_checks_metadata): Add checks metadata override for severity (#3038)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-11-20 10:44:47 +01:00
Sergio Garcia
43c96a7875 chore(regions_update): Changes in regions for AWS services. (#3045)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-11-20 10:15:32 +01:00
Sergio Garcia
3a93aba7d7 chore(release): update Prowler Version to 3.11.3 (#3044)
Co-authored-by: github-actions <noreply@github.com>
2023-11-16 17:07:14 +01:00
Sergio Garcia
3d563356e5 fix(json): check if profile is None (#3043) 2023-11-16 13:52:07 +01:00
Johnny Lu
9205ef30f8 fix(securityhub): findings not being imported or archived in non-aws partitions (#3040)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-11-16 11:27:28 +01:00
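
Findings imported into Security Hub in the `aws-cn` or `aws-us-gov` partitions need ARNs built with the matching partition prefix. A minimal sketch of deriving the partition from the region (an illustrative helper, not the fix's actual code):

```python
def get_partition(region: str) -> str:
    """Map an AWS region to its ARN partition (illustrative helper)."""
    if region.startswith("cn-"):
        return "aws-cn"
    if region.startswith("us-gov-"):
        return "aws-us-gov"
    return "aws"

# Security Hub product ARNs look roughly like
# arn:{partition}:securityhub:{region}::product/prowler/prowler
```
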
Sergio Garcia
19c2dccc6d chore(regions_update): Changes in regions for AWS services. (#3042)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-11-16 11:09:41 +01:00
Sergio Garcia
8f819048ed chore(release): update Prowler Version to 3.11.2 (#3037)
Co-authored-by: github-actions <noreply@github.com>
2023-11-15 09:07:57 +01:00
Sergio Garcia
3a3bb44f11 fix(GuardDuty): only execute checks if GuardDuty enabled (#3028) 2023-11-14 14:14:05 +01:00
Nacho Rivera
f8e713a544 feat(azure regions): support non-default azure region (#3013)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-11-14 13:17:48 +01:00
Pepe Fagoaga
573f1eba56 fix(securityhub): Use enabled_regions instead of audited_regions (#3029) 2023-11-14 12:57:54 +01:00
simone ragonesi
a36be258d8 chore: modify latest version msg (#3036)
Signed-off-by: r3drun3 <simone.ragonesi@sighup.io>
2023-11-14 12:11:55 +01:00
Sergio Garcia
690ec057c3 fix(ec2_securitygroup_not_used): check if security group is associated (#3026) 2023-11-14 12:03:01 +01:00
dependabot[bot]
2681feb1f6 build(deps): bump azure-storage-blob from 12.18.3 to 12.19.0 (#3034)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-14 11:47:42 +01:00
Sergio Garcia
e662adb8c5 chore(regions_update): Changes in regions for AWS services. (#3035)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-11-14 11:47:24 +01:00
Sergio Garcia
c94bd96c93 chore(args): make compatible severity and services arguments (#3024) 2023-11-14 11:26:53 +01:00
dependabot[bot]
6d85433194 build(deps): bump alive-progress from 3.1.4 to 3.1.5 (#3033)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-14 09:41:32 +01:00
dependabot[bot]
7a6092a779 build(deps): bump google-api-python-client from 2.106.0 to 2.107.0 (#3032)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-14 09:16:00 +01:00
dependabot[bot]
4c84529aed build(deps-dev): bump pytest-xdist from 3.3.1 to 3.4.0 (#3031)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-14 08:48:02 +01:00
Sergio Garcia
512d3e018f chore(accessanalyzer): include service in allowlist_non_default_regions (#3025) 2023-11-14 08:00:17 +01:00
dependabot[bot]
c6aff985c9 build(deps-dev): bump moto from 4.2.7 to 4.2.8 (#3030)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-14 07:54:34 +01:00
Sergio Garcia
7fadf31a2b chore(release): update Prowler Version to 3.11.1 (#3021)
Co-authored-by: github-actions <noreply@github.com>
2023-11-10 12:53:07 +01:00
Sergio Garcia
e7d098ed1e chore(regions_update): Changes in regions for AWS services. (#3020)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-11-10 11:34:44 +01:00
Sergio Garcia
21fba27355 fix(iam): do not list tags for inline policies (#3014) 2023-11-10 09:51:19 +01:00
John Mastron
74e37307f7 fix(SQS): fix invalid SQS ARNs (#3016)
Co-authored-by: John Mastron <jmastron@jpl.nasa.gov>
2023-11-10 09:33:18 +01:00
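
A well-formed SQS queue ARN has the shape `arn:<partition>:sqs:<region>:<account-id>:<queue-name>`. A sketch of building one (illustrative helper, not the fix's actual code):

```python
def sqs_queue_arn(partition: str, region: str, account_id: str, queue_name: str) -> str:
    """Build a well-formed SQS queue ARN (illustrative helper)."""
    return f"arn:{partition}:sqs:{region}:{account_id}:{queue_name}"

# e.g. sqs_queue_arn("aws", "us-east-1", "123456789012", "my-queue")
#      -> "arn:aws:sqs:us-east-1:123456789012:my-queue"
```
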
Sergio Garcia
d9d7c009a5 fix(rds): check if engines exist in region (#3012) 2023-11-10 09:20:36 +01:00
Pepe Fagoaga
2220cf9733 refactor(allowlist): Simplify and handle corner cases (#3019) 2023-11-10 09:11:52 +01:00
Pepe Fagoaga
3325b72b86 fix(iam-sqs): Handle exceptions for non-existent resources (#3010) 2023-11-08 14:06:45 +01:00
Sergio Garcia
9182d56246 chore(regions_update): Changes in regions for AWS services. (#3011)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-11-08 10:42:23 +01:00
Nacho Rivera
299ece19a8 fix(clean local output dirs): clean dirs when output to s3 (#2997) 2023-11-08 10:05:24 +01:00
Sergio Garcia
0a0732d7c0 docs(gcp): update GCP permissions (#3008) 2023-11-07 14:06:22 +01:00
Sergio Garcia
28011d97a9 chore(regions_update): Changes in regions for AWS services. (#3007)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-11-07 11:04:45 +01:00
Sergio Garcia
e71b0d1b6a chore(regions_update): Changes in regions for AWS services. (#3001)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-11-07 11:04:36 +01:00
John Mastron
ec01b62a82 fix(aws): check all conditions in IAM policy parser (#3006)
Co-authored-by: John Mastron <jmastron@jpl.nasa.gov>
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-11-07 10:40:34 +01:00
dependabot[bot]
12b45c6896 build(deps): bump google-api-python-client from 2.105.0 to 2.106.0 (#3005)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-07 09:45:51 +01:00
dependabot[bot]
51c60dd4ee build(deps): bump mkdocs-material from 9.4.7 to 9.4.8 (#3004)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-07 09:02:02 +01:00
Sergio Garcia
bf315261af chore(regions_update): Changes in regions for AWS services. (#2998)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-11-06 10:14:50 +01:00
Kay Agahd
6e83afb580 fix(s3 race condition): catch error if a bucket does not exist any longer (#3000) 2023-11-06 09:24:51 +01:00
Sergio Garcia
1a5742d4f5 fix(cloudtrail): handle HasInsightSelectors key (#2996) 2023-11-02 14:09:27 +01:00
Sergio Garcia
0e22458e86 fix(docs): solve allowlist syntax (#2995) 2023-11-02 12:43:59 +01:00
Sergio Garcia
cd8d1b8a8f chore(regions_update): Changes in regions for AWS services. (#2993)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-11-02 12:27:21 +01:00
Sergio Garcia
141a142742 chore(brew): remove brew action (#2994) 2023-11-02 10:28:32 +01:00
Sergio Garcia
a59b344d20 chore(release): update Prowler Version to 3.11.0 (#2992)
Co-authored-by: github-actions <noreply@github.com>
2023-10-31 15:48:33 +01:00
Pepe Fagoaga
f666711a2a fix(vpc_endpoint_services_allowed_principals_trust_boundaries): Principal (#2991) 2023-10-31 14:19:20 +01:00
Sergio Garcia
1014d64828 fix(outputs): remove empty outputs (#2990) 2023-10-31 14:09:02 +01:00
Sergio Garcia
a126a99853 fix(cis): remove new lines in CIS csv (#2989) 2023-10-31 13:56:33 +01:00
Sergio Garcia
082390a7f0 chore(gcp): print inactive GCP APIs (#2987) 2023-10-31 12:53:53 +01:00
Sergio Garcia
a994553c16 fix(allowlist): verify if allowlist file exists (#2988) 2023-10-31 12:53:45 +01:00
Sergio Garcia
3fd2ae954d fix(elbv2_desync_mitigation_mode): improve logic (#2986) 2023-10-31 12:42:24 +01:00
dependabot[bot]
e17c5642ca build(deps): bump google-api-python-client from 2.104.0 to 2.105.0 (#2985)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-31 11:36:12 +01:00
Sergio Garcia
fa7968cb1b feat(alias): add check alias functionality (#2971) 2023-10-31 11:25:54 +01:00
dependabot[bot]
57c3183b15 build(deps): bump mkdocs-material from 9.4.6 to 9.4.7 (#2983)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-31 11:07:29 +01:00
dependabot[bot]
1fd6471cb1 build(deps-dev): bump moto from 4.2.6 to 4.2.7 (#2984)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-31 10:41:22 +01:00
dependabot[bot]
1827230514 build(deps): bump azure-identity from 1.14.1 to 1.15.0 (#2982)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-31 10:13:32 +01:00
dependabot[bot]
06dc3d3361 build(deps-dev): bump pytest from 7.4.2 to 7.4.3 (#2981)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-10-31 09:28:50 +01:00
Sergio Garcia
a7a2e24d42 chore(docs): allowlist non-default regions (#2980) 2023-10-30 21:52:25 +01:00
ToastyCat
bb543cb5db fix(ec2_instance_imdsv2_enabled): verify if metadata service is disabled (#2978)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-10-30 21:16:25 +01:00
Pepe Fagoaga
373ce0ad04 fix(GuardDuty): Add enabled_in_account parameter (#2979) 2023-10-30 19:39:22 +01:00
Sergio Garcia
fcb979aae1 feat(allowlist): allowlist non-default regions configuration (#2974) 2023-10-30 17:51:49 +01:00
Pepe Fagoaga
fcc56ad6f7 chore(allowlist): Extract allowlist from report (#2975) 2023-10-30 09:52:59 +01:00
Nacho Rivera
5be8570c8c fix(cloudtrail service): typo in logging info (#2976) 2023-10-30 09:49:20 +01:00
Sergio Garcia
d471442422 chore(regions_update): Changes in regions for AWS services. (#2973)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-10-27 11:41:08 +02:00
Sergio Garcia
4070c923fc chore(regions_update): Changes in regions for AWS services. (#2969)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-10-26 15:46:45 +02:00
Sergio Garcia
3ca38fe92d fix(gcp): set always location to lowercase (#2970) 2023-10-26 13:21:09 +02:00
dependabot[bot]
55ebadfe28 build(deps-dev): bump werkzeug from 2.3.4 to 3.0.1 (#2968)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-26 07:58:28 +02:00
Sergio Garcia
9bd2519c83 chore(APIGatewayV2): improve check naming (#2966) 2023-10-25 16:59:06 +02:00
Sergio Garcia
4bfe145be3 chore(regions_update): Changes in regions for AWS services. (#2965)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-10-25 13:59:19 +02:00
Sergio Garcia
41085049e2 chore(docs): add STS Endpoint and Allowlist updates (#2964) 2023-10-25 13:58:59 +02:00
Sergio Garcia
f7312db0c7 chore(allowlist): prettify allowlist names (#2963) 2023-10-24 18:48:34 +02:00
Sergio Garcia
008534d839 feat(controltower): add AWS Control Tower resources to default Allowlist configuration file (#2953)
Co-authored-by: Toni de la Fuente <toni@blyx.com>
2023-10-24 16:45:21 +02:00
Pepe Fagoaga
8533714cb2 tests: remove tests folder after execution (#2962) 2023-10-24 16:29:18 +02:00
Sergio Garcia
b822c19d2c feat(ignore unused services): add --ignore-unused-services argument to ignore findings from services not in actual use (#2936) 2023-10-24 14:09:27 +02:00
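
The commit above adds the `--ignore-unused-services` flag. A sketch of how such a boolean flag can be wired into an argparse-based CLI (illustrative only, not Prowler's actual parser code):

```python
import argparse

parser = argparse.ArgumentParser(prog="prowler")
parser.add_argument(
    "--ignore-unused-services",
    action="store_true",
    help="ignore findings for services that are not in actual use",
)

# argparse converts dashes to underscores in the attribute name.
args = parser.parse_args(["--ignore-unused-services"])
assert args.ignore_unused_services is True
```
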
Sergio Garcia
2aa3126eb0 chore(regions_update): Changes in regions for AWS services. (#2961)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-10-24 11:37:17 +02:00
Sergio Garcia
4c5e85f7ba fix(sts): force v2 STS tokens (#2956) 2023-10-24 10:15:41 +02:00
dependabot[bot]
2b41da4543 build(deps-dev): bump vulture from 2.9.1 to 2.10 (#2960)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-24 09:46:34 +02:00
dependabot[bot]
f8dc88df6e build(deps): bump google-api-python-client from 2.102.0 to 2.104.0 (#2959)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-24 09:15:15 +02:00
dependabot[bot]
534033874e build(deps-dev): bump openapi-spec-validator from 0.6.0 to 0.7.1 (#2958)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-24 08:52:06 +02:00
dependabot[bot]
0851b923fd build(deps-dev): bump pylint from 3.0.1 to 3.0.2 (#2957)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-24 08:05:18 +02:00
Kay Agahd
fd4bed65a0 docs(v2_v3_mapping): document prowler v3.10.0 changes (#2955) 2023-10-23 15:23:17 +02:00
Nacho Rivera
4746b8b835 feat(report interface): add reporting interface call after report (#2948) 2023-10-23 09:06:51 +02:00
Sergio Garcia
d24eafe6a6 chore(regions_update): Changes in regions for AWS services. (#2954)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-10-23 07:37:54 +02:00
Sergio Garcia
f3b81edf67 fix(APIGateway): Improve check naming (#2952) 2023-10-20 08:07:08 +02:00
Sergio Garcia
976d0da26e fix(resource filters): add missing resource filters (#2951) 2023-10-19 18:18:58 +02:00
Sergio Garcia
5113b83bc4 chore(create_role_to_assume_cfn.yaml): Add DLM permissions (#2949) 2023-10-19 17:40:07 +02:00
Sergio Garcia
a88877bf7c chore(github): ignore permissions path in GitHub actions (#2950) 2023-10-19 17:37:35 +02:00
Jit
a46d7b2ed9 feat(aws): New Neptune, ElastiCache, APIGW and IAM checks (#2862) 2023-10-19 17:31:51 +02:00
Pepe Fagoaga
170241649d fix(ec2_securitygroup_not_used): Mock Lambda service (#2947) 2023-10-19 17:05:04 +02:00
Sergio Garcia
1ac22bddd6 fix(security group): check if security groups are used by Lambda (#2944) 2023-10-19 12:13:24 +02:00
Sergio Garcia
54fe10ae86 chore(permissions): add DLM permissions (#2946) 2023-10-19 11:45:41 +02:00
Sergio Garcia
33647786e6 chore(regions_update): Changes in regions for AWS services. (#2945)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-10-19 11:35:19 +02:00
Sergio Garcia
eb3cb97115 feat(vpc): add vpc, nacl or subnet names in findings (#2928) 2023-10-18 16:07:53 +02:00
Sergio Garcia
236f57ab0e chore(regions_update): Changes in regions for AWS services. (#2942)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-10-18 11:39:45 +02:00
Kay Agahd
c88054107e docs(config): add missing configurable variables (#2941) 2023-10-18 09:10:46 +02:00
dependabot[bot]
c03c7c35d8 build(deps): bump urllib3 from 1.26.17 to 1.26.18 (#2940) 2023-10-18 08:08:11 +02:00
Pepe Fagoaga
b5455215a5 fix(sqs): Handle AWS.SimpleQueueService.NonExistentQueue in list_queue_tags (#2939) 2023-10-17 20:45:22 +02:00
Jit
85e12e9479 feat(aws): New CloudTrail, DLM, DocumentDB, EC2, Account and Support checks (#2675)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-10-17 19:00:37 +02:00
Sergio Garcia
f3b7f841fb chore(regions_update): Changes in regions for AWS services. (#2937)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-10-17 15:55:38 +02:00
Sergio Garcia
92547bfdb6 fix(vpc): ignore com.amazonaws.vpce endpoints (#2929) 2023-10-17 11:14:12 +02:00
dependabot[bot]
3739801ed4 build(deps): bump shodan from 1.30.0 to 1.30.1 (#2935)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-17 11:03:52 +02:00
dependabot[bot]
a6778a6e27 build(deps-dev): bump moto from 4.2.5 to 4.2.6 (#2934)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-17 10:40:37 +02:00
dependabot[bot]
f1fc3c63ea build(deps): bump azure-identity from 1.14.0 to 1.14.1 (#2933)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-17 09:58:42 +02:00
dependabot[bot]
b2a80775a8 build(deps): bump mkdocs-material from 9.4.4 to 9.4.6 (#2932)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-17 08:48:21 +02:00
dependabot[bot]
1f7f68f6af build(deps): bump azure-storage-blob from 12.18.2 to 12.18.3 (#2931)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-17 08:25:28 +02:00
Pepe Fagoaga
388678f822 chore(docs): Add report.region criteria (#2930) 2023-10-16 14:50:11 +02:00
Sergio Garcia
1230a3323d chore(regions_update): Changes in regions for AWS services. (#2927)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-10-16 09:42:30 +02:00
Sergio Garcia
02a3c750f8 chore(release): update Prowler Version to 3.10.0 (#2926)
Co-authored-by: github-actions <noreply@github.com>
2023-10-11 17:56:14 +02:00
Nacho Rivera
cbdb9ce614 fix(Dockerfile): Update to python:3.11-alpine 2023-10-11 16:42:03 +02:00
dependabot[bot]
be98ea52d7 build(deps-dev): bump gitpython from 3.1.35 to 3.1.37 (#2924)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-11 09:46:41 +02:00
Sergio Garcia
b6cf63bb0c chore(regions_update): Changes in regions for AWS services. (#2923)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-10-10 18:55:45 +02:00
dependabot[bot]
04410033e7 build(deps-dev): bump pylint from 3.0.0 to 3.0.1 (#2920)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-10 18:55:28 +02:00
dependabot[bot]
e6c6df1334 build(deps): bump slack-sdk from 3.22.0 to 3.23.0 (#2919)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-10 16:35:44 +02:00
dependabot[bot]
91b06a4297 build(deps): bump google-api-python-client from 2.101.0 to 2.102.0 (#2918)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-10 15:52:07 +02:00
dependabot[bot]
640ad7bd60 build(deps): bump mkdocs-material from 9.4.3 to 9.4.4 (#2917)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-10 15:19:02 +02:00
Sergio Garcia
08b2ea01ab chore(iam): add IAM privilege escalation cases (#2921) 2023-10-10 12:41:02 +02:00
Nacho Rivera
236dea9d26 fix(pull-request.yml): launch linters when source code modified (#2922) 2023-10-10 12:14:24 +02:00
dependabot[bot]
f281f3791b build(deps): bump azure-storage-blob from 12.18.1 to 12.18.2 (#2916)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-10 08:02:45 +02:00
Pepe Fagoaga
aff2b80d55 docs(pull-request): Include check list to create/review PR (#2913) 2023-10-09 16:33:58 +02:00
Sergio Garcia
e69949c336 docs(misc): add option -z (#2914)
Co-authored-by: Nacho Rivera <nachor1992@gmail.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-10-09 16:33:49 +02:00
Nacho Rivera
5f7f36ecd4 fix(build-lint-push pipeline): pass pipeline when only ignored files change (#2915)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-10-09 15:45:16 +02:00
Sergio Garcia
9212478148 fix(cloudwatch): ignore new lines in filters (#2912) 2023-10-09 11:06:29 +02:00
Nacho Rivera
dec0ee1001 fix(pipeline): launch linters with file changes (#2911) 2023-10-06 11:41:58 +02:00
Sergio Garcia
e610c2514d feat(iam): improve disable credentials checks (#2909) 2023-10-06 11:41:04 +02:00
Sergio Garcia
3955450245 fix(securityhub): archive SecurityHub findings in empty regions (#2908) 2023-10-05 15:49:43 +02:00
Sergio Garcia
49a437dc0d chore(regions_update): Changes in regions for AWS services. (#2907)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-10-05 11:24:46 +02:00
Sergio Garcia
bf37be5013 chore(regions_update): Changes in regions for AWS services. (#2905)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-10-05 11:24:21 +02:00
Sergio Garcia
9793de1e96 fix(elb): add resource ARN to checks (#2906) 2023-10-04 12:37:15 +02:00
DevOpSpace
4c15318f28 feat(wafv2): Add check wafv2_webacl_logging_enabled (#2898)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-10-04 11:10:47 +02:00
Sergio Garcia
a4d3e78eb1 fix(acm): add certificate id (#2903) 2023-10-03 13:03:46 +02:00
Sergio Garcia
436166c255 chore(regions_update): Changes in regions for AWS services. (#2902)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-10-03 11:32:14 +02:00
Nacho Rivera
bbce2c5e35 fix(custom checks): fix import from s3 (#2901) 2023-10-03 11:31:55 +02:00
Sergio Garcia
0745a57f52 fix(findingID): remove duplicate finding IDs (#2890) 2023-10-03 11:31:33 +02:00
dependabot[bot]
9974c84440 build(deps-dev): bump coverage from 7.3.1 to 7.3.2 (#2895)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-03 10:34:52 +02:00
Nacho Rivera
3c396e76f6 fix(remove_custom_checks_module): delete service folder if empty (#2885) 2023-10-03 10:33:06 +02:00
Nacho Rivera
e701aca64b test(iam_credentials): Don't use search and negative indexes (#2899) 2023-10-03 09:54:53 +02:00
dependabot[bot]
26ad482b90 build(deps): bump mkdocs-material from 9.4.2 to 9.4.3 (#2894)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-03 09:54:16 +02:00
dependabot[bot]
d8fd3ef506 build(deps-dev): bump pylint from 2.17.6 to 3.0.0 (#2893)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-03 09:29:09 +02:00
dependabot[bot]
43016d75e8 build(deps-dev): bump moto from 4.2.4 to 4.2.5 (#2892)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-03 08:51:07 +02:00
Pepe Fagoaga
39b6ce3352 fix(dockerfile): Use latest curl (#2897) 2023-10-03 08:48:32 +02:00
dependabot[bot]
1e3ec10a1a build(deps): bump urllib3 from 1.26.15 to 1.26.17 (#2896)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-03 08:12:45 +02:00
dependabot[bot]
c4e13eef3f build(deps): bump pydantic from 1.10.12 to 1.10.13 (#2891)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-03 08:07:11 +02:00
Samuel Burgos
6558aedee3 feat(json-asff): adds AWS resource tags in json-asff and SecurityHub findings (#2786)
Co-authored-by: samuel.burgos <samuel.burgos@flywire.com>
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-10-02 18:20:35 +02:00
Sergio Garcia
a2dfb60466 test(vpc_peering_routing_tables_with_least_privilege): add test (#2889) 2023-10-02 17:22:11 +02:00
Nacho Rivera
c158dcf2ef fix(iam creds checks): add missing tests and fix current ones (#2888) 2023-10-02 16:27:44 +02:00
Sergio Garcia
40318b87bf fix(vpc_peering_routing_tables_with_least_privilege): check only peering routes (#2887) 2023-10-02 16:20:39 +02:00
Sergio Garcia
64f06b11b8 fix(version): add timeout and check HTTP errors (#2886) 2023-10-02 14:44:16 +02:00
Pepe Fagoaga
583194085c test(utils): Include missing tests (#2884) 2023-10-02 11:29:09 +02:00
Nacho Rivera
2d89f57644 fix(iam credentials checks): unify logic (#2883) 2023-10-02 11:28:26 +02:00
Nacho Rivera
f4ed01444a fix(ec2_elastic_ip_unassigned): rename check (#2882) 2023-10-02 10:34:46 +02:00
Pepe Fagoaga
a7980a202d fix(aws): Include missing ARNs (#2880) 2023-10-02 08:45:06 +02:00
JackStuart
3a6c93dd37 fix(azure): Typo in SQL check (#2881) 2023-10-02 08:21:00 +02:00
Pepe Fagoaga
6cd272da37 docs(developer-guide): fix typos (#2878) 2023-09-29 13:12:05 +02:00
Sergio Garcia
a7056b66c7 chore(regions_update): Changes in regions for AWS services. (#2879)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-09-29 11:34:41 +02:00
Pepe Fagoaga
4d6d58ef91 fix(autoscaling_find_secrets_ec2_launch_configuration): Fix UnicodeDecodeError (#2870) 2023-09-28 17:13:17 +02:00
Sergio Garcia
93a88ec2c7 chore(regions_update): Changes in regions for AWS services. (#2875)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-09-28 11:43:51 +02:00
Pepe Fagoaga
b679df4fbe docs(aws): Move regions and profiles to AWS (#2874) 2023-09-27 23:23:31 +02:00
Sergio Garcia
ba2c7347f9 chore(regions_update): Changes in regions for AWS services. (#2873)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-09-27 11:35:14 +02:00
Pepe Fagoaga
f8b4e6e8f0 fix(iam): Handle NoSuchEntity when calling list_role_policies (#2872) 2023-09-27 09:37:07 +02:00
Pepe Fagoaga
7ecb4d7b00 fix(s3_bucket_policy_public_write_access): Handle S3 Policy without Principal (#2871) 2023-09-27 09:35:26 +02:00
Pepe Fagoaga
1697e6ad62 fix(outputs_unix_timestamp): Remove subsecond (#2861) 2023-09-26 16:02:52 +02:00
Pepe Fagoaga
6687f76736 refactor(security_hub): Send findings in batches (#2868)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-09-26 14:10:25 +02:00
Sergio Garcia
35e5bbdaf1 chore(regions_update): Changes in regions for AWS services. (#2869)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-09-26 12:42:48 +02:00
dependabot[bot]
5c5e7d9509 build(deps): bump google-api-python-client from 2.99.0 to 2.101.0 (#2867)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-26 11:38:34 +02:00
dependabot[bot]
b0c0a9d98c build(deps-dev): bump moto from 4.2.3 to 4.2.4 (#2866)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-26 10:28:34 +02:00
dependabot[bot]
7c246f7be4 build(deps-dev): bump pylint from 2.17.5 to 2.17.6 (#2865) 2023-09-26 10:02:58 +02:00
dependabot[bot]
bfc2a41699 build(deps): bump mkdocs-material from 9.3.1 to 9.4.2 (#2864)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-26 09:22:31 +02:00
Sergio Garcia
081a7ead4c chore(regions_update): Changes in regions for AWS services. (#2863) 2023-09-23 19:14:46 +02:00
Sergio Garcia
70fbf1676a fix(iam_inline_policy_no_administrative_privileges): set resource id as the entity name (#2820) 2023-09-22 12:59:10 +02:00
Pepe Fagoaga
87ddb6b171 fix(apigw): KeyError name (#2858) 2023-09-22 11:23:37 +02:00
Pepe Fagoaga
c0d45d730f fix(elbv2): Handle LoadBalancerNotFound (#2860) 2023-09-22 11:23:21 +02:00
Fennerr
6b97a04643 fix(eks_control_plane_endpoint_access_restricted): handle endpoint private access (#2824)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-09-22 11:22:56 +02:00
Pepe Fagoaga
2a5a07bae0 fix(ds): GetSnapshotLimits for MicrosoftAD (#2859) 2023-09-22 11:22:42 +02:00
Pepe Fagoaga
18e34c670e fix(iam): Handle NoSuchEntityException in ListRolePolicies (#2857) 2023-09-22 11:21:33 +02:00
Fennerr
d6a35485d2 fix(sqs_queues_not_publicly_accessible): Improve status extended (#2848)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-09-22 11:20:59 +02:00
Fennerr
6204f6cdc8 fix(eks_endpoints_not_publicly_accessible): handle endpoint private access (#2825)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-09-22 11:19:56 +02:00
dependabot[bot]
50bc5309f5 build(deps): bump cryptography from 41.0.3 to 41.0.4 (#2856)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-22 09:47:08 +02:00
Sergio Garcia
725e2e92ab chore(regions_update): Changes in regions for AWS services. (#2853)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-09-20 11:42:21 +02:00
Sergio Garcia
0b07326e36 chore(regions_update): Changes in regions for AWS services. (#2852)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-09-20 00:03:41 +02:00
dependabot[bot]
e86d194f11 build(deps-dev): bump moto from 4.2.2 to 4.2.3 (#2851)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-09-19 13:38:39 +02:00
dependabot[bot]
6949656d0e build(deps): bump azure-storage-blob from 12.18.0 to 12.18.1 (#2850)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-19 11:36:27 +02:00
dependabot[bot]
a2c62bab47 build(deps): bump mkdocs from 1.5.2 to 1.5.3 (#2849)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-19 09:33:05 +02:00
Kay Agahd
3dd8aeac7c fix(iam): findings of some checks may have been lost (#2847) 2023-09-18 10:46:04 +02:00
Sergio Garcia
2c342a5c5f chore(regions_update): Changes in regions for AWS services. (#2846) 2023-09-17 09:37:05 +02:00
Sergio Garcia
adef1afdfa chore(regions_update): Changes in regions for AWS services. (#2845)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-09-15 11:36:56 +02:00
Pepe Fagoaga
a980b2606b fix(cloudtrail_s3_dataevents_read/write_enabled): Handle S3 ARN (#2844) 2023-09-15 11:36:40 +02:00
Sergio Garcia
ed83927486 fix(vpc): solves CidrBlock KeyError (#2817) 2023-09-15 10:41:05 +02:00
Tayler Haviland
e745885b09 fix(ebs): improve snapshot encryption logic and fix typos (#2836)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2023-09-15 10:37:34 +02:00
Sergio Garcia
16ddbfde9f chore(regions_update): Changes in regions for AWS services. (#2842)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-09-14 12:03:59 +02:00
dependabot[bot]
bc11537350 build(deps): bump mkdocs-material from 9.2.1 to 9.3.1 (#2839) 2023-09-13 17:44:06 +02:00
Sergio Garcia
ab4de79168 chore(regions_update): Changes in regions for AWS services. (#2833) 2023-09-13 17:15:52 +02:00
Sergio Garcia
8134897e91 chore(regions_update): Changes in regions for AWS services. (#2819) 2023-09-13 17:14:57 +02:00
Sergio Garcia
693d22ed25 chore(regions_update): Changes in regions for AWS services. (#2821) 2023-09-13 17:14:14 +02:00
dependabot[bot]
b1dab2466f build(deps): bump azure-storage-blob from 12.17.0 to 12.18.0 (#2838)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-13 17:03:03 +02:00
dependabot[bot]
d2b09f39e7 build(deps): bump google-api-python-client from 2.97.0 to 2.99.0 (#2837)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-13 16:24:40 +02:00
Cameron Stark
4475801a96 fix(storage_ensure_minimum_tls_version_12): misspelling in metadata (#2835) 2023-09-13 13:02:07 +02:00
dependabot[bot]
126ff8cf0d build(deps): bump slack-sdk from 3.21.3 to 3.22.0 (#2832)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-13 13:01:55 +02:00
dependabot[bot]
a536a785de build(deps-dev): bump gitpython from 3.1.34 to 3.1.35 (#2831)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-13 12:51:15 +02:00
dependabot[bot]
ed89ef74eb build(deps-dev): bump coverage from 7.3.0 to 7.3.1 (#2828)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-13 12:42:13 +02:00
Kay Agahd
f1bea27e44 feat(iam): add new check iam_role_administratoraccess_policy (#2822) 2023-09-12 09:19:20 +02:00
dependabot[bot]
7305e53439 build(deps-dev): bump pytest from 7.4.1 to 7.4.2 (#2827)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-12 09:11:58 +02:00
dependabot[bot]
b08c0e8150 build(deps): bump google-auth-httplib2 from 0.1.0 to 0.1.1 (#2826)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-12 09:11:34 +02:00
Nacho Rivera
8606a4579a fix(pre-commit): add file filter to python linters (#2818) 2023-09-08 08:29:55 +02:00
Nacho Rivera
1dfb72a1d1 feat(unix timestamp): add the --unix-timestamp flag to docs (#2816) 2023-09-07 10:33:58 +02:00
Sergio Garcia
f09b55b893 chore(regions_update): Changes in regions for AWS services. (#2814)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-09-07 09:15:19 +02:00
Nacho Rivera
30ba6029f5 feat(unix timestamp): add unix timestamp to outputs (#2813) 2023-09-07 09:14:02 +02:00
dependabot[bot]
9f0c830511 build(deps-dev): bump gitpython from 3.1.32 to 3.1.34 (#2815)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-07 08:44:03 +02:00
Nacho Rivera
973e3138fe feat(Dockerfile): add curl package to docker image (#2812) 2023-09-05 15:21:46 +02:00
Nacho Rivera
c996a562e6 fix(3.9.0): update pyproject.toml to latest release (#2811) 2023-09-05 15:21:33 +02:00
dependabot[bot]
f2bba4d1ee build(deps-dev): bump moto from 4.2.0 to 4.2.2 (#2809)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 13:38:17 +02:00
dependabot[bot]
8017a95413 build(deps-dev): bump pytest from 7.4.0 to 7.4.1 (#2808)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 13:02:17 +02:00
Nacho Rivera
26d209daff fix(testing docs): fix testing docs typos and syntax (#2803) 2023-09-05 13:01:35 +02:00
Nacho Rivera
44b979b4a4 fix(ec2_instance_..._ssm): mock ssm service and client in all the tests (#2804) 2023-09-05 12:34:02 +02:00
Kay Agahd
03ad61abc6 iam_policy_no_administrative_privileges does not exist and does not map to check122 (#2797) 2023-09-04 10:23:48 +02:00
Sergio Garcia
fe425f89a4 chore(regions_update): Changes in regions for AWS services. (#2802)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-09-04 10:23:06 +02:00
Nacho Rivera
11ad66fb79 feat(ec2_instance_managed_by_ssm): missing tests (#2800) 2023-09-04 10:22:43 +02:00
Sergio Garcia
ca5734a2c6 chore(regions_update): Changes in regions for AWS services. (#2801)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-09-01 12:55:50 +02:00
Nacho Rivera
e5414e87c7 fix(ec2 nacl checks): unify logic (#2799) 2023-09-01 12:55:29 +02:00
Sergio Garcia
8142f8f62f chore(regions_update): Changes in regions for AWS services. (#2794)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-09-01 12:24:56 +02:00
Nacho Rivera
74cf4076fa fix(apikeys_..._90_days): fix key creation time with dynamic date (#2798) 2023-09-01 12:18:55 +02:00
Nacho Rivera
dbd29c0ce1 fix(ec2 tests): add region and delete search sg checks (#2788) 2023-08-31 11:55:30 +02:00
Nacho Rivera
38a7dc1a93 fix(ec2 ebs/instance checks): unify checks logic (#2795) 2023-08-31 11:55:10 +02:00
Nacho Rivera
2891bc0b96 fix(policy_condition_parser): add StringEquals aws:SourceArn condition (#2793) 2023-08-31 11:54:48 +02:00
dependabot[bot]
8846ae6664 build(deps-dev): bump moto from 4.1.15 to 4.2.0 (#2783)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-31 10:29:00 +02:00
Nacho Rivera
2e3c3a55aa fix(html): unroll regions set prior to concat (#2790) 2023-08-30 16:38:56 +02:00
Nacho Rivera
7e44116d51 fix(is_valid_arn): include . into resource name (#2789) 2023-08-30 16:11:46 +02:00
Nacho Rivera
46f85e6395 fix(ec2 tests): add tags and region non sg checks (#2781) 2023-08-30 16:10:27 +02:00
Sergio Garcia
94a384fd81 chore(regions_update): Changes in regions for AWS services. (#2791)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-30 12:48:25 +02:00
Sergio Garcia
af6acefb53 chore(regions_update): Changes in regions for AWS services. (#2787)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-29 11:21:39 +02:00
Sergio Garcia
94fd7d252f chore(regions_update): Changes in regions for AWS services. (#2779)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-29 11:21:13 +02:00
dependabot[bot]
4767e38f5b build(deps-dev): bump vulture from 2.8 to 2.9.1 (#2785)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-29 09:58:55 +02:00
Nacho Rivera
276f6f9fb1 fix(ec2_securitygroup_default_restrict_traffic): fix check to only allow empty rules (#2777) 2023-08-25 12:42:26 +02:00
Sergio Garcia
2386c71c4f chore(regions_update): Changes in regions for AWS services. (#2776)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-25 11:24:43 +02:00
Pepe Fagoaga
21c52db66b test(vpc_endpoint_services_allowed_principals_trust_boundaries) (#2768) 2023-08-25 10:56:47 +02:00
Pepe Fagoaga
13cfa02f80 fix(test): Update moto to 4.1.15 and update tests (#2769) 2023-08-25 10:56:39 +02:00
Pepe Fagoaga
eedfbe3e7a fix(iam_policy_allows_privilege_escalation): Not use search for checking API actions (#2772) 2023-08-25 10:56:28 +02:00
Pepe Fagoaga
fe03eb4436 docs: explain output formats (#2774) 2023-08-25 10:56:15 +02:00
Pepe Fagoaga
d8e45d5c3f docs: Include new config ecr_repository_vulnerability_minimum_severity (#2775) 2023-08-25 10:56:04 +02:00
Sergio Garcia
12e9fb5eeb chore(regions_update): Changes in regions for AWS services. (#2773)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-24 12:07:05 +02:00
gerardocampo
957ffaabae feat(compliance): Update AWS compliance frameworks after PR 2750 (#2771)
Co-authored-by: Gerard Ocampo <gerard.ocampo@zelis.com>
2023-08-24 08:01:00 +02:00
Pepe Fagoaga
cb76e5a23c chore(s3): Move lib to the AWS provider and include tests (#2664) 2023-08-23 16:12:48 +02:00
Sergio Garcia
b17cc563ff chore(regions_update): Changes in regions for AWS services. (#2767)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-23 11:29:12 +02:00
Pepe Fagoaga
06a0b12efb fix(iam_policy_allows_privilege_escalation): Handle admin permission so * (#2763) 2023-08-23 10:40:06 +02:00
Pepe Fagoaga
d5bd5ebb7d chore(parser): Move provider logic to their folder (#2746) 2023-08-23 10:33:36 +02:00
Nacho Rivera
0a9a1c26db fix(get_regions_from_audit_resources): fix logic and add tests (#2766) 2023-08-23 10:20:12 +02:00
Nacho Rivera
83bfd8a2d4 fix(get_checks_from_input_arn): fix logic and add tests (#2764) 2023-08-23 09:35:42 +02:00
gerardocampo
e5d2c0c700 feat(iam): Check inline policies in IAM Users, Groups & Roles for admin privileges (#2750)
Co-authored-by: Gerard Ocampo <gerard.ocampo@zelis.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-08-23 08:29:13 +02:00
Pepe Fagoaga
590a5669d6 fix(nacls): Tests (#2760) 2023-08-22 22:26:19 +02:00
Sergio Garcia
e042740f67 chore(regions_update): Changes in regions for AWS services. (#2759)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-22 11:43:58 +02:00
dependabot[bot]
dab2ecaa6b build(deps): bump shodan from 1.29.1 to 1.30.0 (#2754)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-22 09:16:08 +02:00
dependabot[bot]
f9f4133b48 build(deps): bump azure-mgmt-storage from 21.0.0 to 21.1.0 (#2756)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-22 08:49:06 +02:00
dependabot[bot]
33dd21897d build(deps-dev): bump pytest-randomly from 3.13.0 to 3.15.0 (#2755)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-22 08:30:07 +02:00
Geoff Singer
cb2ef23a29 feat(s3): Add S3 KMS encryption check (#2757)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-08-22 08:28:17 +02:00
dependabot[bot]
e70e01196f build(deps): bump google-api-python-client from 2.96.0 to 2.97.0 (#2753)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-22 08:08:13 +02:00
dependabot[bot]
f70b9e6eb4 build(deps): bump mkdocs-material from 9.1.21 to 9.2.1 (#2752)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-22 07:39:45 +02:00
Chris Farris
d186c69473 feat(checks): dump all checks as a json file (#2683)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-08-21 17:35:31 +02:00
Nacho Rivera
4d817c48a8 fix(get_checks_from_input_arn): fix function and add tests (#2749) 2023-08-21 13:23:43 +02:00
Pepe Fagoaga
c13cab792b docs(testing): Mocking the service and the service client at the service client level (#2747) 2023-08-21 09:05:57 +02:00
Pepe Fagoaga
80aa463aa2 fix(checks_to_execute): --checks and --resource_arn working together (#2743) 2023-08-21 09:04:15 +02:00
Sergio Garcia
bd28b17ad9 chore(regions_update): Changes in regions for AWS services. (#2748)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-21 08:15:25 +02:00
Sergio Garcia
223119e303 chore(regions_update): Changes in regions for AWS services. (#2744)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-18 12:38:17 +02:00
Pepe Fagoaga
7c45cb45ae feat(ecr_repositories_scan_vulnerabilities_in_latest_image): Minimum severity is configurable (#2736) 2023-08-18 09:17:02 +02:00
Pepe Fagoaga
ac11c6729b chore(tests): Replace sure with standard assert (#2738) 2023-08-17 11:36:45 +02:00
Pepe Fagoaga
1677654dea docs(audit_config): How to use it (#2739) 2023-08-17 11:36:32 +02:00
Pepe Fagoaga
bc5a7a961b tests(check_security_group) (#2740) 2023-08-17 11:36:17 +02:00
Sergio Garcia
c10462223d chore(regions_update): Changes in regions for AWS services. (#2741)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-17 11:31:31 +02:00
vysakh-devopspace
54a9f412e8 feat(ec2): New check ec2_instance_detailed_monitoring_enabled (#2735)
Co-authored-by: Vysakh <venugopal.vysakh@gmail.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-08-16 14:31:06 +02:00
Sergio Garcia
5a107c58bb chore(regions_update): Changes in regions for AWS services. (#2737)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-16 11:42:47 +02:00
Pepe Fagoaga
8f091e7548 fix(gcp): Status extended ends with a dot (#2734) 2023-08-16 10:14:41 +02:00
Pepe Fagoaga
8cdc7b18c7 fix(test-vpc): use the right import paths (#2732) 2023-08-16 09:17:18 +02:00
christiandavilakoobin
9f2e87e9fb fix(is_account_only_allowed_in_condition): Context names in conditions are case-insensitive (#2726) 2023-08-16 08:27:24 +02:00
Sergio Garcia
e119458048 chore(regions_update): Changes in regions for AWS services. (#2733)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-15 16:25:17 +02:00
dependabot[bot]
c2983faf1d build(deps): bump azure-identity from 1.13.0 to 1.14.0 (#2731)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-15 10:34:56 +02:00
dependabot[bot]
a09855207e build(deps-dev): bump coverage from 7.2.7 to 7.3.0 (#2730)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-15 09:50:18 +02:00
Pepe Fagoaga
1e1859ba6f docs(style): Add more details (#2724) 2023-08-15 09:26:48 +02:00
dependabot[bot]
a3937e48a8 build(deps): bump google-api-python-client from 2.95.0 to 2.96.0 (#2729)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-15 09:22:59 +02:00
dependabot[bot]
d2aa53a2ec build(deps): bump mkdocs-material from 9.1.20 to 9.1.21 (#2728)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-15 08:57:24 +02:00
dependabot[bot]
b0bdeea60f build(deps-dev): bump vulture from 2.7 to 2.8 (#2727) 2023-08-15 08:33:27 +02:00
Pepe Fagoaga
465e64b9ac fix(azure): Status extended ends with a dot (#2725) 2023-08-14 21:48:16 +02:00
Pepe Fagoaga
fc53b28997 test(s3): Mock S3Control when used (#2722) 2023-08-14 21:48:05 +02:00
Pepe Fagoaga
72e701a4b5 fix(security): GitPython issue (#2720) 2023-08-14 21:09:12 +02:00
Pepe Fagoaga
2298d5356d test(coverage): Add Codecov (#2719) 2023-08-14 21:08:45 +02:00
Pepe Fagoaga
54137be92b test(python): 3.9, 3.10, 3.11 (#2718) 2023-08-14 21:08:29 +02:00
Sergio Garcia
7ffb12268d chore(release): update Prowler Version to 3.8.2 (#2721)
Co-authored-by: github-actions <noreply@github.com>
2023-08-14 09:18:23 +02:00
Sergio Garcia
790fff460a chore(regions_update): Changes in regions for AWS services. (#2717)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-14 08:13:10 +02:00
Chris Farris
9055dbafe3 fix(s3_bucket_policy_public_write_access): look at account and bucket-level public access block settings (#2715) 2023-08-12 01:46:24 +02:00
Pepe Fagoaga
4454d9115e chore(aws): 2nd round - Improve tests and include dot in status extended (#2714) 2023-08-12 01:41:35 +02:00
Sergio Garcia
0d74dec446 chore(regions_update): Changes in regions for AWS services. (#2712)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-11 11:18:18 +02:00
Pepe Fagoaga
0313dba7b4 chore(aws): Improve tests and status from accessanalyzer to cloudwatch (#2711) 2023-08-11 11:04:04 +02:00
Pepe Fagoaga
3fafac75ef docs(dev-guide): Fix a list and include some details to use the report (#2710) 2023-08-11 11:01:58 +02:00
Sergio Garcia
6b24b46f3d fix(security-hub): handle default output filename error (#2709) 2023-08-11 09:12:25 +02:00
Pepe Fagoaga
474e39a4c9 docs(developer-guide): Update checks, services and include testing (#2705)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-08-10 17:28:35 +02:00
Sergio Garcia
e652298b6a chore(release): update Prowler Version to 3.8.1 (#2706)
Co-authored-by: github-actions <noreply@github.com>
2023-08-10 14:08:48 +02:00
Pepe Fagoaga
9340ae43f3 fix(ds): Restore enums without optional (#2704) 2023-08-10 13:43:31 +02:00
Sergio Garcia
552024c53e fix(Enum): handle Enum classes correctly (#2702) 2023-08-10 13:21:24 +02:00
Pepe Fagoaga
3aba71ad2f docs(aws-orgs): Update syntax (#2703) 2023-08-10 12:40:17 +02:00
christiandavilakoobin
ade511df28 fix(sns): allow default SNS policy with SourceOwner (#2698)
Co-authored-by: Azure Pipeplines CI <monitor@koobin.com>
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-08-10 12:13:57 +02:00
Sergio Garcia
fc650214d4 fix(security hub): include custom output filename in resolve_security_hub_previous_findings (#2687) 2023-08-10 12:11:10 +02:00
Sergio Garcia
8266fd0c6f chore(print): prettify prints of listings and logs (#2699) 2023-08-10 12:08:07 +02:00
Pepe Fagoaga
f4308032c3 fix(cloudfront): fix ViewerProtocolPolicy and GeoRestrictionType (#2701) 2023-08-10 12:02:49 +02:00
Sergio Garcia
1e1f445ade chore(regions_update): Changes in regions for AWS services. (#2700)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-10 11:29:05 +02:00
Pepe Fagoaga
d41b0332ac feat(athena): New AWS Athena service + 2 workgroup checks (#2696) 2023-08-10 10:23:17 +02:00
Pepe Fagoaga
7258466572 fix(iam): password policy expiration (#2694) 2023-08-10 10:10:20 +02:00
Pepe Fagoaga
76db92ea14 chore(service): service class type hints (#2695) 2023-08-10 10:01:54 +02:00
Sergio Garcia
ad3cd66e08 docs(organizations): fix script and improve titles (#2693) 2023-08-10 09:56:47 +02:00
Sergio Garcia
22f8855ad7 chore(regions_update): Changes in regions for AWS services. (#2692)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-09 11:23:28 +02:00
Sergio Garcia
36e095c830 fix(iam_role_cross_service_confused_deputy_prevention): add ResourceAccount and PrincipalAccount conditions (#2689) 2023-08-09 10:41:48 +02:00
Sergio Garcia
887cac1264 fix(typo): spelling typo in organizations_scp_check_deny_regions (#2691) 2023-08-09 10:24:29 +02:00
Pepe Fagoaga
13059e0568 fix(ec2-securitygroups): Handle IPv6 public (#2690) 2023-08-09 10:08:30 +02:00
Pepe Fagoaga
9e8023d716 fix(config): Pass a configuration file using --config-file config.yaml (#2679) 2023-08-09 09:52:45 +02:00
Sergio Garcia
c54ba5fd8c chore(regions_update): Changes in regions for AWS services. (#2688)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-09 09:34:52 +02:00
dependabot[bot]
db80e063d4 build(deps-dev): bump pylint from 2.17.4 to 2.17.5 (#2685)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-08 10:48:42 +02:00
dependabot[bot]
b6aa12706a build(deps): bump mkdocs from 1.4.3 to 1.5.2 (#2684)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-08 10:22:20 +02:00
Chris Farris
c1caf6717d fix(organizations): request Organization Info after assume_role occurs (#2682)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-08-07 15:17:05 +02:00
Pepe Fagoaga
513fd9f532 fix(iam-dynamodb): Handle errors (#2680) 2023-08-07 10:04:19 +02:00
Pepe Fagoaga
bf77f817cb chore(azure): Improve AzureService class with __set_clients__ (#2676) 2023-08-04 13:04:05 +02:00
Sergio Garcia
e0bfef2ece chore(regions_update): Changes in regions for AWS services. (#2677)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-04 12:10:19 +02:00
Sergio Garcia
4a87f908a8 chore(release): update Prowler Version to 3.8.0 (#2674)
Co-authored-by: github-actions <noreply@github.com>
2023-08-03 18:34:23 +02:00
Sergio Garcia
16d95e5155 chore(readme): update providers summary table (#2673) 2023-08-03 16:45:09 +02:00
Pepe Fagoaga
1797b54259 test(azure): Storage Service (#2672) 2023-08-03 15:07:17 +02:00
Pepe Fagoaga
f289c8fb2e test(azure): SQL Server Service (#2671) 2023-08-03 14:43:18 +02:00
Pepe Fagoaga
e4ad881a69 test(azure): IAM service (#2670) 2023-08-03 14:15:34 +02:00
Pepe Fagoaga
138bca38e7 test(azure): Defender service (#2669) 2023-08-03 13:52:55 +02:00
edurra
44f7af3580 feat(azure): add Azure SQL Server service and 3 checks (#2665)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-08-03 11:29:17 +02:00
Sergio Garcia
2d832bca15 feat(gcp): Improve gcp performance (#2662) 2023-08-03 10:52:52 +02:00
Pepe Fagoaga
efa75a62e3 fix(iam_policy_allows_privilege_escalation): Handle permissions in groups (#2655) 2023-08-03 10:40:51 +02:00
Pepe Fagoaga
5763bca317 refactor(vpc_endpoint_connections_trust_boundaries) (#2667) 2023-08-03 09:56:09 +02:00
Pepe Fagoaga
c335334402 fix(test_only_aws_service_linked_roles): Flaky test (#2666) 2023-08-03 09:18:06 +02:00
Pepe Fagoaga
5bf3f70717 fix(vpc_endpoint_connections_trust_boundaries): Handle AWS Account ID as Principal (#2611) 2023-08-03 09:16:58 +02:00
Pepe Fagoaga
92c8a440ea feat(gcp): Add internet-exposed and encryption categories (#2663) 2023-08-02 15:53:12 +02:00
Pepe Fagoaga
b92d8a014c fix(cryptography): Update to 41.0.3 (#2661) 2023-08-02 11:47:51 +02:00
Sergio Garcia
aced44f051 fix(sns): handle topic policy conditions (#2660) 2023-08-02 11:45:27 +02:00
Sergio Garcia
49c9d2b077 chore(regions_update): Changes in regions for AWS services. (#2658) 2023-08-02 11:32:11 +02:00
Pepe Fagoaga
61beacf085 fix(docs): Azure auth and Slack integration (#2659) 2023-08-02 11:18:45 +02:00
Pepe Fagoaga
02f432238e fix(outputs): Not use reserved keyword list as variable (#2657) 2023-08-02 09:00:04 +02:00
Sergio Garcia
864d178e01 chore(regions_update): Changes in regions for AWS services. (#2654)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-08-01 11:52:02 +02:00
Sergio Garcia
78f0b823a9 fix(s3_bucket_level_public_access_block): check s3 public access block at account level (#2653) 2023-08-01 11:24:58 +02:00
dependabot[bot]
26cdc7a0ee build(deps-dev): bump flake8 from 6.0.0 to 6.1.0 (#2651)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-08-01 10:59:58 +02:00
dependabot[bot]
5e773f1eee build(deps): bump azure-mgmt-authorization from 3.0.0 to 4.0.0 (#2652)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-01 10:18:56 +02:00
dependabot[bot]
4a7ac7df22 build(deps-dev): bump moto from 4.1.13 to 4.1.14 (#2650)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-01 10:03:03 +02:00
dependabot[bot]
5250670d5d build(deps): bump google-api-python-client from 2.94.0 to 2.95.0 (#2649)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-01 09:49:51 +02:00
Gabriel Pragin
de4a825db8 fix(metadata): Typos (#2646) 2023-08-01 09:07:23 +02:00
dependabot[bot]
c256419144 build(deps): bump mkdocs-material from 9.1.19 to 9.1.20 (#2648) 2023-08-01 08:58:32 +02:00
Pepe Fagoaga
7bdca0420e fix(cloudtrail): Set status to INFO when trail is outside the audited account (#2643) 2023-07-31 17:50:21 +02:00
Pepe Fagoaga
3aa1fbced9 feat(azure_service): New parent class (#2642) 2023-07-31 16:03:49 +02:00
Pepe Fagoaga
dbbb70027a feat(gcp_service): Parent class (#2641) 2023-07-31 15:01:25 +02:00
Pepe Fagoaga
b4e78d28f8 fix(test): mock VPC client (#2640) 2023-07-31 11:19:15 +02:00
Pepe Fagoaga
e3d4e38a59 feat(aws): New AWSService class as parent (#2638) 2023-07-31 11:18:54 +02:00
Pepe Fagoaga
386f558eae fix(ec2_instance_secrets_user_data): Include line numbers in status (#2639) 2023-07-31 10:33:34 +02:00
Sergio Garcia
e08424d3a3 chore(regions_update): Changes in regions for AWS services. (#2637)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-07-31 09:54:44 +02:00
Chris Farris
03ad403e7a feat(s3): Add checks for buckets that are publicly listable or writable by ACL (#2628)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-07-31 08:35:18 +02:00
Sergio Garcia
4a674aae99 chore(regions_update): Changes in regions for AWS services. (#2634)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-07-28 11:34:30 +02:00
Pepe Fagoaga
8ee3744027 chore(security-hub): Explain Unique ID (#2631) 2023-07-27 13:39:12 +02:00
Gabriel Pragin
965327e801 chore(typos): Update check's status (#2629)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-07-27 11:44:09 +02:00
Sergio Garcia
f82ea43324 chore(regions_update): Changes in regions for AWS services. (#2630)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-07-27 11:31:45 +02:00
Pepe Fagoaga
a5c63845b4 test: security groups (#2627) 2023-07-26 16:29:27 +02:00
Sergio Garcia
034faa72cf chore(release): update Prowler Version to 3.7.2 (#2625)
Co-authored-by: github-actions <noreply@github.com>
2023-07-26 13:37:31 +02:00
Sergio Garcia
9bcd617964 chore(ec2): add SG name to resource_details (#2495) 2023-07-26 13:12:36 +02:00
Sergio Garcia
0db975dc7b fix(pypi-release): solve GH action for release (#2624) 2023-07-26 13:03:34 +02:00
Pepe Fagoaga
a51fa7703b fix(security): certifi issue (#2623) 2023-07-26 12:45:07 +02:00
Sergio Garcia
69fad0009d fix(ec2_ami_public): correct check metadata and logic (#2618) 2023-07-26 10:34:04 +02:00
Sergio Garcia
e721251936 fix(compute): solve key errors in compute service (#2610) 2023-07-26 08:49:09 +02:00
Pepe Fagoaga
2fe767e3e5 fix(ecs_task_def_secrets): Improve description to explain findings (#2621) 2023-07-25 18:26:22 +02:00
Sergio Garcia
6328ef4444 fix(guardduty): handle disabled detectors in guardduty_is_enabled (#2616) 2023-07-25 12:26:37 +02:00
dependabot[bot]
50b8e084e7 build(deps): bump google-api-python-client from 2.93.0 to 2.94.0 (#2614)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-25 09:37:10 +02:00
dependabot[bot]
3d88544feb build(deps): bump mkdocs-material from 9.1.18 to 9.1.19 (#2615)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-25 09:10:01 +02:00
dependabot[bot]
62e602c32e build(deps): bump pydantic from 1.10.11 to 1.10.12 (#2613)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-25 08:55:43 +02:00
Pepe Fagoaga
47a82560ea fix(s3): __get_object_lock_configuration__ warning logs (#2608) 2023-07-24 10:49:50 +02:00
Pepe Fagoaga
f7bbcc98b3 docs(boto3-configuration): format list (#2609) 2023-07-24 10:47:55 +02:00
Sergio Garcia
98a587aa15 chore(regions_update): Changes in regions for AWS services. (#2606)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-07-23 18:30:30 +02:00
Sergio Garcia
d2e34c42fd chore(regions_update): Changes in regions for AWS services. (#2599) 2023-07-18 17:38:43 +02:00
dependabot[bot]
605b07901e build(deps): bump google-api-python-client from 2.92.0 to 2.93.0 (#2597)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-18 10:52:26 +02:00
dependabot[bot]
18f02fac68 build(deps-dev): bump moto from 4.1.12 to 4.1.13 (#2598)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-07-18 10:37:34 +02:00
Pepe Fagoaga
28ea37f367 test(aws_provider): Role and User MFA (#2486) 2023-07-18 09:36:37 +02:00
Gabriel Pragin
65a737bb58 chore(metadata): Typos (#2595)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-07-18 09:27:58 +02:00
dependabot[bot]
7423cd2f93 build(deps): bump azure-storage-blob from 12.16.0 to 12.17.0 (#2596)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-18 09:25:51 +02:00
Gabriel Pragin
babd026351 chore(metadata): Typos (#2594) 2023-07-17 22:28:24 +02:00
Sergio Garcia
dd6e5a9029 fix(security): solve dependabot security alert (#2592) 2023-07-17 12:03:35 +02:00
Pepe Fagoaga
02519a4429 fix(assume_role): Set the AWS STS endpoint region (#2587) 2023-07-17 10:09:48 +02:00
Pepe Fagoaga
6575121b7a fix(ssm_incidents): Handle empty name (#2591) 2023-07-17 09:20:44 +02:00
Pepe Fagoaga
5b66368f0d fix(opensearch): log exception as WARNING (#2581) 2023-07-17 09:18:42 +02:00
Sergio Garcia
971c6720e4 chore(regions_update): Changes in regions for AWS services. (#2590) 2023-07-16 21:56:21 +02:00
Sergio Garcia
3afccc279f chore(regions_update): Changes in regions for AWS services. (#2588)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-07-14 11:34:21 +02:00
Nacho Rivera
8f015d0672 fix(allowlist): single account checks handling (#2585)
Co-authored-by: thomscode <thomscode@gmail.com>
2023-07-14 09:55:27 +02:00
Pepe Fagoaga
f33b96861c release: v3.7.1 (#2578) 2023-07-13 16:48:18 +02:00
Sergio Garcia
9832ce2ff9 chore(regions_update): Changes in regions for AWS services. (#2580)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-07-13 12:34:16 +02:00
Kay Agahd
490cbbaa48 docs: typos in README.md (#2579) 2023-07-13 07:34:27 +02:00
Nacho Rivera
d1c91093e2 feat(cond parser): add policy cond parser & apply in sqs public check (#2575) 2023-07-12 15:39:01 +02:00
Nacho Rivera
66fe101ccd fix(allowlist): handle wildcard in account field (#2577) 2023-07-12 14:22:42 +02:00
Pepe Fagoaga
7ab8c6b154 fix(iam): Handle NoSuchEntityException when calling list_attached_role_policies (#2571) 2023-07-12 12:48:57 +02:00
Sergio Garcia
73017b14c3 chore(regions_update): Changes in regions for AWS services. (#2574)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-07-12 11:17:00 +02:00
Sergio Garcia
f55495cd6a chore(regions_update): Changes in regions for AWS services. (#2572)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-07-11 11:45:43 +02:00
dependabot[bot]
e97146b5a3 build(deps): bump google-api-python-client from 2.91.0 to 2.92.0 (#2570)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-11 11:45:21 +02:00
dependabot[bot]
58f056c76d build(deps-dev): bump openapi-spec-validator from 0.5.7 to 0.6.0 (#2569)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-11 11:16:23 +02:00
dependabot[bot]
338bbc7a1f build(deps): bump pydantic from 1.10.9 to 1.10.11 (#2568)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-11 09:59:01 +02:00
dependabot[bot]
4ba54738a9 build(deps): bump boto3 from 1.26.161 to 1.26.165 (#2566)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-07-11 09:37:29 +02:00
Toni de la Fuente
235fd2adc4 docs: Update Compliance in README (#2563) 2023-07-11 09:12:11 +02:00
Toni de la Fuente
b15d518c94 feat(compliance): CIS Benchmark 2.0 for AWS (#2562) 2023-07-11 09:12:03 +02:00
dependabot[bot]
021e1c122c build(deps-dev): bump pytest-randomly from 3.12.0 to 3.13.0 (#2567)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-11 09:07:05 +02:00
Sergio Garcia
014b0dd6f6 chore(regions_update): Changes in regions for AWS services. (#2561)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-07-10 08:28:09 +02:00
Sergio Garcia
f9f68f9b86 chore(regions_update): Changes in regions for AWS services. (#2560)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-07-07 11:34:53 +02:00
Pepe Fagoaga
11a8ba131a test(outputs): Remove debug (#2559) 2023-07-07 10:14:47 +02:00
Sergio Garcia
858de64f8e chore(release): version 3.7.0 (#2558) 2023-07-06 21:17:21 +02:00
Sergio Garcia
676e60afb7 feat(gcp): add CIS checks (#2544) 2023-07-06 17:01:56 +02:00
Nacho Rivera
b1968f3f8b fix(allowlist): reformat allowlist logic (#2555)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-07-06 15:33:32 +02:00
Sergio Garcia
d2d077afaa chore(regions_update): Changes in regions for AWS services. (#2557)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-07-06 11:29:50 +02:00
Nacho Rivera
7097ca401d feat(lambda allowlist): mapping lambda/awslambda in allowlist (#2554) 2023-07-05 11:49:42 +02:00
Antoine Cichowicz
73e9a1eb9e docs: Update Amazon Linux 2 installation (#2553) 2023-07-05 07:54:18 +02:00
Nacho Rivera
0439d455fb fix(reporting docs): fix S3 reporting desc (#2551) 2023-07-04 12:43:39 +02:00
Sergio Garcia
d57f665a78 docs(allowlist): update DynamoDB allowlist example (#2552)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-07-04 11:55:33 +02:00
dependabot[bot]
859c731a13 build(deps): bump google-api-python-client from 2.90.0 to 2.91.0 (#2548)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-04 11:08:13 +02:00
Sergio Garcia
2e7613ddec docs(OCSF): add docs for OCSF output (#2550) 2023-07-04 10:37:42 +02:00
dependabot[bot]
57e9436783 build(deps): bump botocore from 1.29.161 to 1.29.165 (#2547)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-04 10:23:03 +02:00
dependabot[bot]
2f153fda2e build(deps): bump mkdocs-material from 9.1.17 to 9.1.18 (#2546)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-04 09:02:25 +02:00
dependabot[bot]
cbcb5905a3 build(deps): bump boto3 from 1.26.156 to 1.26.161 (#2545)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-04 08:46:49 +02:00
Sergio Garcia
6a2fb37615 fix(bigquery_dataset_public_access): handle status correctly (#2542) 2023-07-03 13:01:51 +02:00
Nacho Rivera
6403feaff9 fix(cloudwatch secrets): fix NoneType error handling (#2543) 2023-07-03 12:52:46 +02:00
Sergio Garcia
47736910ca fix(list-checks): handle listing checks when -s (#2540) 2023-07-03 11:48:40 +02:00
Sergio Garcia
ead592a0bf chore(regions_update): Changes in regions for AWS services. (#2539)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-07-03 11:22:43 +02:00
Nacho Rivera
d5bdba9244 feat(lambda service): mapping lambda service to awslambda (#2538) 2023-07-03 11:19:02 +02:00
Sergio Garcia
4f033cec8d feat(MITRE): add MITRE ATT&CK framework for AWS (#2537) 2023-06-30 12:24:05 +02:00
sssalim-aws
a58f4b2498 feat(compliance): AWS Well-Architected Framework Reliability Pillar v0.1 (#2536)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-06-29 11:13:38 +02:00
Sergio Garcia
01522ed8c7 feat(ENS): complete ENS Compliance Framework mapping (#2534) 2023-06-27 15:22:25 +02:00
Sergio Garcia
fa99ee9d5b feat(allowlist): add exceptions to allowlist (#2527) 2023-06-27 12:57:18 +02:00
Sergio Garcia
6efe634850 fix(iam): add StringLike condition in iam_role_cross_service_confused_deputy_prevention (#2533) 2023-06-27 10:06:46 +02:00
dependabot[bot]
60a1497eaf build(deps-dev): bump moto from 4.1.11 to 4.1.12 (#2530)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-27 09:07:44 +02:00
dependabot[bot]
1d0cbc08df build(deps): bump google-api-python-client from 2.89.0 to 2.90.0 (#2531)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-27 08:36:41 +02:00
dependabot[bot]
4d4280033b build(deps-dev): bump pytest from 7.3.2 to 7.4.0 (#2532)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-27 07:55:26 +02:00
dependabot[bot]
fd58775cae build(deps): bump mkdocs-material from 9.1.16 to 9.1.17 (#2529)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-27 07:39:58 +02:00
dependabot[bot]
ccb0e93da2 build(deps): bump botocore from 1.29.156 to 1.29.161 (#2528)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-27 07:19:22 +02:00
Sergio Garcia
c2a05da908 chore(ec2): reduce noise in Security Groups checks (#2525) 2023-06-23 15:06:09 +02:00
Sergio Garcia
e1da9e60fc chore(region): add get_default_region function in AWS Services (#2524) 2023-06-23 14:10:49 +02:00
Sergio Garcia
d044e535e0 fix(compliance): add version to ISO27001 (#2523) 2023-06-21 17:04:08 +02:00
Sergio Garcia
293560dcd4 fix(contrib): migrate multi-account-securityhub/run-prowler-securityhub.sh to v3 (#2503)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-06-21 15:18:02 +02:00
Sergio Garcia
90ebb815d5 fix(security hub): solve Security Hub format requirements (#2520) 2023-06-21 13:04:14 +02:00
Sergio Garcia
3d3d418ee6 chore(regions_update): Changes in regions for AWS services. (#2522)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-06-21 11:32:35 +02:00
Pedro Martín
f875cd05be feat(compliance): add ISO27001 compliance framework (#2517)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-06-20 16:57:28 +02:00
Sergio Garcia
435911489f fix(gcp): update Prowler SDK info of GCP (#2515) 2023-06-20 14:32:24 +02:00
Sergio Garcia
5fcfcd53aa fix(compliance): remove unnecessary Optional attributes (#2514) 2023-06-20 14:22:13 +02:00
dependabot[bot]
bc09215aad build(deps): bump boto3 from 1.26.147 to 1.26.156 (#2511)
Signed-off-by: dependabot[bot] <support@github.com>
2023-06-20 10:36:53 +02:00
dependabot[bot]
5f7e109e3d build(deps-dev): bump openapi-spec-validator from 0.5.6 to 0.5.7 (#2507)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-20 09:44:30 +02:00
Nacho Rivera
b75a5050d7 fix(apigw): Update metadata for API GW checks (#2512) 2023-06-20 09:22:00 +02:00
dependabot[bot]
be497f7083 build(deps): bump google-api-python-client from 2.88.0 to 2.89.0 (#2510) 2023-06-20 08:40:41 +02:00
dependabot[bot]
0ccae3e15b build(deps): bump mkdocs-material from 9.1.15 to 9.1.16 (#2508)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-20 08:08:17 +02:00
dependabot[bot]
d736c32aec build(deps): bump botocore from 1.29.152 to 1.29.156 (#2506)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-20 07:41:30 +02:00
Sergio Garcia
8ea5ba5d3f chore(OCSF): improve OCSF logic (#2502) 2023-06-19 12:37:04 +02:00
Nacho Rivera
60c341befd fix(vpc): handle ephemeral VPC endpoint services (#2501) 2023-06-19 12:23:52 +02:00
Sergio Garcia
be4f58ed8f chore(regions_update): Changes in regions for AWS services. (#2500) 2023-06-19 07:59:42 +02:00
Sergio Garcia
d82d1abab6 chore(3.6.1): release version (#2498) 2023-06-16 12:34:17 +02:00
Sergio Garcia
0d81bd457c fix(asff): handle empty Recommendation Url (#2496)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-06-16 12:17:09 +02:00
Sergio Garcia
af2b19436f fix(route53): correct Hosted Zone ARN (#2494) 2023-06-15 16:32:54 +02:00
Sergio Garcia
51beb3c7e4 chore(regions_update): Changes in regions for AWS services. (#2497)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-06-15 15:56:23 +02:00
Chris Kelly
5061456735 fix(security hub): Adds logic to map to valid ASFF statuses (#2491) 2023-06-15 15:52:19 +02:00
Nacho Rivera
b01eb3af95 fix(rds checks): test if key exists prior to checking it (#2489) 2023-06-14 12:15:33 +02:00
Sergio Garcia
328bebc168 chore(regions_update): Changes in regions for AWS services. (#2487)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-06-14 11:52:11 +02:00
Sergio Garcia
fc63fffa15 chore(release): 3.6.0 (#2485) 2023-06-13 17:38:51 +02:00
Sebastian Nyberg
707584b2ef feat(aws): Add MFA flag if try to assume role in AWS (#2478)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-06-13 17:18:10 +02:00
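A minimal sketch of what the new MFA flag implies, using only the standard STS AssumeRole parameters. This is an assumption about the flow's shape, not Prowler's actual implementation; both ARNs are placeholders.

```python
# Illustrative sketch only: assuming an IAM role with an MFA device via boto3.
# SerialNumber and TokenCode are standard STS AssumeRole parameters; the
# ARNs below are placeholders, not values from the Prowler codebase.
import boto3

sts = boto3.client("sts")
credentials = sts.assume_role(
    RoleArn="arn:aws:iam::123456789012:role/AuditRole",       # placeholder role
    RoleSessionName="prowler-mfa-session",
    SerialNumber="arn:aws:iam::123456789012:mfa/audit-user",  # placeholder MFA device
    TokenCode="123456",                                       # one-time code from the device
)["Credentials"]
```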
Nacho Rivera
561459d93b fix(dataevents checks): add trails home region (#2484) 2023-06-13 11:48:55 +02:00
Sergio Garcia
25e48ae546 chore(arn): include ARN of AWS accounts (#2477)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-06-13 10:18:23 +02:00
dependabot[bot]
513bb3e8d0 build(deps): bump botocore from 1.29.147 to 1.29.152 (#2482)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-13 10:07:57 +02:00
dependabot[bot]
04710ca908 build(deps): bump google-api-python-client from 2.86.0 to 2.88.0 (#2483)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-13 09:50:10 +02:00
dependabot[bot]
fcf0fcf20c build(deps): bump pydantic from 1.10.8 to 1.10.9 (#2481)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-13 09:06:59 +02:00
dependabot[bot]
2ff40d8e37 build(deps): bump boto3 from 1.26.142 to 1.26.147 (#2480)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-13 08:11:54 +02:00
dependabot[bot]
1bab5b06a4 build(deps-dev): bump pytest from 7.3.1 to 7.3.2 (#2479)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-13 07:50:41 +02:00
Sergio Garcia
01cd4bcb47 chore(arn): add missing ARNs to AWS Services (#2476) 2023-06-12 13:33:12 +02:00
Sebastian Nyberg
49b2a559ae feat(vpc): add check vpc_subnet_no_public_ip_by_default (#2472)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2023-06-12 09:44:10 +02:00
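The gist of this new check, sketched with plain boto3: a subnet whose MapPublicIpOnLaunch attribute is true hands out public IPv4 addresses by default. This is an assumption about the check's core idea, not the merged Prowler code.

```python
# Hedged sketch of vpc_subnet_no_public_ip_by_default's core idea; the real
# check lives in Prowler's service model, not in raw boto3 calls like these.
import boto3

ec2 = boto3.client("ec2")
for page in ec2.get_paginator("describe_subnets").paginate():
    for subnet in page["Subnets"]:
        if subnet.get("MapPublicIpOnLaunch"):
            print(f"FAIL: {subnet['SubnetId']} auto-assigns public IPs")
```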
Sergio Garcia
9212d24685 chore(regions_update): Changes in regions for AWS services. (#2474)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-06-12 08:48:44 +02:00
Nacho Rivera
eb43b11202 fix(arn validator): include : in regex (#2471) 2023-06-09 13:24:29 +02:00
Sergio Garcia
5c4cae8c9d feat(wellarchitected): add WellArchitected service and check (#2461) 2023-06-09 13:19:01 +02:00
Sergio Garcia
cfd7099743 chore(regions_update): Changes in regions for AWS services. (#2469)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-06-09 13:09:30 +02:00
Sergio Garcia
19ae237d29 chore(regions_update): Changes in regions for AWS services. (#2462)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-06-09 13:09:01 +02:00
Sergio Garcia
9cda78e561 chore(docs): improve allowlist suggestion (#2466) 2023-06-09 13:07:28 +02:00
Sergio Garcia
cc31872a7f fix(kms): check only KMS CMK tags (#2468) 2023-06-09 13:06:06 +02:00
Sebastian Nyberg
3c2c896708 chore(vpc): add mapPublicIpOnLaunch attribute to VPC subnets (#2470) 2023-06-09 12:45:28 +02:00
Jit
b73da9c54c feat(gcp): add 12 new checks for CIS Framework (#2426)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-06-08 11:25:51 +02:00
Sergio Garcia
414a45bfb0 chore(quick inventory): add warning message (#2460) 2023-06-07 15:16:52 +02:00
Sergio Garcia
2a6f808bca chore(boto3): update boto3 config (#2459) 2023-06-07 14:32:40 +02:00
Sergio Garcia
cdf2a13bbd feat(ocsf): add OCSF format as JSON output for AWS, Azure and GCP. Hello Amazon Security Lake! (#2429) 2023-06-07 14:28:43 +02:00
Sergio Garcia
3e3e8a14ee fix(inventory): handle exception for every call (#2457) 2023-06-07 09:33:10 +02:00
Nacho Rivera
37e180827a fix(azure): fix empty subscriptions case (#2455) 2023-06-06 17:31:43 +02:00
Pepe Fagoaga
b047b54545 fix(backup): Handle last_execution_date when None (#2454) 2023-06-06 16:57:17 +02:00
Pepe Fagoaga
b7bb4bbd57 fix(aws): Add missing resources ARN (#2453) 2023-06-06 16:56:59 +02:00
Pepe Fagoaga
86cf2cd233 fix(efs): Include resource ARN and handle from input (#2452) 2023-06-06 14:29:58 +02:00
Sergio Garcia
ab12c201b4 chore(docs): improve custom checks docs (#2428) 2023-06-06 11:58:20 +02:00
Sergio Garcia
a8f03d859c feat(gcp): add --project-ids flag and scan all projects by default (#2393)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-06-06 11:56:39 +02:00
Sergio Garcia
3c7580f024 fix(ec2): handle false positive in ec2_securitygroup_allow_ingress_from_internet_to_any_port (#2449) 2023-06-06 11:55:27 +02:00
Sergio Garcia
277833e388 fix(services): verify Route53 records and handle TrustedAdvisor error (#2448) 2023-06-06 11:50:44 +02:00
Sergio Garcia
eb16d7e6f9 chore(regions_update): Changes in regions for AWS services. (#2450)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-06-06 11:20:03 +02:00
Pepe Fagoaga
1418068d2b fix(services): Handle AWS service errors (#2440) 2023-06-06 09:23:03 +02:00
dependabot[bot]
774346f5f8 build(deps): bump botocore from 1.29.142 to 1.29.147 (#2447)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-06 08:38:49 +02:00
dependabot[bot]
1aab88e6ca build(deps): bump alive-progress from 3.1.1 to 3.1.4 (#2446)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-06 08:25:06 +02:00
dependabot[bot]
613f49b8bb build(deps-dev): bump docker from 6.1.2 to 6.1.3 (#2445)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-06 08:03:03 +02:00
dependabot[bot]
5c95dc6e20 build(deps): bump boto3 from 1.26.138 to 1.26.142 (#2444)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-06 07:45:14 +02:00
dependabot[bot]
cbc2713bee build(deps-dev): bump moto from 4.1.10 to 4.1.11 (#2443) 2023-06-06 07:29:25 +02:00
christiandavilakoobin
2955975793 fix(cloudfront): fix DefaultCacheConfigBehaviour enum type (#2430)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-06-05 15:48:34 +02:00
Sergio Garcia
f8299d7f40 chore(regions_update): Changes in regions for AWS services. (#2441)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-06-05 14:44:30 +02:00
Toni de la Fuente
e855d44523 docs: Create CONTRIBUTING.md (#2416)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-06-05 08:52:57 +02:00
dependabot[bot]
64e7715480 build(deps): bump cryptography from 40.0.2 to 41.0.0 (#2436)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-05 08:52:11 +02:00
Nacho Rivera
2e9a74f609 fix(README): add references to tenant-id when browser auth (#2439) 2023-06-05 08:39:59 +02:00
Sergio Garcia
11a1230738 chore(regions_update): Changes in regions for AWS services. (#2437)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-06-05 08:09:21 +02:00
Sergio Garcia
298373742e chore(regions_update): Changes in regions for AWS services. (#2427)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-06-02 13:32:04 +02:00
Sergio Garcia
dc7aeecd85 chore(regions_update): Changes in regions for AWS services. (#2434)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-06-02 13:24:47 +02:00
Nacho Rivera
15a7de7b24 fix(browser auth): fix browser auth in Azure to include tenant id (#2415)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-06-02 13:22:43 +02:00
sssalim-aws
714d0d4092 Update aws_well_architected_framework_security_pillar_aws.json (#2432) 2023-06-02 11:58:31 +02:00
Jenny Kim
225d7f39d1 chore(logo): Add Prowler logo in SVG format & Propose to Prowler icon design (#2423)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-06-01 12:03:49 +02:00
Sergio Garcia
0005798c83 chore(regions_update): Changes in regions for AWS services. (#2424)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-31 18:22:44 +02:00
dependabot[bot]
1d9078f9be build(deps): bump mkdocs-material from 9.1.12 to 9.1.15 (#2420)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-30 12:51:50 +02:00
dependabot[bot]
510ac7005a build(deps-dev): bump pytest-xdist from 3.3.0 to 3.3.1 (#2421)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-30 11:00:11 +02:00
dependabot[bot]
c049b968a5 build(deps): bump pydantic from 1.10.7 to 1.10.8 (#2418)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-30 10:45:13 +02:00
dependabot[bot]
858698f7cd build(deps): bump botocore from 1.29.138 to 1.29.142 (#2419)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-30 09:42:19 +02:00
dependabot[bot]
d104f6f8fc build(deps-dev): bump coverage from 7.2.5 to 7.2.7 (#2422)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-30 07:52:01 +02:00
Sergio Garcia
3ecf0d3230 chore(regions_update): Changes in regions for AWS services. (#2414) 2023-05-29 07:20:44 +02:00
Sergio Garcia
6e4131fee4 fix(ecr): handle LifecyclePolicyNotFoundException (#2411)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-05-26 17:15:49 +02:00
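The exception named in this commit is a real botocore error: requesting the lifecycle policy of a repository that has none raises it. A plausible minimal handling pattern, as an assumption rather than the merged code:

```python
# Treat a missing lifecycle policy as "no policy" instead of crashing.
# The repository name is a placeholder.
import boto3

ecr = boto3.client("ecr")
try:
    policy = ecr.get_lifecycle_policy(repositoryName="my-repo")
except ecr.exceptions.LifecyclePolicyNotFoundException:
    policy = None  # repository simply has no lifecycle policy configured
```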
Sergio Garcia
41fa6bc8ed chore(regions_update): Changes in regions for AWS services. (#2413)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-26 13:02:37 +02:00
Sergio Garcia
58a29bf058 fix(codebuild): handle FAIL in codebuild_project_user_controlled_buildspec (#2410)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-05-25 13:30:01 +02:00
Sergio Garcia
7dac17de18 chore(regions_update): Changes in regions for AWS services. (#2409)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-25 11:51:32 +02:00
Toni de la Fuente
799d7de182 fix: typo in README.md (#2407)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2023-05-24 16:55:49 +02:00
Pedro Martín
735af02f59 feat(new_security_framework): AWS Well Architected Framework security pillar (#2382)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-05-24 16:38:32 +02:00
Sergio Garcia
ad3f3799fa fix(typo): typo in README.md (#2406) 2023-05-24 14:22:58 +02:00
Sergio Garcia
5f97df015e chore(release): change release version to 3.5.3 (#2405) 2023-05-24 13:56:53 +02:00
Toni de la Fuente
ff18fd2c38 chore(docs): add summary table to README.md (#2402)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2023-05-24 13:56:17 +02:00
Jit
3ab0cd02df feat(checks-gcp): Include 4 new checks covering GCP CIS (#2376)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2023-05-24 12:10:43 +02:00
Sergio Garcia
c31072f42f chore(regions_update): Changes in regions for AWS services. (#2403)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-24 11:59:15 +02:00
Sergio Garcia
c01c59023a fix(ClientError): handle ClientErrors in DynamoDB and Directory Service (#2400) 2023-05-24 11:50:08 +02:00
Sergio Garcia
4329aac377 chore(quick-inventory): send quick inventory to output bucket (#2399)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-05-24 11:48:49 +02:00
Sergio Garcia
c10b31e9d0 fix(categories): remove empty categories from metadata (#2401) 2023-05-24 10:44:51 +02:00
kij
71a789c0b4 fix(OSError): handle different OSErrors (#2398)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-05-23 17:16:17 +02:00
Sergio Garcia
deb9847e2b fix(route53_dangling_ip_subdomain_takeover): notify only IPs with AWS IP Ranges (#2396) 2023-05-23 16:35:13 +02:00
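AWS publishes its address space at https://ip-ranges.amazonaws.com/ip-ranges.json, so limiting findings to AWS-owned IPs can be done by membership tests against those prefixes. A sketch of that idea (IPv4 only, and only an assumption about the fix's shape):

```python
# Check whether an address falls inside AWS's published IPv4 ranges.
import ipaddress
import json
import urllib.request

with urllib.request.urlopen("https://ip-ranges.amazonaws.com/ip-ranges.json") as resp:
    prefixes = [p["ip_prefix"] for p in json.load(resp)["prefixes"]]

def is_aws_ip(ip: str) -> bool:
    address = ipaddress.ip_address(ip)
    return any(address in ipaddress.ip_network(prefix) for prefix in prefixes)
```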
Pepe Fagoaga
9e9e7e1e96 fix(aws): Handle unique map keys (#2390)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-05-23 15:54:22 +02:00
Sergio Garcia
d34e0341e2 chore(regions_update): Changes in regions for AWS services. (#2392)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-23 12:28:38 +02:00
Sergio Garcia
aec254b05a fix(inspector2): fix active findings count (#2395) 2023-05-23 12:26:09 +02:00
dependabot[bot]
f8b420047a build(deps): bump boto3 from 1.26.125 to 1.26.138 (#2389)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-23 11:15:42 +02:00
dependabot[bot]
7e6e4c0bc6 build(deps): bump shodan from 1.29.0 to 1.29.1 (#2385)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-23 10:56:50 +02:00
dependabot[bot]
71fb59943c build(deps): bump requests from 2.30.0 to 2.31.0 (#2388)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-23 10:25:28 +02:00
dependabot[bot]
34419d0ca1 build(deps): bump azure-identity from 1.12.0 to 1.13.0 (#2386)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-23 10:22:05 +02:00
dependabot[bot]
475a36f0d7 build(deps-dev): bump moto from 4.1.9 to 4.1.10 (#2384)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-23 09:52:18 +02:00
Kevin Pullin
1234c1e7e2 fix(allowlist): tags parameter is a string, not a list (#2375) 2023-05-23 09:51:50 +02:00
dependabot[bot]
a4a400facf build(deps): bump botocore from 1.29.134 to 1.29.138 (#2383)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-23 07:52:47 +02:00
Sergio Garcia
ed2ca4d896 chore(regions_update): Changes in regions for AWS services. (#2378)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-19 11:36:08 +02:00
Pepe Fagoaga
ce42e4d1cd fix(pypi-release): Push version change to the branch (#2374) 2023-05-18 18:46:11 +02:00
Sergio Garcia
b048128e77 chore(release): release version 3.5.2 (#2373) 2023-05-18 17:04:18 +02:00
Sergio Garcia
635c257502 fix(ssm incidents): check if service available in aws partition (#2372) 2023-05-18 16:44:52 +02:00
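boto3 can answer "does this service exist in this partition?" directly, which is presumably the guard this fix adds. A sketch, assuming the botocore service name ssm-incidents:

```python
# Skip a service that is not offered in the scanned partition (e.g. aws-cn).
import boto3

session = boto3.session.Session()
if not session.get_available_regions("ssm-incidents", partition_name="aws"):
    print("ssm-incidents not available in this partition; skipping checks")
```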
Pepe Fagoaga
58a38c08d7 docs: format regions-and-partitions (#2371) 2023-05-18 16:35:54 +02:00
Pepe Fagoaga
8fbee7737b fix(resource_not_found): Handle error (#2370) 2023-05-18 16:26:08 +02:00
Pepe Fagoaga
e84f5f184e fix(sts): Use the right region to validate credentials (#2349)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-05-18 15:51:57 +02:00
Sergio Garcia
0bd26b19d7 chore(regions_update): Changes in regions for AWS services. (#2368)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-18 11:17:28 +02:00
Sergio Garcia
64f82d5d51 chore(regions_update): Changes in regions for AWS services. (#2366)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-17 11:52:16 +02:00
Sergio Garcia
f63ff994ce fix(action): solve pypi-release action creating the release branch (#2364) 2023-05-16 13:32:46 +02:00
Sergio Garcia
a10ee43271 release: 3.5.1 (#2363) 2023-05-16 11:42:08 +02:00
Sergio Garcia
54ed29e08d fix(route53): handle empty Records in Zones (#2351) 2023-05-16 10:51:43 +02:00
dependabot[bot]
cc097e7a3f build(deps-dev): bump docker from 6.1.1 to 6.1.2 (#2360)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-16 09:39:24 +02:00
dependabot[bot]
5de92ada43 build(deps): bump mkdocs-material from 9.1.8 to 9.1.12 (#2359)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-16 09:24:39 +02:00
dependabot[bot]
0c546211cf build(deps-dev): bump pytest-xdist from 3.2.1 to 3.3.0 (#2358)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-16 08:09:55 +02:00
dependabot[bot]
4dc5a3a67c build(deps): bump botocore from 1.29.125 to 1.29.134 (#2357)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-16 07:51:19 +02:00
dependabot[bot]
c51b226ceb build(deps): bump shodan from 1.28.0 to 1.29.0 (#2356)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-16 07:34:51 +02:00
dependabot[bot]
0a5ca6cf74 build(deps): bump pymdown-extensions from 9.11 to 10.0 (#2355)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-16 07:33:56 +02:00
Sergio Garcia
96957219e4 chore(regions_update): Changes in regions for AWS services. (#2353)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-16 07:32:41 +02:00
Sergio Garcia
32b7620db3 chore(regions_update): Changes in regions for AWS services. (#2350)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-12 11:37:53 +02:00
Sergio Garcia
347f65e089 chore(release): 3.5.0 (#2346) 2023-05-11 17:42:46 +02:00
Sergio Garcia
16628a427e fix(README): update Architecture image and PyPi links (#2345) 2023-05-11 17:29:17 +02:00
Sergio Garcia
ed16034a25 fix(README): order providers alphbetically (#2344) 2023-05-11 16:30:04 +02:00
Pepe Fagoaga
0c5f144e41 fix(poetry): Skip updates during pre-commit (#2342) 2023-05-11 12:17:21 +02:00
Sergio Garcia
acc7d6e7dc chore(regions_update): Changes in regions for AWS services. (#2341)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-11 11:41:39 +02:00
Sergio Garcia
84b4139052 chore(iam): add new permissions (#2339) 2023-05-11 11:35:32 +02:00
Sergio Garcia
9943643958 fix(s3): improve error handling (#2337) 2023-05-10 16:43:06 +02:00
Pepe Fagoaga
9ceaefb663 fix(access-analyzer): Handle ResourceNotFoundException (#2336) 2023-05-10 15:44:14 +02:00
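The same defensive pattern as the other error-handling fixes in this run, sketched here as an assumption about its shape; the analyzer name is a placeholder:

```python
# Treat a missing analyzer as an expected state rather than a fatal error.
import boto3

access_analyzer = boto3.client("accessanalyzer")
try:
    analyzer = access_analyzer.get_analyzer(analyzerName="prowler-analyzer")
except access_analyzer.exceptions.ResourceNotFoundException:
    analyzer = None  # no analyzer configured in this region
```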
Gabriel Soltz
ec03ea5bc1 feat(workspaces): New check workspaces_vpc_2private_1public_subnets_nat (#2286)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
Co-authored-by: n4ch04 <nachor1992@gmail.com>
2023-05-10 15:40:42 +02:00
Sergio Garcia
5855633c1f fix(resourceexplorer2): add resource id (#2335)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-05-10 14:48:34 +02:00
Pedro Martín
a53bc2bc2e feat(rds): new check rds_instance_deprecated_engine_version (#2298)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2023-05-10 14:48:12 +02:00
Sergio Garcia
88445820ed feat(slack): add Slack App integration (#2305)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-05-10 13:38:28 +02:00
Sergio Garcia
044ed3ae98 chore(regions_update): Changes in regions for AWS services. (#2334)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-10 13:30:24 +02:00
Pepe Fagoaga
6f48012234 fix(ecr): Refactor service (#2302)
Co-authored-by: Gabriel Soltz <thegaby@gmail.com>
Co-authored-by: Kay Agahd <kagahd@users.noreply.github.com>
Co-authored-by: Nacho Rivera <nachor1992@gmail.com>
Co-authored-by: Kevin Pullin <kevin.pullin@gmail.com>
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-05-09 17:04:21 +02:00
Sergio Garcia
d344318dd4 feat(allowlist): allowlist a specific service (#2331) 2023-05-09 15:43:04 +02:00
Sergio Garcia
6273dd3d83 chore(regions_update): Changes in regions for AWS services. (#2330)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-09 12:21:07 +02:00
dependabot[bot]
0f3f3cbffd build(deps-dev): bump moto from 4.1.8 to 4.1.9 (#2328)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-05-09 11:38:41 +02:00
Pepe Fagoaga
3244123b21 fix(cloudfront_distributions_https_enabled): Add default case (#2329)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-05-09 11:09:18 +02:00
dependabot[bot]
cba2ee3622 build(deps): bump boto3 from 1.26.115 to 1.26.125 (#2327)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-09 08:48:15 +02:00
dependabot[bot]
25ed925df5 build(deps-dev): bump docker from 6.0.1 to 6.1.1 (#2326)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-09 08:22:03 +02:00
dependabot[bot]
8c5bd60bab build(deps-dev): bump pylint from 2.17.3 to 2.17.4 (#2325)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-09 07:59:21 +02:00
dependabot[bot]
c5510556a7 build(deps): bump mkdocs from 1.4.2 to 1.4.3 (#2324)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-09 07:38:43 +02:00
Sergio Garcia
bbcfca84ef fix(trustedadvisor): avoid not_available checks (#2323) 2023-05-08 17:55:31 +02:00
Sergio Garcia
1260e94c2a fix(cloudtrail): handle InsightNotEnabledException error (#2322) 2023-05-08 16:06:13 +02:00
Pepe Fagoaga
8a02574303 fix(sagemaker): Handle ValidationException (#2321) 2023-05-08 14:52:28 +02:00
Pepe Fagoaga
c930f08348 fix(emr): Handle InvalidRequestException (#2320) 2023-05-08 14:52:12 +02:00
Pepe Fagoaga
5204acb5d0 fix(iam): Handle ListRoleTags and policy errors (#2319) 2023-05-08 14:42:23 +02:00
Sergio Garcia
784aaa98c9 feat(iam): add iam_role_cross_account_readonlyaccess_policy check (#2312) 2023-05-08 13:27:51 +02:00
Sergio Garcia
745e2494bc chore(docs): improve GCP docs (#2318) 2023-05-08 13:26:23 +02:00
Sergio Garcia
c00792519d chore(docs): improve GCP docs (#2318) 2023-05-08 13:26:02 +02:00
Sergio Garcia
142fe5a12c chore(regions_update): Changes in regions for AWS services. (#2315)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-08 12:40:31 +02:00
Sergio Garcia
5b127f232e fix(typo): typo in backup_vaults_exist check title (#2317) 2023-05-08 12:29:08 +02:00
Kevin Pullin
c22bf01003 feat(allowlist): Support regexes in Tags to allow "or"-like conditional matching (#2300)
Co-authored-by: Kevin Pullin <kevinp@nexttrucking.com>
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-05-05 14:56:27 +02:00
Nacho Rivera
05e4911d6f fix(vpc services): list to dicts in vpc and subnets (#2310) 2023-05-04 15:35:02 +02:00
Nacho Rivera
9b551ef0ba feat(pre-commit): added trufflehog to pre-commit (#2311) 2023-05-04 15:33:11 +02:00
Sergio Garcia
56a8bb2349 chore(regions_update): Changes in regions for AWS services. (#2309)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-04 12:30:10 +02:00
Pepe Fagoaga
8503c6a64d fix(client_error): Handle errors (#2308) 2023-05-04 11:06:24 +02:00
Pepe Fagoaga
820f18da4d release: 3.4.1 (#2303) 2023-05-03 19:24:17 +02:00
Kay Agahd
51a2432ebf fix(typo): remove redundant lines (#2307) 2023-05-03 19:23:48 +02:00
Gabriel Soltz
6639534e97 feat(ssmincidents): Use regional_client region instead of audit_profile region (#2306) 2023-05-03 19:22:30 +02:00
Gabriel Soltz
0621577c7d fix(backup): Return [] when None AdvancedBackupSettings (#2304) 2023-05-03 17:10:53 +02:00
Sergio Garcia
26a507e3db feat(route53): add route53_dangling_ip_subdomain_takeover check (#2288)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-05-03 11:47:36 +02:00
Sergio Garcia
244b540fe0 fix(s3): handle NoSuchBucket error (#2289) 2023-05-03 09:55:19 +02:00
Gabriel Soltz
030ca4c173 fix(backups): change severity and only check report_plans if plans exist (#2291)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-05-03 09:00:15 +02:00
dependabot[bot]
88a2810f29 build(deps): bump botocore from 1.29.115 to 1.29.125 (#2301)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-03 08:55:14 +02:00
dependabot[bot]
9164ee363a build(deps-dev): bump coverage from 7.2.3 to 7.2.5 (#2297)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-03 08:38:03 +02:00
dependabot[bot]
4cd47fdcc5 build(deps): bump google-api-python-client from 2.84.0 to 2.86.0 (#2296)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-03 08:11:36 +02:00
dependabot[bot]
708852a3cb build(deps): bump mkdocs-material from 9.1.6 to 9.1.8 (#2294)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-05-03 07:49:52 +02:00
Sergio Garcia
4a93bdf3ea chore(regions_update): Changes in regions for AWS services. (#2293)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-05-03 07:49:27 +02:00
Gabriel Soltz
22e7d2a811 feat(Organizations): New check organizations_tags_policies_enabled_and_attached (#2287)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-04-28 16:14:08 +02:00
Sergio Garcia
93eca1dff2 chore(regions_update): Changes in regions for AWS services. (#2290)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-28 13:19:46 +02:00
Gabriel Soltz
9afe7408cd feat(FMS): New Service FMS and Check fms_accounts_compliant (#2259)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
Co-authored-by: Nacho Rivera <nacho@verica.io>
2023-04-28 11:47:55 +02:00
Sergio Garcia
5dc2347a25 docs(security hub): improve security hub docs (#2285)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-04-27 16:22:49 +02:00
Pepe Fagoaga
e3a0124b10 fix(opensearch): Handle invalid JSON policy (#2262) 2023-04-27 12:05:43 +02:00
Gabriel Soltz
16af89c281 feat(autoscaling): new check autoscaling_group_multiple_az (#2273) 2023-04-26 15:10:04 +02:00
Sergio Garcia
621e4258c8 feat(s3): add s3_bucket_object_lock check (#2274) 2023-04-26 15:04:45 +02:00
Sergio Garcia
ac6272e739 fix(rds): check configurations for DB instances at cluster level (#2277)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-04-26 13:51:07 +02:00
Sergio Garcia
6e84f517a9 fix(apigateway2): correct paginator name (#2283) 2023-04-26 13:43:15 +02:00
Pepe Fagoaga
fdbdb3ad86 fix(sns_topics_not_publicly_accessible): Change PASS behaviour (#2282) 2023-04-26 12:51:51 +02:00
Sergio Garcia
7adcf5ca46 chore(regions_update): Changes in regions for AWS services. (#2280)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-26 11:59:34 +02:00
Gabriel Soltz
fe6716cf76 feat(NetworkFirewall): New Service and Check (#2261)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2023-04-26 11:58:11 +02:00
dependabot[bot]
3c2096db68 build(deps): bump azure-mgmt-security from 4.0.0 to 5.0.0 (#2270)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-25 11:59:30 +02:00
Pepe Fagoaga
58cad1a6b3 fix(log_group_retention): handle log groups that never expire (#2272) 2023-04-25 10:45:43 +02:00
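In the CloudWatch Logs API, a log group that never expires simply omits retentionInDays, which is the edge case this fix targets. A None-safe access sketch, illustrative rather than the merged code:

```python
# retentionInDays is absent for log groups that never expire, so use .get().
import boto3

logs = boto3.client("logs")
for page in logs.get_paginator("describe_log_groups").paginate():
    for group in page["logGroups"]:
        retention = group.get("retentionInDays")  # None => never expires
        print(group["logGroupName"], retention or "never expires")
```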
dependabot[bot]
662e67ff16 build(deps): bump boto3 from 1.26.105 to 1.26.115 (#2269)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-25 10:35:15 +02:00
dependabot[bot]
8d577b872f build(deps-dev): bump moto from 4.1.7 to 4.1.8 (#2268)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-25 10:12:25 +02:00
dependabot[bot]
b55290f3cb build(deps-dev): bump pylint from 2.17.2 to 2.17.3 (#2267)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-25 09:20:15 +02:00
dependabot[bot]
e8d3eb7393 build(deps-dev): bump pytest from 7.3.0 to 7.3.1 (#2266)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-25 08:03:45 +02:00
Sergio Garcia
47fa16e35f chore(test): add CloudWatch and Logs tests (#2264) 2023-04-24 17:05:05 +02:00
Gabriel Soltz
a87f769b85 feat(DRS): New DRS Service and Checks (#2257)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-04-24 14:22:22 +02:00
Sergio Garcia
8e63fa4594 fix(version): execute check current version function only when -v (#2263) 2023-04-24 12:45:59 +02:00
Gabriel Soltz
63501a0d59 feat(inspector2): New Service and Check (#2250)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2023-04-24 12:15:16 +02:00
Sergio Garcia
828fb37ca8 chore(regions_update): Changes in regions for AWS services. (#2258)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-24 08:32:40 +02:00
Sergio Garcia
40f513d3b6 chore(regions_update): Changes in regions for AWS services. (#2251)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-21 12:10:15 +02:00
Sergio Garcia
f0b8b66a75 chore(test): add rds_instance_transport_encrypted test (#2252) 2023-04-21 12:09:47 +02:00
Sergio Garcia
d51cdc068b fix(iam_role_cross_service_confused_deputy_prevention): avoid service linked roles (#2249) 2023-04-21 10:42:05 +02:00
Sergio Garcia
f8b382e480 fix(version): update version to 3.4.0 (#2247) 2023-04-20 17:05:18 +02:00
Ronen Atias
1995f43b67 fix(redshift): correct description in redshift_cluster_automatic_upgrades (#2246) 2023-04-20 15:19:49 +02:00
Sergio Garcia
69e0392a8b fix(rds): exclude Aurora in rds_instance_transport_encrypted check (#2245) 2023-04-20 14:28:12 +02:00
Sergio Garcia
1f6319442e chore(docs): improve GCP docs (#2242) 2023-04-20 14:15:28 +02:00
Sergio Garcia
559c4c0c2c chore(regions_update): Changes in regions for AWS services. (#2243)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-20 11:43:02 +02:00
Sergio Garcia
feeb5b58d9 fix(checks): improve --list-checks function (#2240) 2023-04-19 17:00:20 +02:00
Sergio Garcia
7a00f79a56 fix(iam_policy_no_administrative_privileges): check attached policies and AWS-Managed (#2200)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-04-19 14:34:53 +02:00
Sergio Garcia
10d744704a fix(errors): solve ECR and CodeArtifact errors (#2239) 2023-04-19 13:27:19 +02:00
Gabriel Soltz
eee35f9cc3 feat(ssmincidents): New Service and Checks (#2219)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-04-19 12:26:20 +02:00
Gabriel Soltz
b3656761eb feat(check): New VPC checks (#2218) 2023-04-19 12:01:12 +02:00
Sergio Garcia
7b5fe34316 feat(html): add html to Azure and GCP (#2181)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-04-18 16:13:57 +02:00
Sergio Garcia
4536780a19 feat(check): new check ecr_registry_scan_images_on_push_enabled (#2237) 2023-04-18 15:45:21 +02:00
Sergio Garcia
05d866e6b3 chore(regions_update): Changes in regions for AWS services. (#2236)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-18 13:43:15 +02:00
dependabot[bot]
0d138cf473 build(deps): bump botocore from 1.29.105 to 1.29.115 (#2233)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-18 13:42:50 +02:00
dependabot[bot]
dbe539ac80 build(deps): bump boto3 from 1.26.90 to 1.26.105 (#2232)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-18 12:35:33 +02:00
dependabot[bot]
665a39d179 build(deps): bump azure-storage-blob from 12.15.0 to 12.16.0 (#2230)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-18 11:02:39 +02:00
dependabot[bot]
5fd5d8c8c5 build(deps-dev): bump coverage from 7.2.2 to 7.2.3 (#2234)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-18 08:03:44 +02:00
dependabot[bot]
2832b4564c build(deps-dev): bump moto from 4.1.6 to 4.1.7 (#2231)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-18 07:40:50 +02:00
dependabot[bot]
d4369a64ee build(deps): bump azure-mgmt-security from 3.0.0 to 4.0.0 (#2141)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-17 13:22:09 +02:00
Sergio Garcia
81fa1630b7 chore(regions_update): Changes in regions for AWS services. (#2227)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-17 11:18:41 +02:00
Sergio Garcia
a1c4b35205 chore(regions_update): Changes in regions for AWS services. (#2217)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-17 11:16:22 +02:00
Sergio Garcia
5e567f3e37 fix(iam tests): mock audit_info object (#2226)
Co-authored-by: n4ch04 <nachor1992@gmail.com>
2023-04-17 11:14:48 +02:00
Pepe Fagoaga
c4757684c1 fix(test): Mock audit info in SecurityHub CodeBuild (#2225) 2023-04-17 11:14:36 +02:00
Sergio Garcia
a55a6bf94b fix(test): Mock audit info in EC2 (#2224) 2023-04-17 10:54:56 +02:00
Pepe Fagoaga
fa1792eb77 fix(test): Mock audit info in CloudWatch (#2223) 2023-04-17 10:54:01 +02:00
Nacho Rivera
93a8f6e759 fix(rds tests): mocked audit_info object (#2222) 2023-04-17 10:06:25 +02:00
Nacho Rivera
4a614855d4 fix(s3 tests): audit_info object mocked (#2221) 2023-04-17 10:04:28 +02:00
Pepe Fagoaga
8bdd47f912 fix(test): Mock audit info in KMS (#2215) 2023-04-14 14:34:55 +02:00
Nacho Rivera
f9e82abadc fix(vpc tests): mock current_audit_info (#2214) 2023-04-14 14:31:34 +02:00
Gabriel Soltz
428fda81e2 feat(check): New GuardDuty check guardduty_centrally_managed (#2195)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-04-14 14:30:51 +02:00
Pepe Fagoaga
29c9ad602d fix(test): Mock audit into in Macie (#2213) 2023-04-14 14:29:19 +02:00
Pepe Fagoaga
44458e2a97 fix(test): Mock audit info codeartifact-config-ds (#2210) 2023-04-14 14:25:45 +02:00
Pepe Fagoaga
861fb1f54b fix(test): Mock audit info in Glacier (#2212) 2023-04-14 14:20:03 +02:00
Pepe Fagoaga
02534f4d55 fix(test): Mock audit info DynamoDB (#2211) 2023-04-14 14:19:08 +02:00
Pepe Fagoaga
5532cb95a2 fix(test): Mock audit info in appstream and autoscaling (#2209) 2023-04-14 14:06:07 +02:00
Pepe Fagoaga
9176e43fc9 fix(test): Mock audit info API Gateway (#2208) 2023-04-14 13:49:38 +02:00
Pepe Fagoaga
cb190f54fc fix(elb-test): Use a mocked current audit info (#2207) 2023-04-14 12:43:08 +02:00
Sergio Garcia
4be2539bc2 fix(resourceexplorer2): solve test and region (#2206) 2023-04-14 12:33:52 +02:00
Sergio Garcia
291e2adffa chore(regions_update): Changes in regions for AWS services. (#2205)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-14 12:32:58 +02:00
Gabriel Soltz
fa2ec63f45 feat(check): New Check and Service: resourceexplorer2_indexes_found (#2196)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2023-04-14 10:18:36 +02:00
Nacho Rivera
946c943457 fix(global services): fixed global services region (#2203)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-04-14 09:57:33 +02:00
Pepe Fagoaga
0e50766d6e fix(test): call cloudtrail_s3_dataevents_write_enabled check (#2204) 2023-04-14 09:35:29 +02:00
Sergio Garcia
58a1610ae0 chore(regions_update): Changes in regions for AWS services. (#2201)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-13 15:53:56 +02:00
Nacho Rivera
06dc21168a feat(orgs checks region): added region to all orgs checks (#2202) 2023-04-13 14:41:18 +02:00
Gabriel Soltz
305b67fbed feat(check): New check cloudtrail_bucket_requires_mfa_delete (#2194)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-04-13 14:18:31 +02:00
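MFA delete is reported through the bucket's versioning configuration, so the heart of this check is likely a single API call. A hedged sketch; the bucket name is a placeholder:

```python
# The CloudTrail bucket should have versioning with MFA delete enabled.
import boto3

s3 = boto3.client("s3")
versioning = s3.get_bucket_versioning(Bucket="my-cloudtrail-bucket")
mfa_delete_enabled = versioning.get("MFADelete") == "Enabled"
print("PASS" if mfa_delete_enabled else "FAIL")
```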
Sergio Garcia
4da6d152c3 feat(custom checks): add -x/--checks-folder for custom checks (#2191) 2023-04-13 13:44:25 +02:00
Sergio Garcia
25630f1ef5 chore(regions): sort AWS regions (#2198)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-12 13:24:14 +02:00
Sergio Garcia
9b01e3f1c9 chore(regions_update): Changes in regions for AWS services. (#2197)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-12 12:53:03 +02:00
Sergio Garcia
99450400eb chore(regions_update): Changes in regions for AWS services. (#2189)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-12 10:47:21 +02:00
Gabriel Soltz
2f8a8988d7 feat(checks): New IAM Checks no full access to critical services (#2183) 2023-04-12 07:47:21 +02:00
Sergio Garcia
9104d2e89e fix(kms): handle empty principal error (#2192) 2023-04-11 16:59:29 +02:00
Gabriel Soltz
e75022763c feat(checks): New iam_securityaudit_role_created (#2182) 2023-04-11 14:15:39 +02:00
Gabriel Soltz
f0f3fb337d feat(check): New CloudTrail check cloudtrail_insights_exist (#2184) 2023-04-11 13:49:54 +02:00
dependabot[bot]
f7f01a34c2 build(deps): bump google-api-python-client from 2.81.0 to 2.84.0 (#2188)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-11 12:13:41 +02:00
dependabot[bot]
f9f9ff0cb8 build(deps): bump alive-progress from 3.1.0 to 3.1.1 (#2187)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-11 08:13:17 +02:00
dependabot[bot]
522ba05ba8 build(deps): bump mkdocs-material from 9.1.5 to 9.1.6 (#2186)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-11 07:54:41 +02:00
Gabriel Soltz
f4f4093466 feat(backup): New backup service and checks (#2172)
Co-authored-by: Nacho Rivera <nacho@verica.io>
2023-04-11 07:43:40 +02:00
dependabot[bot]
2e16ab0c2c build(deps-dev): bump pytest from 7.2.2 to 7.3.0 (#2185)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-11 07:39:09 +02:00
Sergio Garcia
6f02606fb7 fix(iam): handle no display name error in service account (#2176) 2023-04-10 12:06:08 +02:00
Sergio Garcia
df40142b51 chore(regions_update): Changes in regions for AWS services. (#2180)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-10 12:05:48 +02:00
Sergio Garcia
cc290d488b chore(regions_update): Changes in regions for AWS services. (#2178)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-10 12:05:30 +02:00
Nacho Rivera
64328218fc feat(banner): azure credential banner (#2179) 2023-04-10 09:58:28 +02:00
Sergio Garcia
8d1356a085 fix(logging): add default resource id when no resources (#2177) 2023-04-10 08:02:40 +02:00
Sergio Garcia
4f39dd0f73 fix(version): handle request response property (#2175)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-04-05 15:17:30 +02:00
Pepe Fagoaga
54ffc8ae45 chore(release): 3.3.4 (#2174) 2023-04-05 14:18:07 +02:00
Sergio Garcia
78ab1944bd chore(regions_update): Changes in regions for AWS services. (#2173)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-05 12:32:25 +02:00
dependabot[bot]
434cf94657 build(deps-dev): bump moto from 4.1.5 to 4.1.6 (#2164)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-04-05 12:31:58 +02:00
Nacho Rivera
dcb893e230 fix(elbv2 desync check): Mixed elbv2 desync and smuggling (#2171) 2023-04-05 11:36:06 +02:00
Sergio Garcia
ce4fadc378 chore(regions_update): Changes in regions for AWS services. (#2170)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-05 08:47:19 +02:00
dependabot[bot]
5683d1b1bd build(deps): bump botocore from 1.29.100 to 1.29.105 (#2163)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-04 13:24:03 +02:00
dependabot[bot]
0eb88d0c10 build(deps): bump mkdocs-material from 9.1.4 to 9.1.5 (#2162)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-04 11:07:41 +02:00
Nacho Rivera
eb1367e54d fix(pipeline build): fixed wording when build and push (#2169) 2023-04-04 10:21:28 +02:00
dependabot[bot]
33a4786206 build(deps-dev): bump pylint from 2.17.0 to 2.17.2 (#2161)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-04-04 09:35:10 +02:00
Pepe Fagoaga
8c6606ad95 fix(dax): Call list_tags using the cluster ARN (#2167) 2023-04-04 09:30:36 +02:00
Pepe Fagoaga
cde9519a76 fix(iam): Handle LimitExceededException when calling generate_credential_report (#2168) 2023-04-04 09:29:27 +02:00
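GenerateCredentialReport is asynchronous and rate-limited, so LimitExceededException typically means a report is already being produced. A sketch of tolerating it, as an assumption about the fix rather than its code:

```python
# A LimitExceededException here usually means generation is already running,
# so it should be swallowed rather than allowed to abort the IAM scan.
import boto3

iam = boto3.client("iam")
try:
    iam.generate_credential_report()
except iam.exceptions.LimitExceededException:
    pass  # report already being generated; reuse it once ready
```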
Pepe Fagoaga
7b2e0d79cb fix(cloudformation): Handle ValidationError (#2166) 2023-04-04 09:28:11 +02:00
Pepe Fagoaga
5b0da8e92a fix(rds): Handle DBSnapshotNotFound (#2165) 2023-04-04 09:27:36 +02:00
Michael Göhler
0126d2f77c fix(secretsmanager_automatic_rotation_enabled): Improve description for Secrets Manager secret rotation (#2156) 2023-04-03 11:01:29 +02:00
Sergio Garcia
0b436014c9 chore(regions_update): Changes in regions for AWS services. (#2159)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-04-03 11:01:15 +02:00
Igor Ceron
2cb7f223ed fix(docs): check extra_742 name adjusted in the V2 to V3 mapping (#2154) 2023-03-31 12:54:13 +02:00
Sergio Garcia
eca551ed98 chore(regions_update): Changes in regions for AWS services. (#2155)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-31 12:53:49 +02:00
Gabriel Soltz
608fd92861 feat(new_checks): New AWS Organizations related checks (#2133)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-03-30 17:36:23 +02:00
Sergio Garcia
e37d8fe45f chore(release): update Prowler Version to 3.3.2 (#2150)
Co-authored-by: github-actions <noreply@github.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-03-30 11:33:33 +02:00
Sergio Garcia
4cce91ec97 chore(regions_update): Changes in regions for AWS services. (#2153)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-30 11:29:00 +02:00
Pepe Fagoaga
72fdde35dc fix(pypi): Set base branch when updating release version (#2152) 2023-03-30 10:59:58 +02:00
Pepe Fagoaga
d425187778 fix(pypi): Build from release branch (#2151) 2023-03-30 10:14:49 +02:00
Sergio Garcia
e419aa1f1a chore(regions_update): Changes in regions for AWS services. (#2149)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-29 11:45:35 +02:00
Pepe Fagoaga
5506547f7f fix(ssm): Handle ValidationException when retrieving documents (#2146) 2023-03-29 09:16:52 +02:00
Nacho Rivera
568ed72b3e fix(audit_info): azure subscriptions parsing error (#2147) 2023-03-29 09:15:53 +02:00
Nacho Rivera
e8cc0e6684 fix(delete check): delete check ec2_securitygroup_in_use_without_ingress_filtering (#2148) 2023-03-29 09:13:43 +02:00
Sergio Garcia
4331f69395 chore(regions_update): Changes in regions for AWS services. (#2145)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-28 13:08:02 +02:00
dependabot[bot]
7cc67ae7cb build(deps): bump botocore from 1.29.90 to 1.29.100 (#2142)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-28 13:07:23 +02:00
dependabot[bot]
244b3438fc build(deps): bump mkdocs-material from 9.1.3 to 9.1.4 (#2140)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-28 12:39:00 +02:00
Nacho Rivera
1a741f7ca0 fix(azure output): change default values of audit identity metadata (#2144) 2023-03-28 10:42:47 +02:00
dependabot[bot]
1447800e2b build(deps): bump pydantic from 1.10.6 to 1.10.7 (#2139)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-28 10:41:09 +02:00
Sergio Garcia
f968fe7512 fix(readme): add GCP provider to README introduction (#2143) 2023-03-28 10:40:56 +02:00
dependabot[bot]
0a2349fad7 build(deps): bump alive-progress from 3.0.1 to 3.1.0 (#2138)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-28 09:55:18 +02:00
Sergio Garcia
941b8cbc1e chore(docs): Developer Guide - how to create a new check (#2137) 2023-03-27 20:20:13 +02:00
Pepe Fagoaga
3b7b16acfd fix(resource_not_found): Handle error (#2136) 2023-03-27 17:27:50 +02:00
Nacho Rivera
fbc7bb68fc feat(defender service): retrieving key dicts with get (#2129) 2023-03-27 17:13:11 +02:00
Pepe Fagoaga
0d16880596 fix(s3): handle if ignore_public_acls is None (#2128) 2023-03-27 17:00:20 +02:00
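A bucket may have no public-access-block configuration at all, in which case the attribute being modeled ends up None. A None-safe sketch; the bucket name is a placeholder and this is not the actual patch:

```python
# Missing configuration and an explicit False both mean public ACLs are honored.
import boto3
from botocore.exceptions import ClientError

s3 = boto3.client("s3")
try:
    response = s3.get_public_access_block(Bucket="my-bucket")
    ignore_public_acls = response["PublicAccessBlockConfiguration"]["IgnorePublicAcls"]
except ClientError:
    ignore_public_acls = None  # no public-access-block configuration set
acls_ignored = bool(ignore_public_acls)
```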
Sergio Garcia
3b5218128f fix(brew): move brew formula action to the bottom (#2135) 2023-03-27 11:24:28 +02:00
Pepe Fagoaga
cb731bf1db fix(aws_provider): Fix assessment session name (#2132) 2023-03-25 00:11:16 +01:00
Sergio Garcia
7c4d6eb02d fix(gcp): handle error when Project ID is None (#2130) 2023-03-24 18:30:33 +01:00
Sergio Garcia
c14e7fb17a feat(gcp): add Google Cloud provider with 43 checks (#2125) 2023-03-24 13:38:41 +01:00
Sergio Garcia
fe57811bc5 chore(regions_update): Changes in regions for AWS services. (#2126)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-24 10:18:33 +01:00
Sergio Garcia
e073b48f7d chore(regions_update): Changes in regions for AWS services. (#2123)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-23 15:58:47 +01:00
Ben Nugent
a9df609593 fix(quickinventory): AttributeError when creating inventory table (#2122) 2023-03-23 10:22:14 +01:00
Sergio Garcia
6c3db9646e fix(output bucket): solve IsADirectoryError using compliance flag (#2121) 2023-03-22 13:38:41 +01:00
Sergio Garcia
ff9c4c717e chore(regions_update): Changes in regions for AWS services. (#2120)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-22 12:18:44 +01:00
Sergio Garcia
182374b46f docs: improve reporting documentation (#2119) 2023-03-22 10:02:52 +01:00
Sergio Garcia
0871cda526 docs: improve quick inventory section (#2117) 2023-03-21 18:09:40 +01:00
Toni de la Fuente
1b47cba37a docs(developer-guide): added phase 1 of the developer guide (#1904)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2023-03-21 15:35:26 +01:00
Pepe Fagoaga
e5bef36905 docs: Remove list severities (#2116) 2023-03-21 14:18:07 +01:00
Sergio Garcia
706d723703 chore(version): check latest version (#2106) 2023-03-21 11:16:13 +01:00
Sergio Garcia
51eacbfac5 feat(allowlist): add tags filter to allowlist (#2105) 2023-03-21 11:14:59 +01:00
dependabot[bot]
5c2a411982 build(deps): bump boto3 from 1.26.86 to 1.26.90 (#2114)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-21 11:04:26 +01:00
Sergio Garcia
08d65cbc41 chore(regions_update): Changes in regions for AWS services. (#2115)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-21 11:03:54 +01:00
dependabot[bot]
9d2bf429c1 build(deps): bump mkdocs-material from 9.1.2 to 9.1.3 (#2113)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-21 10:18:36 +01:00
dependabot[bot]
d34f863bd4 build(deps-dev): bump moto from 4.1.4 to 4.1.5 (#2111)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-03-21 09:27:44 +01:00
Sergio Garcia
b4abf1c2c7 chore(regions_update): Changes in regions for AWS services. (#2104)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-21 08:32:26 +01:00
dependabot[bot]
68baaf589e build(deps-dev): bump coverage from 7.2.1 to 7.2.2 (#2112)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-21 08:18:47 +01:00
dependabot[bot]
be74e41d84 build(deps-dev): bump openapi-spec-validator from 0.5.5 to 0.5.6 (#2110)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-21 07:52:50 +01:00
Sergio Garcia
848122b0ec chore(release): update Prowler Version to 3.3.0 (#2102)
Co-authored-by: github-actions <noreply@github.com>
2023-03-16 22:30:02 +01:00
Nacho Rivera
0edcb7c0d9 fix(ulimit check): try except when checking ulimit (#2096)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2023-03-16 17:39:46 +01:00
Pepe Fagoaga
cc58e06b5e fix(providers): Move provider's logic outside main (#2043)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-03-16 17:32:53 +01:00
Sergio Garcia
0d6ca606ea fix(ec2_securitygroup_allow_wide_open_public_ipv4): correct check title (#2101) 2023-03-16 17:25:32 +01:00
Sergio Garcia
75ee93789f chore(regions_update): Changes in regions for AWS services. (#2095)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-16 17:14:40 +01:00
Sergio Garcia
05daddafbf feat(SecurityHub): add compliance details to Security Hub findings (#2100) 2023-03-16 17:11:55 +01:00
Nacho Rivera
7bbce6725d fix(ulimit check): test only when platform is not Windows (#2094) 2023-03-16 08:38:37 +01:00
Nacho Rivera
789b211586 feat(lambda_cloudtrail check): improved logic and status extended (#2092) 2023-03-15 12:32:58 +01:00
Sergio Garcia
826a043748 chore(regions_update): Changes in regions for AWS services. (#2091)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-15 12:28:03 +01:00
Sergio Garcia
6761048298 fix(cloudwatch): solve inexistent filterPattern error (#2087) 2023-03-14 14:46:34 +01:00
Sergio Garcia
738fc9acad feat(compliance): add compliance field to HTML, CSV and JSON outputs including frameworks and reqs (#2060)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-03-14 14:20:46 +01:00
Sergio Garcia
43c0540de7 chore(regions_update): Changes in regions for AWS services. (#2085)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-14 13:11:02 +01:00
Sergio Garcia
2d1c3d8121 fix(emr): solve emr_cluster_publicly_accesible error (#2086) 2023-03-14 13:10:21 +01:00
dependabot[bot]
f48a5c650d build(deps-dev): bump pytest-xdist from 3.2.0 to 3.2.1 (#2084)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-14 10:21:17 +01:00
dependabot[bot]
66c18eddb8 build(deps): bump botocore from 1.29.86 to 1.29.90 (#2083)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-14 10:01:23 +01:00
dependabot[bot]
fdd2ee6365 build(deps-dev): bump bandit from 1.7.4 to 1.7.5 (#2082)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-14 09:03:46 +01:00
dependabot[bot]
c207f60ad8 build(deps): bump pydantic from 1.10.5 to 1.10.6 (#2081)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-14 08:02:28 +01:00
dependabot[bot]
0eaa95c8c0 build(deps): bump mkdocs-material from 9.1.1 to 9.1.2 (#2080)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-14 07:48:02 +01:00
Pepe Fagoaga
df2fca5935 fix(bug_report): typo in bug reporting template (#2078)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2023-03-13 18:42:34 +01:00
Toni de la Fuente
dcaf5d9c7d update(docs): update readme with new ECR alias (#2079) 2023-03-13 18:07:51 +01:00
Sergio Garcia
0112969a97 fix(compliance): add check to 2.1.5 CIS (#2077) 2023-03-13 09:25:51 +01:00
Sergio Garcia
3ec0f3d69c chore(regions_update): Changes in regions for AWS services. (#2075)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-13 07:51:13 +01:00
Pepe Fagoaga
5555d300a1 fix(bug_report): Update wording (#2074) 2023-03-10 12:21:51 +01:00
Nacho Rivera
8155ef4b60 feat(templates): New versions of issues and fr templates (#2072) 2023-03-10 10:32:17 +01:00
Sergio Garcia
a12402f6c8 chore(regions_update): Changes in regions for AWS services. (#2073)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-10 10:27:29 +01:00
Sergio Garcia
cf28b814cb fix(ec2): avoid terminated instances (#2063) 2023-03-10 08:11:35 +01:00
Pepe Fagoaga
b05f67db19 chore(actions): Missing cache in the PR (#2067) 2023-03-09 11:50:49 +01:00
Pepe Fagoaga
260f4659d5 chore(actions): Use GHA cache (#2066) 2023-03-09 10:29:16 +01:00
dependabot[bot]
9e700f298c build(deps-dev): bump pylint from 2.16.4 to 2.17.0 (#2062)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-08 15:41:22 +01:00
dependabot[bot]
56510734c4 build(deps): bump boto3 from 1.26.85 to 1.26.86 (#2061)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-08 15:14:18 +01:00
Pepe Fagoaga
3938a4d14e chore(dependabot): Change to weekly (#2057) 2023-03-08 14:41:34 +01:00
Sergio Garcia
fa3b9eeeaf chore(regions_update): Changes in regions for AWS services. (#2058)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-08 14:38:56 +01:00
dependabot[bot]
eb9d6fa25c build(deps): bump botocore from 1.29.85 to 1.29.86 (#2054)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-08 09:57:44 +01:00
Alex Nelson
b53307c1c2 docs: Corrected spelling mistake in multiacount (#2056) 2023-03-08 09:57:08 +01:00
dependabot[bot]
c3fc708a66 build(deps): bump boto3 from 1.26.82 to 1.26.85 (#2053)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-08 09:03:00 +01:00
Sergio Garcia
b34ffbe6d0 feat(inventory): add tags to quick inventory (#2051) 2023-03-07 14:20:50 +01:00
Sergio Garcia
f364315e48 chore(iam): update Prowler permissions (#2050) 2023-03-07 14:14:31 +01:00
Sergio Garcia
3ddb5a13a5 fix(ulimit): handle low ulimit OSError (#2042)
Co-authored-by: Toni de la Fuente <toni@blyx.com>
2023-03-07 13:19:24 +01:00
dependabot[bot]
a24cc399a4 build(deps-dev): bump moto from 4.1.3 to 4.1.4 (#2045)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-03-07 12:45:50 +01:00
Sergio Garcia
305f4b2688 chore(regions_update): Changes in regions for AWS services. (#2049)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-07 11:27:28 +01:00
dependabot[bot]
9823171d65 build(deps-dev): bump pylint from 2.16.3 to 2.16.4 (#2048)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-07 10:11:19 +01:00
dependabot[bot]
4761bd8fda build(deps): bump mkdocs-material from 9.1.0 to 9.1.1 (#2047)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-07 09:33:19 +01:00
dependabot[bot]
9c22698723 build(deps-dev): bump pytest from 7.2.1 to 7.2.2 (#2046)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-07 08:32:19 +01:00
dependabot[bot]
e3892bbcc6 build(deps): bump botocore from 1.29.84 to 1.29.85 (#2044)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-07 08:18:53 +01:00
Sergio Garcia
629b156f52 fix(quick inventory): add non-tagged s3 buckets to inventory (#2041) 2023-03-06 16:55:03 +01:00
Gary Mclean
c45dd47d34 fix(windows-path): --list-services bad split (#2028)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
2023-03-06 14:00:07 +01:00
Sergio Garcia
ef8831f784 feat(quick_inventory): add regions to inventory table (#2026) 2023-03-06 13:41:30 +01:00
Sergio Garcia
c5a42cf5de feat(rds_instance_transport_encrypted): add new check (#1963)
Co-authored-by: Toni de la Fuente <toni@blyx.com>
2023-03-06 13:18:41 +01:00
dependabot[bot]
90ebbfc20f build(deps-dev): bump pylint from 2.16.2 to 2.16.3 (#2038)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-06 13:18:26 +01:00
Fennerr
17cd0dc91d feat(new_check): cloudwatch_log_group_no_secrets_in_logs (#1980)
Co-authored-by: Sergio Garcia <sergargar1@gmail.com>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
Co-authored-by: Jeffrey Souza <JeffreySouza@users.noreply.github.com>
2023-03-06 12:16:46 +01:00
dependabot[bot]
fa1f42af59 build(deps): bump botocore from 1.29.82 to 1.29.84 (#2037)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-06 12:14:48 +01:00
Sergio Garcia
f45ea1ab53 fix(check): change cloudformation_outputs_find_secrets name (#2027) 2023-03-06 12:11:58 +01:00
Sergio Garcia
0dde3fe483 chore(poetry): add poetry checks to pre-commit (#2040) 2023-03-06 11:44:04 +01:00
dependabot[bot]
277dc7dd09 build(deps-dev): bump freezegun from 1.2.1 to 1.2.2 (#2033)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-06 11:06:23 +01:00
dependabot[bot]
3215d0b856 build(deps-dev): bump coverage from 7.1.0 to 7.2.1 (#2032)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-06 09:55:19 +01:00
dependabot[bot]
0167d5efcd build(deps): bump mkdocs-material from 9.0.15 to 9.1.0 (#2031)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-06 09:15:44 +01:00
Sergio Garcia
b48ac808a6 chore(regions_update): Changes in regions for AWS services. (#2035)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-03 10:14:20 +01:00
dependabot[bot]
616524775c build(deps-dev): bump docker from 6.0.0 to 6.0.1 (#2030)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-03 10:02:11 +01:00
dependabot[bot]
5832849b11 build(deps): bump boto3 from 1.26.81 to 1.26.82 (#2029)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-03 09:43:43 +01:00
Sergio Garcia
467c5d01e9 fix(cloudtrail): list tags only in owned trails (#2025) 2023-03-02 16:16:19 +01:00
Sergio Garcia
24711a2f39 feat(tags): add resource tags to S-W services (#2020) 2023-03-02 14:21:05 +01:00
Nacho Rivera
24e8286f35 feat(): 7 chars in dispatch commit message (#2024) 2023-03-02 14:20:31 +01:00
Sergio Garcia
e8a1378ad0 feat(tags): add resource tags to G-R services (#2009) 2023-03-02 13:56:22 +01:00
Sergio Garcia
76bb418ea9 feat(tags): add resource tags to E services (#2007)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-03-02 13:55:26 +01:00
Nacho Rivera
cd8770a3e3 fix(actions): fixed dispatch commit message (#2023) 2023-03-02 13:55:03 +01:00
Sergio Garcia
da834c0935 feat(tags): add resource tags to C-D services (#2003)
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
2023-03-02 13:14:53 +01:00
Nacho Rivera
024ffb1117 fix(head): Pass head commit to dispatch action (#2022) 2023-03-02 12:06:41 +01:00
Nacho Rivera
eed7ab9793 fix(iam): refactor IAM service (#2010) 2023-03-02 11:16:05 +01:00
Sergio Garcia
032feb343f feat(tags): add resource tags in A services (#1997) 2023-03-02 10:59:49 +01:00
Pepe Fagoaga
eabccba3fa fix(actions): push should be true (#2019) 2023-03-02 10:37:29 +01:00
Nacho Rivera
d86d656316 feat(dispatch): add tag info to dispatch (#2002) 2023-03-02 10:31:30 +01:00
Sergio Garcia
fa73c91b0b chore(regions_update): Changes in regions for AWS services. (#2018)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-02 10:23:59 +01:00
Pepe Fagoaga
2eee50832d fix(actions): Stop using github storage (#2016) 2023-03-02 10:23:04 +01:00
Toni de la Fuente
b40736918b docs(install): Add brew and github installation to quick start (#1991) 2023-03-02 10:21:57 +01:00
Sergio Garcia
ffb1a2e30f chore(regions_update): Changes in regions for AWS services. (#1995)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-02 10:21:41 +01:00
Sergio Garcia
d6c3c0c6c1 feat(s3_bucket_level_public_access_block): new check (#1953) 2023-03-02 10:18:27 +01:00
dependabot[bot]
ee251721ac build(deps): bump botocore from 1.29.81 to 1.29.82 (#2015)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-02 09:53:24 +01:00
dependabot[bot]
fdbb9195d5 build(deps-dev): bump moto from 4.1.2 to 4.1.3 (#2014)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-02 09:23:48 +01:00
dependabot[bot]
c68b08d9af build(deps-dev): bump black from 22.10.0 to 22.12.0 (#2013)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-02 08:59:18 +01:00
dependabot[bot]
3653bbfca0 build(deps-dev): bump flake8 from 5.0.4 to 6.0.0 (#2012)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-02 08:32:41 +01:00
dependabot[bot]
05c7cc7277 build(deps): bump boto3 from 1.26.80 to 1.26.81 (#2011)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-02 07:54:33 +01:00
Sergio Garcia
5670bf099b chore(regions_update): Changes in regions for AWS services. (#2006)
Co-authored-by: sergargar <sergargar@users.noreply.github.com>
2023-03-01 10:16:58 +01:00
Nacho Rivera
0c324b0f09 fix(awslambdacloudtrail): include advanced event and all lambdas in check (#1994) 2023-03-01 10:04:06 +01:00
dependabot[bot]
968557e38e build(deps): bump botocore from 1.29.80 to 1.29.81 (#2005)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-01 08:59:54 +01:00
dependabot[bot]
882cdebacb build(deps): bump boto3 from 1.26.79 to 1.26.80 (#2004) 2023-03-01 08:40:41 +01:00
Sergio Garcia
07753e1774 feat(encryption): add new encryption category (#1999) 2023-02-28 13:42:11 +01:00
Pepe Fagoaga
5b984507fc fix(emr): KeyError EmrManagedSlaveSecurityGroup (#2000) 2023-02-28 13:41:58 +01:00
Sergio Garcia
27df481967 chore(metadata): remove tags from metadata (#1998) 2023-02-28 12:27:59 +01:00
dependabot[bot]
0943031f23 build(deps): bump mkdocs-material from 9.0.14 to 9.0.15 (#1993)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-02-28 11:02:59 +01:00
dependabot[bot]
2d95168de0 build(deps): bump botocore from 1.29.79 to 1.29.80 (#1992)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-02-28 10:46:25 +01:00
Sergio Garcia
97cae8f92c chore(brew): bump new version to brew (#1990) 2023-02-27 18:07:05 +01:00
github-actions
eb213bac92 chore(release): 3.2.4 2023-02-27 14:25:52 +01:00
1837 changed files with 106255 additions and 25057 deletions

View File

@@ -1,52 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: "[Bug]: "
labels: bug, status/needs-triage
assignees: ''
---
<!--
Please use this template to create your bug report. By providing as much info as possible you help us understand the issue, reproduce it and resolve it for you quicker. Therefore, take a couple of extra minutes to make sure you have provided all info needed.
PROTIP: record your screen and attach it as a gif to showcase the issue.
- How to record and attach gif: https://bit.ly/2Mi8T6K
-->
**What happened?**
A clear and concise description of what the bug is or what is not working as expected.
**How to reproduce it**
Steps to reproduce the behavior:
1. What command are you running?
2. Cloud provider you are launching
3. Environment you have, like single account, multi-account, organizations, multi or single subscription, etc.
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots or Logs**
If applicable, add screenshots to help explain your problem.
Also, you can add logs (anonymize them first!). Here is a command that may help to share a log:
`prowler <your arguments> --log-level DEBUG --log-file $(date +%F)_debug.log`; then attach the log file here.
**From where are you running Prowler?**
Please, complete the following information:
- Resource: (e.g. EC2 instance, Fargate task, Docker container manually, EKS, Cloud9, CodeBuild, workstation, etc.)
- OS: [e.g. Amazon Linux 2, Mac, Alpine, Windows, etc. ]
- Prowler Version [`prowler --version`]:
- Python version [`python --version`]:
- Pip version [`pip --version`]:
- Installation method (Are you running it from pip package or cloning the github repo?):
- Others:
**Additional context**
Add any other context about the problem here.

97
.github/ISSUE_TEMPLATE/bug_report.yml vendored Normal file
View File

@@ -0,0 +1,97 @@
name: 🐞 Bug Report
description: Create a report to help us improve
title: "[Bug]: "
labels: ["bug", "status/needs-triage"]
body:
- type: textarea
id: reproduce
attributes:
label: Steps to Reproduce
description: Steps to reproduce the behavior
placeholder: |-
1. What command are you running?
2. Cloud provider you are launching
3. Environment you have, like single account, multi-account, organizations, multi or single subscription, etc.
4. See error
validations:
required: true
- type: textarea
id: expected
attributes:
label: Expected behavior
description: A clear and concise description of what you expected to happen.
validations:
required: true
- type: textarea
id: actual
attributes:
label: Actual Result with Screenshots or Logs
description: If applicable, add screenshots to help explain your problem. Also, you can add logs (anonymize them first!). Here is a command that may help to share a log: `prowler <your arguments> --log-level DEBUG --log-file $(date +%F)_debug.log`; then attach the log file here.
validations:
required: true
- type: dropdown
id: type
attributes:
label: How did you install Prowler?
options:
- Cloning the repository from github.com (git clone)
- From pip package (pip install prowler)
- From brew (brew install prowler)
- Docker (docker pull toniblyx/prowler)
validations:
required: true
- type: textarea
id: environment
attributes:
label: Environment Resource
description: From where are you running Prowler?
placeholder: |-
1. EC2 instance
2. Fargate task
3. Docker container locally
4. EKS
5. Cloud9
6. CodeBuild
7. Workstation
8. Other (please specify)
validations:
required: true
- type: textarea
id: os
attributes:
label: OS used
description: Which OS are you using?
placeholder: |-
1. Amazon Linux 2
2. MacOS
3. Alpine Linux
4. Windows
5. Other (please specify)
validations:
required: true
- type: input
id: prowler-version
attributes:
label: Prowler version
description: Which Prowler version are you using?
placeholder: |-
prowler --version
validations:
required: true
- type: input
id: pip-version
attributes:
label: Pip version
description: Which pip version are you using?
placeholder: |-
pip --version
validations:
required: true
- type: textarea
id: additional
attributes:
description: Additional context
label: Context
validations:
required: false

View File

@@ -0,0 +1,36 @@
name: 💡 Feature Request
description: Suggest an idea for this project
labels: ["enhancement", "status/needs-triage"]
body:
- type: textarea
id: Problem
attributes:
label: New feature motivation
description: Is your feature request related to a problem? Please describe
placeholder: |-
1. A clear and concise description of what the problem is. Ex. I'm always frustrated when
validations:
required: true
- type: textarea
id: Solution
attributes:
label: Solution Proposed
description: A clear and concise description of what you want to happen.
validations:
required: true
- type: textarea
id: Alternatives
attributes:
label: Describe alternatives you've considered
description: A clear and concise description of any alternative solutions or features you've considered.
validations:
required: true
- type: textarea
id: Context
attributes:
label: Additional context
description: Add any other context or screenshots about the feature request here.
validations:
required: false

View File

@@ -1,20 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement, status/needs-triage
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

View File

@@ -8,8 +8,13 @@ updates:
- package-ecosystem: "pip" # See documentation for possible values
directory: "/" # Location of package manifests
schedule:
interval: "daily"
interval: "weekly"
target-branch: master
labels:
- "dependencies"
- "pip"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
target-branch: master

View File

@@ -15,183 +15,102 @@ on:
env:
AWS_REGION_STG: eu-west-1
AWS_REGION_PLATFORM: eu-west-1
AWS_REGION_PRO: us-east-1
AWS_REGION: us-east-1
IMAGE_NAME: prowler
LATEST_TAG: latest
STABLE_TAG: stable
TEMPORARY_TAG: temporary
DOCKERFILE_PATH: ./Dockerfile
PYTHON_VERSION: 3.9
jobs:
# Lint Dockerfile using Hadolint
# dockerfile-linter:
# runs-on: ubuntu-latest
# steps:
# -
# name: Checkout
# uses: actions/checkout@v3
# -
# name: Install Hadolint
# run: |
# VERSION=$(curl --silent "https://api.github.com/repos/hadolint/hadolint/releases/latest" | \
# grep '"tag_name":' | \
# sed -E 's/.*"v([^"]+)".*/\1/' \
# ) && curl -L -o /tmp/hadolint https://github.com/hadolint/hadolint/releases/download/v${VERSION}/hadolint-Linux-x86_64 \
# && chmod +x /tmp/hadolint
# -
# name: Run Hadolint
# run: |
# /tmp/hadolint util/Dockerfile
# Build Prowler OSS container
container-build:
container-build-push:
# needs: dockerfile-linter
runs-on: ubuntu-latest
env:
POETRY_VIRTUALENVS_CREATE: "false"
steps:
- name: Checkout
uses: actions/checkout@v3
- name: setup python (release)
uses: actions/checkout@v4
- name: Setup python (release)
if: github.event_name == 'release'
uses: actions/setup-python@v2
uses: actions/setup-python@v5
with:
python-version: 3.9 #install the python needed
python-version: ${{ env.PYTHON_VERSION }}
- name: Install dependencies (release)
if: github.event_name == 'release'
run: |
pipx install poetry
pipx inject poetry poetry-bumpversion
- name: Update Prowler version (release)
if: github.event_name == 'release'
run: |
poetry version ${{ github.event.release.tag_name }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Build
uses: docker/build-push-action@v2
with:
# Use local context to get changes
# https://github.com/docker/build-push-action#path-context
context: .
# Without pushing to registries
push: false
tags: ${{ env.IMAGE_NAME }}:${{ env.TEMPORARY_TAG }}
file: ${{ env.DOCKERFILE_PATH }}
outputs: type=docker,dest=/tmp/${{ env.IMAGE_NAME }}.tar
- name: Share image between jobs
uses: actions/upload-artifact@v2
with:
name: ${{ env.IMAGE_NAME }}.tar
path: /tmp/${{ env.IMAGE_NAME }}.tar
# Lint Prowler OSS container using Dockle
# container-linter:
# needs: container-build
# runs-on: ubuntu-latest
# steps:
# -
# name: Get container image from shared
# uses: actions/download-artifact@v2
# with:
# name: ${{ env.IMAGE_NAME }}.tar
# path: /tmp
# -
# name: Load Docker image
# run: |
# docker load --input /tmp/${{ env.IMAGE_NAME }}.tar
# docker image ls -a
# -
# name: Install Dockle
# run: |
# VERSION=$(curl --silent "https://api.github.com/repos/goodwithtech/dockle/releases/latest" | \
# grep '"tag_name":' | \
# sed -E 's/.*"v([^"]+)".*/\1/' \
# ) && curl -L -o dockle.deb https://github.com/goodwithtech/dockle/releases/download/v${VERSION}/dockle_${VERSION}_Linux-64bit.deb \
# && sudo dpkg -i dockle.deb && rm dockle.deb
# -
# name: Run Dockle
# run: dockle ${{ env.IMAGE_NAME }}:${{ env.TEMPORARY_TAG }}
# Push Prowler OSS container to registries
container-push:
# needs: container-linter
needs: container-build
runs-on: ubuntu-latest
permissions:
id-token: write
contents: read # This is required for actions/checkout
steps:
- name: Get container image from shared
uses: actions/download-artifact@v2
with:
name: ${{ env.IMAGE_NAME }}.tar
path: /tmp
- name: Load Docker image
run: |
docker load --input /tmp/${{ env.IMAGE_NAME }}.tar
docker image ls -a
- name: Login to DockerHub
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to Public ECR
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
registry: public.ecr.aws
username: ${{ secrets.PUBLIC_ECR_AWS_ACCESS_KEY_ID }}
password: ${{ secrets.PUBLIC_ECR_AWS_SECRET_ACCESS_KEY }}
env:
AWS_REGION: ${{ env.AWS_REGION_PRO }}
AWS_REGION: ${{ env.AWS_REGION }}
- name: Tag (latest)
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build and push container image (latest)
if: github.event_name == 'push'
run: |
docker tag ${{ env.IMAGE_NAME }}:${{ env.TEMPORARY_TAG }} ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.LATEST_TAG }}
docker tag ${{ env.IMAGE_NAME }}:${{ env.TEMPORARY_TAG }} ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.LATEST_TAG }}
- # Push to master branch - push "latest" tag
name: Push (latest)
if: github.event_name == 'push'
run: |
docker push ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.LATEST_TAG }}
docker push ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.LATEST_TAG }}
- # Tag the new release (stable and release tag)
name: Tag (release)
if: github.event_name == 'release'
run: |
docker tag ${{ env.IMAGE_NAME }}:${{ env.TEMPORARY_TAG }} ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ github.event.release.tag_name }}
docker tag ${{ env.IMAGE_NAME }}:${{ env.TEMPORARY_TAG }} ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ github.event.release.tag_name }}
docker tag ${{ env.IMAGE_NAME }}:${{ env.TEMPORARY_TAG }} ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.STABLE_TAG }}
docker tag ${{ env.IMAGE_NAME }}:${{ env.TEMPORARY_TAG }} ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.STABLE_TAG }}
- # Push the new release (stable and release tag)
name: Push (release)
if: github.event_name == 'release'
run: |
docker push ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ github.event.release.tag_name }}
docker push ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ github.event.release.tag_name }}
docker push ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.STABLE_TAG }}
docker push ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.STABLE_TAG }}
- name: Delete artifacts
if: always()
uses: geekyeggo/delete-artifact@v1
uses: docker/build-push-action@v5
with:
name: ${{ env.IMAGE_NAME }}.tar
push: true
tags: |
${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.LATEST_TAG }}
${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.LATEST_TAG }}
file: ${{ env.DOCKERFILE_PATH }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Build and push container image (release)
if: github.event_name == 'release'
uses: docker/build-push-action@v5
with:
# Use local context to get changes
# https://github.com/docker/build-push-action#path-context
context: .
push: true
tags: |
${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ github.event.release.tag_name }}
${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.STABLE_TAG }}
${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ github.event.release.tag_name }}
${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.STABLE_TAG }}
file: ${{ env.DOCKERFILE_PATH }}
cache-from: type=gha
cache-to: type=gha,mode=max
dispatch-action:
needs: container-push
needs: container-build-push
runs-on: ubuntu-latest
steps:
- name: Get latest commit info
if: github.event_name == 'push'
run: |
LATEST_COMMIT_HASH=$(echo ${{ github.event.after }} | cut -b -7)
echo "LATEST_COMMIT_HASH=${LATEST_COMMIT_HASH}" >> $GITHUB_ENV
- name: Dispatch event for latest
if: github.event_name == 'push'
run: |
curl https://api.github.com/repos/${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}/dispatches -H "Accept: application/vnd.github+json" -H "Authorization: Bearer ${{ secrets.ACCESS_TOKEN }}" -H "X-GitHub-Api-Version: 2022-11-28" --data '{"event_type":"dispatch","client_payload":{"version":"latest"}'
curl https://api.github.com/repos/${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}/dispatches -H "Accept: application/vnd.github+json" -H "Authorization: Bearer ${{ secrets.ACCESS_TOKEN }}" -H "X-GitHub-Api-Version: 2022-11-28" --data '{"event_type":"dispatch","client_payload":{"version":"latest", "tag": "${{ env.LATEST_COMMIT_HASH }}"}}'
- name: Dispatch event for release
if: github.event_name == 'release'
run: |

View File

@@ -13,10 +13,10 @@ name: "CodeQL"
on:
push:
branches: [ "master", prowler-2, prowler-3.0-dev ]
branches: [ "master", "prowler-4.0-dev" ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ "master" ]
branches: [ "master", "prowler-4.0-dev" ]
schedule:
- cron: '00 12 * * *'
@@ -37,11 +37,11 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v3
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -52,6 +52,6 @@ jobs:
# queries: security-extended,security-and-quality
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"

View File

@@ -7,11 +7,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: TruffleHog OSS
uses: trufflesecurity/trufflehog@v3.4.4
uses: trufflesecurity/trufflehog@v3.67.2
with:
path: ./
base: ${{ github.event.repository.default_branch }}

View File

@@ -4,27 +4,45 @@ on:
push:
branches:
- "master"
- "prowler-4.0-dev"
pull_request:
branches:
- "master"
- "prowler-4.0-dev"
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.9"]
python-version: ["3.9", "3.10", "3.11", "3.12"]
steps:
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
- uses: actions/checkout@v4
- name: Test if changes are in not ignored paths
id: are-non-ignored-files-changed
uses: tj-actions/changed-files@v42
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
files: ./**
files_ignore: |
.github/**
README.md
docs/**
permissions/**
mkdocs.yml
- name: Install poetry
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
python -m pip install --upgrade pip
pip install poetry
pipx install poetry
- name: Set up Python ${{ matrix.python-version }}
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: "poetry"
- name: Install dependencies
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry install
poetry run pip list
VERSION=$(curl --silent "https://api.github.com/repos/hadolint/hadolint/releases/latest" | \
@@ -32,27 +50,44 @@ jobs:
sed -E 's/.*"v([^"]+)".*/\1/' \
) && curl -L -o /tmp/hadolint "https://github.com/hadolint/hadolint/releases/download/v${VERSION}/hadolint-Linux-x86_64" \
&& chmod +x /tmp/hadolint
- name: Poetry check
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry lock --check
- name: Lint with flake8
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry run flake8 . --ignore=E266,W503,E203,E501,W605,E128 --exclude contrib
- name: Checking format with black
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry run black --check .
- name: Lint with pylint
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry run pylint --disable=W,C,R,E -j 0 -rn -sn prowler/
- name: Bandit
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry run bandit -q -lll -x '*_test.py,./contrib/' -r .
- name: Safety
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry run safety check
- name: Vulture
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry run vulture --exclude "contrib" --min-confidence 100 .
- name: Hadolint
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
/tmp/hadolint Dockerfile --ignore=DL3013
- name: Test with pytest
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry run pytest tests -n auto
poetry run pytest -n auto --cov=./prowler --cov-report=xml tests
- name: Upload coverage reports to Codecov
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
uses: codecov/codecov-action@v4
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

View File

@@ -16,17 +16,18 @@ jobs:
name: Release Prowler to PyPI
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{ env.GITHUB_BRANCH }}
- name: setup python
uses: actions/setup-python@v2
with:
python-version: 3.9 #install the python needed
- name: Install dependencies
run: |
pipx install poetry
pipx inject poetry poetry-bumpversion
- name: setup python
uses: actions/setup-python@v5
with:
python-version: 3.9
cache: 'poetry'
- name: Change version and Build package
run: |
poetry version ${{ env.RELEASE_TAG }}
@@ -43,7 +44,7 @@ jobs:
poetry publish
# Create pull request with new version
- name: Create Pull Request
uses: peter-evans/create-pull-request@v4
uses: peter-evans/create-pull-request@v6
with:
token: ${{ secrets.PROWLER_ACCESS_TOKEN }}
commit-message: "chore(release): update Prowler Version to ${{ env.RELEASE_TAG }}."

View File

@@ -23,12 +23,12 @@ jobs:
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{ env.GITHUB_BRANCH }}
- name: setup python
uses: actions/setup-python@v2
uses: actions/setup-python@v5
with:
python-version: 3.9 #install the python needed
@@ -38,7 +38,7 @@ jobs:
pip install boto3
- name: Configure AWS Credentials -- DEV
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: ${{ env.AWS_REGION_DEV }}
role-to-assume: ${{ secrets.DEV_IAM_ROLE_ARN }}
@@ -50,12 +50,12 @@ jobs:
# Create pull request
- name: Create Pull Request
uses: peter-evans/create-pull-request@v4
uses: peter-evans/create-pull-request@v6
with:
token: ${{ secrets.PROWLER_ACCESS_TOKEN }}
commit-message: "feat(regions_update): Update regions for AWS services."
branch: "aws-services-regions-updated-${{ github.sha }}"
labels: "status/waiting-for-revision, severity/low"
labels: "status/waiting-for-revision, severity/low, provider/aws"
title: "chore(regions_update): Changes in regions for AWS services."
body: |
### Description

5
.gitignore vendored
View File

@@ -46,3 +46,8 @@ junit-reports/
# .env
.env*
# Coverage
.coverage*
.coverage
coverage*

View File

@@ -1,7 +1,7 @@
repos:
## GENERAL
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
rev: v4.5.0
hooks:
- id: check-merge-conflict
- id: check-yaml
@@ -15,10 +15,11 @@ repos:
## TOML
- repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks
rev: v2.7.0
rev: v2.12.0
hooks:
- id: pretty-format-toml
args: [--autofix]
files: pyproject.toml
## BASH
- repo: https://github.com/koalaman/shellcheck-precommit
@@ -27,7 +28,7 @@ repos:
- id: shellcheck
## PYTHON
- repo: https://github.com/myint/autoflake
rev: v2.0.1
rev: v2.2.1
hooks:
- id: autoflake
args:
@@ -38,27 +39,29 @@ repos:
]
- repo: https://github.com/timothycrosley/isort
rev: 5.12.0
rev: 5.13.2
hooks:
- id: isort
args: ["--profile", "black"]
- repo: https://github.com/psf/black
rev: 23.1.0
rev: 24.1.1
hooks:
- id: black
- repo: https://github.com/pycqa/flake8
rev: 6.0.0
rev: 7.0.0
hooks:
- id: flake8
exclude: contrib
args: ["--ignore=E266,W503,E203,E501,W605"]
- repo: https://github.com/haizaar/check-pipfile-lock
rev: v0.0.5
- repo: https://github.com/python-poetry/poetry
rev: 1.7.0
hooks:
- id: check-pipfile-lock
- id: poetry-check
- id: poetry-lock
args: ["--no-update"]
- repo: https://github.com/hadolint/hadolint
rev: v2.12.1-beta
@@ -72,17 +75,23 @@ repos:
name: pylint
entry: bash -c 'pylint --disable=W,C,R,E -j 0 -rn -sn prowler/'
language: system
files: '.*\.py'
- id: pytest-check
name: pytest-check
entry: bash -c 'pytest tests -n auto'
- id: trufflehog
name: TruffleHog
description: Detect secrets in your data.
entry: bash -c 'trufflehog --no-update git file://. --only-verified --fail'
# For running trufflehog in docker, use the following entry instead:
# entry: bash -c 'docker run -v "$(pwd):/workdir" -i --rm trufflesecurity/trufflehog:latest git file:///workdir --only-verified --fail'
language: system
stages: ["commit", "push"]
- id: bandit
name: bandit
description: "Bandit is a tool for finding common security issues in Python code"
entry: bash -c 'bandit -q -lll -x '*_test.py,./contrib/' -r .'
language: system
files: '.*\.py'
- id: safety
name: safety
@@ -95,3 +104,4 @@ repos:
description: "Vulture finds unused code in Python programs."
entry: bash -c 'vulture --exclude "contrib" --min-confidence 100 .'
language: system
files: '.*\.py'

View File

@@ -55,7 +55,7 @@ further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at community@prowler.cloud. All
reported by contacting the project team at [support.prowler.com](https://customer.support.prowler.com/servicedesk/customer/portals). All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.

13
CONTRIBUTING.md Normal file
View File

@@ -0,0 +1,13 @@
# Do you want to learn how to...
- Contribute with your code or fixes to Prowler
- Create a new check for a provider
- Create a new security compliance framework
- Add a custom output format
- Add a new integration
- Contribute with documentation
Want some swag as appreciation for your contribution?
# Prowler Developer Guide
https://docs.prowler.cloud/en/latest/tutorials/developer-guide/

View File

@@ -1,9 +1,10 @@
FROM python:3.9-alpine
FROM python:3.11-alpine
LABEL maintainer="https://github.com/prowler-cloud/prowler"
# Update system dependencies
RUN apk --no-cache upgrade
#hadolint ignore=DL3018
RUN apk --no-cache upgrade && apk --no-cache add curl
# Create nonroot user
RUN mkdir -p /home/prowler && \

View File

@@ -2,12 +2,19 @@
##@ Testing
test: ## Test with pytest
pytest -n auto -vvv -s -x
rm -rf .coverage && \
pytest -n auto -vvv -s --cov=./prowler --cov-report=xml tests
coverage: ## Show Test Coverage
coverage run --skip-covered -m pytest -v && \
coverage report -m && \
rm -rf .coverage
rm -rf .coverage && \
coverage report -m
coverage-html: ## Show Test Coverage
rm -rf ./htmlcov && \
coverage html && \
open htmlcov/index.html
##@ Linting
format: ## Format Code

View File

@@ -1,25 +1,25 @@
<p align="center">
<img align="center" src="https://github.com/prowler-cloud/prowler/blob/62c1ce73bbcdd6b9e5ba03dfcae26dfd165defd9/docs/img/prowler-pro-dark.png?raw=True#gh-dark-mode-only" width="150" height="36">
<img align="center" src="https://github.com/prowler-cloud/prowler/blob/62c1ce73bbcdd6b9e5ba03dfcae26dfd165defd9/docs/img/prowler-pro-light.png?raw=True#gh-light-mode-only" width="15%" height="15%">
<img align="center" src="https://github.com/prowler-cloud/prowler/blob/master/docs/img/prowler-logo-black.png?raw=True#gh-light-mode-only" width="350" height="115">
<img align="center" src="https://github.com/prowler-cloud/prowler/blob/master/docs/img/prowler-logo-white.png?raw=True#gh-dark-mode-only" width="350" height="115">
</p>
<p align="center">
<b><i>See all the things you and your team can do with ProwlerPro at <a href="https://prowler.pro">prowler.pro</a></i></b>
<b><i>Prowler SaaS</i></b> and <b>Prowler Open Source</b> are as dynamic and adaptable as the environment they're meant to protect. Trusted by the leaders in security.
</p>
<p align="center">
<b>Learn more at <a href="https://prowler.com">prowler.com</i></b>
</p>
<hr>
<p align="center">
<img src="https://user-images.githubusercontent.com/3985464/113734260-7ba06900-96fb-11eb-82bc-d4f68a1e2710.png" />
</p>
<p align="center">
<a href="https://join.slack.com/t/prowler-workspace/shared_invite/zt-1hix76xsl-2uq222JIXrC7Q8It~9ZNog"><img alt="Slack Shield" src="https://img.shields.io/badge/slack-prowler-brightgreen.svg?logo=slack"></a>
<a href="https://pypi.org/project/prowler-cloud/"><img alt="Python Version" src="https://img.shields.io/pypi/v/prowler.svg"></a>
<a href="https://pypi.python.org/pypi/prowler-cloud/"><img alt="Python Version" src="https://img.shields.io/pypi/pyversions/prowler.svg"></a>
<a href="https://pypi.org/project/prowler/"><img alt="Python Version" src="https://img.shields.io/pypi/v/prowler.svg"></a>
<a href="https://pypi.python.org/pypi/prowler/"><img alt="Python Version" src="https://img.shields.io/pypi/pyversions/prowler.svg"></a>
<a href="https://pypistats.org/packages/prowler"><img alt="PyPI Prowler Downloads" src="https://img.shields.io/pypi/dw/prowler.svg?label=prowler%20downloads"></a>
<a href="https://pypistats.org/packages/prowler-cloud"><img alt="PyPI Prowler-Cloud Downloads" src="https://img.shields.io/pypi/dw/prowler-cloud.svg?label=prowler-cloud%20downloads"></a>
<a href="https://formulae.brew.sh/formula/prowler#default"><img alt="Brew Prowler Downloads" src="https://img.shields.io/homebrew/installs/dm/prowler?label=brew%20downloads"></a>
<a href="https://hub.docker.com/r/toniblyx/prowler"><img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/toniblyx/prowler"></a>
<a href="https://hub.docker.com/r/toniblyx/prowler"><img alt="Docker" src="https://img.shields.io/docker/cloud/build/toniblyx/prowler"></a>
<a href="https://hub.docker.com/r/toniblyx/prowler"><img alt="Docker" src="https://img.shields.io/docker/image-size/toniblyx/prowler"></a>
<a href="https://gallery.ecr.aws/o4g1s5r6/prowler"><img width="120" height=19" alt="AWS ECR Gallery" src="https://user-images.githubusercontent.com/3985464/151531396-b6535a68-c907-44eb-95a1-a09508178616.png"></a>
<a href="https://gallery.ecr.aws/prowler-cloud/prowler"><img width="120" height=19" alt="AWS ECR Gallery" src="https://user-images.githubusercontent.com/3985464/151531396-b6535a68-c907-44eb-95a1-a09508178616.png"></a>
<a href="https://codecov.io/gh/prowler-cloud/prowler"><img src="https://codecov.io/gh/prowler-cloud/prowler/graph/badge.svg?token=OflBGsdpDl"/></a>
</p>
<p align="center">
<a href="https://github.com/prowler-cloud/prowler"><img alt="Repo size" src="https://img.shields.io/github/repo-size/prowler-cloud/prowler"></a>
@@ -31,24 +31,38 @@
<a href="https://twitter.com/ToniBlyx"><img alt="Twitter" src="https://img.shields.io/twitter/follow/toniblyx?style=social"></a>
<a href="https://twitter.com/prowlercloud"><img alt="Twitter" src="https://img.shields.io/twitter/follow/prowlercloud?style=social"></a>
</p>
<hr>
# Description
`Prowler` is an Open Source security tool to perform AWS and Azure security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness.
`Prowler` is an Open Source security tool to perform AWS, GCP and Azure security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness.
It contains hundreds of controls covering CIS, PCI-DSS, ISO27001, GDPR, HIPAA, FFIEC, SOC2, AWS FTR, ENS and custom security frameworks.
It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, FedRAMP, PCI-DSS, GDPR, HIPAA, FFIEC, SOC2, GXP, AWS Well-Architected Framework Security Pillar, AWS Foundational Technical Review (FTR), ENS (Spanish National Security Scheme) and your custom security frameworks.
| Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.cloud/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.cloud/en/latest/tutorials/misc/#categories) |
|---|---|---|---|---|
| AWS | 302 | 61 -> `prowler aws --list-services` | 27 -> `prowler aws --list-compliance` | 6 -> `prowler aws --list-categories` |
| GCP | 73 | 11 -> `prowler gcp --list-services` | 1 -> `prowler gcp --list-compliance` | 2 -> `prowler gcp --list-categories`|
| Azure | 37 | 4 -> `prowler azure --list-services` | CIS soon | 1 -> `prowler azure --list-categories` |
| Kubernetes | Work In Progress | - | CIS soon | - |
# 📖 Documentation
The full documentation can now be found at [https://docs.prowler.cloud](https://docs.prowler.cloud)
## Looking for Prowler v2 documentation?
For Prowler v2 Documentation, please go to https://github.com/prowler-cloud/prowler/tree/2.12.1.
# ⚙️ Install
## Pip package
Prowler is available as a project in [PyPI](https://pypi.org/project/prowler-cloud/), thus can be installed using pip with Python >= 3.9:
Prowler is available as a project in [PyPI](https://pypi.org/project/prowler-cloud/), thus can be installed using pip with Python >= 3.9, < 3.13:
```console
pip install prowler
prowler -v
```
More details at https://docs.prowler.cloud
## Containers
@@ -61,11 +75,11 @@ The available versions of Prowler are the following:
The container images are available here:
- [DockerHub](https://hub.docker.com/r/toniblyx/prowler/tags)
- [AWS Public ECR](https://gallery.ecr.aws/o4g1s5r6/prowler)
- [AWS Public ECR](https://gallery.ecr.aws/prowler-cloud/prowler)
## From Github
Python >= 3.9 is required with pip and poetry:
Python >= 3.9, < 3.13 is required with pip and poetry:
```
git clone https://github.com/prowler-cloud/prowler
@@ -75,20 +89,15 @@ poetry install
python prowler.py -v
```
# 📖 Documentation
The full documentation can now be found at [https://docs.prowler.cloud](https://docs.prowler.cloud)
# 📐✏️ High level architecture
You can run Prowler from your workstation, an EC2 instance, Fargate or any other container, CodeBuild, CloudShell and Cloud9.
![Architecture](https://github.com/prowler-cloud/prowler/blob/62c1ce73bbcdd6b9e5ba03dfcae26dfd165defd9/docs/img/architecture.png?raw=True)
![Architecture](https://github.com/prowler-cloud/prowler/assets/38561120/080261d9-773d-4af1-af79-217a273e3176)
# 📝 Requirements
Prowler has been written in Python using the [AWS SDK (Boto3)](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html#) and [Azure SDK](https://azure.github.io/azure-sdk-for-python/).
Prowler has been written in Python using the [AWS SDK (Boto3)](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html#), [Azure SDK](https://azure.github.io/azure-sdk-for-python/) and [GCP API Python Client](https://github.com/googleapis/google-api-python-client/).
## AWS
Since Prowler uses AWS Credentials under the hood, you can follow any authentication method as described [here](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-precedence).
@@ -108,8 +117,8 @@ Make sure you have properly configured your AWS-CLI with a valid Access Key and
Those credentials must be associated to a user or role with proper permissions to do all checks. To make sure, add the following AWS managed policies to the user or role being used:
- arn:aws:iam::aws:policy/SecurityAudit
- arn:aws:iam::aws:policy/job-function/ViewOnlyAccess
- `arn:aws:iam::aws:policy/SecurityAudit`
- `arn:aws:iam::aws:policy/job-function/ViewOnlyAccess`
> Moreover, some read-only additional permissions are needed for several checks, make sure you attach also the custom policy [prowler-additions-policy.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-additions-policy.json) to the role you are using.
@@ -137,7 +146,7 @@ export AZURE_CLIENT_SECRET="XXXXXXX"
If you try to execute Prowler with the `--sp-env-auth` flag and those variables are empty or not exported, the execution is going to fail.
### AZ CLI / Browser / Managed Identity authentication
The other three cases do not need additional configuration, `--az-cli-auth` and `--managed-identity-auth` are automated options, `--browser-auth` needs the user to authenticate using the default browser to start the scan.
The other three cases do not need additional configuration, `--az-cli-auth` and `--managed-identity-auth` are automated options, `--browser-auth` needs the user to authenticate using the default browser to start the scan. Also `--browser-auth` needs the tenant id to be specified with `--tenant-id`.
### Permissions
@@ -163,6 +172,18 @@ Regarding the subscription scope, Prowler by default scans all the subscriptions
- `Reader`
## Google Cloud Platform
Prowler will follow the same credentials search as [Google authentication libraries](https://cloud.google.com/docs/authentication/application-default-credentials#search_order):
1. [GOOGLE_APPLICATION_CREDENTIALS environment variable](https://cloud.google.com/docs/authentication/application-default-credentials#GAC)
2. [User credentials set up by using the Google Cloud CLI](https://cloud.google.com/docs/authentication/application-default-credentials#personal)
3. [The attached service account, returned by the metadata server](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa)
Those credentials must be associated to a user or service account with proper permissions to do all checks. To make sure, add the `Viewer` role to the member associated with the credentials.
> By default, `prowler` will scan all accessible GCP Projects, use flag `--project-ids` to specify the projects to be scanned.
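For reference, the same search order can be reproduced with the Google auth library that GCP SDK clients use. This is a minimal sketch, assuming `google-auth` is installed and at least one of the three credential sources above is available:
```python
# Minimal sketch: google.auth.default() walks the same search order listed
# above (GOOGLE_APPLICATION_CREDENTIALS, gcloud user credentials, attached
# service account) and returns the first credentials it finds.
from google.auth import default
from google.auth.exceptions import DefaultCredentialsError

try:
    credentials, project_id = default()
    print(f"Resolved credentials for default project: {project_id}")
except DefaultCredentialsError:
    print("No Application Default Credentials found")
```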
# 💻 Basic Usage
To run prowler, you will need to specify the provider (e.g. aws or azure):
@@ -237,12 +258,14 @@ prowler azure [--sp-env-auth, --az-cli-auth, --browser-auth, --managed-identity-
```
> By default, `prowler` will scan all Azure subscriptions.
# 🎉 New Features
## Google Cloud Platform
- Python: we got rid of all bash and it is now all in Python.
- Faster: huge performance improvements (same account from 2.5 hours to 4 minutes).
- Developers and community: we have made it easier to contribute with new checks and new compliance frameworks. We also included unit tests.
- Multi-cloud: in addition to AWS, we have added Azure, we plan to include GCP and OCI soon, let us know if you want to contribute!
Optionally, you can provide the location of an application credential JSON file with the following argument:
```console
prowler gcp --credentials-file path
```
> By default, `prowler` will scan all accessible GCP Projects, use flag `--project-ids` to specify the projects to be scanned.
# 📃 License

View File

@@ -14,7 +14,7 @@ As an **AWS Partner** and we have passed the [AWS Foundation Technical Review (F
If you would like to report a vulnerability or have a security concern regarding Prowler Open Source or the ProwlerPro service, please submit the information by contacting help@prowler.pro.
The information you share with Verica as part of this process is kept confidential within Verica and the Prowler team. We will only share this information with a third party if the vulnerability you report is found to affect a third-party product, in which case we will share this information with the third-party product's author or manufacturer. Otherwise, we will only share this information as permitted by you.
The information you share with ProwlerPro as part of this process is kept confidential within ProwlerPro. We will only share this information with a third party if the vulnerability you report is found to affect a third-party product, in which case we will share this information with the third-party product's author or manufacturer. Otherwise, we will only share this information as permitted by you.
We will review the submitted report, and assign it a tracking number. We will then respond to you, acknowledging receipt of the report, and outline the next steps in the process.

View File

@@ -1,45 +1,24 @@
# Build command
# docker build --platform=linux/amd64 --no-cache -t prowler:latest .
FROM public.ecr.aws/amazonlinux/amazonlinux:2022
ARG PROWLER_VERSION=latest
ARG PROWLERVER=2.9.0
ARG USERNAME=prowler
ARG USERID=34000
FROM toniblyx/prowler:${PROWLER_VERSION}
# Install Dependencies
RUN \
dnf update -y && \
dnf install -y bash file findutils git jq python3 python3-pip \
python3-setuptools python3-wheel shadow-utils tar unzip which && \
dnf remove -y awscli && \
dnf clean all && \
useradd -l -s /bin/sh -U -u ${USERID} ${USERNAME} && \
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
unzip awscliv2.zip && \
./aws/install && \
pip3 install --no-cache-dir --upgrade pip && \
pip3 install --no-cache-dir "git+https://github.com/ibm/detect-secrets.git@master#egg=detect-secrets" && \
rm -rf aws awscliv2.zip /var/cache/dnf
USER 0
# hadolint ignore=DL3018
RUN apk --no-cache add bash aws-cli jq
# Place script and env vars
COPY .awsvariables run-prowler-securityhub.sh /
ARG MULTI_ACCOUNT_SECURITY_HUB_PATH=/home/prowler/multi-account-securityhub
# Installs prowler and change permissions
RUN \
curl -L "https://github.com/prowler-cloud/prowler/archive/refs/tags/${PROWLERVER}.tar.gz" -o "prowler.tar.gz" && \
tar xvzf prowler.tar.gz && \
rm -f prowler.tar.gz && \
mv prowler-${PROWLERVER} prowler && \
chown ${USERNAME}:${USERNAME} /run-prowler-securityhub.sh && \
chmod 500 /run-prowler-securityhub.sh && \
chown ${USERNAME}:${USERNAME} /.awsvariables && \
chmod 400 /.awsvariables && \
chown ${USERNAME}:${USERNAME} -R /prowler && \
chmod +x /prowler/prowler
USER prowler
# Drop to user
USER ${USERNAME}
# Move script and environment variables
RUN mkdir "${MULTI_ACCOUNT_SECURITY_HUB_PATH}"
COPY --chown=prowler:prowler .awsvariables run-prowler-securityhub.sh "${MULTI_ACCOUNT_SECURITY_HUB_PATH}"/
RUN chmod 500 "${MULTI_ACCOUNT_SECURITY_HUB_PATH}"/run-prowler-securityhub.sh && \
chmod 400 "${MULTI_ACCOUNT_SECURITY_HUB_PATH}"/.awsvariables
# Run script
ENTRYPOINT ["/run-prowler-securityhub.sh"]
WORKDIR ${MULTI_ACCOUNT_SECURITY_HUB_PATH}
ENTRYPOINT ["./run-prowler-securityhub.sh"]

View File

@@ -1,20 +1,17 @@
#!/bin/bash
# Run Prowler against All AWS Accounts in an AWS Organization
# Change Directory (rest of the script, assumes you're in the root directory)
cd / || exit
# Show Prowler Version
./prowler/prowler -V
prowler -v
# Source .awsvariables
# shellcheck disable=SC1091
source .awsvariables
# Get Values from Environment Variables
echo "ROLE: $ROLE"
echo "PARALLEL_ACCOUNTS: $PARALLEL_ACCOUNTS"
echo "REGION: $REGION"
echo "ROLE: ${ROLE}"
echo "PARALLEL_ACCOUNTS: ${PARALLEL_ACCOUNTS}"
echo "REGION: ${REGION}"
# Function to unset AWS Profile Variables
unset_aws() {
@@ -24,33 +21,33 @@ unset_aws
# Find THIS Account AWS Number
CALLER_ARN=$(aws sts get-caller-identity --output text --query "Arn")
PARTITION=$(echo "$CALLER_ARN" | cut -d: -f2)
THISACCOUNT=$(echo "$CALLER_ARN" | cut -d: -f5)
echo "THISACCOUNT: $THISACCOUNT"
echo "PARTITION: $PARTITION"
PARTITION=$(echo "${CALLER_ARN}" | cut -d: -f2)
THISACCOUNT=$(echo "${CALLER_ARN}" | cut -d: -f5)
echo "THISACCOUNT: ${THISACCOUNT}"
echo "PARTITION: ${PARTITION}"
# Function to Assume Role to THIS Account & Create Session
this_account_session() {
unset_aws
role_credentials=$(aws sts assume-role --role-arn arn:"$PARTITION":iam::"$THISACCOUNT":role/"$ROLE" --role-session-name ProwlerRun --output json)
AWS_ACCESS_KEY_ID=$(echo "$role_credentials" | jq -r .Credentials.AccessKeyId)
AWS_SECRET_ACCESS_KEY=$(echo "$role_credentials" | jq -r .Credentials.SecretAccessKey)
AWS_SESSION_TOKEN=$(echo "$role_credentials" | jq -r .Credentials.SessionToken)
role_credentials=$(aws sts assume-role --role-arn arn:"${PARTITION}":iam::"${THISACCOUNT}":role/"${ROLE}" --role-session-name ProwlerRun --output json)
AWS_ACCESS_KEY_ID=$(echo "${role_credentials}" | jq -r .Credentials.AccessKeyId)
AWS_SECRET_ACCESS_KEY=$(echo "${role_credentials}" | jq -r .Credentials.SecretAccessKey)
AWS_SESSION_TOKEN=$(echo "${role_credentials}" | jq -r .Credentials.SessionToken)
export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN
}
# Find AWS Master Account
this_account_session
AWSMASTER=$(aws organizations describe-organization --query Organization.MasterAccountId --output text)
echo "AWSMASTER: $AWSMASTER"
echo "AWSMASTER: ${AWSMASTER}"
# Function to Assume Role to Master Account & Create Session
master_account_session() {
unset_aws
role_credentials=$(aws sts assume-role --role-arn arn:"$PARTITION":iam::"$AWSMASTER":role/"$ROLE" --role-session-name ProwlerRun --output json)
AWS_ACCESS_KEY_ID=$(echo "$role_credentials" | jq -r .Credentials.AccessKeyId)
AWS_SECRET_ACCESS_KEY=$(echo "$role_credentials" | jq -r .Credentials.SecretAccessKey)
AWS_SESSION_TOKEN=$(echo "$role_credentials" | jq -r .Credentials.SessionToken)
role_credentials=$(aws sts assume-role --role-arn arn:"${PARTITION}":iam::"${AWSMASTER}":role/"${ROLE}" --role-session-name ProwlerRun --output json)
AWS_ACCESS_KEY_ID=$(echo "${role_credentials}" | jq -r .Credentials.AccessKeyId)
AWS_SECRET_ACCESS_KEY=$(echo "${role_credentials}" | jq -r .Credentials.SecretAccessKey)
AWS_SESSION_TOKEN=$(echo "${role_credentials}" | jq -r .Credentials.SessionToken)
export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN
}
@@ -60,20 +57,20 @@ ACCOUNTS_IN_ORGS=$(aws organizations list-accounts --query Accounts[*].Id --outp
# Run Prowler against Accounts in AWS Organization
echo "AWS Accounts in Organization"
echo "$ACCOUNTS_IN_ORGS"
for accountId in $ACCOUNTS_IN_ORGS; do
echo "${ACCOUNTS_IN_ORGS}"
for accountId in ${ACCOUNTS_IN_ORGS}; do
# shellcheck disable=SC2015
test "$(jobs | wc -l)" -ge $PARALLEL_ACCOUNTS && wait -n || true
test "$(jobs | wc -l)" -ge "${PARALLEL_ACCOUNTS}" && wait -n || true
{
START_TIME=$SECONDS
START_TIME=${SECONDS}
# Unset AWS Profile Variables
unset_aws
# Run Prowler
echo -e "Assessing AWS Account: $accountId, using Role: $ROLE on $(date)"
echo -e "Assessing AWS Account: ${accountId}, using Role: ${ROLE} on $(date)"
# Pipe stdout to /dev/null to reduce unnecessary Cloudwatch logs
./prowler/prowler -R "$ROLE" -A "$accountId" -M json-asff -q -S -f "$REGION" > /dev/null
prowler aws -R arn:"${PARTITION}":iam::"${accountId}":role/"${ROLE}" -q -S -f "${REGION}" > /dev/null
TOTAL_SEC=$((SECONDS - START_TIME))
printf "Completed AWS Account: $accountId in %02dh:%02dm:%02ds" $((TOTAL_SEC / 3600)) $((TOTAL_SEC % 3600 / 60)) $((TOTAL_SEC % 60))
printf "Completed AWS Account: ${accountId} in %02dh:%02dm:%02ds" $((TOTAL_SEC / 3600)) $((TOTAL_SEC % 3600 / 60)) $((TOTAL_SEC % 60))
echo ""
} &
done

View File

@@ -1,6 +1,6 @@
# Organizational Prowler with Serverless
Langage: [Korean](README_kr.md)
Language: [Korean](README_kr.md)
This project was created to apply Prowler in a multi-account environment within AWS Organizations.
CloudWatch triggers CodeBuild at a fixed interval.
@@ -18,12 +18,12 @@ For more information on how to use prowler, see [here](https://github.com/prowle
2. **Master Account**
1. Deploy the [ProwlerRole.yaml](templates/ProwlerRole.yaml) stack to CloudFormation to create the resources in the master account itself.
(The template will also be deployed to the other member accounts as a StackSet)
- ProwlerCodeBuildAccount : Audit Acccount ID where CodeBuild resides. (preferably Audit/Security account)
- ProwlerCodeBuildAccount : Audit Account ID where CodeBuild resides. (preferably Audit/Security account)
- ProwlerCodeBuildRole : Role name to use in CodeBuild service
- ProwlerCrossAccountRole : Role name to assume for Cross account
- ProwlerS3 : The S3 bucket name where reports will be put
1. Create **StackSet** with [ProwlerRole.yaml](templates/ProwlerRole.yaml) to deploy Role into member accounts in AWS Organizations.
- ProwlerCodeBuildAccount : Audit Acccount ID where CodeBuild resides. (preferably Audit/Security account)
- ProwlerCodeBuildAccount : Audit Account ID where CodeBuild resides. (preferably Audit/Security account)
- ProwlerCodeBuildRole : Role name to use in CodeBuild service
- ProwlerCrossAccountRole : Role name to assume for Cross account
- ProwlerS3 : The S3 bucket name where reports will be put
@@ -45,4 +45,4 @@ For more information on how to use prowler, see [here](https://github.com/prowle
- ProwlerReportS3Account : The account where the report S3 bucket resides.
1. If you'd like to change the scheduled time,
1. You can change the cron expression of ScheduleExpression within [ProwlerCodeBuildStack.yaml](templates/ProwlerCodeBuildStack.yaml).
2. Alternatively, you can make changes directrly from Events > Rules > ProwlerExecuteRule > Actions > Edit in CloudWatch console.
2. Alternatively, you can make changes directly from Events > Rules > ProwlerExecuteRule > Actions > Edit in CloudWatch console.

View File

@@ -1,6 +1,6 @@
# Organizational Prowler with Serverless
Langage: [English](README.md)
Language: [English](README.md)
이 문서는 AWS Organization 내의 multi account 환경에서 prowler 를 적용하기 위해 작성된 문서입니다.
일정 시간마다 CloudWatch는 CodeBuild 를 트리거합니다.
@@ -22,7 +22,7 @@ prowler 의 자세한 사용방법은 [이 곳](https://github.com/prowler-cloud
[ProwlerRole.yaml](templates/ProwlerRole.yaml)
- ProwlerCodeBuildAccount : CodeBuild 가 있는 Audit Acccount ID
- ProwlerCodeBuildAccount : CodeBuild 가 있는 Audit Account ID
- ProwlerCodeBuildRole : CodeBuild의 생성될 Role 이름
- ProwlerCrossAccountRole : Cross account 용 Assume할 Role 이름
- ProwlerS3 : report 가 저장될 S3 bucket 명
@@ -30,7 +30,7 @@ prowler 의 자세한 사용방법은 [이 곳](https://github.com/prowler-cloud
[ProwlerRole.yaml](templates/ProwlerRole.yaml)
- ProwlerCodeBuildAccount : CodeBuild 가 있는 Audit Acccount
- ProwlerCodeBuildAccount : CodeBuild 가 있는 Audit Account
- ProwlerCodeBuildRole : CodeBuild에서 사용할 Role 이름
- ProwlerCrossAccountRole : Cross account 용 Assume할 Role 이름
- ProwlerS3 : report 가 저장될 S3 bucket 명

View File

@@ -2,7 +2,7 @@
## Introduction
The following demonstartes how to quickly install the resources necessary to perform a security baseline using Prowler. The speed is based on the prebuilt terraform module that can configure all the resources necessuary to run Prowler with the findings being sent to AWS Security Hub.
The following demonstrates how to quickly install the resources necessary to perform a security baseline using Prowler. The speed is based on the prebuilt terraform module that can configure all the resources necessary to run Prowler with the findings being sent to AWS Security Hub.
## Install
@@ -24,7 +24,7 @@ Installing Prowler with Terraform is simple and can be completed in under 1 minu
![Prowler Install](https://prowler-docs.s3.amazonaws.com/Prowler-Terraform-Install.gif)
- It is likely an error will return related to the SecurityHub subscription. This appears to be Terraform related and you can validate the configuration by navigating to the SecurityHub console. Click Integreations and search for Prowler. Take note of the green check where it says *Accepting findings*
- It is likely an error will return related to the SecurityHub subscription. This appears to be Terraform related and you can validate the configuration by navigating to the SecurityHub console. Click Integrations and search for Prowler. Take note of the green check where it says *Accepting findings*
![Prowler Subscription](https://prowler-docs.s3.amazonaws.com/Validate-Prowler-Subscription.gif)

View File

@@ -92,7 +92,7 @@ To make sure rules are working fine, run `/var/ossec/bin/ossec-logtest` and copy
```
You must see 3 phases going on.
To check if there is any error you can enable the debug mode of `modulesd` setting the `wazuh_modules.debug=0` variable to 2 in `/var/ossec/etc/internal_options.conf` file. Restart wazun-manager and errors should appear in the `/var/ossec/logs/ossec.log` file.
To check if there is any error you can enable the debug mode of `modulesd` setting the `wazuh_modules.debug=0` variable to 2 in `/var/ossec/etc/internal_options.conf` file. Restart wazuh-manager and errors should appear in the `/var/ossec/logs/ossec.log` file.
## Thanks

View File

@@ -0,0 +1,9 @@
# Audit Info
In each Prowler provider we have a Python object called `audit_info` which is in charge of keeping the credentials, the configuration and the state of each audit, and it's passed to each service during the `__init__`.
- AWS: https://github.com/prowler-cloud/prowler/blob/master/prowler/providers/aws/lib/audit_info/models.py#L34-L54
- GCP: https://github.com/prowler-cloud/prowler/blob/master/prowler/providers/gcp/lib/audit_info/models.py#L7-L30
- Azure: https://github.com/prowler-cloud/prowler/blob/master/prowler/providers/azure/lib/audit_info/models.py#L17-L31
This `audit_info` object is shared during the Prowler execution, and for that reason it is important to mock it in each test to keep them isolated. See the [testing guide](./unit-testing.md) for more information.
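As a rough sketch (attribute names taken from the AWS model linked above), a provider service consumes this object during its `__init__` like so:
```python
# Hedged sketch: how a service consumes the shared audit_info.
# Attribute names follow the AWS_Audit_Info model linked above.
from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info


class ExampleService:
    def __init__(self, audit_info: AWS_Audit_Info):
        # Credentials, configuration and audit state all travel
        # inside the same shared object
        self.audited_account = audit_info.audited_account
        self.audited_partition = audit_info.audited_partition
        self.audit_config = audit_info.audit_config
```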

View File

@@ -0,0 +1,316 @@
# Create a new Check for a Provider
Here you can find how to create new checks for Prowler.
**Creating a check requires that the Prowler provider service already exists, so if the service is not present, or the attribute you want to audit is not retrieved by the service, please refer to the [Service](./services.md) documentation.**
## Introduction
To create a new check for a supported Prowler provider, you will need to create a folder with the check name inside the specific service for the selected provider.
We are going to use the `ec2_ami_public` check from the `AWS` provider as an example, so the folder name will be `prowler/providers/aws/services/ec2/ec2_ami_public` (following the format `prowler/providers/<provider>/services/<service>/<check_name>`), with the check name following the pattern `service_subservice_resource_action`.
Inside that folder, we need to create three files:
- An empty `__init__.py`: to make Python treat this check folder as a package.
- A `check_name.py` with the above format, containing the check's logic. Refer to the [check](./checks.md#check) section.
- A `check_name.metadata.json` containing the check's metadata. Refer to the [check metadata](./checks.md#check-metadata) section.
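For the `ec2_ami_public` example, the resulting layout is:
```
prowler/providers/aws/services/ec2/ec2_ami_public/
├── __init__.py
├── ec2_ami_public.py
└── ec2_ami_public.metadata.json
```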
## Check
Prowler's check structure is very simple: once you follow it, there is nothing more to do to include a check in a provider's service, because checks are loaded dynamically based on their paths.
The following is the code for the `ec2_ami_public` check:
```python title="Check Class"
# At the top of the file we need to import the following:
# - Check class which is in charge of the following:
# - Retrieve the check metadata and expose the `metadata()`
# to return a JSON representation of the metadata,
# read more at Check Metadata Model down below.
# - Enforce that each check implements the `execute()` function
from prowler.lib.check.models import Check, Check_Report_AWS
# Then you have to import the provider service client
# read more at the Service documentation.
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
# For each check we need to create a Python class named the same as the
# file, which inherits from the Check class.
class ec2_ami_public(Check):
"""ec2_ami_public verifies if an EC2 AMI is publicly shared"""
# Then, within the check's class we need to create the "execute(self)"
# function, which is enforced by the "Check" class to implement
# the check's interface and lets Prowler run this check.
def execute(self):
# Inside the execute(self) function we need to create
# the list of findings initialised to an empty list []
findings = []
# Then, using the service client, we need to iterate over the resources we
# want to check, in this case the EC2 AMIs stored in the
# "ec2_client.images" object.
for image in ec2_client.images:
# While iterating over the images, we have to initialise
# the Check_Report_AWS class, passing the check's metadata
# using the "metadata()" function explained above.
report = Check_Report_AWS(self.metadata())
# For each Prowler check we MUST fill the following
# Check_Report_AWS fields:
# - region
# - resource_id
# - resource_arn
# - resource_tags
# - status
# - status_extended
report.region = image.region
report.resource_id = image.id
report.resource_arn = image.arn
# The resource_tags should be filled if the resource supports
# tags, please check the service first.
report.resource_tags = image.tags
# Then we need to write the business logic for the check,
# which should always be simple because the Prowler service
# must do the heavy lifting, and the check should only be in charge
# of parsing the data provided
report.status = "PASS"
report.status_extended = f"EC2 AMI {image.id} is not public."
# In this example each "image" object has a boolean attribute
# called "public" that indicates whether the AMI is publicly shared
if image.public:
report.status = "FAIL"
report.status_extended = (
f"EC2 AMI {image.id} is currently public."
)
# Then at the same level as the "report"
# object we need to append it to the findings list.
findings.append(report)
# Last thing to do is to return the findings list to Prowler
return findings
```
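Once these three files are in place, the check is discovered dynamically; assuming the Prowler CLI's `--checks` flag, you can exercise it on its own:
```
prowler aws --checks ec2_ami_public
```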
### Check Status
All the checks MUST fill the `report.status` and `report.status_extended` with the following criteria:
- Status -- `report.status`
- `PASS` --> If the check is passing against the configured value.
- `FAIL` --> If the check is failing against the configured value.
- `INFO` --> This value can only be used when a manual operation is required in order to determine whether the `report.status` is `PASS` or `FAIL`.
- Status Extended -- `report.status_extended`
- MUST end in a dot `.`
- MUST include the service audited with the resource and a brief explanation of the result generated, e.g.: `EC2 AMI ami-0123456789 is not public.`
### Check Region
All the checks MUST fill the `report.region` with the following criteria:
- If the audited resource is regional use the `region` attribute within the resource object.
- If the audited resource is global use the `service_client.region` within the service client object.
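Using the `ec2_ami_public` example above, this translates to:
```python
# Regional resource: take the region from the resource itself
report.region = image.region
# Global resource: fall back to the service client's region
report.region = ec2_client.region
```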
### Resource ID, Name and ARN
All the checks MUST fill the `report.resource_id` and `report.resource_arn` with the following criteria:
- AWS
- Resource ID -- `report.resource_id`
- AWS Account --> Account Number `123456789012`
- AWS Resource --> Resource ID / Name
- Root resource --> `<root_account>`
- Resource ARN -- `report.resource_arn`
- AWS Account --> Root ARN `arn:aws:iam::123456789012:root`
- AWS Resource --> Resource ARN
- Root resource --> Root ARN `arn:aws:iam::123456789012:root`
- GCP
- Resource ID -- `report.resource_id`
- GCP Resource --> Resource ID
- Resource Name -- `report.resource_name`
- GCP Resource --> Resource Name
- Azure
- Resource ID -- `report.resource_id`
- Azure Resource --> Resource ID
- Resource Name -- `report.resource_name`
- Azure Resource --> Resource Name
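For example, for an AWS account-level finding the criteria above translate to:
```python
# Account-level finding: the account number is the resource ID
report.resource_id = "123456789012"
# ...and the root ARN is the resource ARN
report.resource_arn = "arn:aws:iam::123456789012:root"
```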
### Python Model
The following is the Python model for the check's class.
As of August 5th, 2023, the `Check` class can be found [here](https://github.com/prowler-cloud/prowler/blob/master/prowler/lib/check/models.py#L59-L80).
```python
class Check(ABC, Check_Metadata_Model):
"""Prowler Check"""
def __init__(self, **data):
"""Check's init function. Calls the CheckMetadataModel init."""
# Parse the Check's metadata file
metadata_file = (
os.path.abspath(sys.modules[self.__module__].__file__)[:-3]
+ ".metadata.json"
)
# Store it to validate them with Pydantic
data = Check_Metadata_Model.parse_file(metadata_file).dict()
# Calls parents init function
super().__init__(**data)
def metadata(self) -> dict:
"""Return the JSON representation of the check's metadata"""
return self.json()
@abstractmethod
def execute(self):
"""Execute the check's logic"""
```
### Using the audit config
Prowler has a [configuration file](../tutorials/configuration_file.md) which is used to pass certain configuration values to the checks, like the following:
```python title="ec2_securitygroup_with_many_ingress_egress_rules.py"
class ec2_securitygroup_with_many_ingress_egress_rules(Check):
def execute(self):
findings = []
# max_security_group_rules, default: 50
max_security_group_rules = ec2_client.audit_config.get(
"max_security_group_rules", 50
)
for security_group in ec2_client.security_groups:
```
```yaml title="config.yaml"
# AWS Configuration
aws:
# AWS EC2 Configuration
# aws.ec2_securitygroup_with_many_ingress_egress_rules
# The default value is 50 rules
max_security_group_rules: 50
```
As you can see in the above code, within the service client, in this case the `ec2_client`, there is an object called `audit_config` which is a Python dictionary containing the values read from the configuration file.
In order to use it, you first have to check if the value is present in the configuration file. If the value is not present, you can add it to the `config.yaml` file and then read it from the check.
> It is mandatory to always use the `dictionary.get(key, default)` syntax to set a default value in case the configuration value is not present.
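In other words, reusing the example above:
```python
# Wrong: raises a KeyError if the key is missing from the configuration file
max_security_group_rules = ec2_client.audit_config["max_security_group_rules"]

# Right: falls back to a sensible default when the key is absent
max_security_group_rules = ec2_client.audit_config.get("max_security_group_rules", 50)
```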
## Check Metadata
Each Prowler check has metadata associated with it, stored at the same level as the check's folder in a file called `check_name.metadata.json` containing the check's metadata.
> We include comments in this example metadata JSON for explanation purposes only; they must not be present in real metadata files because the JSON format does not allow comments.
```json
{
# Provider holds the Prowler provider to which the check belongs
"Provider": "aws",
# CheckID holds the check name
"CheckID": "ec2_ami_public",
# CheckTitle holds the title of the check
"CheckTitle": "Ensure there are no EC2 AMIs set as Public.",
# CheckType holds the check's type following the AWS Security Hub ASFF Types, read more here
# https://docs.aws.amazon.com/securityhub/latest/userguide/asff-required-attributes.html#Types
"CheckType": [
"Infrastructure Security"
],
# ServiceName holds the provider service name
"ServiceName": "ec2",
# SubServiceName holds the service's subservice or resource used by the check
"SubServiceName": "ami",
# ResourceIdTemplate holds the unique ID for the resource used by the check
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
# Severity holds the check's severity, always in lowercase (critical, high, medium, low or informational)
"Severity": "critical",
# ResourceType only for AWS, holds the type from here
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html
"ResourceType": "Other",
# Description holds the description of the check; for now it is the same as CheckTitle
"Description": "Ensure there are no EC2 AMIs set as Public.",
# Risk holds the check risk if the result is FAIL
"Risk": "When your AMIs are publicly accessible, they are available in the Community AMIs where everyone with an AWS account can use them to launch EC2 instances. Your AMIs could contain snapshots of your applications (including their data), therefore exposing your snapshots in this manner is not advised.",
# RelatedUrl holds a URL with more information about the check's purpose
"RelatedUrl": "",
# Remediation holds the information to help the practitioner fix the issue in case the check raises a FAIL
"Remediation": {
# Code holds different methods to remediate the FAIL finding
"Code": {
# CLI holds the command in the provider native CLI to remediate it
"CLI": "https://docs.bridgecrew.io/docs/public_8#cli-command",
# NativeIaC holds the native IaC code to remediate it, use "https://docs.bridgecrew.io/docs"
"NativeIaC": "",
# Other holds the other commands, scripts or code to remediate it, use "https://www.trendmicro.com/cloudoneconformity"
"Other": "https://docs.bridgecrew.io/docs/public_8#aws-console",
# Terraform holds the Terraform code to remediate it, use "https://docs.bridgecrew.io/docs"
"Terraform": ""
},
# Recommendation holds the recommendation for this check with a description and a related URL
"Recommendation": {
"Text": "We recommend your EC2 AMIs are not publicly accessible, or generally available in the Community AMIs.",
"Url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/cancel-sharing-an-AMI.html"
}
},
# Categories holds the category or categories where the check can be included, if applied
"Categories": [
"internet-exposed"
],
# DependsOn is not actively used for the moment but it will hold other
# checks which this check depends on
"DependsOn": [],
# RelatedTo is not actively used for the moment but it will hold other
# checks which this check is related to
"RelatedTo": [],
# Notes holds additional information not covered in this file
"Notes": ""
}
```
### Remediation Code
For the Remediation Code we use the following knowledge base to fill it:
- Official documentation for the provider
- https://docs.bridgecrew.io
- https://www.trendmicro.com/cloudoneconformity
- https://github.com/cloudmatos/matos/tree/master/remediations
### RelatedURL and Recommendation
The RelatedURL field must be filled with a URL from the provider's official documentation, like https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sharingamis-intro.html
Also, if it is not present, you can use the Risk and Recommendation texts from the TrendMicro [CloudConformity](https://www.trendmicro.com/cloudoneconformity) guide.
### Python Model
The following is the Python model for the check's metadata model. We use the Pydantic's [BaseModel](https://docs.pydantic.dev/latest/api/base_model/#pydantic.BaseModel) as the parent class.
As of August 5th, 2023, the `Check_Metadata_Model` can be found [here](https://github.com/prowler-cloud/prowler/blob/master/prowler/lib/check/models.py#L34-L56).
```python
class Check_Metadata_Model(BaseModel):
"""Check Metadata Model"""
Provider: str
CheckID: str
CheckTitle: str
CheckType: list[str]
ServiceName: str
SubServiceName: str
ResourceIdTemplate: str
Severity: str
ResourceType: str
Description: str
Risk: str
RelatedUrl: str
Remediation: Remediation
Categories: list[str]
DependsOn: list[str]
RelatedTo: list[str]
Notes: str
# We set the compliance to None to
# store the compliance later if supplied
Compliance: list = None
```

View File

@@ -0,0 +1,8 @@
## Contribute with documentation
We use `mkdocs` to build this Prowler documentation site, so you can easily contribute back with new docs or by improving existing ones.
1. Install `mkdocs` with your favorite package manager.
2. Inside the `prowler` repository folder run `mkdocs serve` and point your browser to `http://localhost:8000` and you will see live changes to your local copy of this documentation site.
3. Make all needed changes to docs or add new documents. To do so just edit existing md files inside `prowler/docs` and if you are adding a new section or file please make sure you add it to `mkdocs.yaml` file in the root folder of the Prowler repo.
4. Once you are done with changes, please send a pull request to us for review and merge. Thank you in advance!
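As a reference, a minimal local setup could look like this (the exact packages may vary; check the `mkdocs.yaml` file in the repo root for the theme and plugins in use):
```shell
pip install mkdocs
mkdocs serve
```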

View File

@@ -0,0 +1,3 @@
# Integration Tests
Coming soon ...

View File

@@ -0,0 +1,3 @@
# Create a new integration
Coming soon ...

View File

@@ -0,0 +1,59 @@
# Developer Guide
You can extend Prowler in many different ways. In most cases you will want to create your own checks and compliance security frameworks; here you can learn how to get started with that. We also cover how to create custom outputs, integrations and more.
## Get the code and install all dependencies
First of all, you need Python 3.9 or higher and pip installed to be able to install all the required dependencies. Once that is satisfied, go ahead and clone the repo:
```
git clone https://github.com/prowler-cloud/prowler
cd prowler
```
For isolation and to avoid conflicts with other environments, we recommend using `poetry`:
```
pip install poetry
```
Then install all dependencies including the ones for developers:
```
poetry install
poetry shell
```
## Contributing with your code or fixes to Prowler
This repo has git pre-commit hooks managed via the [pre-commit](https://pre-commit.com/) tool. [Install](https://pre-commit.com/#install) it however you like, then in the root of this repo run:
```shell
pre-commit install
```
You should get an output like the following:
```shell
pre-commit installed at .git/hooks/pre-commit
```
Before we merge any of your pull requests we run checks on the code. We use the following tools and automation to make sure the code is secure and dependencies are up to date (these should already be installed if you ran `poetry install`):
- [`bandit`](https://pypi.org/project/bandit/) for code security review.
- [`safety`](https://pypi.org/project/safety/) and [`dependabot`](https://github.com/features/security) for dependencies.
- [`hadolint`](https://github.com/hadolint/hadolint) and [`dockle`](https://github.com/goodwithtech/dockle) for our containers security.
- [`Snyk`](https://docs.snyk.io/integrations/snyk-container-integrations/container-security-with-docker-hub-integration) in Docker Hub.
- [`clair`](https://github.com/quay/clair) in Amazon ECR.
- [`vulture`](https://pypi.org/project/vulture/), [`flake8`](https://pypi.org/project/flake8/), [`black`](https://pypi.org/project/black/) and [`pylint`](https://pypi.org/project/pylint/) for formatting and best practices.
You can see all dependencies in file `pyproject.toml`.
## Pull Request Checklist
If you create or review a PR in https://github.com/prowler-cloud/prowler please follow this checklist:
- [ ] Make sure you've read the Prowler Developer Guide at https://docs.prowler.cloud/en/latest/developer-guide/introduction/
- [ ] Are we following the style guide, hence installed all the linters and formatters? Please check https://docs.prowler.cloud/en/latest/developer-guide/introduction/#contributing-with-your-code-or-fixes-to-prowler
- [ ] Are we increasing/decreasing the test coverage? Please, review if we need to include/modify tests for the new code.
- [ ] Are we modifying outputs? Please review it carefully.
- [ ] Do we need to modify the Prowler documentation to reflect the changes introduced?
- [ ] Are we introducing possible breaking changes? Are we modifying a core feature?
## Want some swag as appreciation for your contribution?
If you are like us and you love swag, we are happy to thank you for your contribution with some laptop stickers or whatever other swag we may have at that time. Please, tell us more details and your pull request link in our [Slack workspace here](https://join.slack.com/t/prowler-workspace/shared_invite/zt-1hix76xsl-2uq222JIXrC7Q8It~9ZNog). You can also reach out to Toni de la Fuente on Twitter [here](https://twitter.com/ToniBlyx), his DMs are open.

View File

@@ -0,0 +1,3 @@
# Create a custom output format
Coming soon ...

View File

@@ -0,0 +1,41 @@
# Create a new security compliance framework
## Introduction
If you want to contribute your own security framework, or add a public one, to Prowler, you first need to make sure the required checks are available; if not, you have to create them. Then create a compliance file per provider in `prowler/compliance/<provider>/`, name it `<framework>_<version>_<provider>.json`, and follow the format below to create yours.
## Compliance Framework
Each version of a framework file has the following high-level structure. Note that one requirement can also be called one control, and one requirement can be linked to multiple Prowler checks:
- `Framework`: string. Distinguishing name of the framework, like CIS.
- `Provider`: string. Provider where the framework applies, such as AWS, Azure, OCI, etc.
- `Version`: string. Version of the framework itself, like 1.4 for CIS.
- `Requirements`: array of objects. Includes all requirements or controls with the mapping to Prowler.
- `Requirements_Id`: string. Unique identifier for each requirement within the specific framework.
- `Requirements_Description`: string. Description as in the framework.
- `Requirements_Attributes`: array of objects. Includes all needed attributes for each requirement, like levels, sections, etc. Whatever helps to create a dedicated report with the result of the findings. Attributes should be taken as closely as possible from the framework's own terminology.
- `Requirements_Checks`: array. Prowler checks that are needed to prove this requirement. It can be one or multiple checks. In case no automation is possible, this can be empty.
```
{
"Framework": "<framework>-<provider>",
"Version": "<version>",
"Requirements": [
{
"Id": "<unique-id>",
"Description": "Requiemente full description",
"Checks": [
"Here is the prowler check or checks that is going to be executed"
],
"Attributes": [
{
<Add here your custom attributes.>
}
]
},
...
]
}
```
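For instance, a minimal hypothetical framework file following that template could look like:
```json
{
  "Framework": "MyFramework-aws",
  "Version": "1.0",
  "Requirements": [
    {
      "Id": "1.1",
      "Description": "Ensure there are no EC2 AMIs set as Public.",
      "Checks": [
        "ec2_ami_public"
      ],
      "Attributes": [
        {
          "Section": "Compute",
          "Level": "1"
        }
      ]
    }
  ]
}
```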
Finally, to have a proper output file for your reports, your framework data model has to be created in `prowler/lib/outputs/models.py` and also the CLI table output in `prowler/lib/outputs/compliance.py`.

View File

@@ -0,0 +1,235 @@
# Create a new Provider Service
Here you can find how to create a new service, or to complement an existing one, for a Prowler Provider.
## Introduction
To create a new service, you will need to create a folder inside the specific provider, i.e. `prowler/providers/<provider>/services/<service>/`.
Inside that folder, you MUST create the following files:
- An empty `__init__.py`: to make Python treat this service folder as a package.
- A `<service>_service.py`, containing all the service's logic and API calls.
- A `<service>_client.py`, containing the initialization of the service's class we have just created so that the service's checks can use it.
## Service
Prowler's service structure is as follows, and a service is initialised just by importing its service client in a check.
## Service Base Class
All the Prowler provider's services inherit from a base class depending on the provider used.
- [AWS Service Base Class](https://github.com/prowler-cloud/prowler/blob/22f8855ad7dad2e976dabff78611b643e234beaf/prowler/providers/aws/lib/service/service.py)
- [GCP Service Base Class](https://github.com/prowler-cloud/prowler/blob/22f8855ad7dad2e976dabff78611b643e234beaf/prowler/providers/gcp/lib/service/service.py)
- [Azure Service Base Class](https://github.com/prowler-cloud/prowler/blob/22f8855ad7dad2e976dabff78611b643e234beaf/prowler/providers/azure/lib/service/service.py)
Each class is used to initialize the credentials and the API's clients to be used in the service. If some threading is used it must be coded there.
## Service Class
Due to the complexity and differences of each provider API, we are going to use an example service to guide you through how it can be created.
The following is the `<service>_service.py` file:
```python title="Service Class"
from datetime import datetime
from typing import Optional
# The following import is just for the AWS provider
from botocore.exceptions import ClientError
# To use the Pydantic's BaseModel
from pydantic import BaseModel
# Prowler logging library
from prowler.lib.logger import logger
# Prowler resource filter, only for the AWS provider
from prowler.lib.scan_filters.scan_filters import is_resource_filtered
# Provider parent class
from prowler.providers.<provider>.lib.service.service import ServiceParentClass
# Create a class for the Service
################## <Service>
class <Service>(ServiceParentClass):
def __init__(self, audit_info):
# Call Service Parent Class __init__
# We use the __class__.__name__ to get it automatically
# from the Service Class name but you can pass a custom
# string if the provider's API service name is different
super().__init__(__class__.__name__, audit_info)
# Create an empty dictionary of items to be gathered,
# using the unique ID as the dictionary key
# e.g., instances
self.<items> = {}
# If you can parallelize by regions or locations
# you can use the __threading_call__ function
# available in the Service Parent Class
self.__threading_call__(self.__describe_<items>__)
# Optionally you can create another function to retrieve
# more data about each item, without parallelisation
self.__describe_<item>__()
def __describe_<items>__(self, regional_client):
"""Get ALL <Service> <Items>"""
logger.info("<Service> - Describing <Items>...")
# We MUST include a try/except block in each function
try:
# Call to the provider API to retrieve the data we want
describe_<items>_paginator = regional_client.get_paginator("describe_<items>")
# Paginator to get every item
for page in describe_<items>_paginator.paginate():
# Another try/except within the loop so we can keep looping
# if something unexpected happens
try:
for <item> in page["<Items>"]:
# For the AWS provider we MUST include the following lines to decide whether
# to retrieve data for the resource passed as an argument using the --resource-arn flag
if not self.audit_resources or (
is_resource_filtered(<item>["<item_arn>"], self.audit_resources)
):
# Then we have to include the retrieved resource in the object
# previously created
self.<items>[<item_unique_id>] = <Item>(
arn=<item>["<item_arn>"],
name=<item>["<item_name>"],
tags=<item>.get("Tags", []),
region=regional_client.region,
)
except Exception as error:
logger.error(
f"{<provider_specific_field>} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
# In the except part we have to use the following code to log the errors
except Exception as error:
# Depending on each provider we can use the following fields in the logger:
# - AWS: regional_client.region or self.region
# - GCP: project_id and location
# - Azure: subscription
logger.error(
f"{<provider_specific_field>} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
def __describe_<item>__(self):
"""Get Details for a <Service> <Item>"""
logger.info("<Service> - Describing <Item> to get specific details...")
# We MUST include a try/except block in each function
try:
# Loop over the items retrieved in the previous function
# (self.<items> is a dictionary, so iterate over its values)
for <item> in self.<items>.values():
# When we perform calls to the Provider API within a for loop we have
# to include another try/except block because in the cloud there are
# ephemeral resources that can be deleted at the time we are checking them
try:
<item>_details = self.regional_clients[<item>.region].describe_<item>(
<Attribute>=<item>.name
)
# For example, check if the item is Public. It is important that when we are
# getting values from a dictionary we use the "dict.get()"
# function with a default value in case the key is not present
<item>.public = <item>_details.get("Public", False)
# In this except block, for example for the AWS Provider we can use
# the botocore.ClientError exception and check for a specific error code
# to raise a WARNING instead of an ERROR if some resource is not present.
except ClientError as error:
if error.response["Error"]["Code"] == "InvalidInstanceID.NotFound":
logger.warning(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
else:
logger.error(
f"{<provider_specific_field>} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
continue
# In the except part we have to use the following code to log the errors
except Exception as error:
# Depending on each provider we can use the following fields in the logger:
# - AWS: regional_client.region or self.region
# - GCP: project_id and location
# - Azure: subscription
logger.error(
f"{<item>.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
```
### Service Models
For each class object we need to model we use the Pydantic's [BaseModel](https://docs.pydantic.dev/latest/api/base_model/#pydantic.BaseModel) to take advantage of the data validation.
```python title="Service Model"
# In each service class we have to create some classes using
# the Pydantic's BaseModel for the resources we want to audit.
class <Item>(BaseModel):
"""<Item> holds a <Service> <Item>"""
arn: str
"""<Items>[].arn"""
name: str
"""<Items>[].name"""
region: str
"""<Items>[].region"""
public: bool
"""<Items>[].public"""
# We can create Optional attributes set to None by default
tags: Optional[list]
"""<Items>[].tags"""
```
### Service Objects
In the service each group of resources should be created as a Python [dictionary](https://docs.python.org/3/tutorial/datastructures.html#dictionaries). This is because we are performing lookups all the time and the Python dictionary lookup has [O(1) complexity](https://en.wikipedia.org/wiki/Big_O_notation#Orders_of_common_functions).
We MUST set as the dictionary key a unique ID, like the resource Unique ID or ARN.
Example:
```python
self.vpcs = {}
self.vpcs["vpc-01234567890abcdef"] = VPC_Object_Class()
```
## Service Client
Each Prowler service requires a service client to use the service in the checks.
The following is the `<service>_client.py` containing the initialization of the service's class we have just created so the service's checks can use them:
```python
from prowler.providers.<provider>.lib.audit_info.audit_info import audit_info
from prowler.providers.<provider>.services.<service>.<service>_service import <Service>
<service>_client = <Service>(audit_info)
```
## Permissions
It is really important to check if the current Prowler permissions for each provider are enough to implement a new service. If we need to include more, please refer to the following documentation and update it:
- AWS: https://docs.prowler.cloud/en/latest/getting-started/requirements/#aws-authentication
- Azure: https://docs.prowler.cloud/en/latest/getting-started/requirements/#permissions
- GCP: https://docs.prowler.cloud/en/latest/getting-started/requirements/#gcp-authentication

View File

@@ -0,0 +1,592 @@
# Unit Tests
The unit tests for the Prowler checks vary between the supported providers.
Here are some good reads about unit testing, and things we've learned throughout the process.
**Python Testing**
- https://docs.python-guide.org/writing/tests/
**Where to patch**
- https://docs.python.org/3/library/unittest.mock.html#where-to-patch
- https://stackoverflow.com/questions/893333/multiple-variables-in-a-with-statement
- https://docs.python.org/3/reference/compound_stmts.html#the-with-statement
**Utils to trace mocking and test execution**
- https://news.ycombinator.com/item?id=36054868
- https://docs.python.org/3/library/sys.html#sys.settrace
- https://github.com/kunalb/panopticon
## General Recommendations
When creating tests for a provider's checks we follow these guidelines, trying to cover as many test scenarios as possible:
1. Create a test without resources to generate 0 findings, because Prowler will generate 0 findings if a service does not contain the resources the check audits.
2. Create tests to generate both a `PASS` and a `FAIL` result.
3. Create tests with more than 1 resource to evaluate how the check behaves and whether the number of findings is right.
## How to run Prowler tests
To run the Prowler test suite you need to install the testing dependencies already included in the `pyproject.toml` file. If you haven't installed them yet, please read the developer guide introduction [here](./introduction.md#get-the-code-and-install-all-dependencies).
Then in the project's root path execute `pytest -n auto -vvv -s -x` or use the `Makefile` with `make test`.
Other commands to run tests:
- Run tests for a provider: `pytest -n auto -vvv -s -x tests/providers/<provider>/services`
- Run tests for a provider service: `pytest -n auto -vvv -s -x tests/providers/<provider>/services/<service>`
- Run tests for a provider check: `pytest -n auto -vvv -s -x tests/providers/<provider>/services/<service>/<check>`
> Refer to the [pytest documentation](https://docs.pytest.org/en/7.1.x/getting-started.html) for more information.
## AWS
For the AWS provider we have several ways to test a Prowler check, based on the following criteria:
> Note: We use and contribute to the [Moto](https://github.com/getmoto/moto) library which allows us to easily mock out tests based on AWS infrastructure. **It's awesome!**
- AWS API calls covered by [Moto](https://github.com/getmoto/moto):
- Service tests with `@mock_<service>`
- Checks tests with `@mock_<service>`
- AWS API calls not covered by Moto:
- Service test with `mock_make_api_call`
- Checks tests with [MagicMock](https://docs.python.org/3/library/unittest.mock.html#unittest.mock.MagicMock)
- AWS API calls partially covered by Moto:
- Service test with `@mock_<service>` and `mock_make_api_call`
- Checks tests with `@mock_<service>` and `mock_make_api_call`
In the following section we are going to explain all of the above scenarios with examples. The main difference between those scenarios comes from whether the [Moto](https://github.com/getmoto/moto) library covers the AWS API calls made by the service. You can check the covered API calls [here](https://github.com/getmoto/moto/blob/master/IMPLEMENTATION_COVERAGE.md).
An important point for AWS testing is that each test MUST have a unique `audit_info`, which is the key object during the AWS execution, to keep the test executions isolated.
Check the [Audit Info](./audit-info.md) section to get more details.
```python
# We need to import the AWS_Audit_Info and the Audit_Metadata
# to set the audit_info to call AWS APIs
from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info
from prowler.providers.common.models import Audit_Metadata
AWS_ACCOUNT_NUMBER = "123456789012"
def set_mocked_audit_info(self):
audit_info = AWS_Audit_Info(
session_config=None,
original_session=None,
audit_session=session.Session(
profile_name=None,
botocore_session=None,
),
audit_config=None,
audited_account=AWS_ACCOUNT_NUMBER,
audited_account_arn=f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:root",
audited_user_id=None,
audited_partition="aws",
audited_identity_arn=None,
profile=None,
profile_region=None,
credentials=None,
assumed_role_info=None,
audited_regions=["us-east-1", "eu-west-1"],
organizations_metadata=None,
audit_resources=None,
mfa_enabled=False,
audit_metadata=Audit_Metadata(
services_scanned=0,
expected_checks=[],
completed_checks=0,
audit_progress=0,
),
)
return audit_info
```
### Checks
For the AWS tests examples we are going to use the tests for the `iam_password_policy_uppercase` check.
This section is going to be divided based on the API coverage of the [Moto](https://github.com/getmoto/moto) library.
#### API calls covered
If the [Moto](https://github.com/getmoto/moto) library covers the API calls we want to test, we can use the `@mock_<service>` decorator. This will mock out all the API calls made to AWS, keeping the state within the decorated code, in this case the test function.
```python
# We need to import the unittest.mock to allow us to patch some objects
# not to use shared ones between test, hence to isolate the test
from unittest import mock
# Boto3 client and session to call the AWS APIs
from boto3 import client, session
# Moto decorator for the IAM service we want to mock
from moto import mock_iam
# Constants used
AWS_ACCOUNT_NUMBER = "123456789012"
AWS_REGION = "us-east-1"
# We always name the test classes like Test_<check_name>
class Test_iam_password_policy_uppercase:
# We include the Moto decorator for the service we want to use
# You can include more than one if two or more services are
# involved in the test
@mock_iam
# We name the tests with test_<service>_<check_name>_<test_action>
def test_iam_password_policy_no_uppercase_flag(self):
# First, we have to create an IAM client
iam_client = client("iam", region_name=AWS_REGION)
# Then, since all AWS accounts have a password
# policy, we set RequireUppercaseCharacters to False
iam_client.update_account_password_policy(RequireUppercaseCharacters=False)
# We set a mocked audit_info for AWS not to share the same audit state
# between checks
current_audit_info = self.set_mocked_audit_info()
# The Prowler service import MUST be made within the decorated
# code not to make real API calls to the AWS service.
from prowler.providers.aws.services.iam.iam_service import IAM
# Prowler for AWS uses a shared object called `current_audit_info` where it stores
# the audit's state, credentials and configuration.
with mock.patch(
"prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
new=current_audit_info,
), mock.patch(
# We have to mock also the iam_client from the check to enforce that the iam_client used is the one
# created within this check because patch != import, and if you execute tests in parallel some objects
# can be already initialised, hence the check won't be isolated
"prowler.providers.aws.services.iam.iam_password_policy_uppercase.iam_password_policy_uppercase.iam_client",
new=IAM(current_audit_info),
):
# We import the check within the two mocks not to initialise the iam_client with some shared information from
# the current_audit_info or the IAM service.
from prowler.providers.aws.services.iam.iam_password_policy_uppercase.iam_password_policy_uppercase import (
iam_password_policy_uppercase,
)
# Once imported, we only need to instantiate the check's class
check = iam_password_policy_uppercase()
# And then, call the execute() function to run the check
# against the IAM client we've set up.
result = check.execute()
# Last but not least, we need to assert all the fields
# from the check's results
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].status_extended == "IAM password policy does not require at least one uppercase letter."
assert result[0].resource_arn == f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:root"
assert result[0].resource_id == AWS_ACCOUNT_NUMBER
assert result[0].resource_tags == []
assert result[0].region == AWS_REGION
```
#### API calls not covered
If the service for the checks we want to test is not covered by Moto, we have to inject the objects in the service client using [MagicMock](https://docs.python.org/3/library/unittest.mock.html#unittest.mock.MagicMock). As we pointed out above, we cannot instantiate the service since it would make real calls to the AWS APIs.
> The following example uses the IAM GetAccountPasswordPolicy which is covered by Moto but this is only for demonstration purposes.
The following code shows how to use MagicMock to create the service objects.
```python
# We need to import the unittest.mock to allow us to patch some objects
# not to use shared ones between test, hence to isolate the test
from unittest import mock
# Constants used
AWS_ACCOUNT_NUMBER = "123456789012"
AWS_REGION = "us-east-1"
# We always name the test classes like Test_<check_name>
class Test_iam_password_policy_uppercase:
# We name the tests with test_<service>_<check_name>_<test_action>
def test_iam_password_policy_no_uppercase_flag(self):
# Mocked client with MagicMock
mocked_iam_client = mock.MagicMock
# Since the IAM Password Policy has its own model we have to import it
from prowler.providers.aws.services.iam.iam_service import PasswordPolicy
# Create the mock PasswordPolicy object
mocked_iam_client.password_policy = PasswordPolicy(
length=5,
symbols=True,
numbers=True,
# We set the value to False to test the check
uppercase=False,
lowercase=True,
allow_change=False,
expiration=True,
)
# We set a mocked audit_info for AWS not to share the same audit state
# between checks
current_audit_info = self.set_mocked_audit_info()
# In this scenario we have to mock also the IAM service and the iam_client from the check to enforce
# that the iam_client used is the one created within this check because patch != import, and if you
# execute tests in parallel some objects can be already initialised, hence the check won't be isolated.
# In this case we don't use the Moto decorator, we use the mocked IAM client for both objects
with mock.patch(
"prowler.providers.aws.services.iam.iam_service.IAM",
new=mocked_iam_client,
), mock.patch(
"prowler.providers.aws.services.iam.iam_client.iam_client",
new=mocked_iam_client,
):
# We import the check within the two mocks not to initialise the iam_client with some shared information from
# the current_audit_info or the IAM service.
from prowler.providers.aws.services.iam.iam_password_policy_uppercase.iam_password_policy_uppercase import (
iam_password_policy_uppercase,
)
# Once imported, we only need to instantiate the check's class
check = iam_password_policy_uppercase()
# And then, call the execute() function to run the check
# against the IAM client we've set up.
result = check.execute()
# Last but not least, we need to assert all the fields
# from the check's results
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].status_extended == "IAM password policy does not require at least one uppercase letter."
assert result[0].resource_arn == f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:root"
assert result[0].resource_id == AWS_ACCOUNT_NUMBER
assert result[0].resource_tags == []
assert result[0].region == AWS_REGION
```
As can be seen in the above scenarios, the check execution should always happen within the context of the mocked/patched objects. This way we ensure it reviews only the objects created within the scope of the test.
#### API calls partially covered
If the API calls we want to use in the service are partially covered by the Moto decorator, we have to create our own mocked API calls to use in combination with it.
To do so, you need to mock the `botocore.client.BaseClient._make_api_call` function, which is the Boto3 function in charge of making the real API call to the AWS APIs, using [`mock.patch`](https://docs.python.org/3/library/unittest.mock.html#patch):
```python
import boto3
import botocore
from unittest.mock import patch
from moto import mock_iam
# Original botocore _make_api_call function
orig = botocore.client.BaseClient._make_api_call
# Mocked botocore _make_api_call function
def mock_make_api_call(self, operation_name, kwarg):
# As you can see the operation_name has the get_account_password_policy snake_case form but
# we are using the GetAccountPasswordPolicy form.
# Rationale -> https://github.com/boto/botocore/blob/develop/botocore/client.py#L810:L816
if operation_name == 'GetAccountPasswordPolicy':
return {
'PasswordPolicy': {
'MinimumPasswordLength': 123,
'RequireSymbols': True,
'RequireNumbers': True,
# Example values; RequireUppercaseCharacters is set to False so the
# related check can be exercised against a failing policy
'RequireUppercaseCharacters': False,
'RequireLowercaseCharacters': True,
'AllowUsersToChangePassword': True,
'ExpirePasswords': True,
'MaxPasswordAge': 123,
'PasswordReusePrevention': 123,
'HardExpiry': False
}
}
# If we don't want to patch the API call
return orig(self, operation_name, kwarg)
# We always name the test classes like Test_<check_name>
class Test_iam_password_policy_uppercase:
# We include the custom API call mock decorator for the service we want to use
@patch("botocore.client.BaseClient._make_api_call", new=mock_make_api_call)
# We include also the IAM Moto decorator for the API calls supported
@mock_iam
# We name the tests with test_<service>_<check_name>_<test_action>
def test_iam_password_policy_no_uppercase_flag(self):
# Check the previous section to see the check test, since it is the same
```
Note that this example does not use Moto, to keep it simple, but if you use any `moto` decorators in addition to the patch, the call to `orig(self, operation_name, kwarg)` will be intercepted by Moto.
> The above code comes from https://docs.getmoto.org/en/latest/docs/services/patching_other_services.html
#### Mocking more than one service
If the test you are creating belongs to a check that uses more than one provider service, you should mock each of the services used. For example, the check `cloudtrail_logs_s3_bucket_access_logging_enabled` requires the CloudTrail and the S3 clients, hence the services' mock part of the test will be as follows:
```python
with mock.patch(
"prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
new=mock_audit_info,
), mock.patch(
"prowler.providers.aws.services.cloudtrail.cloudtrail_logs_s3_bucket_access_logging_enabled.cloudtrail_logs_s3_bucket_access_logging_enabled.cloudtrail_client",
new=Cloudtrail(mock_audit_info),
), mock.patch(
"prowler.providers.aws.services.cloudtrail.cloudtrail_logs_s3_bucket_access_logging_enabled.cloudtrail_logs_s3_bucket_access_logging_enabled.s3_client",
new=S3(mock_audit_info),
):
```
As you can see in the above code, it is required to mock the AWS audit info and both services used.
#### Patching vs. Importing
This is an important topic within the Prowler checks' unit testing. Due to the dynamic nature of the checks' loading, the process of importing the service client from a check is the following:
1. `<check>.py`:
```python
from prowler.providers.<provider>.services.<service>.<service>_client import <service>_client
```
2. `<service>_client.py`:
```python
from prowler.providers.<provider>.lib.audit_info.audit_info import audit_info
from prowler.providers.<provider>.services.<service>.<service>_service import <SERVICE>
<service>_client = <SERVICE>(audit_info)
```
Because of the above import path, it is not the same to patch the following objects: if you run a bunch of tests, in parallel or not, some clients may already have been instantiated by another check, and your test execution would then use another test's service instance:
- `<service>_client` imported at `<check>.py`
- `<service>_client` initialised at `<service>_client.py`
- `<SERVICE>` imported at `<service>_client.py`
A useful read about this topic can be found in the following article: https://stackoverflow.com/questions/8658043/how-to-mock-an-import
#### Different ways to mock the service client
##### Mocking the service client at the service client level
Mocking a service client using the following code ...
```python title="Mocking the service_client"
with mock.patch(
"prowler.providers.<provider>.lib.audit_info.audit_info.audit_info",
new=audit_info,
), mock.patch(
"prowler.providers.aws.services.<service>.<check>.<check>.<service>_client",
new=<SERVICE>(audit_info),
):
```
will cause the service to be initialised twice:
1. When `<SERVICE>(audit_info)` is mocked out using `mock.patch`, to have the object ready for the patching.
2. At `<service>_client.py` when we are patching it, since `mock.patch` needs to go to that object and initialise it, hence `<SERVICE>(audit_info)` will be called again.
Then, when we import the `<service>_client.py` at `<check>.py`, since we are mocking where the object is used, Python will use the mocked one.
In the [next section](./unit-testing.md#mocking-the-service-and-the-service-client-at-the-service-client-level) you will see an improved version to mock objects.
##### Mocking the service and the service client at the service client level
Mocking a service client using the following code ...
```python title="Mocking the service and the service_client"
with mock.patch(
"prowler.providers.<provider>.lib.audit_info.audit_info.audit_info",
new=audit_info,
), mock.patch(
"prowler.providers.aws.services.<service>.<SERVICE>",
new=<SERVICE>(audit_info),
) as service_client, mock.patch(
"prowler.providers.aws.services.<service>.<service>_client.<service>_client",
new=service_client,
):
```
will cause the service to be initialised only once, when `<SERVICE>(audit_info)` is mocked out using `mock.patch`.
Then, at the check level, when Python tries to import the client with `from prowler.providers.<provider>.services.<service>.<service>_client import <service>_client`, since it is already mocked out, the execution will continue using the `service_client` without getting into `<service>_client.py`.
### Services
For testing the AWS services we have to follow the same logic as with the AWS checks: we have to check whether the AWS API calls made by the service are covered by Moto, and we have to test the service `__init__` to verify that the information is correctly retrieved.
The service tests could act as *Integration Tests*, since we test how the service retrieves the information from the provider, but because Moto or the custom mock objects mock those calls, these tests fall under *Unit Tests*.
Please refer to the [AWS checks tests](./unit-testing.md#checks) for more information on how to create tests and check the existing services tests [here](https://github.com/prowler-cloud/prowler/tree/master/tests/providers/aws/services).
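As a minimal sketch (assuming the IAM service exposes the `password_policy` object used in the check tests above, and reusing the `set_mocked_audit_info` helper shown earlier):
```python
from boto3 import client
from moto import mock_iam

AWS_REGION = "us-east-1"


class Test_IAM_Service:
    @mock_iam
    def test_iam_service_password_policy(self):
        # Create the AWS resource with Moto before instantiating the service
        iam = client("iam", region_name=AWS_REGION)
        iam.update_account_password_policy(RequireUppercaseCharacters=False)

        audit_info = self.set_mocked_audit_info()

        # Import and instantiate the service inside the mocked context
        from prowler.providers.aws.services.iam.iam_service import IAM

        iam_service = IAM(audit_info)

        # Assert that the service __init__ retrieved the information correctly
        assert iam_service.password_policy.uppercase is False
```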
## GCP
### Checks
For the GCP Provider we don't have any library to mock out the API calls we use. So in this scenario we inject the objects in the service client using [MagicMock](https://docs.python.org/3/library/unittest.mock.html#unittest.mock.MagicMock).
The following code shows how to use MagicMock to create the service objects for a GCP check test.
```python
# We need to import the unittest.mock to allow us to patch some objects
# not to use shared ones between test, hence to isolate the test
from unittest import mock
# GCP Constants
GCP_PROJECT_ID = "123456789012"
# We are going to create a test for the compute_firewall_rdp_access_from_the_internet_allowed check
class Test_compute_firewall_rdp_access_from_the_internet_allowed:
# We name the tests with test_<service>_<check_name>_<test_action>
def test_compute_compute_firewall_rdp_access_from_the_internet_allowed_one_compliant_rule_with_valid_port(self):
# Mocked client with MagicMock
compute_client = mock.MagicMock
# Assign GCP client configuration
compute_client.project_ids = [GCP_PROJECT_ID]
compute_client.region = "global"
# Import the service resource model to create the mocked object
from prowler.providers.gcp.services.compute.compute_service import Firewall
# Create the custom Firewall object to be tested
firewall = Firewall(
name="test",
id="1234567890",
source_ranges=["0.0.0.0/0"],
direction="INGRESS",
allowed_rules=[{"IPProtocol": "tcp", "ports": ["443"]}],
project_id=GCP_PROJECT_ID,
)
compute_client.firewalls = [firewall]
# In this scenario we have to mock also the Compute service and the compute_client from the check
# to enforce that the compute_client used is the one created within this check because patch != import,
# and if you execute tests in parallel some objects can be already initialised, hence the check won't be isolated.
# In this case we don't use the Moto decorator, we use the mocked Compute client for both objects
with mock.patch(
"prowler.providers.gcp.services.compute.compute_service.Compute",
new=compute_client,
), mock.patch(
"prowler.providers.gcp.services.compute.compute_client.compute_client",
new=compute_client,
):
# We import the check within the two mocks not to initialise the compute_client with some shared information from
# the current_audit_info or the Compute service.
from prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed import (
compute_firewall_rdp_access_from_the_internet_allowed,
)
# Once imported, we only need to instantiate the check's class
check = compute_firewall_rdp_access_from_the_internet_allowed()
# And then, call the execute() function to run the check
# against the Compute client we've set up.
result = check.execute()
# Last but not least, we need to assert all the fields
# from the check's results
assert len(result) == 1
assert result[0].status == "PASS"
assert result[0].status_extended == f"Firewall {firewall.name} does not expose port 3389 (RDP) to the internet."
assert result[0].resource_name == firewall.name
assert result[0].resource_id == firewall.id
assert result[0].project_id == GCP_PROJECT_ID
assert result[0].location == compute_client.region
```
### Services
Coming soon ...
## Azure
### Checks
For the Azure Provider we don't have any library to mock out the API calls we use. So in this scenario we inject the objects in the service client using [MagicMock](https://docs.python.org/3/library/unittest.mock.html#unittest.mock.MagicMock).
The following code shows how to use MagicMock to create the service objects for an Azure check test.
```python
# We need to import the unittest.mock to allow us to patch some objects
# not to use shared ones between test, hence to isolate the test
from unittest import mock
from uuid import uuid4
# Azure Constants
AZURE_SUBSCRIPTION = str(uuid4())
# We are going to create a test for the Test_defender_ensure_defender_for_arm_is_on check
class Test_defender_ensure_defender_for_arm_is_on:
# We name the tests with test_<service>_<check_name>_<test_action>
def test_defender_defender_ensure_defender_for_arm_is_on_arm_pricing_tier_not_standard(self):
resource_id = str(uuid4())
# Mocked client with MagicMock
defender_client = mock.MagicMock
# Import the service resource model to create the mocked object
from prowler.providers.azure.services.defender.defender_service import Defender_Pricing
# Create the custom Defender object to be tested
defender_client.pricings = {
AZURE_SUBSCRIPTION: {
"Arm": Defender_Pricing(
resource_id=resource_id,
pricing_tier="Not Standard",
free_trial_remaining_time=0,
)
}
}
# In this scenario we have to mock also the Defender service and the defender_client from the check
# to enforce that the defender_client used is the one created within this check because patch != import,
# and if you execute tests in parallel some objects can be already initialised, hence the check won't be isolated.
# In this case we don't use the Moto decorator, we use the mocked Defender client for both objects
with mock.patch(
"prowler.providers.azure.services.defender.defender_service.Defender",
new=defender_client,
), mock.patch(
"prowler.providers.azure.services.defender.defender_client.defender_client",
new=defender_client,
):
# We import the check within the two mocks not to initialise the defender_client with some shared information from
# the current_audit_info or the Defender service.
from prowler.providers.azure.services.defender.defender_ensure_defender_for_arm_is_on.defender_ensure_defender_for_arm_is_on import (
defender_ensure_defender_for_arm_is_on,
)
# Once imported, we only need to instantiate the check's class
check = defender_ensure_defender_for_arm_is_on()
# And then, call the execute() function to run the check
# against the Defender client we've set up.
result = check.execute()
# Last but not least, we need to assert all the fields
# from the check's results
assert len(result) == 1
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== f"Defender plan Defender for ARM from subscription {AZURE_SUBSCRIPTION} is set to OFF (pricing tier not standard)"
)
assert result[0].subscription == AZURE_SUBSCRIPTION
assert result[0].resource_name == "Defender plan ARM"
assert result[0].resource_id == resource_id
```
### Services
Coming soon ...

View File

@@ -1,6 +1,6 @@
# Requirements
Prowler has been written in Python using the [AWS SDK (Boto3)](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html#) and [Azure SDK](https://learn.microsoft.com/en-us/python/api/overview/azure/?view=azure-python).
Prowler has been written in Python using the [AWS SDK (Boto3)](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html#), [Azure SDK](https://azure.github.io/azure-sdk-for-python/) and [GCP API Python Client](https://github.com/googleapis/google-api-python-client/).
## AWS
Since Prowler uses AWS Credentials under the hood, you can follow any authentication method as described [here](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-precedence).
@@ -23,13 +23,20 @@ export AWS_SESSION_TOKEN="XXXXXXXXX"
Those credentials must be associated to a user or role with proper permissions to do all checks. To make sure, add the following AWS managed policies to the user or role being used:
- arn:aws:iam::aws:policy/SecurityAudit
- arn:aws:iam::aws:policy/job-function/ViewOnlyAccess
- `arn:aws:iam::aws:policy/SecurityAudit`
- `arn:aws:iam::aws:policy/job-function/ViewOnlyAccess`
> Moreover, some read-only additional permissions are needed for several checks, make sure you attach also the custom policy [prowler-additions-policy.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-additions-policy.json) to the role you are using.
> If you want Prowler to send findings to [AWS Security Hub](https://aws.amazon.com/security-hub), make sure you also attach the custom policy [prowler-security-hub.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-security-hub.json).
### Multi-Factor Authentication
If your IAM entity enforces MFA you can use `--mfa` and Prowler will ask you to input the following values to get a new session:
- ARN of your MFA device
- TOTP (Time-Based One-Time Password)
## Azure
Prowler for Azure supports the following authentication types:
@@ -52,7 +59,7 @@ export AZURE_CLIENT_SECRET="XXXXXXX"
If you try to execute Prowler with the `--sp-env-auth` flag and those variables are empty or not exported, the execution is going to fail.
### AZ CLI / Browser / Managed Identity authentication
The other three cases does not need additional configuration, `--az-cli-auth` and `--managed-identity-auth` are automated options, `--browser-auth` needs the user to authenticate using the default browser to start the scan.
The other three cases do not need additional configuration; `--az-cli-auth` and `--managed-identity-auth` are automated options. To use `--browser-auth` the user needs to authenticate against Azure using the default browser to start the scan, and `--tenant-id` is also required.
### Permissions
@@ -79,3 +86,17 @@ Regarding the subscription scope, Prowler by default scans all the subscriptions
- `Security Reader`
- `Reader`
## Google Cloud
### GCP Authentication
Prowler will follow the same credentials search as [Google authentication libraries](https://cloud.google.com/docs/authentication/application-default-credentials#search_order):
1. [GOOGLE_APPLICATION_CREDENTIALS environment variable](https://cloud.google.com/docs/authentication/application-default-credentials#GAC)
2. [User credentials set up by using the Google Cloud CLI](https://cloud.google.com/docs/authentication/application-default-credentials#personal)
3. [The attached service account, returned by the metadata server](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa)
Those credentials must be associated with a user or service account with proper permissions to perform all checks. To make sure, add the `Viewer` role to the member associated with the credentials.
> By default, `prowler` will scan all accessible GCP Projects, use flag `--project-ids` to specify the projects to be scanned.
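For example, to use the first option in that search order you could point the environment variable at a service account key file (the path is illustrative) before running Prowler:
```console
export GOOGLE_APPLICATION_CREDENTIALS="/path/to/service-account-key.json"
prowler gcp
```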

Binary image files changed: new file docs/img/output-html.png plus other updated docs screenshots (not shown).

View File

@@ -10,13 +10,13 @@
For **Prowler v2 Documentation**, please go [here](https://github.com/prowler-cloud/prowler/tree/2.12.0) to the branch and its README.md.
- You are currently in the **Getting Started** section where you can find general information and requirements to help you start with the tool.
- In the [Tutorials](tutorials/overview) section you will see how to take advantage of all the features in Prowler.
- In the [Contact Us](contact) section you can find how to reach us out in case of technical issues.
- In the [About](about) section you will find more information about the Prowler team and license.
- In the [Tutorials](./tutorials/misc.md) section you will see how to take advantage of all the features in Prowler.
- In the [Contact Us](./contact.md) section you can find how to reach out to us in case of technical issues.
- In the [About](./about.md) section you will find more information about the Prowler team and license.
## About Prowler
**Prowler** is an Open Source security tool to perform AWS and Azure security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness.
**Prowler** is an Open Source security tool to perform AWS, Azure and Google Cloud security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness.
It contains hundreds of controls covering CIS, PCI-DSS, ISO27001, GDPR, HIPAA, FFIEC, SOC2, AWS FTR, ENS and custom security frameworks.
@@ -40,7 +40,7 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler-clo
* `Python >= 3.9`
* `Python pip >= 3.9`
* AWS and/or Azure credentials
* AWS, GCP and/or Azure credentials
_Commands_:
@@ -54,7 +54,7 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler-clo
_Requirements_:
* Have `docker` installed: https://docs.docker.com/get-docker/.
* AWS and/or Azure credentials
* AWS, GCP and/or Azure credentials
* In the command below, change `-v` to your local directory path in order to access the reports.
_Commands_:
@@ -71,7 +71,7 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler-clo
_Requirements for Ubuntu 20.04.3 LTS_:
* AWS and/or Azure credentials
* AWS, GCP and/or Azure credentials
* Install python 3.9 with: `sudo apt-get install python3.9`
* Remove python 3.8 to avoid conflicts if you can: `sudo apt-get remove python3.8`
* Make sure you have the python3 distutils package installed: `sudo apt-get install python3-distutils`
@@ -87,12 +87,29 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler-clo
prowler -v
```
=== "GitHub"
_Requirements for Developers_:
* AWS, GCP and/or Azure credentials
* `git`, `Python >= 3.9`, `pip` and `poetry` installed (`pip install poetry`)
_Commands_:
```
git clone https://github.com/prowler-cloud/prowler
cd prowler
poetry shell
poetry install
python prowler.py -v
```
=== "Amazon Linux 2"
_Requirements_:
* AWS and/or Azure credentials
* Latest Amazon Linux 2 should come with Python 3.9 already installed however it may need pip. Install Python pip 3.9 with: `sudo dnf install -y python3-pip`.
* AWS, GCP and/or Azure credentials
* Latest Amazon Linux 2 should come with Python 3.9 already installed; however, it may need pip. Install Python pip 3.9 with: `sudo yum install -y python3-pip`.
* Make sure setuptools for python is already installed with: `pip3 install setuptools`
_Commands_:
@@ -103,28 +120,32 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler-clo
prowler -v
```
=== "AWS CloudShell"
Prowler can be easily executed in AWS CloudShell but it has some prerequisites to be able to do so. AWS CloudShell is a container running with `Amazon Linux release 2 (Karoo)` that comes with Python 3.7; since Prowler requires Python >= 3.9 we need to first install a newer version of Python. Follow the steps below to successfully execute Prowler v3 in AWS CloudShell:
=== "Brew"
_Requirements_:
* First install all dependences and then Python, in this case we need to compile it because there is not a package available at the time this document is written:
```
sudo yum -y install gcc openssl-devel bzip2-devel libffi-devel
wget https://www.python.org/ftp/python/3.9.16/Python-3.9.16.tgz
tar zxf Python-3.9.16.tgz
cd Python-3.9.16/
./configure --enable-optimizations
sudo make altinstall
python3.9 --version
cd
```
* `Brew` installed in your Mac or Linux
* AWS, GCP and/or Azure credentials
_Commands_:
* Once Python 3.9 is available we can install Prowler from pip:
``` bash
brew install prowler
prowler -v
```
pip3.9 install prowler
=== "AWS CloudShell"
After the migration of AWS CloudShell from Amazon Linux 2 to Amazon Linux 2023 [[1]](https://aws.amazon.com/about-aws/whats-new/2023/12/aws-cloudshell-migrated-al2023/) [[2]](https://docs.aws.amazon.com/cloudshell/latest/userguide/cloudshell-AL2023-migration.html), there is no longer a need to manually compile Python 3.9 as it is already included in AL2023. Prowler can thus be easily installed following the Generic method of installation via pip. Follow the steps below to successfully execute Prowler v3 in AWS CloudShell:
_Requirements_:
* Open AWS CloudShell `bash`.
_Commands_:
```
pip install prowler
prowler -v
```
@@ -154,7 +175,7 @@ The available versions of Prowler are the following:
The container images are available here:
- [DockerHub](https://hub.docker.com/r/toniblyx/prowler/tags)
- [AWS Public ECR](https://gallery.ecr.aws/o4g1s5r6/prowler)
- [AWS Public ECR](https://gallery.ecr.aws/prowler-cloud/prowler)
## High level architecture
@@ -163,14 +184,14 @@ You can run Prowler from your workstation, an EC2 instance, Fargate or any other
![Architecture](img/architecture.png)
## Basic Usage
To run Prowler, you will need to specify the provider (e.g aws or azure):
To run Prowler, you will need to specify the provider (e.g. aws, gcp or azure):
> If no provider is specified, AWS will be used for backward compatibility with most of the v2 options.
```console
prowler <provider>
```
![Prowler Execution](img/short-display.png)
> Running the `prowler` command without options will use your environment variable credentials, see [Requirements](getting-started/requirements/) section to review the credentials settings.
> Running the `prowler` command without options will use your environment variable credentials; see the [Requirements](./getting-started/requirements.md) section to review the credentials settings.
If you miss the former output you can use `--verbose` but Prowler v3 is smoking fast, so you won't see much ;)
@@ -195,6 +216,7 @@ For executing specific checks or services you can use options `-c`/`checks` or `
```console
prowler azure --checks storage_blob_public_access_level_is_disabled
prowler aws --services s3 ec2
prowler gcp --services iam compute
```
Also, checks and services can be excluded with options `-e`/`--excluded-checks` or `--excluded-services`:
@@ -202,9 +224,10 @@ Also, checks and services can be excluded with options `-e`/`--excluded-checks`
```console
prowler aws --excluded-checks s3_bucket_public_access
prowler azure --excluded-services defender iam
prowler gcp --excluded-services kms
```
More options and executions methods that will save your time in [Miscelaneous](tutorials/misc.md).
More options and executions methods that will save your time in [Miscellaneous](tutorials/misc.md).
You can always use `-h`/`--help` to access the usage information and all the possible options:
@@ -221,6 +244,8 @@ prowler aws --profile custom-profile -f us-east-1 eu-south-2
```
> By default, `prowler` will scan all AWS regions.
See more details about AWS Authentication in [Requirements](getting-started/requirements.md)
### Azure
With Azure you need to specify which auth method is going to be used:
@@ -233,15 +258,37 @@ prowler azure --sp-env-auth
prowler azure --az-cli-auth
# To use browser authentication
prowler azure --browser-auth
prowler azure --browser-auth --tenant-id "XXXXXXXX"
# To use managed identity auth
prowler azure --managed-identity-auth
```
More details in [Requirements](getting-started/requirements.md)
See more details about Azure Authentication in [Requirements](getting-started/requirements.md)
Prowler by default scans all the subscriptions that is allowed to scan, if you want to scan a single subscription or various concrete subscriptions you can use the following flag (using az cli auth as example):
Prowler by default scans all the subscriptions that it is allowed to scan; if you want to scan a single subscription or several specific subscriptions you can use the following flag (using az cli auth as an example):
```console
prowler azure --az-cli-auth --subscription-ids <subscription ID 1> <subscription ID 2> ... <subscription ID N>
```
### Google Cloud
By default, Prowler will use your User Account credentials; you can configure them using:
- `gcloud init` to use a new account
- `gcloud config set account <account>` to use an existing account
Then, obtain your access credentials using: `gcloud auth application-default login`
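Putting those steps together, a typical User Account flow could look like this (the account placeholder is illustrative):
```console
gcloud config set account <account>
gcloud auth application-default login
prowler gcp
```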
Otherwise, you can generate and download Service Account keys in JSON format (refer to https://cloud.google.com/iam/docs/creating-managing-service-account-keys) and provide the location of the file with the following argument:
```console
prowler gcp --credentials-file path
```
Prowler by default scans all the GCP Projects that it is allowed to scan; if you want to scan a single project or several specific projects you can use the following flag:
```console
prowler gcp --project-ids <Project ID 1> <Project ID 2> ... <Project ID N>
```
See more details about GCP Authentication in [Requirements](getting-started/requirements.md)

View File

@@ -15,7 +15,7 @@ As an **AWS Partner** and we have passed the [AWS Foundation Technical Review (F
If you would like to report a vulnerability or have a security concern regarding Prowler Open Source or the ProwlerPro service, please submit the information by contacting help@prowler.pro.
The information you share with Verica as part of this process is kept confidential within Verica and the Prowler team. We will only share this information with a third party if the vulnerability you report is found to affect a third-party product, in which case we will share this information with the third-party product's author or manufacturer. Otherwise, we will only share this information as permitted by you.
The information you share with ProwlerPro as part of this process is kept confidential within ProwlerPro. We will only share this information with a third party if the vulnerability you report is found to affect a third-party product, in which case we will share this information with the third-party product's author or manufacturer. Otherwise, we will only share this information as permitted by you.
We will review the submitted report, and assign it a tracking number. We will then respond to you, acknowledging receipt of the report, and outline the next steps in the process.

View File

@@ -11,4 +11,4 @@
This error is also related to a lack of system requirements. To improve performance, Prowler stores information in memory, so it may need to be run on a system with more than 1GB of memory.
See section [Logging](/tutorials/logging/) for further information or [contact us](/contact/).
See section [Logging](./tutorials/logging.md) for further information or [contact us](./contact.md).

View File

@@ -7,37 +7,98 @@ You can use `-w`/`--allowlist-file` with the path of your allowlist yaml file, b
## Allowlist Yaml File Syntax
### Account, Check and/or Region can be * to apply for all the cases.
### Resources and tags are lists that can have either Regex or Keywords.
### Tags is an optional list that matches on tuples of 'key=value' and are "ANDed" together.
### Use an alternation Regex to match one of multiple tags with "ORed" logic.
### For each check you can except Accounts, Regions, Resources and/or Tags.
########################### ALLOWLIST EXAMPLE ###########################
Allowlist:
  Accounts:
    "123456789012":
      Checks:
        "iam_user_hardware_mfa_enabled":
          Regions:
            - "us-east-1"
          Resources:
            - "user-1" # Will ignore user-1 in check iam_user_hardware_mfa_enabled
            - "user-2" # Will ignore user-2 in check iam_user_hardware_mfa_enabled
        "ec2_*":
          Regions:
            - "*"
          Resources:
            - "*" # Will ignore every EC2 check in every account and region
        "*":
          Regions:
            - "*"
          Resources:
            - "test"
          Tags:
            - "test=test" # Will ignore every resource containing the string "test" and the tags 'test=test' and
            - "project=test|project=stage" # either of ('project=test' OR 'project=stage') in account 123456789012 and every region
    "*":
      Checks:
        "s3_bucket_object_versioning":
          Regions:
            - "eu-west-1"
            - "us-east-1"
          Resources:
            - "ci-logs" # Will ignore bucket "ci-logs" AND ALSO bucket "ci-logs-replica" in specified check and regions
            - "logs" # Will ignore EVERY BUCKET containing the string "logs" in specified check and regions
            - ".+-logs" # Will ignore all buckets containing the terms ci-logs, qa-logs, etc. in specified check and regions
        "ecs_task_definitions_no_environment_secrets":
          Regions:
            - "*"
          Resources:
            - "*"
          Exceptions:
            Accounts:
              - "0123456789012"
            Regions:
              - "eu-west-1"
              - "eu-south-2" # Will ignore every resource in check ecs_task_definitions_no_environment_secrets except the ones in account 0123456789012 located in eu-south-2 or eu-west-1
        "*":
          Regions:
            - "*"
          Resources:
            - "*"
          Tags:
            - "environment=dev" # Will ignore every resource containing the tag 'environment=dev' in every account and region
    "123456789012":
      Checks:
        "*":
          Regions:
            - "*"
          Resources:
            - "*"
          Exceptions:
            Resources:
              - "test"
            Tags:
              - "environment=prod" # Will ignore every resource in account 123456789012 except the ones containing the string "test" and the tag 'environment=prod'
## Allowlist specific regions
If you want to allowlist/mute failed findings only in specific regions, create a file with the following syntax and run it with `prowler aws -w allowlist.yaml`:
Allowlist:
  Accounts:
    "*":
      Checks:
        "*":
          Regions:
            - "ap-southeast-1"
            - "ap-southeast-2"
          Resources:
            - "*"
## Default AWS Allowlist
Prowler provides you with a Default AWS Allowlist containing the AWS resources that should be allowlisted, such as all resources created by AWS Control Tower when setting up a landing zone.
You can execute Prowler with this allowlist using the following command:
```sh
prowler aws -w prowler/config/aws_allowlist.yaml
```
## Supported Allowlist Locations
The allowlisting flag supports the following locations:
@@ -70,6 +131,10 @@ prowler aws -w arn:aws:dynamodb:<region_name>:<account_id>:table/<table_name>
- Checks (String): This field can contain either a Prowler Check Name or an `*` (which applies to all the scanned checks).
- Regions (List): This field contains a list of regions where this allowlist rule is applied (it can also contain an `*` to apply to all scanned regions).
- Resources (List): This field contains a list of regex expressions that apply to the resources to be allowlisted.
- Tags (List): -Optional- This field contains a list of tuples in the form of 'key=value' that apply to the tags of the resources to be allowlisted.
- Exceptions (Map): -Optional- This field contains a map of lists of accounts/regions/resources/tags to be excepted from the allowlist.
The following example will allowlist all resources in all accounts for the EC2 checks in the regions `eu-west-1` and `us-east-1` with the tags `environment=dev` and `environment=prod`, except the resources containing the string `test` in the account `012345678912` and region `eu-west-1` with the tag `environment=prod`:
<img src="../img/allowlist-row.png"/>
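As a sketch, the same rule expressed with the YAML fields described above (values taken from that example; the file name is illustrative and the snippet is untested) could be written and used like this:
```shell
cat > allowlist.yaml <<'EOF'
Allowlist:
  Accounts:
    "*":
      Checks:
        "ec2_*":
          Regions: ["eu-west-1", "us-east-1"]
          Resources: ["*"]
          Tags: ["environment=dev|environment=prod"]
          Exceptions:
            Accounts: ["012345678912"]
            Regions: ["eu-west-1"]
            Resources: ["test"]
            Tags: ["environment=prod"]
EOF
prowler aws -w allowlist.yaml
```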
@@ -101,7 +166,7 @@ generates an Allowlist:
```
def handler(event, context):
    checks = {}
    checks["vpc_flow_logs_enabled"] = { "Regions": [ "*" ], "Resources": [ "" ], Optional("Tags"): [ "key:value" ] }

    al = { "Allowlist": { "Accounts": { "*": { "Checks": checks } } } }
    return al

View File

@@ -0,0 +1,43 @@
# AWS Authentication
Make sure you have properly configured your AWS-CLI with a valid Access Key and Region or declare AWS variables properly (or instance profile/role):
```console
aws configure
```
or
```console
export AWS_ACCESS_KEY_ID="ASXXXXXXX"
export AWS_SECRET_ACCESS_KEY="XXXXXXXXX"
export AWS_SESSION_TOKEN="XXXXXXXXX"
```
Those credentials must be associated with a user or role with proper permissions to perform all checks. To make sure, add the following AWS managed policies to the user or role being used:
- `arn:aws:iam::aws:policy/SecurityAudit`
- `arn:aws:iam::aws:policy/job-function/ViewOnlyAccess`
> Moreover, some read-only additional permissions are needed for several checks, make sure you attach also the custom policy [prowler-additions-policy.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-additions-policy.json) to the role you are using.
> If you want Prowler to send findings to [AWS Security Hub](https://aws.amazon.com/security-hub), make sure you also attach the custom policy [prowler-security-hub.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-security-hub.json).
## Profiles
Prowler can use your custom AWS Profile with:
```console
prowler <provider> -p/--profile <profile_name>
```
## Multi-Factor Authentication
If your IAM entity enforces MFA you can use `--mfa` and Prowler will ask you to input the following values to get a new session:
- ARN of your MFA device
- TOTP (Time-Based One-Time Password)
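A session with MFA could then look like the following sketch (the device ARN format follows the standard IAM pattern and is illustrative):
```console
prowler aws --mfa
# Prowler will prompt interactively for two values:
#   - the ARN of your MFA device, e.g. arn:aws:iam::123456789012:mfa/<user>
#   - the current TOTP code shown by that device
```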
## STS Endpoint Region
If you are using Prowler in AWS regions that are not enabled by default you need to use the argument `--sts-endpoint-region` to point the AWS STS API calls `assume-role` and `get-caller-identity` to the non-default region, e.g.: `prowler aws --sts-endpoint-region eu-south-2`.

View File

@@ -1,7 +1,9 @@
# Boto3 Retrier Configuration
Prowler's AWS Provider uses the Boto3 [Standard](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html) retry mode to assist in retrying client calls to AWS services when these kinds of errors or exceptions are experienced. This mode includes the following behaviours:
- A default value of 3 for maximum retry attempts. This can be overwritten with the `--aws-retries-max-attempts 5` argument.
- Retry attempts for an expanded list of errors/exceptions:
```
# Transient errors/exceptions
@@ -26,6 +28,18 @@ Prowler's AWS Provider uses the Boto3 [Standard](https://boto3.amazonaws.com/v1/
SlowDown
EC2ThrottledException
```
- Retry attempts on nondescriptive, transient error codes. Specifically, these HTTP status codes: 500, 502, 503, 504.
- Any retry attempt will include an exponential backoff by a base factor of 2 for a maximum backoff time of 20 seconds.
## Notes for validating retry attempts
If you are making changes to Prowler and want to validate whether requests are being retried or given up on, you can take the following approach:
* Run prowler with `--log-level DEBUG` and `--log-file debuglogs.txt`
* Search for retry attempts using `grep -i 'Retry needed' debuglogs.txt`
This is based on the [AWS documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html#checking-retry-attempts-in-your-client-logs), which states that if a retry is performed, you will see a message starting with "Retry needed".
You can determine the total number of calls made using `grep -i 'Sending http request' debuglogs.txt | wc -l`
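Putting the steps above together, one possible validation run (the log file name is illustrative) looks like:
```shell
prowler aws --aws-retries-max-attempts 5 --log-level DEBUG --log-file debuglogs.txt
grep -i 'Retry needed' debuglogs.txt                  # lists retried calls
grep -i 'Sending http request' debuglogs.txt | wc -l  # total number of calls made
```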

View File

@@ -1,26 +1,26 @@
# AWS CloudShell
Prowler can be easily executed in AWS CloudShell but it has some prerequisites to be able to do so. AWS CloudShell is a container running with `Amazon Linux release 2 (Karoo)` that comes with Python 3.7; since Prowler requires Python >= 3.9 we need to first install a newer version of Python. Follow the steps below to successfully execute Prowler v3 in AWS CloudShell:
- First install all dependences and then Python, in this case we need to compile it because there is not a package available at the time this document is written:
```
sudo yum -y install gcc openssl-devel bzip2-devel libffi-devel
wget https://www.python.org/ftp/python/3.9.16/Python-3.9.16.tgz
tar zxf Python-3.9.16.tgz
cd Python-3.9.16/
./configure --enable-optimizations
sudo make altinstall
python3.9 --version
cd
```
- Once Python 3.9 is available we can install Prowler from pip:
```
pip3.9 install prowler
```
- Now enjoy Prowler:
```
## Installation
After the migration of AWS CloudShell from Amazon Linux 2 to Amazon Linux 2023 [[1]](https://aws.amazon.com/about-aws/whats-new/2023/12/aws-cloudshell-migrated-al2023/) [[2]](https://docs.aws.amazon.com/cloudshell/latest/userguide/cloudshell-AL2023-migration.html), there is no longer a need to manually compile Python 3.9 as it's already included in AL2023. Prowler can thus be easily installed following the Generic method of installation via pip. Follow the steps below to successfully execute Prowler v3 in AWS CloudShell:
```shell
pip install prowler
prowler -v
prowler
```
- To download the results from AWS CloudShell, select Actions -> Download File and add the full path of each file. For the CSV file it will be something like `/home/cloudshell-user/output/prowler-output-123456789012-20221220191331.csv`
## Download Files
To download the results from AWS CloudShell, select Actions -> Download File and add the full path of each file. For the CSV file it will be something like `/home/cloudshell-user/output/prowler-output-123456789012-20221220191331.csv`
## Clone Prowler from Github
The limited storage that AWS CloudShell provides for the user's home directory causes issues when installing the poetry dependencies to run Prowler from GitHub. Here is a workaround:
```shell
git clone https://github.com/prowler-cloud/prowler.git
cd prowler
pip install poetry
mkdir /tmp/pypoetry
poetry config cache-dir /tmp/pypoetry
poetry shell
poetry install
python prowler.py -v
```

Binary image files added: screenshots for the AWS Security Hub tutorial (not shown).

View File

@@ -1,6 +1,6 @@
# Scan Multiple AWS Accounts
Prowler can scan multiple accounts when it is ejecuted from one account that can assume a role in those given accounts to scan using [Assume Role feature](role-assumption.md) and [AWS Organizations integration feature](organizations.md).
Prowler can scan multiple accounts when it is executed from one account that can assume a role in those given accounts to scan using [Assume Role feature](role-assumption.md) and [AWS Organizations integration feature](organizations.md).
## Scan multiple specific accounts sequentially
@@ -41,7 +41,7 @@ for accountId in $ACCOUNTS_LIST; do
done
```
## Scan mutiple accounts from AWS Organizations in parallel
## Scan multiple accounts from AWS Organizations in parallel
- Declare a variable with all the accounts to scan. To do so, get the list of AWS accounts in your AWS Organization by running the following command (it will create a variable with all your ACTIVE accounts). Remember to run that command with the permissions needed to get that information in your AWS Organizations Management account.

View File

@@ -1,18 +1,19 @@
# AWS Organizations
## Get AWS Account details from your AWS Organization:
## Get AWS Account details from your AWS Organization
Prowler allows you to get additional information about the scanned account in the CSV and JSON outputs. When scanning a single account you get the Account ID as part of the output.
If you have AWS Organizations, Prowler can get your account details like Account Name, Email, ARN, Organization ID and Tags, and you will have them next to every finding in the CSV and JSON outputs.
- In order to do that you can use the option `-O`/`--organizations-role <organizations_role_arn>`. See the following sample command:
In order to do that you can use the option `-O`/`--organizations-role <organizations_role_arn>`. See the following sample command:
```shell
prowler aws \
  -O arn:aws:iam::<management_organizations_account_id>:role/<role_name>
```
> Make sure the role in your AWS Organizatiosn management account has the permissions `organizations:ListAccounts*` and `organizations:ListTagsForResource`.
> Make sure the role in your AWS Organizations management account has the permissions `organizations:ListAccounts*` and `organizations:ListTagsForResource`.
- In that command Prowler will scan the account and getting the account details from the AWS Organizations management account assuming a role and creating two reports with those details in JSON and CSV.
In that command Prowler will scan the account, get the account details from the AWS Organizations management account by assuming a role, and create two reports with those details in JSON and CSV.
In the JSON output below (redacted) you can see the tags encoded in base64 to prevent breaking the CSV or JSON format:
@@ -30,20 +31,28 @@ The additional fields in CSV header output are as follow:
ACCOUNT_DETAILS_EMAIL,ACCOUNT_DETAILS_NAME,ACCOUNT_DETAILS_ARN,ACCOUNT_DETAILS_ORG,ACCOUNT_DETAILS_TAGS
```
## Assume Role and across all accounts in AWS Organizations or just a list of accounts:
## Extra: run Prowler across all accounts in AWS Organizations by assuming roles
If you want to run Prowler across all accounts of AWS Organizations you can do this:
- First get a list of accounts that are not suspended:
1. First get a list of accounts that are not suspended:
```
ACCOUNTS_IN_ORGS=$(aws organizations list-accounts --query Accounts[?Status==`ACTIVE`].Id --output text)
```
```shell
ACCOUNTS_IN_ORGS=$(aws organizations list-accounts \
--query "Accounts[?Status=='ACTIVE'].Id" \
--output text \
)
```
- Then run Prowler to assume a role (same in all members) per each account, in this example it is just running one particular check:
2. Then run Prowler to assume a role (same in all members) per each account:
```
for accountId in $ACCOUNTS_IN_ORGS; do prowler aws -O arn:aws:iam::<management_organizations_account_id>:role/<role_name>; done
```
```shell
for accountId in $ACCOUNTS_IN_ORGS;
do
prowler aws \
-O arn:aws:iam::<management_organizations_account_id>:role/<role_name> \
-R arn:aws:iam::"${accountId}":role/<role_name>;
done
```
- Using the same for loop it can be scanned a list of accounts with a variable like `ACCOUNTS_LIST='11111111111 2222222222 333333333'`
> Using the same for loop it can be scanned a list of accounts with a variable like `ACCOUNTS_LIST='11111111111 2222222222 333333333'`

View File

@@ -0,0 +1,88 @@
# AWS Regions and Partitions
By default Prowler is able to scan the following AWS partitions:
- Commercial: `aws`
- China: `aws-cn`
- GovCloud (US): `aws-us-gov`
> To check the available regions for each partition and service please refer to the following document [aws_regions_by_service.json](https://github.com/prowler-cloud/prowler/blob/master/prowler/providers/aws/aws_regions_by_service.json)
It is important to take into consideration that, to scan the China (`aws-cn`) or GovCloud (`aws-us-gov`) partitions, it is required either to have a valid region for that partition in your AWS credentials or to specify the regions you want to audit for that partition using the `-f/--region` flag.
> Please, refer to https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#configuring-credentials for more information about the AWS credentials configuration.
Prowler can scan specific region(s) with:
```console
prowler aws -f/--region eu-west-1 us-east-1
```
You can get more information about the available partitions and regions in the following [Botocore](https://github.com/boto/botocore) [file](https://github.com/boto/botocore/blob/22a19ea7c4c2c4dd7df4ab8c32733cba0c7597a4/botocore/data/partitions.json).
## AWS China
To scan your AWS account in the China partition (`aws-cn`):
- Using the `-f/--region` flag:
```
prowler aws --region cn-north-1 cn-northwest-1
```
- Using the region configured in your AWS profile at `~/.aws/credentials` or `~/.aws/config`:
```
[default]
aws_access_key_id = XXXXXXXXXXXXXXXXXXX
aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
region = cn-north-1
```
> With this option all the partition regions will be scanned without the need of using the `-f/--region` flag
## AWS GovCloud (US)
To scan your AWS account in the GovCloud (US) partition (`aws-us-gov`):
- Using the `-f/--region` flag:
```
prowler aws --region us-gov-east-1 us-gov-west-1
```
- Using the region configured in your AWS profile at `~/.aws/credentials` or `~/.aws/config`:
```
[default]
aws_access_key_id = XXXXXXXXXXXXXXXXXXX
aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
region = us-gov-east-1
```
> With this option all the partition regions will be scanned without the need of using the `-f/--region` flag
## AWS ISO (US & Europe)
For the AWS ISO partitions, which are known as "secret partitions" and are air-gapped from the Internet, there is no built-in way to scan them. If you want to audit an AWS account in one of the AWS ISO partitions you should manually update the [aws_regions_by_service.json](https://github.com/prowler-cloud/prowler/blob/master/prowler/providers/aws/aws_regions_by_service.json) and include the partition, region and services, e.g.:
```json
"iam": {
"regions": {
"aws": [
"eu-west-1",
"us-east-1",
],
"aws-cn": [
"cn-north-1",
"cn-northwest-1"
],
"aws-us-gov": [
"us-gov-east-1",
"us-gov-west-1"
],
"aws-iso": [
"aws-iso-global",
"us-iso-east-1",
"us-iso-west-1"
],
"aws-iso-b": [
"aws-iso-b-global",
"us-isob-east-1"
],
"aws-iso-e": [],
}
},
```

View File

@@ -5,7 +5,7 @@ Prowler uses the AWS SDK (Boto3) underneath so it uses the same authentication m
However, there are few ways to run Prowler against multiple accounts using IAM Assume Role feature depending on each use case:
1. You can just set up your custom profile inside `~/.aws/config` with all needed information about the role to assume then call it with `prowler aws -p/--profile your-custom-profile`.
- An example profile that performs role-chaining is given below. The `credential_source` can either be set to `Environment`, `Ec2InstanceMetadata`, or `EcsContainer`.
- Alternatively, you could use the `source_profile` instead of `credential_source` to specify a separate named profile that contains IAM user credentials with permission to assume the target the role. More information can be found [here](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html).
```
[profile crossaccountrole]
@@ -23,6 +23,30 @@ prowler aws -R arn:aws:iam::<account_id>:role/<role_name>
prowler aws -T/--session-duration <seconds> -I/--external-id <external_id> -R arn:aws:iam::<account_id>:role/<role_name>
```
## Custom Role Session Name
Prowler can use your custom Role Session name with:
```console
prowler aws --role-session-name <role_session_name>
```
> It defaults to `ProwlerAssessmentSession`
## STS Endpoint Region
If you are using Prowler in AWS regions that are not enabled by default you need to use the argument `--sts-endpoint-region` to point the AWS STS API calls `assume-role` and `get-caller-identity` to the non-default region, e.g.: `prowler aws --sts-endpoint-region eu-south-2`.
> Since v3.11.0, Prowler uses a regional token in STS sessions so it can scan all AWS regions without needing the `--sts-endpoint-region` argument.
> Make sure that you have enabled the AWS Region you want to scan in BOTH AWS Accounts (assumed role account and account from which you assume the role).
## Role MFA
If your IAM Role has MFA configured you can use `--mfa` along with `-R`/`--role <role_arn>` and Prowler will ask you to input the following values to get a new temporary session for the IAM Role provided:
- ARN of your MFA device
- TOTP (Time-Based One-Time Password)
## Create Role
To create a role to be assumed in one or multiple accounts, you can use the following [template](https://github.com/prowler-cloud/prowler/blob/master/permissions/create_role_to_assume_cfn.yaml) either as a CloudFormation Stack or StackSet and adapt it.
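A hedged sketch of deploying that template as a single CloudFormation stack (the stack name is illustrative, the template's actual parameters may differ, and creating named IAM resources requires the capability flag):
```console
aws cloudformation create-stack \
  --stack-name prowler-assume-role \
  --template-body file://create_role_to_assume_cfn.yaml \
  --capabilities CAPABILITY_NAMED_IAM
```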

docs/tutorials/aws/s3.md Normal file
View File

@@ -0,0 +1,26 @@
# Send report to AWS S3 Bucket
To save your report in an S3 bucket, use `-B`/`--output-bucket`.
```sh
prowler <provider> -B my-bucket
```
If you want to use a custom folder and/or filename, use `-o`/`--output-directory` and/or `-F`/`--output-filename`.
```sh
prowler <provider> \
-B my-bucket \
--output-directory test-folder \
--output-filename output-filename
```
By default Prowler sends the HTML, JSON and CSV output formats; if you want to send a custom output format, or only one of the defaults, you can specify it with the `-M`/`--output-modes` flag.
```sh
prowler <provider> -M csv -B my-bucket
```
> In the case you do not want to use the assumed role credentials but the initial credentials to put the reports into the S3 bucket, use `-D`/`--output-bucket-no-assume` instead of `-B`/`--output-bucket`.
> Make sure that the used credentials have `s3:PutObject` permissions in the S3 path where the reports are going to be uploaded.
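For instance, to upload only the CSV report using the initial credentials instead of the assumed role (the bucket name is illustrative):
```sh
prowler aws -M csv -D my-bucket
```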

View File

@@ -1,41 +1,134 @@
# AWS Security Hub Integration
Prowler supports natively and as **official integration** sending findings to [AWS Security Hub](https://aws.amazon.com/security-hub). This integration allows Prowler to import its findings to AWS Security Hub.
Prowler supports natively and as **official integration** sending findings to [AWS Security Hub](https://aws.amazon.com/security-hub). This integration allows **Prowler** to import its findings to AWS Security Hub.
With Security Hub, you now have a single place that aggregates, organizes, and prioritizes your security alerts, or findings, from multiple AWS services, such as Amazon GuardDuty, Amazon Inspector, Amazon Macie, AWS Identity and Access Management (IAM) Access Analyzer, and AWS Firewall Manager, as well as from AWS Partner solutions and from Prowler for free.
Before sending findings to Prowler, you will need to perform next steps:
Before sending findings, you will need to enable AWS Security Hub and the **Prowler** integration.
1. Since Security Hub is a region based service, enable it in the region or regions you require. Use the AWS Management Console or the AWS CLI with this command if you have enough permissions:
- `aws securityhub enable-security-hub --region <region>`.
2. Enable Prowler as partner integration. Use the AWS Management Console or the AWS CLI with this command if you have enough permissions:
- `aws securityhub enable-import-findings-for-product --region <region> --product-arn arn:aws:securityhub:<region>::product/prowler/prowler` (change region also inside the ARN).
- Using the AWS Management Console:
![Screenshot 2020-10-29 at 10 26 02 PM](https://user-images.githubusercontent.com/3985464/97634660-5ade3400-1a36-11eb-9a92-4a45cc98c158.png)
3. Allow Prowler to import its findings to AWS Security Hub by adding the policy below to the role or user running Prowler:
- [prowler-security-hub.json](https://github.com/prowler-cloud/prowler/blob/master/iam/prowler-security-hub.json)
## Enable AWS Security Hub
To enable the integration you have to perform the following steps, in _at least_ one AWS region of a given AWS account, to enable **AWS Security Hub** and **Prowler** as a partner integration.
Since **AWS Security Hub** is a region based service, you will need to enable it in the region or regions you require. You can configure it using the AWS Management Console or the AWS CLI.
> Take into account that enabling this integration will incur costs in AWS Security Hub; please refer to its pricing [here](https://aws.amazon.com/security-hub/pricing/) for more information.
### Using the AWS Management Console
#### Enable AWS Security Hub
If you currently have AWS Security Hub enabled you can skip to the [next section](#enable-prowler-integration).
1. Open the **AWS Security Hub** console at https://console.aws.amazon.com/securityhub/.
2. When you open the Security Hub console for the first time make sure that you are in the region you want to enable, then choose **Go to Security Hub**.
![](./img/enable.png)
3. On the next page, the Security standards section lists the security standards that Security Hub supports. Select the check box for a standard to enable it, and clear the check box to disable it.
4. Choose **Enable Security Hub**.
![](./img/enable-2.png)
#### Enable Prowler Integration
If you currently have the Prowler integration enabled in AWS Security Hub you can skip to the [next section](#send-findings) and start sending findings.
Once **AWS Security Hub** is enabled you will need to enable **Prowler** as partner integration to allow **Prowler** to send findings to your **AWS Security Hub**.
1. Open the **AWS Security Hub** console at https://console.aws.amazon.com/securityhub/.
2. Select the **Integrations** tab in the right-side menu bar.
![](./img/enable-partner-integration.png)
3. Search for _Prowler_ in the text search box and the **Prowler** integration will appear.
4. Once there, click on **Accept Findings** to allow **AWS Security Hub** to receive findings from **Prowler**.
![](./img/enable-partner-integration-2.png)
5. A new modal will appear to confirm that you are enabling the **Prowler** integration.
![](./img/enable-partner-integration-3.png)
6. Right after click on **Accept Findings**, you will see that the integration is enabled in **AWS Security Hub**.
![](./img/enable-partner-integration-4.png)
### Using the AWS CLI
To enable **AWS Security Hub** and the **Prowler** integration you have to run the following commands using the AWS CLI:
```shell
aws securityhub enable-security-hub --region <region>
```
> For this command to work you will need the `securityhub:EnableSecurityHub` permission.
> You will need to set the AWS region where you want to enable AWS Security Hub.
Once **AWS Security Hub** is enabled you will need to enable **Prowler** as partner integration to allow **Prowler** to send findings to your AWS Security Hub. You have to run the following commands using the AWS CLI:
```shell
aws securityhub enable-import-findings-for-product --region <region> --product-arn arn:aws:securityhub:<region>::product/prowler/prowler
```
> You will need to set the AWS region where you want to enable the integration, and also set that region within the ARN.
> For this command to work you will need the `securityhub:EnableImportFindingsForProduct` permission.
## Send Findings
Once it is enabled, it is as simple as running the command below (for all regions):
```sh
./prowler aws -S
prowler aws --security-hub
```
or for only one filtered region like eu-west-1:
```sh
./prowler -S -f eu-west-1
prowler --security-hub --region eu-west-1
```
> **Note 1**: It is recommended to send only fails to Security Hub and that is possible adding `-q` to the command.
> **Note 1**: It is recommended to send only fails to Security Hub, which is possible by adding `-q/--quiet` to the command. Alternatively, you can use the `--send-sh-only-fails` argument to save all the findings in the Prowler outputs but send only the FAIL findings to AWS Security Hub.
> **Note 2**: Since Prowler perform checks to all regions by defauls you may need to filter by region when runing Security Hub integration, as shown in the example above. Remember to enable Security Hub in the region or regions you need by calling `aws securityhub enable-security-hub --region <region>` and run Prowler with the option `-f <region>` (if no region is used it will try to push findings in all regions hubs).
> **Note 2**: Since Prowler performs checks in all regions by default, you may need to filter by region when running the Security Hub integration, as shown in the example above. Remember to enable Security Hub in the region or regions you need by calling `aws securityhub enable-security-hub --region <region>` and run Prowler with the option `-f/--region <region>` (if no region is specified it will try to push findings to every region's hub). Prowler will send findings to the Security Hub in the region where the scanned resource is located.
> **Note 3** to have updated findings in Security Hub you have to run Prowler periodically. Once a day or every certain amount of hours.
> **Note 3**: To have updated findings in Security Hub you have to run Prowler periodically: once a day or every few hours.
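For periodic runs, a daily cron entry is one simple option (the schedule and flags shown are illustrative):
```sh
# Run Prowler every day at 03:00, sending only failed findings to Security Hub
0 3 * * * prowler aws --security-hub --quiet
```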
Once you run findings for first time you will be able to see Prowler findings in Findings section:
### See your Prowler findings in AWS Security Hub
![Screenshot 2020-10-29 at 10 29 05 PM](https://user-images.githubusercontent.com/3985464/97634676-66c9f600-1a36-11eb-9341-70feb06f6331.png)
Once **AWS Security Hub** is configured, in your next scan you will receive the **Prowler** findings in the configured AWS regions. To review those findings in **AWS Security Hub**:
1. Open the **AWS Security Hub** console at https://console.aws.amazon.com/securityhub/.
2. Select the **Findings** tab in the right-side menu bar.
![](./img/findings.png)
3. Use the search box filters and use the **Product Name** filter with the value _Prowler_ to see the findings sent from **Prowler**.
4. Then, you can click on the check **Title** to see the details and the history of a finding.
![](./img/finding-details.png)
As you can see in the related requirements section of the finding's detailed view, **Prowler** also sends compliance information related to every finding.
## Send findings to Security Hub assuming an IAM Role
When you are auditing a multi-account AWS environment, you can send findings to a Security Hub of another account by assuming an IAM role from that account using the `-R` flag in the Prowler command:
```sh
prowler --security-hub --role arn:aws:iam::123456789012:role/ProwlerExecutionRole
```
> Remember that the used role needs to have permissions to send findings to Security Hub. To get more information about the permissions required, please refer to the following IAM policy [prowler-security-hub.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-security-hub.json)
## Send only failed findings to Security Hub
When using the **AWS Security Hub** integration you can send only the `FAIL` findings generated by **Prowler**, so the **AWS Security Hub** usage costs would eventually be lower. To follow that recommendation you can add the `-q/--quiet` flag to the Prowler command:
```sh
prowler --security-hub --quiet
```
Instead of the `-q/--quiet` argument, you can use the `--send-sh-only-fails` argument to save all the findings in the Prowler outputs but send only the FAIL findings to AWS Security Hub:
```sh
prowler --security-hub --send-sh-only-fails
```
## Skip sending updates of findings to Security Hub
@@ -43,5 +136,5 @@ By default, Prowler archives all its findings in Security Hub that have not appe
You can skip this logic by using the option `--skip-sh-update` so Prowler will not archive older findings:
```sh
./prowler -S --skip-sh-update
prowler --security-hub --skip-sh-update
```

View File

@@ -1,8 +1,8 @@
# Check mapping between Prowler v3 and v2
Prowler v3 comes with different identifiers but we maintained the same checks that were implemented in v2. The reason for this change is because in previows versions of Prowler, check names were mostly based on CIS Benchmark for AWS. In v3 all checks are independent from any security framework and they have its own name and ID.
Prowler v3 comes with different identifiers but we maintained the same checks that were implemented in v2. The reason for this change is that in previous versions of Prowler, check names were mostly based on the CIS Benchmark for AWS. In v3 all checks are independent from any security framework and they have their own name and ID.
If you need more information about how new compliance implementation works in Prowler v3 see [Compliance](../../compliance/) section.
If you need more information about how new compliance implementation works in Prowler v3 see [Compliance](../compliance.md) section.
```
checks_v3_to_v2_mapping = {
@@ -12,13 +12,13 @@ checks_v3_to_v2_mapping = {
"account_security_questions_are_registered_in_the_aws_account": "check115",
"acm_certificates_expiration_check": "extra730",
"acm_certificates_transparency_logs_enabled": "extra724",
"apigateway_authorizers_enabled": "extra746",
"apigateway_client_certificate_enabled": "extra743",
"apigateway_endpoint_public": "extra745",
"apigateway_logging_enabled": "extra722",
"apigateway_waf_acl_attached": "extra744",
"apigatewayv2_access_logging_enabled": "extra7156",
"apigatewayv2_authorizers_enabled": "extra7157",
"apigateway_restapi_authorizers_enabled": "extra746",
"apigateway_restapi_client_certificate_enabled": "extra743",
"apigateway_restapi_public": "extra745",
"apigateway_restapi_logging_enabled": "extra722",
"apigateway_restapi_waf_acl_attached": "extra744",
"apigatewayv2_api_access_logging_enabled": "extra7156",
"apigatewayv2_api_authorizers_enabled": "extra7157",
"appstream_fleet_default_internet_access_disabled": "extra7193",
"appstream_fleet_maximum_session_duration": "extra7190",
"appstream_fleet_session_disconnect_timeout": "extra7191",
@@ -31,7 +31,7 @@ checks_v3_to_v2_mapping = {
"awslambda_function_url_cors_policy": "extra7180",
"awslambda_function_url_public": "extra7179",
"awslambda_function_using_supported_runtimes": "extra762",
"cloudformation_outputs_find_secrets": "extra742",
"cloudformation_stack_outputs_find_secrets": "extra742",
"cloudformation_stacks_termination_protection_enabled": "extra7154",
"cloudfront_distributions_field_level_encryption_enabled": "extra767",
"cloudfront_distributions_geo_restrictions_enabled": "extra732",
@@ -84,7 +84,7 @@ checks_v3_to_v2_mapping = {
"ec2_ebs_snapshots_encrypted": "extra740",
"ec2_ebs_volume_encryption": "extra729",
"ec2_elastic_ip_shodan": "extra7102",
"ec2_elastic_ip_unassgined": "extra7146",
"ec2_elastic_ip_unassigned": "extra7146",
"ec2_instance_imdsv2_enabled": "extra786",
"ec2_instance_internet_facing_with_instance_profile": "extra770",
"ec2_instance_managed_by_ssm": "extra7124",
@@ -113,7 +113,6 @@ checks_v3_to_v2_mapping = {
"ec2_securitygroup_allow_wide_open_public_ipv4": "extra778",
"ec2_securitygroup_default_restrict_traffic": "check43",
"ec2_securitygroup_from_launch_wizard": "extra7173",
"ec2_securitygroup_in_use_without_ingress_filtering": "extra74",
"ec2_securitygroup_not_used": "extra75",
"ec2_securitygroup_with_many_ingress_egress_rules": "extra777",
"ecr_repositories_lifecycle_policy_enabled": "extra7194",
@@ -138,7 +137,6 @@ checks_v3_to_v2_mapping = {
"elbv2_internet_facing": "extra79",
"elbv2_listeners_underneath": "extra7158",
"elbv2_logging_enabled": "extra717",
"elbv2_request_smugling": "extra7142",
"elbv2_ssl_listeners": "extra793",
"elbv2_waf_acl_attached": "extra7129",
"emr_cluster_account_public_block_enabled": "extra7178",
@@ -159,9 +157,8 @@ checks_v3_to_v2_mapping = {
"iam_administrator_access_with_mfa": "extra71",
"iam_avoid_root_usage": "check11",
"iam_check_saml_providers_sts": "extra733",
"iam_disable_30_days_credentials": "extra774",
"iam_disable_45_days_credentials": "extra7198",
"iam_disable_90_days_credentials": "check13",
"iam_customer_attached_policy_no_administrative_privileges": "subset of check122",
"iam_customer_unattached_policy_no_administrative_privileges": "subset of check122",
"iam_no_custom_policy_permissive_role_assumption": "extra7100",
"iam_no_expired_server_certificates_stored": "extra7199",
"iam_no_root_access_key": "check112",
@@ -174,11 +171,12 @@ checks_v3_to_v2_mapping = {
"iam_password_policy_uppercase": "check15",
"iam_policy_allows_privilege_escalation": "extra7185",
"iam_policy_attached_only_to_group_or_roles": "check116",
"iam_policy_no_administrative_privileges": "check122",
"iam_root_hardware_mfa_enabled": "check114",
"iam_root_mfa_enabled": "check113",
"iam_rotate_access_key_90_days": "check14",
"iam_support_role_created": "check120",
"iam_user_accesskey_unused": "subset of check13,extra774,extra7198",
"iam_user_console_access_unused": "subset of check13,extra774,extra7198",
"iam_user_hardware_mfa_enabled": "extra7125",
"iam_user_mfa_enabled_console_access": "check12",
"iam_user_no_setup_initial_access_key": "check121",

View File

@@ -18,10 +18,10 @@ prowler azure --sp-env-auth
prowler azure --az-cli-auth
# To use browser authentication
prowler azure --browser-auth
prowler azure --browser-auth --tenant-id "XXXXXXXX"
# To use managed identity auth
prowler azure --managed-identity-auth
```
To use Prowler you need to set up also the permissions required to access your resources in your Azure account, to more details refer to [Requirements](/getting-started/requirements)
To use Prowler you also need to set up the permissions required to access your resources in your Azure account; for more details refer to [Requirements](../../getting-started/requirements.md)

View File

@@ -0,0 +1,16 @@
# Use non default Azure regions
Microsoft provides clouds for compliance with regional laws, which are available for your use.
By default, Prowler uses the `AzureCloud` cloud, which is the commercial one (you can list all the available clouds with `az cloud list --output table`).
At the time of writing, the available Azure Clouds for different regions are the following:
- AzureCloud
- AzureChinaCloud
- AzureUSGovernment
- AzureGermanCloud
If you want to change the default one you must include the flag `--azure-region`, e.g.:
```console
prowler azure --az-cli-auth --azure-region AzureChinaCloud
```

View File

@@ -0,0 +1,20 @@
# Check Aliases
Prowler allows you to use aliases for the checks. You only have to add the `CheckAliases` key to the check's metadata with a list of the aliases:
"Provider": "<provider>",
"CheckID": "<check_id>",
"CheckTitle": "<check_title>",
"CheckAliases": [
"<check_alias_1>"
"<check_alias_2>",
...
],
...
Then, you can execute the check either with its check ID or with one of the previous aliases:
```console
prowler <provider> -c/--checks <check_alias_1>
Using alias <check_alias_1> for check <check_id>...
```

View File

@@ -13,6 +13,7 @@ Currently, the available frameworks are:
- `ens_rd2022_aws`
- `aws_audit_manager_control_tower_guardrails_aws`
- `aws_foundational_security_best_practices_aws`
- `aws_well_architected_framework_security_pillar_aws`
- `cisa_aws`
- `fedramp_low_revision_4_aws`
- `fedramp_moderate_revision_4_aws`
@@ -81,36 +82,4 @@ Standard results will be shown and additionally the framework information as the
## Create and contribute adding other Security Frameworks
If you want to create or contribute your own security frameworks, or add public ones to Prowler, you need to make sure the checks are available; if not, you have to create your own. Then create a compliance file per provider, like in `prowler/compliance/aws/`, name it `<framework>_<version>_<provider>.json`, and follow the format below to create yours.
Each version of a framework file will have the following high-level structure, so that each framework can be generally identified. One requirement can also be called one control, and one requirement can be linked to multiple Prowler checks:
- `Framework`: string. Distinguishing name of the framework, like CIS
- `Provider`: string. Provider where the framework applies, such as AWS, Azure, OCI,...
- `Version`: string. Version of the framework itself, like 1.4 for CIS.
- `Requirements`: array of objects. Include all requirements or controls with the mapping to Prowler.
- `Requirements_Id`: string. Unique identifier per each requirement in the specific framework
- `Requirements_Description`: string. Description as in the framework.
- `Requirements_Attributes`: array of objects. Includes all needed attributes per each requirement, like levels, sections, etc. Whatever helps to create a dedicated report with the result of the findings. Attributes would be taken as closely as possible from the framework's own terminology directly.
- `Requirements_Checks`: array. Prowler checks that are needed to prove this requirement. It can be one or multiple checks. In case of no automation possible this can be empty.
```
{
  "Framework": "<framework>-<provider>",
  "Version": "<version>",
  "Requirements": [
    {
      "Id": "<unique-id>",
      "Description": "Requirement full description",
      "Checks": [
        "Here is the prowler check or checks that is going to be executed"
      ],
      "Attributes": [
        {
          <Add here your custom attributes.>
        }
      ]
    }
  ]
}
```
Finally, to have a proper output file for your reports, your framework data model has to be created in `prowler/lib/outputs/models.py` and also the CLI table output in `prowler/lib/outputs/compliance.py`.
This information is part of the Developer Guide and can be found here: https://docs.prowler.cloud/en/latest/tutorials/developer-guide/.

View File

@@ -1,76 +1,133 @@
# Configuration File
Several Prowler's checks have user configurable variables that can be modified in a common **configuration file**.
This file can be found in the following path:
Several Prowler's checks have user configurable variables that can be modified in a common **configuration file**. This file can be found in the following [path](https://github.com/prowler-cloud/prowler/blob/master/prowler/config/config.yaml):
```
prowler/config/config.yaml
```
## Configurable Checks
The following list includes all the checks with configurable variables that can be changed in the mentioned configuration yaml file:
You can also provide a custom configuration file using the `--config-file` argument.
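For example, a custom configuration file could be passed as follows (a minimal sketch with a placeholder path):
```console
prowler aws --config-file ./custom_config.yaml
```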
- aws.ec2_elastic_ip_shodan
- shodan_api_key (String)
- aws.ec2_securitygroup_with_many_ingress_egress_rules
- max_security_group_rules (Integer)
- aws.ec2_instance_older_than_specific_days
- max_ec2_instance_age_in_days (Integer)
- aws.vpc_endpoint_connections_trust_boundaries
- trusted_account_ids (List of Strings)
- aws.vpc_endpoint_services_allowed_principals_trust_boundaries
- trusted_account_ids (List of Strings)
- aws.cloudwatch_log_group_retention_policy_specific_days_enabled
- log_group_retention_days (Integer)
- aws.appstream_fleet_session_idle_disconnect_timeout
- max_idle_disconnect_timeout_in_seconds (Integer)
- aws.appstream_fleet_session_disconnect_timeout
- max_disconnect_timeout_in_seconds (Integer)
- aws.appstream_fleet_maximum_session_duration
- max_session_duration_seconds (Integer)
- aws.awslambda_function_using_supported_runtimes
- obsolete_lambda_runtimes (List of Strings)
## AWS
### Configurable Checks
The following table includes all the AWS checks with configurable variables that can be changed in the configuration yaml file:
| Check Name | Value | Type |
|---------------------------------------------------------------|--------------------------------------------------|-----------------|
| `iam_user_accesskey_unused` | `max_unused_access_keys_days` | Integer |
| `iam_user_console_access_unused` | `max_console_access_days` | Integer |
| `ec2_elastic_ip_shodan` | `shodan_api_key` | String |
| `ec2_securitygroup_with_many_ingress_egress_rules` | `max_security_group_rules` | Integer |
| `ec2_instance_older_than_specific_days` | `max_ec2_instance_age_in_days` | Integer |
| `vpc_endpoint_connections_trust_boundaries` | `trusted_account_ids` | List of Strings |
| `vpc_endpoint_services_allowed_principals_trust_boundaries` | `trusted_account_ids` | List of Strings |
| `cloudwatch_log_group_retention_policy_specific_days_enabled` | `log_group_retention_days` | Integer |
| `appstream_fleet_session_idle_disconnect_timeout` | `max_idle_disconnect_timeout_in_seconds` | Integer |
| `appstream_fleet_session_disconnect_timeout` | `max_disconnect_timeout_in_seconds` | Integer |
| `appstream_fleet_maximum_session_duration` | `max_session_duration_seconds` | Integer |
| `awslambda_function_using_supported_runtimes`                  | `obsolete_lambda_runtimes`                        | List of Strings |
| `organizations_scp_check_deny_regions` | `organizations_enabled_regions` | List of Strings |
| `organizations_delegated_administrators` | `organizations_trusted_delegated_administrators` | List of Strings |
| `ecr_repositories_scan_vulnerabilities_in_latest_image` | `ecr_repository_vulnerability_minimum_severity` | String |
| `trustedadvisor_premium_support_plan_subscribed` | `verify_premium_support_plans` | Boolean |
| `config_recorder_all_regions_enabled` | `allowlist_non_default_regions` | Boolean |
| `drs_job_exist` | `allowlist_non_default_regions` | Boolean |
| `guardduty_is_enabled` | `allowlist_non_default_regions` | Boolean |
| `securityhub_enabled` | `allowlist_non_default_regions` | Boolean |
## Azure
### Configurable Checks
## GCP
### Configurable Checks
## Config YAML File Structure
> This is the new Prowler configuration file format. The old one, without provider keys, is still compatible but only for the AWS provider.
```yaml title="config.yaml"
# AWS Configuration
aws:
# AWS Global Configuration
# aws.allowlist_non_default_regions --> Allowlist Failed Findings in non-default regions for GuardDuty, SecurityHub, DRS and Config
allowlist_non_default_regions: False
# AWS IAM Configuration
# aws.iam_user_accesskey_unused --> CIS recommends 45 days
max_unused_access_keys_days: 45
# aws.iam_user_console_access_unused --> CIS recommends 45 days
max_console_access_days: 45
# AWS EC2 Configuration
# aws.ec2_elastic_ip_shodan
shodan_api_key: null
# aws.ec2_securitygroup_with_many_ingress_egress_rules --> by default is 50 rules
max_security_group_rules: 50
# aws.ec2_instance_older_than_specific_days --> by default is 6 months (180 days)
max_ec2_instance_age_in_days: 180
# AWS VPC Configuration (vpc_endpoint_connections_trust_boundaries, vpc_endpoint_services_allowed_principals_trust_boundaries)
# Single account environment: No action required. The AWS account number will be automatically added by the checks.
# Multi account environment: Any additional trusted account number should be added as a space separated list, e.g.
# trusted_account_ids : ["123456789012", "098765432109", "678901234567"]
trusted_account_ids: []
# AWS Cloudwatch Configuration
# aws.cloudwatch_log_group_retention_policy_specific_days_enabled --> by default is 365 days
log_group_retention_days: 365
# AWS AppStream Session Configuration
# aws.appstream_fleet_session_idle_disconnect_timeout
max_idle_disconnect_timeout_in_seconds: 600 # 10 Minutes
# aws.appstream_fleet_session_disconnect_timeout
max_disconnect_timeout_in_seconds: 300 # 5 Minutes
# aws.appstream_fleet_maximum_session_duration
max_session_duration_seconds: 36000 # 10 Hours
# AWS Lambda Configuration
# aws.awslambda_function_using_supported_runtimes
obsolete_lambda_runtimes:
[
"python3.6",
"python2.7",
"nodejs4.3",
"nodejs4.3-edge",
"nodejs6.10",
"nodejs",
"nodejs8.10",
"nodejs10.x",
"dotnetcore1.0",
"dotnetcore2.0",
"dotnetcore2.1",
"ruby2.5",
"python3.6",
"python2.7",
"nodejs4.3",
"nodejs4.3-edge",
"nodejs6.10",
"nodejs",
"nodejs8.10",
"nodejs10.x",
"dotnetcore1.0",
"dotnetcore2.0",
"dotnetcore2.1",
"ruby2.5",
]
# AWS Organizations
# organizations_scp_check_deny_regions
# organizations_enabled_regions: [
# 'eu-central-1',
# 'eu-west-1',
# "us-east-1"
# ]
organizations_enabled_regions: []
organizations_trusted_delegated_administrators: []
# AWS ECR
# ecr_repositories_scan_vulnerabilities_in_latest_image
# CRITICAL
# HIGH
# MEDIUM
ecr_repository_vulnerability_minimum_severity: "MEDIUM"
# AWS Trusted Advisor
# trustedadvisor_premium_support_plan_subscribed
verify_premium_support_plans: True
# Azure Configuration
azure:
# GCP Configuration
gcp:
```

View File

@@ -0,0 +1,43 @@
# Custom Checks Metadata
In certain organizations, the severity of specific checks might differ from the default values defined in the check's metadata. For instance, while `s3_bucket_level_public_access_block` could be deemed `critical` for some organizations, others might assign a different severity level.
The custom metadata option offers a means to override the default metadata set by Prowler. You can use `--custom-checks-metadata-file` followed by the path to your custom checks metadata YAML file.
## Available Fields
The check's metadata fields that can be overridden are the following:
- Severity
## File Syntax
This feature is available for all the providers supported by Prowler, since the metadata format is common across providers. The following is the YAML format for the custom checks metadata file:
```yaml title="custom_checks_metadata.yaml"
CustomChecksMetadata:
aws:
Checks:
s3_bucket_level_public_access_block:
Severity: high
s3_bucket_no_mfa_delete:
Severity: high
azure:
Checks:
storage_infrastructure_encryption_is_enabled:
Severity: medium
gcp:
Checks:
compute_instance_public_ip:
Severity: critical
```
## Usage
Executing the following command will assess all checks and generate a report while overriding the metadata for those checks:
```sh
prowler <provider> --custom-checks-metadata-file <path/to/custom/metadata>
```
This customization feature enables organizations to tailor the severity of specific checks based on their unique requirements, providing greater flexibility in security assessment and reporting.

View File

@@ -0,0 +1,25 @@
# GCP authentication
By default, Prowler will use your User Account credentials. You can configure them using:
- `gcloud init` to use a new account
- `gcloud config set account <account>` to use an existing account
Then, obtain your access credentials using: `gcloud auth application-default login`
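Putting those commands together, a typical session could look like this (with a placeholder account):
```console
gcloud init
gcloud config set account your-account@example.com
gcloud auth application-default login
```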
Otherwise, you can generate and download Service Account keys in JSON format (refer to https://cloud.google.com/iam/docs/creating-managing-service-account-keys) and provide the location of the file with the following argument:
```console
prowler gcp --credentials-file path
```
> `prowler` will scan the GCP project associated with the credentials.
Prowler will follow the same credential search order as the [Google authentication libraries](https://cloud.google.com/docs/authentication/application-default-credentials#search_order):
1. [GOOGLE_APPLICATION_CREDENTIALS environment variable](https://cloud.google.com/docs/authentication/application-default-credentials#GAC)
2. [User credentials set up by using the Google Cloud CLI](https://cloud.google.com/docs/authentication/application-default-credentials#personal)
3. [The attached service account, returned by the metadata server](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa)
Those credentials must be associated with a user or service account that has the proper permissions to run all checks. To make sure, grant the `Viewer` role to the member associated with the credentials.
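For reference, granting that role with the Google Cloud CLI could look like this (a sketch with placeholder project and member values):
```console
gcloud projects add-iam-policy-binding my-project-id \
  --member="serviceAccount:prowler-sa@my-project-id.iam.gserviceaccount.com" \
  --role="roles/viewer"
```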

View File

@@ -0,0 +1,70 @@
# Ignore Unused Services
> Currently only available on the AWS provider.
Prowler allows you to ignore findings from unused services, so you can reduce the number of findings in Prowler's reports.
```console
prowler <provider> --ignore-unused-services
```
## Services that can be ignored
### AWS
#### Athena
When you create an AWS Account, Athena will create a default primary workgroup for you.
Prowler will check if that workgroup is enabled and if it is being used by checking if there were queries in the last 45 days.
If not, the findings of the following checks will not appear:
- `athena_workgroup_encryption`
- `athena_workgroup_enforce_configuration`
#### CloudTrail
AWS CloudTrail should have at least one trail with a data event to record all S3 object-level API operations. Prowler will first check whether there are S3 buckets in your account before alerting on this issue.
- `cloudtrail_s3_dataevents_read_enabled`
- `cloudtrail_s3_dataevents_write_enabled`
#### EC2
If EBS default encryption is not enabled, sensitive information at rest is not protected in EC2. But Prowler will only create a finding if there are EBS volumes where this default configuration could be enforced.
- `ec2_ebs_default_encryption`
If your security groups are not properly configured, the attack surface is increased. Nonetheless, Prowler will detect which security groups are in use (i.e., attached) and only notify about those. This logic applies to the 15 checks related to open ports in security groups.
- `ec2_securitygroup_allow_ingress_from_internet_to_port_X` (15 checks)
Prowler will also check for Network ACLs in use, to alert only on those with open ports.
- `ec2_networkacl_allow_ingress_X_port` (3 checks)
#### Glue
It is a best practice to encrypt both metadata and connection passwords in AWS Glue Data Catalogs; however, Prowler will first detect whether the service is in use by checking if there are any Data Catalog tables.
- `glue_data_catalogs_connection_passwords_encryption_enabled`
- `glue_data_catalogs_metadata_encryption_enabled`
#### Inspector
Amazon Inspector is a vulnerability discovery service that automates continuous scanning for security vulnerabilities within your Amazon EC2, Amazon ECR, and AWS Lambda environments. Prowler recommends enabling it and resolving all of Inspector's findings. When ignoring unused services, Prowler will only notify you if there are any Lambda functions, EC2 instances or ECR repositories in the region where Amazon Inspector should be enabled.
- `inspector2_is_enabled`
#### Macie
Amazon Macie is a security service that uses machine learning to automatically discover, classify and protect sensitive data in S3 buckets. Prowler will only create a finding when Macie is not enabled if there are S3 buckets in your account.
- `macie_is_enabled`
#### Network Firewall
Without a network firewall, it can be difficult to monitor and control traffic within the VPC. However, Prowler will only alert you for those VPCs that are in use, in other words, only the VPCs where you have ENIs (network interfaces).
- `networkfirewall_in_all_vpc`
#### S3
You should enable Public Access Block at the account level to prevent the exposure of your data stored in S3. Prowler, though, will only check this block configuration if you have S3 buckets in your AWS account.
- `s3_account_level_public_access_blocks`
#### VPC
VPC Flow Logs provide visibility into network traffic that traverses the VPC and can be used to detect anomalous traffic or insight during security workflows. Nevertheless, Prowler will only check if the Flow Logs are enabled for those VPCs that are in use, in other words, only the VPCs where you have ENIs (network interfaces).
- `vpc_flow_logs_enabled`

Binary file not shown.

Before

Width:  |  Height:  |  Size: 51 KiB

After

Width:  |  Height:  |  Size: 94 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 61 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 67 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 200 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 456 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 69 KiB

View File

@@ -0,0 +1,36 @@
# Integrations
## Slack
Prowler can be integrated with [Slack](https://slack.com/) to send a summary of the execution, once a Slack App has been configured in your channel, with the following command:
```sh
prowler <provider> --slack
```
![Prowler Slack Message](img/slack-prowler-message.png)
> Slack integration needs SLACK_API_TOKEN and SLACK_CHANNEL_ID environment variables.
### Configuration
To configure the Slack integration, follow these steps:
1. Create a Slack Application:
- Go to [Slack API page](https://api.slack.com/tutorials/tracks/getting-a-token), scroll down to the *Create app* button and select your workspace:
![Create Slack App](img/create-slack-app.png)
- Install the application in your selected workspaces:
![Install Slack App in Workspace](img/install-in-slack-workspace.png)
- Get the *Slack App OAuth Token* that Prowler needs to send the message:
![Slack App OAuth Token](img/slack-app-token.png)
2. Optionally, create a Slack Channel (you can use an existing one)
3. Integrate the created Slack App to your Slack channel:
- Click on the channel, go to the Integrations tab, and Add an App.
![Slack App Channel Integration](img/integrate-slack-app.png)
4. Set the following environment variables that Prowler will read:
- `SLACK_API_TOKEN`: the *Slack App OAuth Token* that was previously obtained.
- `SLACK_CHANNEL_ID`: the name of your Slack Channel where Prowler will send the message.
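For example, on Linux/macOS both variables can be exported before running Prowler (placeholder values):
```console
export SLACK_API_TOKEN="xoxb-your-token"
export SLACK_CHANNEL_ID="your-channel"
prowler aws --slack
```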

View File

@@ -14,6 +14,11 @@ Prowler can only display the failed findings:
```console
prowler <provider> -q/--quiet
```
## Disable Exit Code 3
Prowler does not trigger exit code 3 with failed checks:
```console
prowler <provider> -z/--ignore-exit-code-3
```
## Hide Prowler Banner
Prowler can run without showing its banner:
```console
@@ -51,15 +56,36 @@ prowler <provider> -e/--excluded-checks ec2 rds
```console
prowler <provider> -C/--checks-file <checks_list>.json
```
## Severities
Each check of Prowler has a severity, there are options related with it:
- List the available checks in the provider:
## Custom Checks
Prowler allows you to include your custom checks with the flag:
```console
prowler <provider> --list-severities
prowler <provider> -x/--checks-folder <custom_checks_folder>
```
- Execute specific severity(s):
> S3 URIs are also supported as folders for custom checks, e.g. `s3://bucket/prefix/checks_folder/`. Make sure that the credentials used have `s3:GetObject` permissions in the S3 path where the custom checks are located.
The custom checks folder must contain one subfolder per check; each subfolder must be named after the check and must contain the following (see the example layout below):
- An empty `__init__.py`: to make Python treat this check folder as a package.
- A `check_name.py` containing the check's logic.
- A `check_name.metadata.json` containing the check's metadata.
> The check name must start with the service name followed by an underscore (e.g., ec2_instance_public_ip).
To see more information about how to write checks see the [Developer Guide](../developer-guide/checks.md#create-a-new-check-for-a-provider).
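For example, a minimal layout for a hypothetical check named `myservice_example_check` would be:
```console
<custom_checks_folder>/
└── myservice_example_check/
    ├── __init__.py
    ├── myservice_example_check.py
    └── myservice_example_check.metadata.json
```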
> If you want to run ONLY your custom check(s), import them with `-x` (`--checks-folder`) and then run them with `-c` (`--checks`), e.g.:
```console
prowler aws -x s3://bucket/prowler/providers/aws/services/s3/s3_bucket_policy/ -c s3_bucket_policy
```
## Severities
Each of Prowler's checks has a severity, which can be:
- informational
- low
- medium
- high
- critical
To execute specific severity(s):
```console
prowler <provider> --severity critical high
```
@@ -91,16 +117,3 @@ prowler <provider> --list-categories
```console
prowler <provider> --categories
```
## AWS
### Scan specific AWS Region
Prowler can scan specific region(s) with:
```console
prowler <provider> -f/--filter-region eu-west-1 us-east-1
```
### Use AWS Profile
Prowler can use your custom AWS Profile with:
```console
prowler <provider> -p/--profile <profile_name>
```

View File

@@ -0,0 +1,187 @@
# Parallel Execution
The strategy used here will be to execute Prowler once per service. You can modify this approach as per your requirements.
This can help for really large accounts, but please be aware of AWS API rate limits:
1. **Service-Specific Limits**: Each AWS service has its own rate limits. For instance, Amazon EC2 might have different rate limits for launching instances versus making API calls to describe instances.
2. **API Rate Limits**: Most of the rate limits in AWS are applied at the API level. Each API call to an AWS service counts towards the rate limit for that service.
3. **Throttling Responses**: When you exceed the rate limit for a service, AWS responds with a throttling error. In AWS SDKs, these are typically represented as `ThrottlingException` or `RateLimitExceeded` errors.
For information on Prowler's retrier configuration please refer to this [page](https://docs.prowler.cloud/en/latest/tutorials/aws/boto3-configuration/).
> Note: You might need to increase the `--aws-retries-max-attempts` parameter from the default value of 3. The retrier follows an exponential backoff strategy.
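For example, to raise the retry budget (an illustrative value):
```console
prowler aws --aws-retries-max-attempts 10
```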
## Linux
Generate a list of services that Prowler supports, and populate this info into a file:
```bash
prowler aws --list-services | awk -F"- " '{print $2}' | sed '/^$/d' > services
```
Make any modifications for services you would like to skip scanning by modifying this file.
Then create a new Bash script file `parallel-prowler.sh` and add the following contents. Update the `profile` variable to the AWS CLI profile you want to run Prowler with.
```bash
#!/bin/bash
# Change these variables as needed
profile="your_profile"
account_id=$(aws sts get-caller-identity --profile "${profile}" --query 'Account' --output text)
echo "Executing in account: ${account_id}"
# Maximum number of concurrent processes
MAX_PROCESSES=5
# Loop through the services
while read service; do
echo "$(date '+%Y-%m-%d %H:%M:%S'): Starting job for service: ${service}"
# Run the command in the background
(prowler -p "$profile" -s "$service" -F "${account_id}-${service}" --ignore-unused-services --only-logs; echo "$(date '+%Y-%m-%d %H:%M:%S') - ${service} has completed") &
# Check if we have reached the maximum number of processes
while [ $(jobs -r | wc -l) -ge ${MAX_PROCESSES} ]; do
# Wait for a second before checking again
sleep 1
done
done < ./services
# Wait for all background processes to finish
wait
echo "All jobs completed"
```
Output will be stored in the `output/` folder that is in the same directory from which you executed the script.
## Windows
Generate a list of services that Prowler supports, and populate this info into a file:
```powershell
prowler aws --list-services | ForEach-Object {
# Capture lines that are likely service names
if ($_ -match '^\- \w+$') {
$_.Trim().Substring(2)
}
} | Where-Object {
# Filter out empty or null lines
$_ -ne $null -and $_ -ne ''
} | Set-Content -Path "services"
```
Make any modifications for services you would like to skip scanning by modifying this file.
Then create a new PowerShell script file `parallel-prowler.ps1` and add the following contents. Update the `$profile` variable to the AWS CLI profile you want to run prowler with.
Change any parameters you would like when calling Prowler in the `Start-Job -ScriptBlock` section. Note that you need to keep the `--only-logs` parameter; otherwise an encoding issue occurs when trying to render the progress bar and Prowler won't execute successfully.
```powershell
$profile = "your_profile"
$account_id = Invoke-Expression -Command "aws sts get-caller-identity --profile $profile --query 'Account' --output text"
Write-Host "Executing Prowler in $account_id"
# Maximum number of concurrent jobs
$MAX_PROCESSES = 5
# Read services from a file
$services = Get-Content -Path "services"
# Array to keep track of started jobs
$jobs = @()
foreach ($service in $services) {
# Start the command as a job
$job = Start-Job -ScriptBlock {
prowler -p ${using:profile} -s ${using:service} -F "${using:account_id}-${using:service}" --ignore-unused-services --only-logs
$endTimestamp = Get-Date -Format "yyyy-MM-dd HH:mm:ss"
Write-Output "${endTimestamp} - $using:service has completed"
}
$jobs += $job
Write-Host "$(Get-Date -Format 'yyyy-MM-dd HH:mm:ss') - Starting job for service: $service"
# Check if we have reached the maximum number of jobs
while (($jobs | Where-Object { $_.State -eq 'Running' }).Count -ge $MAX_PROCESSES) {
Start-Sleep -Seconds 1
# Check for any completed jobs and receive their output
$completedJobs = $jobs | Where-Object { $_.State -eq 'Completed' }
foreach ($completedJob in $completedJobs) {
Receive-Job -Job $completedJob -Keep | ForEach-Object { Write-Host $_ }
$jobs = $jobs | Where-Object { $_.Id -ne $completedJob.Id }
Remove-Job -Job $completedJob
}
}
}
# Check for any remaining completed jobs
$remainingCompletedJobs = $jobs | Where-Object { $_.State -eq 'Completed' }
foreach ($remainingJob in $remainingCompletedJobs) {
Receive-Job -Job $remainingJob -Keep | ForEach-Object { Write-Host $_ }
Remove-Job -Job $remainingJob
}
Write-Host "$(Get-Date -Format 'yyyy-MM-dd HH:mm:ss') - All jobs completed"
```
Output will be stored in `C:\Users\YOUR-USER\Documents\output\`
## Combining the output files
Guidance is provided for the CSV file format. From the output directory, execute either the following Bash or PowerShell script. The script will collect the output from the CSV files, only include the header from the first file, and then write the result as CombinedCSV.csv in the current working directory.
There is no logic implemented in terms of which CSV files it will combine. If you have additional CSV files from other actions, such as running a quick inventory, you will need to move those out of the current (or any nested) directory, or move the output you want to combine into its own folder and run the script from there.
```bash
#!/bin/bash
# Initialize a variable to indicate the first file
firstFile=true
# Find all CSV files and loop through them
find . -name "*.csv" -print0 | while IFS= read -r -d '' file; do
if [ "$firstFile" = true ]; then
# For the first file, keep the header
cat "$file" > CombinedCSV.csv
firstFile=false
else
# For subsequent files, skip the header
tail -n +2 "$file" >> CombinedCSV.csv
fi
done
```
```powershell
# Get all CSV files from current directory and its subdirectories
$csvFiles = Get-ChildItem -Recurse -Filter "*.csv"
# Initialize a variable to track if it's the first file
$firstFile = $true
# Loop through each CSV file
foreach ($file in $csvFiles) {
if ($firstFile) {
# For the first file, keep the header and change the flag
$combinedCsv = Import-Csv -Path $file.FullName
$firstFile = $false
} else {
# For subsequent files, Import-Csv already excludes the header row,
# so the records can be appended directly (piping through
# "Select-Object * -Skip 1" would drop the first finding of each file)
$tempCsv = Import-Csv -Path $file.FullName
$combinedCsv += $tempCsv
}
}
# Export the combined data to a new CSV file
$combinedCsv | Export-Csv -Path "CombinedCSV.csv" -NoTypeInformation
```
## TODO: Additional Improvements
Some services need to instantiate another service to perform a check. For instance, `cloudwatch` will instantiate Prowler's `iam` service to perform the `cloudwatch_cross_account_sharing_disabled` check. When the `iam` service is instantiated, it will run its `__init__` function and pull all the information required for that service. This provides an opportunity to improve the above script by grouping dependent services together, so that the `iam` service (or any other cross-service reference) isn't repeatedly instantiated. A complete mapping between these services still needs to be further investigated, but these are the cross-references that have been noted:
* inspector2 needs lambda and ec2
* cloudwatch needs iam
* dlm needs ec2

View File

@@ -6,12 +6,12 @@ Prowler has some checks that analyse pentesting risks (Secrets, Internet Exposed
Prowler uses the `detect-secrets` library to search for any secrets stored in plaintext within your environment.
The actual checks that have this funcionality are:
The actual checks that have this functionality are:
- autoscaling_find_secrets_ec2_launch_configuration
- awslambda_function_no_secrets_in_code
- awslambda_function_no_secrets_in_variables
- cloudformation_outputs_find_secrets
- cloudformation_stack_outputs_find_secrets
- ec2_instance_secrets_user_data
- ecs_task_definitions_no_environment_secrets
- ssm_document_secrets
@@ -25,7 +25,7 @@ prowler <provider> --categories secrets
Several checks analyse resources that are exposed to the Internet; these are:
- apigateway_endpoint_public
- apigateway_restapi_public
- appstream_fleet_default_internet_access_disabled
- awslambda_function_not_publicly_accessible
- ec2_ami_public
@@ -33,9 +33,8 @@ Several checks analyse resources that are exposed to the Internet, these are:
- ec2_instance_internet_facing_with_instance_profile
- ec2_instance_public_ip
- ec2_networkacl_allow_ingress_any_port
- ec2_securitygroup_allow_ingress_from_internet_to_any_port
- ec2_securitygroup_allow_wide_open_public_ipv4
- ec2_securitygroup_in_use_without_ingress_filtering
- ec2_securitygroup_allow_ingress_from_internet_to_any_port
- ecr_repositories_not_publicly_accessible
- eks_control_plane_endpoint_access_restricted
- eks_endpoints_not_publicly_accessible

View File

@@ -14,4 +14,7 @@ prowler <provider> -i
- Also, by default it creates a CSV and a JSON file with detailed information about the extracted resources.
![Quick Inventory Example](../img/quick-inventory.png)
![Quick Inventory Example](../img/quick-inventory.jpg)
## Caveats
The inventorying process is done with `resourcegroupstaggingapi` calls, which means that only resources that have or have had tags will appear (except for IAM and S3 resources, which are retrieved with Boto3 API calls).

View File

@@ -1,9 +1,9 @@
# Reporting
By default, Prowler will generate a CSV, JSON and a HTML report, however you could generate a JSON-ASFF (used by AWS Security Hub) report with `-M` or `--output-modes`:
By default, Prowler will generate a CSV, JSON, JSON-OCSF and a HTML report, however you could generate a JSON-ASFF (used by AWS Security Hub) report with `-M` or `--output-modes`:
```console
prowler <provider> -M csv json json-asff html
prowler <provider> -M csv json json-ocsf json-asff html
```
## Custom Output Flags
@@ -19,21 +19,13 @@ prowler <provider> -M csv json json-asff html -F <custom_report_name>
```console
prowler <provider> -M csv json json-asff html -o <custom_report_directory>
```
> Both flags can be used simultainously to provide a custom directory and filename.
> Both flags can be used simultaneously to provide a custom directory and filename.
```console
prowler <provider> -M csv json json-asff html -F <custom_report_name> -o <custom_report_directory>
prowler <provider> -M csv json json-asff html \
-F <custom_report_name> -o <custom_report_directory>
```
## Send report to AWS S3 Bucket
To save your report in an S3 bucket, use `-B`/`--output-bucket` to define a custom output bucket along with `-M` to define the output format that is going to be uploaded to S3:
```sh
prowler <provider> -M csv -B my-bucket/folder/
```
> In case you do not want to use the assumed role credentials but the initial credentials to put the reports into the S3 bucket, use `-D`/`--output-bucket-no-assume` instead of `-B`/`--output-bucket`.
> Make sure that the credentials used have `s3:PutObject` permissions in the S3 path where the reports are going to be uploaded.
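For example, uploading the CSV report with the initial credentials (placeholder bucket):
```sh
prowler aws -M csv -D my-bucket/folder/
```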
## Output timestamp format
By default, the timestamp format of the output files is ISO 8601. This can be changed with the flag `--unix-timestamp`, which generates the timestamp fields in pure Unix timestamp format.
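For example:
```console
prowler <provider> --unix-timestamp
```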
## Output Formats
@@ -41,18 +33,88 @@ Prowler supports natively the following output formats:
- CSV
- JSON
- JSON-OCSF
- JSON-ASFF
- HTML
Below is the structure of each report format supported by Prowler:
### HTML
![HTML Output](../img/output-html.png)
### CSV
| ASSESSMENT_START_TIME | FINDING_UNIQUE_ID | PROVIDER | PROFILE | ACCOUNT_ID | ACCOUNT_NAME | ACCOUNT_EMAIL | ACCOUNT_ARN | ACCOUNT_ORG | ACCOUNT_TAGS | REGION | CHECK_ID | CHECK_TITLE | CHECK_TYPE | STATUS | STATUS_EXTENDED | SERVICE_NAME | SUBSERVICE_NAME | SEVERITY | RESOURCE_ID | RESOURCE_ARN | RESOURCE_TYPE | RESOURCE_DETAILS | RESOURCE_TAGS | DESCRIPTION | RISK | RELATED_URL | REMEDIATION_RECOMMENDATION_TEXT | REMEDIATION_RECOMMENDATION_URL | REMEDIATION_RECOMMENDATION_CODE_NATIVEIAC | REMEDIATION_RECOMMENDATION_CODE_TERRAFORM | REMEDIATION_RECOMMENDATION_CODE_CLI | REMEDIATION_RECOMMENDATION_CODE_OTHER | CATEGORIES | DEPENDS_ON | RELATED_TO | NOTES |
| ------- | ----------- | ------ | -------- | ------------ | ----------- | ---------- | ---------- | --------------------- | -------------------------- | -------------- | ----------------- | ------------------------ | --------------- | ---------- | ----------------- | --------- | -------------- | ----------------- | ------------------ | --------------------- | -------------------- | ------------------- | ------------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------------- |
CSV format has a set of common columns for all the providers, and then provider specific columns.
The common columns are the following:
- ASSESSMENT_START_TIME
- FINDING_UNIQUE_ID
- PROVIDER
- CHECK_ID
- CHECK_TITLE
- CHECK_TYPE
- STATUS
- STATUS_EXTENDED
- SERVICE_NAME
- SUBSERVICE_NAME
- SEVERITY
- RESOURCE_TYPE
- RESOURCE_DETAILS
- RESOURCE_TAGS
- DESCRIPTION
- RISK
- RELATED_URL
- REMEDIATION_RECOMMENDATION_TEXT
- REMEDIATION_RECOMMENDATION_URL
- REMEDIATION_RECOMMENDATION_CODE_NATIVEIAC
- REMEDIATION_RECOMMENDATION_CODE_TERRAFORM
- REMEDIATION_RECOMMENDATION_CODE_CLI
- REMEDIATION_RECOMMENDATION_CODE_OTHER
- COMPLIANCE
- CATEGORIES
- DEPENDS_ON
- RELATED_TO
- NOTES
And then by the provider specific columns:
#### AWS
- PROFILE
- ACCOUNT_ID
- ACCOUNT_NAME
- ACCOUNT_EMAIL
- ACCOUNT_ARN
- ACCOUNT_ORG
- ACCOUNT_TAGS
- REGION
- RESOURCE_ID
- RESOURCE_ARN
#### AZURE
- TENANT_DOMAIN
- SUBSCRIPTION
- RESOURCE_ID
- RESOURCE_NAME
#### GCP
- PROJECT_ID
- LOCATION
- RESOURCE_ID
- RESOURCE_NAME
> Since Prowler v3 the CSV column delimiter is the semicolon (`;`)
### JSON
The following code is an example output of the JSON format:
```json
[{
"AssessmentStartTime": "2022-12-01T14:16:57.354413",
"FindingUniqueId": "",
@@ -71,6 +133,10 @@ Hereunder is the structure for each of the supported report formats by Prowler:
"Severity": "low",
"ResourceId": "rds-instance-id",
"ResourceArn": "",
"ResourceTags": {
"test": "test",
"enironment": "dev"
},
"ResourceType": "AwsRdsDbInstance",
"ResourceDetails": "",
"Description": "Ensure RDS instances have minor version upgrade enabled.",
@@ -89,8 +155,17 @@ Hereunder is the structure for each of the supported report formats by Prowler:
}
},
"Categories": [],
"Notes": ""
},{
"Notes": "",
"Compliance": {
"CIS-1.4": [
"1.20"
],
"CIS-1.5": [
"1.20"
]
}
},
{
"AssessmentStartTime": "2022-12-01T14:16:57.354413",
"FindingUniqueId": "",
"Provider": "aws",
@@ -109,7 +184,7 @@ Hereunder is the structure for each of the supported report formats by Prowler:
"ResourceId": "rds-instance-id",
"ResourceArn": "",
"ResourceType": "AwsRdsDbInstance",
"ResourceDetails": "",
"ResourceTags": {},
"Description": "Ensure RDS instances have minor version upgrade enabled.",
"Risk": "Auto Minor Version Upgrade is a feature that you can enable to have your database automatically upgraded when a new minor database engine version is available. Minor version upgrades often patch security vulnerabilities and fix bugs and therefore should be applied.",
"RelatedUrl": "https://aws.amazon.com/blogs/database/best-practices-for-upgrading-amazon-rds-to-major-and-minor-versions-of-postgresql/",
@@ -126,15 +201,278 @@ Hereunder is the structure for each of the supported report formats by Prowler:
}
},
"Categories": [],
"Notes": ""
"Notes": "",
"Compliance": {}
}]
```
> NOTE: Each finding is a `json` object within a list. This has changed in v3 since in v2 the format used was [ndjson](http://ndjson.org/).
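Since each report is a single JSON array, it can be post-processed with standard tools; for example, with `jq` (a sketch assuming an output file named `prowler-output.json` and the fields shown in the example above):
```console
jq '.[] | select(.Severity == "low") | .ResourceId' prowler-output.json
```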
### JSON-OCSF
Based on [Open Cybersecurity Schema Framework Security Finding v1.0.0-rc.3](https://schema.ocsf.io/1.0.0-rc.3/classes/security_finding?extensions=)
```json
[{
"finding": {
"title": "Check if ACM Certificates are about to expire in specific days or less",
"desc": "Check if ACM Certificates are about to expire in specific days or less",
"supporting_data": {
"Risk": "Expired certificates can impact service availability.",
"Notes": ""
},
"remediation": {
"kb_articles": [
"https://docs.aws.amazon.com/config/latest/developerguide/acm-certificate-expiration-check.html"
],
"desc": "Monitor certificate expiration and take automated action to renew; replace or remove. Having shorter TTL for any security artifact is a general recommendation; but requires additional automation in place. If not longer required delete certificate. Use AWS config using the managed rule: acm-certificate-expiration-check."
},
"types": [
"Data Protection"
],
"src_url": "https://docs.aws.amazon.com/config/latest/developerguide/acm-certificate-expiration-check.html",
"uid": "prowler-aws-acm_certificates_expiration_check-012345678912-eu-west-1-*.xxxxxxxxxxxxxx",
"related_events": []
},
"resources": [
{
"group": {
"name": "acm"
},
"region": "eu-west-1",
"name": "xxxxxxxxxxxxxx",
"uid": "arn:aws:acm:eu-west-1:012345678912:certificate/xxxxxxxxxxxxxx",
"labels": [
{
"Key": "project",
"Value": "prowler-pro"
},
{
"Key": "environment",
"Value": "dev"
},
{
"Key": "terraform",
"Value": "true"
},
{
"Key": "terraform_state",
"Value": "aws"
}
],
"type": "AwsCertificateManagerCertificate",
"details": ""
}
],
"status_detail": "ACM Certificate for xxxxxxxxxxxxxx expires in 111 days.",
"compliance": {
"status": "Success",
"requirements": [
"CISA: ['your-data-2']",
"SOC2: ['cc_6_7']",
"MITRE-ATTACK: ['T1040']",
"GDPR: ['article_32']",
"HIPAA: ['164_308_a_4_ii_a', '164_312_e_1']",
"AWS-Well-Architected-Framework-Security-Pillar: ['SEC09-BP01']",
"NIST-800-171-Revision-2: ['3_13_1', '3_13_2', '3_13_8', '3_13_11']",
"NIST-800-53-Revision-4: ['ac_4', 'ac_17_2', 'sc_12']",
"NIST-800-53-Revision-5: ['sc_7_12', 'sc_7_16']",
"NIST-CSF-1.1: ['ac_5', 'ds_2']",
"RBI-Cyber-Security-Framework: ['annex_i_1_3']",
"FFIEC: ['d3-pc-im-b-1']",
"FedRamp-Moderate-Revision-4: ['ac-4', 'ac-17-2', 'sc-12']",
"FedRAMP-Low-Revision-4: ['ac-17', 'sc-12']"
],
"status_detail": "ACM Certificate for xxxxxxxxxxxxxx expires in 111 days."
},
"message": "ACM Certificate for xxxxxxxxxxxxxx expires in 111 days.",
"severity_id": 4,
"severity": "High",
"cloud": {
"account": {
"name": "",
"uid": "012345678912"
},
"region": "eu-west-1",
"org": {
"uid": "",
"name": ""
},
"provider": "aws",
"project_uid": ""
},
"time": "2023-06-30 10:28:55.297615",
"metadata": {
"original_time": "2023-06-30T10:28:55.297615",
"profiles": [
"dev"
],
"product": {
"language": "en",
"name": "Prowler",
"version": "3.6.1",
"vendor_name": "Prowler/ProwlerPro",
"feature": {
"name": "acm_certificates_expiration_check",
"uid": "acm_certificates_expiration_check",
"version": "3.6.1"
}
},
"version": "1.0.0-rc.3"
},
"state_id": 0,
"state": "New",
"status_id": 1,
"status": "Success",
"type_uid": 200101,
"type_name": "Security Finding: Create",
"impact_id": 0,
"impact": "Unknown",
"confidence_id": 0,
"confidence": "Unknown",
"activity_id": 1,
"activity_name": "Create",
"category_uid": 2,
"category_name": "Findings",
"class_uid": 2001,
"class_name": "Security Finding"
},{
"finding": {
"title": "Check if ACM Certificates are about to expire in specific days or less",
"desc": "Check if ACM Certificates are about to expire in specific days or less",
"supporting_data": {
"Risk": "Expired certificates can impact service availability.",
"Notes": ""
},
"remediation": {
"kb_articles": [
"https://docs.aws.amazon.com/config/latest/developerguide/acm-certificate-expiration-check.html"
],
"desc": "Monitor certificate expiration and take automated action to renew; replace or remove. Having shorter TTL for any security artifact is a general recommendation; but requires additional automation in place. If not longer required delete certificate. Use AWS config using the managed rule: acm-certificate-expiration-check."
},
"types": [
"Data Protection"
],
"src_url": "https://docs.aws.amazon.com/config/latest/developerguide/acm-certificate-expiration-check.html",
"uid": "prowler-aws-acm_certificates_expiration_check-012345678912-eu-west-1-xxxxxxxxxxxxx",
"related_events": []
},
"resources": [
{
"group": {
"name": "acm"
},
"region": "eu-west-1",
"name": "xxxxxxxxxxxxx",
"uid": "arn:aws:acm:eu-west-1:012345678912:certificate/3ea965a0-368d-4d13-95eb-5042a994edc4",
"labels": [
{
"Key": "name",
"Value": "prowler-pro-saas-dev-acm-internal-wildcard"
},
{
"Key": "project",
"Value": "prowler-pro-saas"
},
{
"Key": "environment",
"Value": "dev"
},
{
"Key": "terraform",
"Value": "true"
},
{
"Key": "terraform_state",
"Value": "aws/saas/base"
}
],
"type": "AwsCertificateManagerCertificate",
"details": ""
}
],
"status_detail": "ACM Certificate for xxxxxxxxxxxxx expires in 119 days.",
"compliance": {
"status": "Success",
"requirements": [
"CISA: ['your-data-2']",
"SOC2: ['cc_6_7']",
"MITRE-ATTACK: ['T1040']",
"GDPR: ['article_32']",
"HIPAA: ['164_308_a_4_ii_a', '164_312_e_1']",
"AWS-Well-Architected-Framework-Security-Pillar: ['SEC09-BP01']",
"NIST-800-171-Revision-2: ['3_13_1', '3_13_2', '3_13_8', '3_13_11']",
"NIST-800-53-Revision-4: ['ac_4', 'ac_17_2', 'sc_12']",
"NIST-800-53-Revision-5: ['sc_7_12', 'sc_7_16']",
"NIST-CSF-1.1: ['ac_5', 'ds_2']",
"RBI-Cyber-Security-Framework: ['annex_i_1_3']",
"FFIEC: ['d3-pc-im-b-1']",
"FedRamp-Moderate-Revision-4: ['ac-4', 'ac-17-2', 'sc-12']",
"FedRAMP-Low-Revision-4: ['ac-17', 'sc-12']"
],
"status_detail": "ACM Certificate for xxxxxxxxxxxxx expires in 119 days."
},
"message": "ACM Certificate for xxxxxxxxxxxxx expires in 119 days.",
"severity_id": 4,
"severity": "High",
"cloud": {
"account": {
"name": "",
"uid": "012345678912"
},
"region": "eu-west-1",
"org": {
"uid": "",
"name": ""
},
"provider": "aws",
"project_uid": ""
},
"time": "2023-06-30 10:28:55.297615",
"metadata": {
"original_time": "2023-06-30T10:28:55.297615",
"profiles": [
"dev"
],
"product": {
"language": "en",
"name": "Prowler",
"version": "3.6.1",
"vendor_name": "Prowler/ProwlerPro",
"feature": {
"name": "acm_certificates_expiration_check",
"uid": "acm_certificates_expiration_check",
"version": "3.6.1"
}
},
"version": "1.0.0-rc.3"
},
"state_id": 0,
"state": "New",
"status_id": 1,
"status": "Success",
"type_uid": 200101,
"type_name": "Security Finding: Create",
"impact_id": 0,
"impact": "Unknown",
"confidence_id": 0,
"confidence": "Unknown",
"activity_id": 1,
"activity_name": "Create",
"category_uid": 2,
"category_name": "Findings",
"class_uid": 2001,
"class_name": "Security Finding"
}]
```
> NOTE: Each finding is a `json` object within a list.
### JSON-ASFF
The following code is an example output of the [JSON-ASFF](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-findings-format-syntax.html) format:
```json
[{
"SchemaVersion": "2018-10-08",
"Id": "prowler-rds_instance_minor_version_upgrade_enabled-ACCOUNT_ID-eu-west-1-b1ade474a",
@@ -166,7 +504,30 @@ Hereunder is the structure for each of the supported report formats by Prowler:
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": []
"RelatedRequirements": [
"CISA your-systems-2 booting-up-thing-to-do-first-3",
"CIS-1.5 2.3.2",
"AWS-Foundational-Security-Best-Practices rds",
"RBI-Cyber-Security-Framework annex_i_6",
"FFIEC d3-cc-pm-b-1 d3-cc-pm-b-3"
],
"AssociatedStandards": [
{
"StandardsId": "CISA"
},
{
"StandardsId": "CIS-1.5"
},
{
"StandardsId": "AWS-Foundational-Security-Best-Practices"
},
{
"StandardsId": "RBI-Cyber-Security-Framework"
},
{
"StandardsId": "FFIEC"
}
]
},
"Remediation": {
"Recommendation": {
@@ -205,7 +566,30 @@ Hereunder is the structure for each of the supported report formats by Prowler:
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": []
"RelatedRequirements": [
"CISA your-systems-2 booting-up-thing-to-do-first-3",
"CIS-1.5 2.3.2",
"AWS-Foundational-Security-Best-Practices rds",
"RBI-Cyber-Security-Framework annex_i_6",
"FFIEC d3-cc-pm-b-1 d3-cc-pm-b-3"
],
"AssociatedStandards": [
{
"StandardsId": "CISA"
},
{
"StandardsId": "CIS-1.5"
},
{
"StandardsId": "AWS-Foundational-Security-Best-Practices"
},
{
"StandardsId": "RBI-Cyber-Security-Framework"
},
{
"StandardsId": "FFIEC"
}
]
},
"Remediation": {
"Recommendation": {
@@ -216,4 +600,4 @@ Hereunder is the structure for each of the supported report formats by Prowler:
}]
```
> NOTE: Each finding is a `json` object.
> NOTE: Each finding is a `json` object within a list.

View File

@@ -9,7 +9,7 @@ theme:
language: en
logo: img/prowler-logo.png
name: material
favicon: img/ProwlerPro-icon.svg
favicon: img/prowler-icon.svg
features:
- navigation.tabs
- navigation.tabs.sticky
@@ -33,15 +33,24 @@ nav:
- Reporting: tutorials/reporting.md
- Compliance: tutorials/compliance.md
- Quick Inventory: tutorials/quick-inventory.md
- Slack Integration: tutorials/integrations.md
- Configuration File: tutorials/configuration_file.md
- Logging: tutorials/logging.md
- Allowlist: tutorials/allowlist.md
- Check Aliases: tutorials/check-aliases.md
- Custom Metadata: tutorials/custom-checks-metadata.md
- Ignore Unused Services: tutorials/ignore-unused-services.md
- Pentesting: tutorials/pentesting.md
- Parallel Execution: tutorials/parallel-execution.md
- Developer Guide: developer-guide/introduction.md
- AWS:
- Authentication: tutorials/aws/authentication.md
- Assume Role: tutorials/aws/role-assumption.md
- AWS Security Hub: tutorials/aws/securityhub.md
- AWS Organizations: tutorials/aws/organizations.md
- AWS Regions and Partitions: tutorials/aws/regions-and-partitions.md
- Scan Multiple AWS Accounts: tutorials/aws/multiaccount.md
- Send reports to AWS S3: tutorials/aws/s3.md
- AWS CloudShell: tutorials/aws/cloudshell.md
- Checks v2 to v3 Mapping: tutorials/aws/v2_to_v3_checks_mapping.md
- Tag-based Scan: tutorials/aws/tag-based-scan.md
@@ -49,7 +58,22 @@ nav:
- Boto3 Configuration: tutorials/aws/boto3-configuration.md
- Azure:
- Authentication: tutorials/azure/authentication.md
- Non default clouds: tutorials/azure/use-non-default-cloud.md
- Subscriptions: tutorials/azure/subscriptions.md
- Google Cloud:
- Authentication: tutorials/gcp/authentication.md
- Developer Guide:
- Introduction: developer-guide/introduction.md
- Audit Info: developer-guide/audit-info.md
- Services: developer-guide/services.md
- Checks: developer-guide/checks.md
- Documentation: developer-guide/documentation.md
- Compliance: developer-guide/security-compliance-framework.md
- Outputs: developer-guide/outputs.md
- Integrations: developer-guide/integrations.md
- Testing:
- Unit Tests: developer-guide/unit-testing.md
- Integration Tests: developer-guide/integration-testing.md
- Security: security.md
- Contact Us: contact.md
- Troubleshooting: troubleshooting.md
@@ -78,7 +102,7 @@ extra:
link: https://twitter.com/prowlercloud
# Copyright
copyright: Copyright &copy; 2022 Toni de la Fuente, Maintained by the Prowler Team at Verica, Inc.</a>
copyright: Copyright &copy; 2024 Toni de la Fuente, Maintained by the Prowler Team at ProwlerPro, Inc.</a>
markdown_extensions:
- abbr

View File

@@ -4,7 +4,7 @@ AWSTemplateFormatVersion: '2010-09-09'
# aws cloudformation create-stack \
# --capabilities CAPABILITY_IAM --capabilities CAPABILITY_NAMED_IAM \
# --template-body "file://create_role_to_assume_cfn.yaml" \
# --stack-name "ProwlerExecRole" \
# --stack-name "ProwlerScanRole" \
# --parameters "ParameterKey=AuthorisedARN,ParameterValue=arn:aws:iam::123456789012:root"
#
Description: |
@@ -13,7 +13,7 @@ Description: |
account to assume that role. The role name and the ARN of the trusted user can all be passed
to the CloudFormation stack as parameters. Then you can run Prowler to perform a security
assessment with a command like:
./prowler -A <THIS_ACCOUNT_ID> -R ProwlerExecRole
prowler --role ProwlerScanRole.ARN
Parameters:
AuthorisedARN:
Description: |
@@ -22,12 +22,12 @@ Parameters:
Type: String
ProwlerRoleName:
Description: |
Name of the IAM role that will have these policies attached. Default: ProwlerExecRole
Name of the IAM role that will have these policies attached. Default: ProwlerScanRole
Type: String
Default: 'ProwlerExecRole'
Default: 'ProwlerScanRole'
Resources:
ProwlerExecRole:
ProwlerScanRole:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument:
@@ -42,31 +42,50 @@ Resources:
# Bool:
# 'aws:MultiFactorAuthPresent': true
# This is 12h that is maximum allowed, Minimum is 3600 = 1h
# to take advantage of this use -T like in './prowler -A <ACCOUNT_ID_TO_ASSUME> -R ProwlerExecRole -T 43200 -M text,html'
# to take advantage of this use -T like in './prowler --role ProwlerScanRole.ARN -T 43200'
MaxSessionDuration: 43200
ManagedPolicyArns:
- 'arn:aws:iam::aws:policy/SecurityAudit'
- 'arn:aws:iam::aws:policy/job-function/ViewOnlyAccess'
RoleName: !Sub ${ProwlerRoleName}
Policies:
- PolicyName: ProwlerExecRoleAdditionalViewPrivileges
- PolicyName: ProwlerScanRoleAdditionalViewPrivileges
PolicyDocument:
Version : '2012-10-17'
Statement:
- Effect: Allow
Action:
- 'ds:ListAuthorizedApplications'
- 'account:Get*'
- 'appstream:Describe*'
- 'appstream:List*'
- 'codeartifact:List*'
- 'codebuild:BatchGet*'
- 'dlm:Get*'
- 'ds:Get*'
- 'ds:Describe*'
- 'ds:List*'
- 'ec2:GetEbsEncryptionByDefault'
- 'ecr:Describe*'
- 'elasticfilesystem:DescribeBackupPolicy'
- 'glue:GetConnections'
- 'glue:GetSecurityConfiguration'
- 'glue:GetSecurityConfiguration*'
- 'glue:SearchTables'
- 'lambda:GetFunction'
- 'lambda:GetFunction*'
- 'macie2:GetMacieSession'
- 's3:GetAccountPublicAccessBlock'
- 'shield:DescribeProtection'
- 'shield:GetSubscriptionState'
- 'securityhub:BatchImportFindings'
- 'securityhub:GetFindings'
- 'ssm:GetDocument'
- 'support:Describe*'
- 'tag:GetTagKeys'
Resource: '*'
- PolicyName: ProwlerScanRoleAdditionalViewPrivilegesApiGateway
PolicyDocument:
Version : '2012-10-17'
Statement:
- Effect: Allow
Action:
- 'apigateway:GET'
Resource: 'arn:aws:apigateway:*::/restapis/*'

View File

@@ -3,26 +3,38 @@
"Statement": [
{
"Action": [
"account:Get*",
"appstream:Describe*",
"appstream:List*",
"backup:List*",
"cloudtrail:GetInsightSelectors",
"codeartifact:List*",
"codebuild:BatchGet*",
"ds:Describe*",
"dlm:Get*",
"drs:Describe*",
"ds:Get*",
"ds:Describe*",
"ds:List*",
"ec2:GetEbsEncryptionByDefault",
"ecr:Describe*",
"ecr:GetRegistryScanningConfiguration",
"elasticfilesystem:DescribeBackupPolicy",
"glue:GetConnections",
"glue:GetSecurityConfiguration*",
"glue:SearchTables",
"lambda:GetFunction*",
"logs:FilterLogEvents",
"macie2:GetMacieSession",
"s3:GetAccountPublicAccessBlock",
"shield:DescribeProtection",
"shield:GetSubscriptionState",
"securityhub:BatchImportFindings",
"securityhub:GetFindings",
"ssm:GetDocument",
"ssm-incidents:List*",
"support:Describe*",
"tag:GetTagKeys"
"tag:GetTagKeys",
"wellarchitected:List*"
],
"Resource": "*",
"Effect": "Allow",
@@ -34,7 +46,8 @@
"apigateway:GET"
],
"Resource": [
"arn:aws:apigateway:*::/restapis/*"
"arn:aws:apigateway:*::/restapis/*",
"arn:aws:apigateway:*::/apis/*"
]
}
]

4178
poetry.lock generated

File diff suppressed because it is too large

View File

@@ -1,8 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
from colorama import Fore, Style
from prowler.lib.banner import print_banner
from prowler.lib.check.check import (
bulk_load_checks_metadata,
@@ -10,32 +13,46 @@ from prowler.lib.check.check import (
exclude_checks_to_run,
exclude_services_to_run,
execute_checks,
get_checks_from_input_arn,
get_regions_from_audit_resources,
list_categories,
list_checks_json,
list_services,
parse_checks_from_folder,
print_categories,
print_checks,
print_compliance_frameworks,
print_compliance_requirements,
print_services,
remove_custom_checks_module,
)
from prowler.lib.check.checks_loader import load_checks_to_execute
from prowler.lib.check.compliance import update_checks_metadata_with_compliance
from prowler.lib.check.custom_checks_metadata import (
parse_custom_checks_metadata_file,
update_checks_metadata,
)
from prowler.lib.cli.parser import ProwlerArgumentParser
from prowler.lib.logger import logger, set_logging_config
from prowler.lib.outputs.compliance import display_compliance_table
from prowler.lib.outputs.html import add_html_footer, fill_html_overview_statistics
from prowler.lib.outputs.json import close_json
from prowler.lib.outputs.outputs import extract_findings_statistics, send_to_s3_bucket
from prowler.lib.outputs.outputs import extract_findings_statistics
from prowler.lib.outputs.slack import send_slack_message
from prowler.lib.outputs.summary_table import display_summary_table
from prowler.providers.aws.lib.allowlist.allowlist import parse_allowlist_file
from prowler.providers.aws.lib.quick_inventory.quick_inventory import quick_inventory
from prowler.providers.aws.aws_provider import get_available_aws_service_regions
from prowler.providers.aws.lib.s3.s3 import send_to_s3_bucket
from prowler.providers.aws.lib.security_hub.security_hub import (
batch_send_to_security_hub,
prepare_security_hub_findings,
resolve_security_hub_previous_findings,
verify_security_hub_integration_enabled_per_region,
)
from prowler.providers.common.allowlist import set_provider_allowlist
from prowler.providers.common.audit_info import (
set_provider_audit_info,
set_provider_execution_parameters,
)
from prowler.providers.common.audit_info import set_provider_audit_info
from prowler.providers.common.outputs import set_provider_output_options
from prowler.providers.common.quick_inventory import run_provider_quick_inventory
def prowler():
@@ -51,8 +68,13 @@ def prowler():
services = args.services
categories = args.categories
checks_file = args.checks_file
checks_folder = args.checks_folder
severities = args.severity
compliance_framework = args.compliance
custom_checks_metadata_file = args.custom_checks_metadata_file
if not args.no_banner:
print_banner(args)
# We treat the compliance framework as another output format
if compliance_framework:
@@ -61,9 +83,6 @@ def prowler():
# Set Logger configuration
set_logging_config(args.log_level, args.log_file, args.only_logs)
if not args.no_banner:
print_banner(args)
if args.list_services:
print_services(list_services(provider))
sys.exit()
@@ -73,33 +92,36 @@ def prowler():
bulk_checks_metadata = bulk_load_checks_metadata(provider)
if args.list_categories:
print_categories(list_categories(provider, bulk_checks_metadata))
print_categories(list_categories(bulk_checks_metadata))
sys.exit()
bulk_compliance_frameworks = {}
# Load compliance frameworks
logger.debug("Loading compliance frameworks from .json files")
# Load the compliance framework if specified with --compliance
# If some compliance argument is specified we have to load it
if (
args.list_compliance
or args.list_compliance_requirements
or compliance_framework
):
bulk_compliance_frameworks = bulk_load_compliance_frameworks(provider)
# Complete checks metadata with the compliance framework specification
update_checks_metadata_with_compliance(
bulk_compliance_frameworks, bulk_checks_metadata
bulk_compliance_frameworks = bulk_load_compliance_frameworks(provider)
# Complete checks metadata with the compliance framework specification
bulk_checks_metadata = update_checks_metadata_with_compliance(
bulk_compliance_frameworks, bulk_checks_metadata
)
# Update checks metadata if the --custom-checks-metadata-file is present
custom_checks_metadata = None
if custom_checks_metadata_file:
custom_checks_metadata = parse_custom_checks_metadata_file(
provider, custom_checks_metadata_file
)
if args.list_compliance:
print_compliance_frameworks(bulk_compliance_frameworks)
sys.exit()
if args.list_compliance_requirements:
print_compliance_requirements(
bulk_compliance_frameworks, args.list_compliance_requirements
)
sys.exit()
bulk_checks_metadata = update_checks_metadata(
bulk_checks_metadata, custom_checks_metadata
)
if args.list_compliance:
print_compliance_frameworks(bulk_compliance_frameworks)
sys.exit()
if args.list_compliance_requirements:
print_compliance_requirements(
bulk_compliance_frameworks, args.list_compliance_requirements
)
sys.exit()
# Load checks to execute
checks_to_execute = load_checks_to_execute(
@@ -114,6 +136,23 @@ def prowler():
provider,
)
# if --list-checks-json, dump a json file and exit
if args.list_checks_json:
print(list_checks_json(provider, sorted(checks_to_execute)))
sys.exit()
# If -l/--list-checks passed as argument, print checks to execute and quit
if args.list_checks:
print_checks(provider, sorted(checks_to_execute), bulk_checks_metadata)
sys.exit()
# Set the audit info based on the selected provider
audit_info = set_provider_audit_info(provider, args.__dict__)
# Import custom checks from folder
if checks_folder:
parse_checks_from_folder(audit_info, checks_folder, provider)
# Exclude checks if -e/--excluded-checks
if excluded_checks:
checks_to_execute = exclude_checks_to_run(checks_to_execute, excluded_checks)
@@ -124,47 +163,37 @@ def prowler():
checks_to_execute, excluded_services, provider
)
# Once the audit_info is set and we have the eventual checks based on the resource identifier,
# it is time to check what Prowler's checks are going to be executed
if audit_info.audit_resources:
checks_from_resources = set_provider_execution_parameters(provider, audit_info)
checks_to_execute = checks_to_execute.intersection(checks_from_resources)
# Sort final check list
checks_to_execute = sorted(checks_to_execute)
# If -l/--list-checks passed as argument, print checks to execute and quit
if args.list_checks:
print_checks(provider, checks_to_execute, bulk_checks_metadata)
sys.exit()
# Parse Allowlist
allowlist_file = set_provider_allowlist(provider, audit_info, args)
# Set the audit info based on the selected provider
audit_info = set_provider_audit_info(provider, args.__dict__)
# Once the audit_info is set and we have the eventual checks from arn, it is time to exclude the others
if audit_info.audit_resources:
audit_info.audited_regions = get_regions_from_audit_resources(
audit_info.audit_resources
)
checks_to_execute = get_checks_from_input_arn(
audit_info.audit_resources, provider
)
# Parse content from Allowlist file and get it, if necessary, from S3
if provider == "aws" and args.allowlist_file:
allowlist_file = parse_allowlist_file(audit_info, args.allowlist_file)
else:
allowlist_file = None
# Setting output options based on the selected provider
# Set output options based on the selected provider
audit_output_options = set_provider_output_options(
provider, args, audit_info, allowlist_file, bulk_checks_metadata
)
# Quick Inventory for AWS
if provider == "aws" and args.quick_inventory:
quick_inventory(audit_info, args.output_directory)
# Run the quick inventory for the provider if available
if hasattr(args, "quick_inventory") and args.quick_inventory:
run_provider_quick_inventory(provider, audit_info, args)
sys.exit()
# Execute checks
findings = []
if len(checks_to_execute):
findings = execute_checks(
checks_to_execute, provider, audit_info, audit_output_options
checks_to_execute,
provider,
audit_info,
audit_output_options,
custom_checks_metadata,
)
else:
logger.error(
@@ -174,10 +203,25 @@ def prowler():
# Extract findings stats
stats = extract_findings_statistics(findings)
if args.slack:
if "SLACK_API_TOKEN" in os.environ and "SLACK_CHANNEL_ID" in os.environ:
_ = send_slack_message(
os.environ["SLACK_API_TOKEN"],
os.environ["SLACK_CHANNEL_ID"],
stats,
provider,
audit_info,
)
else:
logger.critical(
"Slack integration needs SLACK_API_TOKEN and SLACK_CHANNEL_ID environment variables (see more in https://docs.prowler.cloud/en/latest/tutorials/integrations/#slack)."
)
sys.exit(1)
if args.output_modes:
for mode in args.output_modes:
# Close json file if exists
if mode == "json" or mode == "json-asff":
if "json" in mode:
close_json(
audit_output_options.output_filename, args.output_directory, mode
)
@@ -206,9 +250,54 @@ def prowler():
bucket_session,
)
# Resolve previous fails of Security Hub
if provider == "aws" and args.security_hub and not args.skip_sh_update:
resolve_security_hub_previous_findings(args.output_directory, audit_info)
# AWS Security Hub Integration
if provider == "aws" and args.security_hub:
print(
f"{Style.BRIGHT}\nSending findings to AWS Security Hub, please wait...{Style.RESET_ALL}"
)
# Verify where AWS Security Hub is enabled
aws_security_enabled_regions = []
security_hub_regions = (
get_available_aws_service_regions("securityhub", audit_info)
if not audit_info.audited_regions
else audit_info.audited_regions
)
for region in security_hub_regions:
# Save the regions where AWS Security Hub is enabled
if verify_security_hub_integration_enabled_per_region(
audit_info.audited_partition,
region,
audit_info.audit_session,
audit_info.audited_account,
):
aws_security_enabled_regions.append(region)
# Prepare the findings to be sent to Security Hub
security_hub_findings_per_region = prepare_security_hub_findings(
findings, audit_info, audit_output_options, aws_security_enabled_regions
)
# Send the findings to Security Hub
findings_sent_to_security_hub = batch_send_to_security_hub(
security_hub_findings_per_region, audit_info.audit_session
)
print(
f"{Style.BRIGHT}{Fore.GREEN}\n{findings_sent_to_security_hub} findings sent to AWS Security Hub!{Style.RESET_ALL}"
)
# Resolve previous fails of Security Hub
if not args.skip_sh_update:
print(
f"{Style.BRIGHT}\nArchiving previous findings in AWS Security Hub, please wait...{Style.RESET_ALL}"
)
findings_archived_in_security_hub = resolve_security_hub_previous_findings(
security_hub_findings_per_region,
audit_info,
)
print(
f"{Style.BRIGHT}{Fore.GREEN}\n{findings_archived_in_security_hub} findings archived in AWS Security Hub!{Style.RESET_ALL}"
)
# Display summary table
if not args.only_logs:
@@ -230,6 +319,10 @@ def prowler():
audit_output_options.output_directory,
)
# If custom checks were passed, remove the modules
if checks_folder:
remove_custom_checks_module(checks_folder, provider)
# If there are failed findings exit code 3, except if -z is input
if not args.ignore_exit_code_3 and stats["total_fail"] > 0:
sys.exit(3)
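
For context on the exit-code behavior above: a minimal sketch (an assumed CI wrapper, not part of Prowler) of how a pipeline might distinguish "scan found failures" from "scan crashed" using the exit code 3 convention shown in the diff:

```
import subprocess
import sys

# Illustrative CI gate around the Prowler CLI: exit code 3 means the scan
# finished but produced failed findings (suppressible with
# -z/--ignore-exit-code-3), while other non-zero codes signal a crash.
result = subprocess.run(["prowler", "aws"])

if result.returncode == 0:
    print("Scan clean: no failed findings.")
elif result.returncode == 3:
    print("Scan completed with failed findings; review the report output.")
    sys.exit(1)  # fail the pipeline, but distinguish findings from crashes
else:
    print(f"Prowler did not complete (exit code {result.returncode}).")
    sys.exit(result.returncode)
```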

File diff suppressed because it is too large

View File

@@ -46,11 +46,11 @@
}
],
"Checks": [
"apigateway_logging_enabled",
"apigateway_client_certificate_enabled",
"apigateway_waf_acl_attached",
"apigatewayv2_authorizers_enabled",
"apigatewayv2_access_logging_enabled"
"apigateway_restapi_logging_enabled",
"apigateway_restapi_client_certificate_enabled",
"apigateway_restapi_waf_acl_attached",
"apigatewayv2_api_authorizers_enabled",
"apigatewayv2_api_access_logging_enabled"
]
},
{
@@ -362,14 +362,16 @@
}
],
"Checks": [
"iam_policy_no_administrative_privileges",
"iam_rotate_access_key_90_days",
"iam_no_root_access_key",
"iam_user_mfa_enabled_console_access",
"iam_root_hardware_mfa_enabled",
"iam_password_policy_minimum_length_14",
"iam_disable_90_days_credentials",
"iam_policy_no_administrative_privileges"
"iam_user_accesskey_unused",
"iam_user_console_access_unused",
"iam_aws_attached_policy_no_administrative_privileges",
"iam_customer_attached_policy_no_administrative_privileges",
"iam_inline_policy_no_administrative_privileges"
]
},
{

View File

@@ -0,0 +1,79 @@
{
"Framework": "AWS-Well-Architected-Framework-Reliability-Pillar",
"Version": "",
"Provider": "AWS",
"Description": "Best Practices for the AWS Well-Architected Framework Reliability Pillar encompasses the ability of a workload to perform its intended function correctly and consistently when its expected to. This includes the ability to operate and test the workload through its total lifecycle.",
"Requirements": [
{
"Id": "REL09-BP03",
"Description": "Configure backups to be taken automatically based on a periodic schedule informed by the Recovery Point Objective (RPO), or by changes in the dataset. Critical datasets with low data loss requirements need to be backed up automatically on a frequent basis, whereas less critical data where some loss is acceptable can be backed up less frequently.",
"Attributes": [
{
"Name": "REL09-BP03 Perform data backup automatically",
"WellArchitectedQuestionId": "backing-up-data",
"WellArchitectedPracticeId": "rel_backing_up_data_automated_backups_data",
"Section": "Failure management",
"SubSection": "Backup up data",
"LevelOfRisk": "High",
"AssessmentMethod": "Automated",
"Description": "Configure backups to be taken automatically based on a periodic schedule informed by the Recovery Point Objective (RPO), or by changes in the dataset. Critical datasets with low data loss requirements need to be backed up automatically on a frequent basis, whereas less critical data where some loss is acceptable can be backed up less frequently.",
"ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/reliability-pillar/rel_backing_up_data_automated_backups_data.html#implementation-guidance"
}
],
"Checks": [
"cloudformation_stacks_termination_protection_enabled",
"rds_instance_backup_enabled",
"rds_instance_deletion_protection",
"dynamodb_tables_pitr_enabled"
]
},
{
"Id": "REL06-BP01",
"Description": "Monitor components and services of AWS workload effectifely, using tools like Amazon CloudWatch and AWS Health Dashboard. Define relevant metrics, set thresholds, and analyze metrics and logs for early detection of issues.",
"Attributes": [
{
"Name": "REL06-BP01 Monitor all components for the workload (Generation)",
"WellArchitectedQuestionId": "monitor-aws-resources",
"WellArchitectedPracticeId": "rel_monitor_aws_resources_monitor_resources",
"Section": "Change management",
"SubSection": "Monitor workload resources",
"LevelOfRisk": "High",
"AssessmentMethod": "Automated",
"Description": "Monitor components and services of AWS workload effectifely, using tools like Amazon CloudWatch and AWS Health Dashboard. Define relevant metrics, set thresholds, and analyze metrics and logs for early detection of issues.",
"ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/reliability-pillar/rel_monitor_aws_resources_monitor_resources.html#implementation-guidance"
}
],
"Checks": [
"apigateway_restapi_logging_enabled",
"apigatewayv2_api_access_logging_enabled",
"awslambda_function_invoke_api_operations_cloudtrail_logging_enabled",
"cloudtrail_cloudwatch_logging_enabled",
"elb_logging_enabled",
"opensearch_service_domains_audit_logging_enabled",
"opensearch_service_domains_cloudwatch_logging_enabled",
"rds_instance_enhanced_monitoring_enabled",
"rds_instance_integration_cloudwatch_logs"
]
},
{
"Id": "REL10-BP01",
"Description": "Distribute workload data and resources across multiple Availability Zones or, where necessary, across AWS Regions. These locations can be as diverse as required.",
"Attributes": [
{
"Name": "REL10-BP01 Deploy the workload to multiple locations",
"WellArchitectedQuestionId": "fault-isolation",
"WellArchitectedPracticeId": "rel_fault_isolation_multiaz_region_system",
"Section": "Failure management",
"SubSection": "Use fault isolation to protect your workload",
"LevelOfRisk": "High",
"AssessmentMethod": "Automated",
"Description": "Distribute workload data and resources across multiple Availability Zones or, where necessary, across AWS Regions. These locations can be as diverse as required.",
"ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/reliability-pillar/use-fault-isolation-to-protect-your-workload.html#implementation-guidance."
}
],
"Checks": [
"rds_instance_multi_az"
]
}
]
}
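
The new framework file follows the same Framework/Requirements/Attributes/Checks shape as the other compliance JSONs in this diff. As an illustrative sketch (the filename below is an assumption, not necessarily the path used in the repository), the requirement-to-checks mapping can be read with the standard library:

```
import json

# Hypothetical filename; the structure matches the framework JSON shown above.
with open("aws_well_architected_framework_reliability_pillar_aws.json") as f:
    framework = json.load(f)

# Map each requirement (e.g. REL09-BP03) to the Prowler checks that cover it.
checks_by_requirement = {
    requirement["Id"]: requirement["Checks"]
    for requirement in framework["Requirements"]
}

for requirement_id, checks in sorted(checks_by_requirement.items()):
    print(f"{requirement_id}: {', '.join(checks)}")
```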

File diff suppressed because it is too large

View File

@@ -15,11 +15,11 @@
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization.\n\nAn AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy or indicative of likely security compromise is observed by the AWS Abuse team. Contact details should not be for a single individual, as circumstances may arise where that individual is unavailable. Email contact details should point to a mail alias which forwards email to multiple individuals within the organization; where feasible, phone contact details should point to a PABX hunt group or other call-forwarding system.",
"Description": "Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization. An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy or indicative of likely security compromise is observed by the AWS Abuse team. Contact details should not be for a single individual, as circumstances may arise where that individual is unavailable. Email contact details should point to a mail alias which forwards email to multiple individuals within the organization; where feasible, phone contact details should point to a PABX hunt group or other call-forwarding system.",
"RationaleStatement": "If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior needs urgent mitigation, proactive measures may be taken, including throttling of traffic between the account exhibiting suspicious behavior and the AWS API endpoints and the Internet. This will result in impaired service to and from the account in question, so it is in both the customers' and AWS' best interests that prompt contact can be established. This is best achieved by setting AWS account contact details to point to resources which have multiple individuals as recipients, such as email aliases and PABX hunt groups.",
"ImpactStatement": "",
"RemediationProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:\\*Billing ).\n\n1. Sign in to the AWS Management Console and open the `Billing and Cost Management` console at https://console.aws.amazon.com/billing/home#/.\n2. On the navigation bar, choose your account name, and then choose `My Account`.\n3. On the `Account Settings` page, next to `Account Settings`, choose `Edit`.\n4. Next to the field that you need to update, choose `Edit`.\n5. After you have entered your changes, choose `Save changes`.\n6. After you have made your changes, choose `Done`.\n7. To edit your contact information, under `Contact Information`, choose `Edit`.\n8. For the fields that you want to change, type your updated information, and then choose `Update`.",
"AuditProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:\\*Billing )\n\n1. Sign in to the AWS Management Console and open the `Billing and Cost Management` console at https://console.aws.amazon.com/billing/home#/.\n2. On the navigation bar, choose your account name, and then choose `My Account`.\n3. On the `Account Settings` page, review and verify the current details.\n4. Under `Contact Information`, review and verify the current details.",
"RemediationProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:\\*Billing ). 1. Sign in to the AWS Management Console and open the `Billing and Cost Management` console at https://console.aws.amazon.com/billing/home#/. 2. On the navigation bar, choose your account name, and then choose `My Account`. 3. On the `Account Settings` page, next to `Account Settings`, choose `Edit`. 4. Next to the field that you need to update, choose `Edit`. 5. After you have entered your changes, choose `Save changes`. 6. After you have made your changes, choose `Done`. 7. To edit your contact information, under `Contact Information`, choose `Edit`. 8. For the fields that you want to change, type your updated information, and then choose `Update`.",
"AuditProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:\\*Billing ) 1. Sign in to the AWS Management Console and open the `Billing and Cost Management` console at https://console.aws.amazon.com/billing/home#/. 2. On the navigation bar, choose your account name, and then choose `My Account`. 3. On the `Account Settings` page, review and verify the current details. 4. Under `Contact Information`, review and verify the current details.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/manage-account-payment.html#contact-info"
}
@@ -39,9 +39,9 @@
"Description": "Multi-Factor Authentication (MFA) adds an extra layer of authentication assurance beyond traditional credentials. With MFA enabled, when a user signs in to the AWS Console, they will be prompted for their user name and password as well as for an authentication code from their physical or virtual MFA token. It is recommended that MFA be enabled for all accounts that have a console password.",
"RationaleStatement": "Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that displays a time-sensitive key and have knowledge of a credential.",
"ImpactStatement": "AWS will soon end support for SMS multi-factor authentication (MFA). New customers are not allowed to use this feature. We recommend that existing customers switch to one of the following alternative methods of MFA.",
"RemediationProcedure": "Perform the following to enable MFA:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the IAM console at 'https://console.aws.amazon.com/iam/'\n2. In the left pane, select `Users`.\n3. In the `User Name` list, choose the name of the intended MFA user.\n4. Choose the `Security Credentials` tab, and then choose `Manage MFA Device`.\n5. In the `Manage MFA Device wizard`, choose `Virtual MFA` device, and then choose `Continue`.\n\n IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes.\n\n6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see Virtual MFA Applications at https://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications). If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device).\n7. Determine whether the MFA app supports QR codes, and then do one of the following:\n\n - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code.\n - In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application.\n\n When you are finished, the virtual MFA device starts generating one-time passwords.\n\n8. In the `Manage MFA Device wizard`, in the `MFA Code 1 box`, type the `one-time password` that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second `one-time password` into the `MFA Code 2 box`.\n\n9. Click `Assign MFA`.",
"AuditProcedure": "Perform the following to determine if a MFA device is enabled for all IAM users having a console password:\n\n**From Console:**\n\n1. Open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\n2. In the left pane, select `Users` \n3. If the `MFA` or `Password age` columns are not visible in the table, click the gear icon at the upper right corner of the table and ensure a checkmark is next to both, then click `Close`.\n4. Ensure that for each user where the `Password age` column shows a password age, the `MFA` column shows `Virtual`, `U2F Security Key`, or `Hardware`.\n\n**From Command Line:**\n\n1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their password and MFA status:\n```\n aws iam generate-credential-report\n```\n```\n aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,8 \n```\n2. The output of this command will produce a table similar to the following:\n```\n user,password_enabled,mfa_active\n elise,false,false\n brandon,true,true\n rakesh,false,false\n helene,false,false\n paras,true,true\n anitha,false,false \n```\n3. For any column having `password_enabled` set to `true` , ensure `mfa_active` is also set to `true.`",
"AdditionalInformation": "**Forced IAM User Self-Service Remediation**\n\nAmazon has published a pattern that forces users to self-service setup MFA before they have access to their complete permissions set. Until they complete this step, they cannot access their full permissions. This pattern can be used on new AWS accounts. It can also be used on existing accounts - it is recommended users are given instructions and a grace period to accomplish MFA enrollment before active enforcement on existing AWS accounts.",
"RemediationProcedure": "Perform the following to enable MFA: **From Console:** 1. Sign in to the AWS Management Console and open the IAM console at 'https://console.aws.amazon.com/iam/' 2. In the left pane, select `Users`. 3. In the `User Name` list, choose the name of the intended MFA user. 4. Choose the `Security Credentials` tab, and then choose `Manage MFA Device`. 5. In the `Manage MFA Device wizard`, choose `Virtual MFA` device, and then choose `Continue`. IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes. 6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see Virtual MFA Applications at https://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications). If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device). 7. Determine whether the MFA app supports QR codes, and then do one of the following: - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code. - In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application. When you are finished, the virtual MFA device starts generating one-time passwords. 8. In the `Manage MFA Device wizard`, in the `MFA Code 1 box`, type the `one-time password` that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second `one-time password` into the `MFA Code 2 box`. 9. Click `Assign MFA`.",
"AuditProcedure": "Perform the following to determine if a MFA device is enabled for all IAM users having a console password: **From Console:** 1. Open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the left pane, select `Users` 3. If the `MFA` or `Password age` columns are not visible in the table, click the gear icon at the upper right corner of the table and ensure a checkmark is next to both, then click `Close`. 4. Ensure that for each user where the `Password age` column shows a password age, the `MFA` column shows `Virtual`, `U2F Security Key`, or `Hardware`. **From Command Line:** 1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their password and MFA status: ``` aws iam generate-credential-report ``` ``` aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,8 ``` 2. The output of this command will produce a table similar to the following: ``` user,password_enabled,mfa_active elise,false,false brandon,true,true rakesh,false,false helene,false,false paras,true,true anitha,false,false ``` 3. For any column having `password_enabled` set to `true` , ensure `mfa_active` is also set to `true.`",
"AdditionalInformation": "**Forced IAM User Self-Service Remediation** Amazon has published a pattern that forces users to self-service setup MFA before they have access to their complete permissions set. Until they complete this step, they cannot access their full permissions. This pattern can be used on new AWS accounts. It can also be used on existing accounts - it is recommended users are given instructions and a grace period to accomplish MFA enrollment before active enforcement on existing AWS accounts.",
"References": "https://tools.ietf.org/html/rfc6238:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#enable-mfa-for-privileged-users:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html:https://blogs.aws.amazon.com/security/post/Tx2SJJYE082KBUK/How-to-Delegate-Management-of-Multi-Factor-Authentication-to-AWS-IAM-Users"
}
]
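
The audit procedure above drives the IAM credential report through the AWS CLI. A hedged boto3 equivalent of the same MFA check; note that boto3 hands back the report content already base64-decoded, so no `base64 -d` step is needed:

```
import csv
import io
import time

import boto3

iam = boto3.client("iam")

# The credential report is generated asynchronously; poll until it is ready.
while iam.generate_credential_report()["State"] != "COMPLETE":
    time.sleep(2)

report = iam.get_credential_report()["Content"].decode("utf-8")
for row in csv.DictReader(io.StringIO(report)):
    # Flag console users (password_enabled == true) without an MFA device.
    if row["password_enabled"] == "true" and row["mfa_active"] == "false":
        print(f"{row['user']}: console password enabled but MFA inactive")
```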
@@ -57,11 +57,11 @@
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "AWS console defaults to no check boxes selected when creating a new IAM user. When cerating the IAM User credentials you have to determine what type of access they require. \n\nProgrammatic access: The IAM user might need to make API calls, use the AWS CLI, or use the Tools for Windows PowerShell. In that case, create an access key (access key ID and a secret access key) for that user. \n\nAWS Management Console access: If the user needs to access the AWS Management Console, create a password for the user.",
"RationaleStatement": "Requiring the additional steps be taken by the user for programmatic access after their profile has been created will give a stronger indication of intent that access keys are [a] necessary for their work and [b] once the access key is established on an account that the keys may be in use somewhere in the organization.\n\n**Note**: Even if it is known the user will need access keys, require them to create the keys themselves or put in a support ticket to have them created as a separate step from user creation.",
"Description": "AWS console defaults to no check boxes selected when creating a new IAM user. When cerating the IAM User credentials you have to determine what type of access they require. Programmatic access: The IAM user might need to make API calls, use the AWS CLI, or use the Tools for Windows PowerShell. In that case, create an access key (access key ID and a secret access key) for that user. AWS Management Console access: If the user needs to access the AWS Management Console, create a password for the user.",
"RationaleStatement": "Requiring the additional steps be taken by the user for programmatic access after their profile has been created will give a stronger indication of intent that access keys are [a] necessary for their work and [b] once the access key is established on an account that the keys may be in use somewhere in the organization. **Note**: Even if it is known the user will need access keys, require them to create the keys themselves or put in a support ticket to have them created as a separate step from user creation.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to delete access keys that do not pass the audit:\n\n**From Console:**\n\n1. Login to the AWS Management Console:\n2. Click `Services` \n3. Click `IAM` \n4. Click on `Users` \n5. Click on `Security Credentials` \n6. As an Administrator \n - Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used.\n7. As an IAM User\n - Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used.\n\n**From Command Line:**\n```\naws iam delete-access-key --access-key-id <access-key-id-listed> --user-name <users-name>\n```",
"AuditProcedure": "Perform the following to determine if access keys were created upon user creation and are being used and rotated as prescribed:\n\n**From Console:**\n\n1. Login to the AWS Management Console\n2. Click `Services` \n3. Click `IAM` \n4. Click on a User where column `Password age` and `Access key age` is not set to `None`\n5. Click on `Security credentials` Tab\n6. Compare the user 'Creation time` to the Access Key `Created` date.\n6. For any that match, the key was created during initial user setup.\n\n- Keys that were created at the same time as the user profile and do not have a last used date should be deleted. Refer to the remediation below.\n\n**From Command Line:**\n\n1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their access keys utilization:\n```\n aws iam generate-credential-report\n```\n```\n aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,9,11,14,16\n```\n2. The output of this command will produce a table similar to the following:\n```\nuser,password_enabled,access_key_1_active,access_key_1_last_used_date,access_key_2_active,access_key_2_last_used_date\n elise,false,true,2015-04-16T15:14:00+00:00,false,N/A\n brandon,true,true,N/A,false,N/A\n rakesh,false,false,N/A,false,N/A\n helene,false,true,2015-11-18T17:47:00+00:00,false,N/A\n paras,true,true,2016-08-28T12:04:00+00:00,true,2016-03-04T10:11:00+00:00\n anitha,true,true,2016-06-08T11:43:00+00:00,true,N/A \n```\n3. For any user having `password_enabled` set to `true` AND `access_key_last_used_date` set to `N/A` refer to the remediation below.",
"RemediationProcedure": "Perform the following to delete access keys that do not pass the audit: **From Console:** 1. Login to the AWS Management Console: 2. Click `Services` 3. Click `IAM` 4. Click on `Users` 5. Click on `Security Credentials` 6. As an Administrator - Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used. 7. As an IAM User - Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used. **From Command Line:** ``` aws iam delete-access-key --access-key-id <access-key-id-listed> --user-name <users-name> ```",
"AuditProcedure": "Perform the following to determine if access keys were created upon user creation and are being used and rotated as prescribed: **From Console:** 1. Login to the AWS Management Console 2. Click `Services` 3. Click `IAM` 4. Click on a User where column `Password age` and `Access key age` is not set to `None` 5. Click on `Security credentials` Tab 6. Compare the user 'Creation time` to the Access Key `Created` date. 6. For any that match, the key was created during initial user setup. - Keys that were created at the same time as the user profile and do not have a last used date should be deleted. Refer to the remediation below. **From Command Line:** 1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their access keys utilization: ``` aws iam generate-credential-report ``` ``` aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,9,11,14,16 ``` 2. The output of this command will produce a table similar to the following: ``` user,password_enabled,access_key_1_active,access_key_1_last_used_date,access_key_2_active,access_key_2_last_used_date elise,false,true,2015-04-16T15:14:00+00:00,false,N/A brandon,true,true,N/A,false,N/A rakesh,false,false,N/A,false,N/A helene,false,true,2015-11-18T17:47:00+00:00,false,N/A paras,true,true,2016-08-28T12:04:00+00:00,true,2016-03-04T10:11:00+00:00 anitha,true,true,2016-06-08T11:43:00+00:00,true,N/A ``` 3. For any user having `password_enabled` set to `true` AND `access_key_last_used_date` set to `N/A` refer to the remediation below.",
"AdditionalInformation": "Credential report does not appear to contain \"Key Creation Date\"",
"References": "https://docs.aws.amazon.com/cli/latest/reference/iam/delete-access-key.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html"
}
@@ -71,7 +71,8 @@
"Id": "1.12",
"Description": "Ensure credentials unused for 45 days or greater are disabled",
"Checks": [
"iam_disable_45_days_credentials"
"iam_user_accesskey_unused",
"iam_user_console_access_unused"
],
"Attributes": [
{
@@ -81,8 +82,8 @@
"Description": "AWS IAM users can access AWS resources using different types of credentials, such as passwords or access keys. It is recommended that all credentials that have been unused in 45 or greater days be deactivated or removed.",
"RationaleStatement": "Disabling or removing unnecessary credentials will reduce the window of opportunity for credentials associated with a compromised or abandoned account to be used.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\nPerform the following to manage Unused Password (IAM user console access)\n\n1. Login to the AWS Management Console:\n2. Click `Services` \n3. Click `IAM` \n4. Click on `Users` \n5. Click on `Security Credentials` \n6. Select user whose `Console last sign-in` is greater than 45 days\n7. Click `Security credentials`\n8. In section `Sign-in credentials`, `Console password` click `Manage` \n9. Under Console Access select `Disable`\n10.Click `Apply`\n\nPerform the following to deactivate Access Keys:\n\n1. Login to the AWS Management Console:\n2. Click `Services` \n3. Click `IAM` \n4. Click on `Users` \n5. Click on `Security Credentials` \n6. Select any access keys that are over 45 days old and that have been used and \n - Click on `Make Inactive`\n7. Select any access keys that are over 45 days old and that have not been used and \n - Click the X to `Delete`",
"AuditProcedure": "Perform the following to determine if unused credentials exist:\n\n**From Console:**\n\n1. Login to the AWS Management Console\n2. Click `Services` \n3. Click `IAM`\n4. Click on `Users`\n5. Click the `Settings` (gear) icon.\n6. Select `Console last sign-in`, `Access key last used`, and `Access Key Id`\n7. Click on `Close` \n8. Check and ensure that `Console last sign-in` is less than 45 days ago.\n\n**Note** - `Never` means the user has never logged in.\n\n9. Check and ensure that `Access key age` is less than 45 days and that `Access key last used` does not say `None`\n\nIf the user hasn't signed into the Console in the last 45 days or Access keys are over 45 days old refer to the remediation.\n\n**From Command Line:**\n\n**Download Credential Report:**\n\n1. Run the following commands:\n```\n aws iam generate-credential-report\n\n aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,5,6,9,10,11,14,15,16 | grep -v '^<root_account>'\n```\n\n**Ensure unused credentials do not exist:**\n\n2. For each user having `password_enabled` set to `TRUE` , ensure `password_last_used_date` is less than `45` days ago.\n\n- When `password_enabled` is set to `TRUE` and `password_last_used` is set to `No_Information` , ensure `password_last_changed` is less than 45 days ago.\n\n3. For each user having an `access_key_1_active` or `access_key_2_active` to `TRUE` , ensure the corresponding `access_key_n_last_used_date` is less than `45` days ago.\n\n- When a user having an `access_key_x_active` (where x is 1 or 2) to `TRUE` and corresponding access_key_x_last_used_date is set to `N/A', ensure `access_key_x_last_rotated` is less than 45 days ago.",
"RemediationProcedure": "**From Console:** Perform the following to manage Unused Password (IAM user console access) 1. Login to the AWS Management Console: 2. Click `Services` 3. Click `IAM` 4. Click on `Users` 5. Click on `Security Credentials` 6. Select user whose `Console last sign-in` is greater than 45 days 7. Click `Security credentials` 8. In section `Sign-in credentials`, `Console password` click `Manage` 9. Under Console Access select `Disable` 10.Click `Apply` Perform the following to deactivate Access Keys: 1. Login to the AWS Management Console: 2. Click `Services` 3. Click `IAM` 4. Click on `Users` 5. Click on `Security Credentials` 6. Select any access keys that are over 45 days old and that have been used and - Click on `Make Inactive` 7. Select any access keys that are over 45 days old and that have not been used and - Click the X to `Delete`",
"AuditProcedure": "Perform the following to determine if unused credentials exist: **From Console:** 1. Login to the AWS Management Console 2. Click `Services` 3. Click `IAM` 4. Click on `Users` 5. Click the `Settings` (gear) icon. 6. Select `Console last sign-in`, `Access key last used`, and `Access Key Id` 7. Click on `Close` 8. Check and ensure that `Console last sign-in` is less than 45 days ago. **Note** - `Never` means the user has never logged in. 9. Check and ensure that `Access key age` is less than 45 days and that `Access key last used` does not say `None` If the user hasn't signed into the Console in the last 45 days or Access keys are over 45 days old refer to the remediation. **From Command Line:** **Download Credential Report:** 1. Run the following commands: ``` aws iam generate-credential-report aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,5,6,9,10,11,14,15,16 | grep -v '^<root_account>' ``` **Ensure unused credentials do not exist:** 2. For each user having `password_enabled` set to `TRUE` , ensure `password_last_used_date` is less than `45` days ago. - When `password_enabled` is set to `TRUE` and `password_last_used` is set to `No_Information` , ensure `password_last_changed` is less than 45 days ago. 3. For each user having an `access_key_1_active` or `access_key_2_active` to `TRUE` , ensure the corresponding `access_key_n_last_used_date` is less than `45` days ago. - When a user having an `access_key_x_active` (where x is 1 or 2) to `TRUE` and corresponding access_key_x_last_used_date is set to `N/A', ensure `access_key_x_last_rotated` is less than 45 days ago.",
"AdditionalInformation": "<root_account> is excluded in the audit since the root account should not be used for day to day business and would likely be unused for more than 45 days.",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#remove-credentials:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_admin-change-user.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html"
}
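
For the access-key half of this 45-day audit, a minimal boto3 sketch (it leaves out the key-creation-date refinement for never-used keys described above, and console-password age would come from the credential report instead):

```
from datetime import datetime, timedelta, timezone

import boto3

iam = boto3.client("iam")
cutoff = datetime.now(timezone.utc) - timedelta(days=45)

for page in iam.get_paginator("list_users").paginate():
    for user in page["Users"]:
        name = user["UserName"]
        for key in iam.list_access_keys(UserName=name)["AccessKeyMetadata"]:
            if key["Status"] != "Active":
                continue
            last_used = iam.get_access_key_last_used(
                AccessKeyId=key["AccessKeyId"]
            )["AccessKeyLastUsed"].get("LastUsedDate")
            # Never-used keys report no LastUsedDate; treat them as stale too.
            if last_used is None or last_used < cutoff:
                print(f"{name}: key {key['AccessKeyId']} unused for 45+ days")
```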
@@ -102,8 +103,8 @@
"Description": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API (directly or using the AWS SDK)",
"RationaleStatement": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API. One of the best ways to protect your account is to not allow users to have multiple access keys.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\n1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`.\n2. In the left navigation panel, choose `Users`.\n3. Click on the IAM user name that you want to examine.\n4. On the IAM user configuration page, select `Security Credentials` tab.\n5. In `Access Keys` section, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working.\n6. In the same `Access Keys` section, identify your non-operational access keys (other than the chosen one) and deactivate it by clicking the `Make Inactive` link.\n7. If you receive the `Change Key Status` confirmation box, click `Deactivate` to switch off the selected key.\n8. Repeat steps no. 3 7 for each IAM user in your AWS account.\n\n**From Command Line:**\n\n1. Using the IAM user and access key information provided in the `Audit CLI`, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working.\n\n2. Run the `update-access-key` command below using the IAM user name and the non-operational access key IDs to deactivate the unnecessary key(s). Refer to the Audit section to identify the unnecessary access key ID for the selected IAM user\n\n**Note** - the command does not return any output:\n```\naws iam update-access-key --access-key-id <access-key-id> --status Inactive --user-name <user-name>\n```\n3. To confirm that the selected access key pair has been successfully `deactivated` run the `list-access-keys` audit command again for that IAM User:\n```\naws iam list-access-keys --user-name <user-name>\n```\n- The command output should expose the metadata for each access key associated with the IAM user. If the non-operational key pair(s) `Status` is set to `Inactive`, the key has been successfully deactivated and the IAM user access configuration adheres now to this recommendation.\n\n4. Repeat steps no. 1 3 for each IAM user in your AWS account.",
"AuditProcedure": "**From Console:**\n\n1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`.\n2. In the left navigation panel, choose `Users`.\n3. Click on the IAM user name that you want to examine.\n4. On the IAM user configuration page, select `Security Credentials` tab.\n5. Under `Access Keys` section, in the Status column, check the current status for each access key associated with the IAM user. If the selected IAM user has more than one access key activated then the users access configuration does not adhere to security best practices and the risk of accidental exposures increases.\n- Repeat steps no. 3 5 for each IAM user in your AWS account.\n\n**From Command Line:**\n\n1. Run `list-users` command to list all IAM users within your account:\n```\naws iam list-users --query \"Users[*].UserName\"\n```\nThe command output should return an array that contains all your IAM user names.\n\n2. Run `list-access-keys` command using the IAM user name list to return the current status of each access key associated with the selected IAM user:\n```\naws iam list-access-keys --user-name <user-name>\n```\nThe command output should expose the metadata `(\"Username\", \"AccessKeyId\", \"Status\", \"CreateDate\")` for each access key on that user account.\n\n3. Check the `Status` property value for each key returned to determine each keys current state. If the `Status` property value for more than one IAM access key is set to `Active`, the user access configuration does not adhere to this recommendation, refer to the remediation below.\n\n- Repeat steps no. 2 and 3 for each IAM user in your AWS account.",
"RemediationProcedure": "**From Console:** 1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`. 2. In the left navigation panel, choose `Users`. 3. Click on the IAM user name that you want to examine. 4. On the IAM user configuration page, select `Security Credentials` tab. 5. In `Access Keys` section, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working. 6. In the same `Access Keys` section, identify your non-operational access keys (other than the chosen one) and deactivate it by clicking the `Make Inactive` link. 7. If you receive the `Change Key Status` confirmation box, click `Deactivate` to switch off the selected key. 8. Repeat steps no. 3 7 for each IAM user in your AWS account. **From Command Line:** 1. Using the IAM user and access key information provided in the `Audit CLI`, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working. 2. Run the `update-access-key` command below using the IAM user name and the non-operational access key IDs to deactivate the unnecessary key(s). Refer to the Audit section to identify the unnecessary access key ID for the selected IAM user **Note** - the command does not return any output: ``` aws iam update-access-key --access-key-id <access-key-id> --status Inactive --user-name <user-name> ``` 3. To confirm that the selected access key pair has been successfully `deactivated` run the `list-access-keys` audit command again for that IAM User: ``` aws iam list-access-keys --user-name <user-name> ``` - The command output should expose the metadata for each access key associated with the IAM user. If the non-operational key pair(s) `Status` is set to `Inactive`, the key has been successfully deactivated and the IAM user access configuration adheres now to this recommendation. 4. Repeat steps no. 1 3 for each IAM user in your AWS account.",
"AuditProcedure": "**From Console:** 1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`. 2. In the left navigation panel, choose `Users`. 3. Click on the IAM user name that you want to examine. 4. On the IAM user configuration page, select `Security Credentials` tab. 5. Under `Access Keys` section, in the Status column, check the current status for each access key associated with the IAM user. If the selected IAM user has more than one access key activated then the users access configuration does not adhere to security best practices and the risk of accidental exposures increases. - Repeat steps no. 3 5 for each IAM user in your AWS account. **From Command Line:** 1. Run `list-users` command to list all IAM users within your account: ``` aws iam list-users --query \"Users[*].UserName\" ``` The command output should return an array that contains all your IAM user names. 2. Run `list-access-keys` command using the IAM user name list to return the current status of each access key associated with the selected IAM user: ``` aws iam list-access-keys --user-name <user-name> ``` The command output should expose the metadata `(\"Username\", \"AccessKeyId\", \"Status\", \"CreateDate\")` for each access key on that user account. 3. Check the `Status` property value for each key returned to determine each keys current state. If the `Status` property value for more than one IAM access key is set to `Active`, the user access configuration does not adhere to this recommendation, refer to the remediation below. - Repeat steps no. 2 and 3 for each IAM user in your AWS account.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html"
}
@@ -121,10 +122,10 @@
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Access keys consist of an access key ID and secret access key, which are used to sign programmatic requests that you make to AWS. AWS users need their own access keys to make programmatic calls to AWS from the AWS Command Line Interface (AWS CLI), Tools for Windows PowerShell, the AWS SDKs, or direct HTTP calls using the APIs for individual AWS services. It is recommended that all access keys be regularly rotated.",
"RationaleStatement": "Rotating access keys will reduce the window of opportunity for an access key that is associated with a compromised or terminated account to be used.\n\nAccess keys should be rotated to ensure that data cannot be accessed with an old key which might have been lost, cracked, or stolen.",
"RationaleStatement": "Rotating access keys will reduce the window of opportunity for an access key that is associated with a compromised or terminated account to be used. Access keys should be rotated to ensure that data cannot be accessed with an old key which might have been lost, cracked, or stolen.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to rotate access keys:\n\n**From Console:**\n\n1. Go to Management Console (https://console.aws.amazon.com/iam)\n2. Click on `Users`\n3. Click on `Security Credentials` \n4. As an Administrator \n - Click on `Make Inactive` for keys that have not been rotated in `90` Days\n5. As an IAM User\n - Click on `Make Inactive` or `Delete` for keys which have not been rotated or used in `90` Days\n6. Click on `Create Access Key` \n7. Update programmatic call with new Access Key credentials\n\n**From Command Line:**\n\n1. While the first access key is still active, create a second access key, which is active by default. Run the following command:\n```\naws iam create-access-key\n```\n\nAt this point, the user has two active access keys.\n\n2. Update all applications and tools to use the new access key.\n3. Determine whether the first access key is still in use by using this command:\n```\naws iam get-access-key-last-used\n```\n4. One approach is to wait several days and then check the old access key for any use before proceeding.\n\nEven if step Step 3 indicates no use of the old key, it is recommended that you do not immediately delete the first access key. Instead, change the state of the first access key to Inactive using this command:\n```\naws iam update-access-key\n```\n5. Use only the new access key to confirm that your applications are working. Any applications and tools that still use the original access key will stop working at this point because they no longer have access to AWS resources. If you find such an application or tool, you can switch its state back to Active to reenable the first access key. Then return to step Step 2 and update this application to use the new key.\n\n6. After you wait some period of time to ensure that all applications and tools have been updated, you can delete the first access key with this command:\n```\naws iam delete-access-key\n```",
"AuditProcedure": "Perform the following to determine if access keys are rotated as prescribed:\n\n**From Console:**\n\n1. Go to Management Console (https://console.aws.amazon.com/iam)\n2. Click on `Users`\n3. Click `setting` icon\n4. Select `Console last sign-in`\n5. Click `Close`\n6. Ensure that `Access key age` is less than 90 days ago. note) `None` in the `Access key age` means the user has not used the access key.\n\n**From Command Line:**\n\n```\naws iam generate-credential-report\naws iam get-credential-report --query 'Content' --output text | base64 -d\n```\nThe `access_key_1_last_rotated` field in this file notes The date and time, in ISO 8601 date-time format, when the user's access key was created or last changed. If the user does not have an active access key, the value in this field is N/A (not applicable).",
"RemediationProcedure": "Perform the following to rotate access keys: **From Console:** 1. Go to Management Console (https://console.aws.amazon.com/iam) 2. Click on `Users` 3. Click on `Security Credentials` 4. As an Administrator - Click on `Make Inactive` for keys that have not been rotated in `90` Days 5. As an IAM User - Click on `Make Inactive` or `Delete` for keys which have not been rotated or used in `90` Days 6. Click on `Create Access Key` 7. Update programmatic call with new Access Key credentials **From Command Line:** 1. While the first access key is still active, create a second access key, which is active by default. Run the following command: ``` aws iam create-access-key ``` At this point, the user has two active access keys. 2. Update all applications and tools to use the new access key. 3. Determine whether the first access key is still in use by using this command: ``` aws iam get-access-key-last-used ``` 4. One approach is to wait several days and then check the old access key for any use before proceeding. Even if step Step 3 indicates no use of the old key, it is recommended that you do not immediately delete the first access key. Instead, change the state of the first access key to Inactive using this command: ``` aws iam update-access-key ``` 5. Use only the new access key to confirm that your applications are working. Any applications and tools that still use the original access key will stop working at this point because they no longer have access to AWS resources. If you find such an application or tool, you can switch its state back to Active to reenable the first access key. Then return to step Step 2 and update this application to use the new key. 6. After you wait some period of time to ensure that all applications and tools have been updated, you can delete the first access key with this command: ``` aws iam delete-access-key ```",
"AuditProcedure": "Perform the following to determine if access keys are rotated as prescribed: **From Console:** 1. Go to Management Console (https://console.aws.amazon.com/iam) 2. Click on `Users` 3. Click `setting` icon 4. Select `Console last sign-in` 5. Click `Close` 6. Ensure that `Access key age` is less than 90 days ago. note) `None` in the `Access key age` means the user has not used the access key. **From Command Line:** ``` aws iam generate-credential-report aws iam get-credential-report --query 'Content' --output text | base64 -d ``` The `access_key_1_last_rotated` field in this file notes The date and time, in ISO 8601 date-time format, when the user's access key was created or last changed. If the user does not have an active access key, the value in this field is N/A (not applicable).",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#rotate-credentials:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html:https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html"
}
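
The command-line rotation flow above, condensed into an illustrative boto3 sketch; the user name is a placeholder, and in practice the deactivate and delete steps are separated by a verification period:

```
import boto3

iam = boto3.client("iam")
user_name = "example-user"  # hypothetical user, for illustration only

# 1. Create a second key while the first is still active.
keys = iam.list_access_keys(UserName=user_name)["AccessKeyMetadata"]
old_key_id = keys[0]["AccessKeyId"]  # assumes exactly one existing key
new_key = iam.create_access_key(UserName=user_name)["AccessKey"]
print("Distribute new key:", new_key["AccessKeyId"])

# 2./3. After updating applications to the new key, confirm the old one is idle.
last_used = iam.get_access_key_last_used(AccessKeyId=old_key_id)["AccessKeyLastUsed"]
print("Old key last used:", last_used.get("LastUsedDate", "never"))

# 4. Deactivate rather than delete first, so the change is reversible.
iam.update_access_key(UserName=user_name, AccessKeyId=old_key_id, Status="Inactive")

# 6. Only after everything is confirmed working, delete the old key.
iam.delete_access_key(UserName=user_name, AccessKeyId=old_key_id)
```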
@@ -141,11 +142,11 @@
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "IAM users are granted access to services, functions, and data through IAM policies. There are three ways to define policies for a user: 1) Edit the user policy directly, aka an inline, or user, policy; 2) attach a policy directly to a user; 3) add the user to an IAM group that has an attached policy. \n\nOnly the third implementation is recommended.",
"Description": "IAM users are granted access to services, functions, and data through IAM policies. There are three ways to define policies for a user: 1) Edit the user policy directly, aka an inline, or user, policy; 2) attach a policy directly to a user; 3) add the user to an IAM group that has an attached policy. Only the third implementation is recommended.",
"RationaleStatement": "Assigning IAM policy only through groups unifies permissions management to a single, flexible layer consistent with organizational functional roles. By unifying permissions management, the likelihood of excessive permissions is reduced.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to create an IAM group and assign a policy to it:\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\n2. In the navigation pane, click `Groups` and then click `Create New Group` .\n3. In the `Group Name` box, type the name of the group and then click `Next Step` .\n4. In the list of policies, select the check box for each policy that you want to apply to all members of the group. Then click `Next Step` .\n5. Click `Create Group` \n\nPerform the following to add a user to a given group:\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\n2. In the navigation pane, click `Groups` \n3. Select the group to add a user to\n4. Click `Add Users To Group` \n5. Select the users to be added to the group\n6. Click `Add Users` \n\nPerform the following to remove a direct association between a user and policy:\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\n2. In the left navigation pane, click on Users\n3. For each user:\n - Select the user\n - Click on the `Permissions` tab\n - Expand `Permissions policies` \n - Click `X` for each policy; then click Detach or Remove (depending on policy type)",
"AuditProcedure": "Perform the following to determine if an inline policy is set or a policy is directly attached to users:\n\n1. Run the following to get a list of IAM users:\n```\n aws iam list-users --query 'Users[*].UserName' --output text \n```\n2. For each user returned, run the following command to determine if any policies are attached to them:\n```\n aws iam list-attached-user-policies --user-name <iam_user>\n aws iam list-user-policies --user-name <iam_user> \n```\n3. If any policies are returned, the user has an inline policy or direct policy attachment.",
"RemediationProcedure": "Perform the following to create an IAM group and assign a policy to it: 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click `Groups` and then click `Create New Group` . 3. In the `Group Name` box, type the name of the group and then click `Next Step` . 4. In the list of policies, select the check box for each policy that you want to apply to all members of the group. Then click `Next Step` . 5. Click `Create Group` Perform the following to add a user to a given group: 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click `Groups` 3. Select the group to add a user to 4. Click `Add Users To Group` 5. Select the users to be added to the group 6. Click `Add Users` Perform the following to remove a direct association between a user and policy: 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the left navigation pane, click on Users 3. For each user: - Select the user - Click on the `Permissions` tab - Expand `Permissions policies` - Click `X` for each policy; then click Detach or Remove (depending on policy type)",
"AuditProcedure": "Perform the following to determine if an inline policy is set or a policy is directly attached to users: 1. Run the following to get a list of IAM users: ``` aws iam list-users --query 'Users[*].UserName' --output text ``` 2. For each user returned, run the following command to determine if any policies are attached to them: ``` aws iam list-attached-user-policies --user-name <iam_user> aws iam list-user-policies --user-name <iam_user> ``` 3. If any policies are returned, the user has an inline policy or direct policy attachment.",
"AdditionalInformation": "",
"References": "http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html"
}
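The audit procedure above loops over users by hand; it can also be run end to end as a script. A minimal sketch, assuming default AWS CLI credentials and a POSIX shell; it only flags users, it does not modify anything:

```bash
#!/usr/bin/env bash
# Flag IAM users that have managed policies attached or inline policies set.
for user in $(aws iam list-users --query 'Users[*].UserName' --output text); do
  attached=$(aws iam list-attached-user-policies --user-name "$user" \
    --query 'AttachedPolicies[*].PolicyName' --output text)
  inline=$(aws iam list-user-policies --user-name "$user" \
    --query 'PolicyNames[*]' --output text)
  if [ -n "$attached" ] || [ -n "$inline" ]; then
    echo "NON-COMPLIANT: $user (attached: $attached) (inline: $inline)"
  fi
done
```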
@@ -155,7 +156,8 @@
"Id": "1.16",
"Description": "Ensure IAM policies that allow full \"*:*\" administrative privileges are not attached",
"Checks": [
"iam_policy_no_administrative_privileges"
"iam_aws_attached_policy_no_administrative_privileges",
"iam_customer_attached_policy_no_administrative_privileges"
],
"Attributes": [
{
@@ -163,10 +165,10 @@
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "IAM policies are the means by which privileges are granted to users, groups, or roles. It is recommended and considered a standard security advice to grant _least privilege_ -that is, granting only the permissions required to perform a task. Determine what users need to do and then craft policies for them that let the users perform _only_ those tasks, instead of allowing full administrative privileges.",
"RationaleStatement": "It's more secure to start with a minimum set of permissions and grant additional permissions as necessary, rather than starting with permissions that are too lenient and then trying to tighten them later.\n\nProviding full administrative privileges instead of restricting to the minimum set of permissions that the user is required to do exposes the resources to potentially unwanted actions.\n\nIAM policies that have a statement with \"Effect\": \"Allow\" with \"Action\": \"\\*\" over \"Resource\": \"\\*\" should be removed.",
"RationaleStatement": "It's more secure to start with a minimum set of permissions and grant additional permissions as necessary, rather than starting with permissions that are too lenient and then trying to tighten them later. Providing full administrative privileges instead of restricting to the minimum set of permissions that the user is required to do exposes the resources to potentially unwanted actions. IAM policies that have a statement with \"Effect\": \"Allow\" with \"Action\": \"\\*\" over \"Resource\": \"\\*\" should be removed.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\nPerform the following to detach the policy that has full administrative privileges:\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\n2. In the navigation pane, click Policies and then search for the policy name found in the audit step.\n3. Select the policy that needs to be deleted.\n4. In the policy action menu, select first `Detach` \n5. Select all Users, Groups, Roles that have this policy attached\n6. Click `Detach Policy` \n7. In the policy action menu, select `Detach` \n\n**From Command Line:**\n\nPerform the following to detach the policy that has full administrative privileges as found in the audit step:\n\n1. Lists all IAM users, groups, and roles that the specified managed policy is attached to.\n\n```\n aws iam list-entities-for-policy --policy-arn <policy_arn>\n```\n2. Detach the policy from all IAM Users:\n```\n aws iam detach-user-policy --user-name <iam_user> --policy-arn <policy_arn>\n```\n3. Detach the policy from all IAM Groups:\n```\n aws iam detach-group-policy --group-name <iam_group> --policy-arn <policy_arn>\n```\n4. Detach the policy from all IAM Roles:\n```\n aws iam detach-role-policy --role-name <iam_role> --policy-arn <policy_arn>\n```",
"AuditProcedure": "Perform the following to determine what policies are created:\n\n**From Command Line:**\n\n1. Run the following to get a list of IAM policies:\n```\n aws iam list-policies --only-attached --output text\n```\n2. For each policy returned, run the following command to determine if any policies is allowing full administrative privileges on the account:\n```\n aws iam get-policy-version --policy-arn <policy_arn> --version-id <version>\n```\n3. In output ensure policy should not have any Statement block with `\"Effect\": \"Allow\"` and `Action` set to `\"*\"` and `Resource` set to `\"*\"`",
"RemediationProcedure": "**From Console:** Perform the following to detach the policy that has full administrative privileges: 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click Policies and then search for the policy name found in the audit step. 3. Select the policy that needs to be deleted. 4. In the policy action menu, select first `Detach` 5. Select all Users, Groups, Roles that have this policy attached 6. Click `Detach Policy` 7. In the policy action menu, select `Detach` **From Command Line:** Perform the following to detach the policy that has full administrative privileges as found in the audit step: 1. Lists all IAM users, groups, and roles that the specified managed policy is attached to. ``` aws iam list-entities-for-policy --policy-arn <policy_arn> ``` 2. Detach the policy from all IAM Users: ``` aws iam detach-user-policy --user-name <iam_user> --policy-arn <policy_arn> ``` 3. Detach the policy from all IAM Groups: ``` aws iam detach-group-policy --group-name <iam_group> --policy-arn <policy_arn> ``` 4. Detach the policy from all IAM Roles: ``` aws iam detach-role-policy --role-name <iam_role> --policy-arn <policy_arn> ```",
"AuditProcedure": "Perform the following to determine what policies are created: **From Command Line:** 1. Run the following to get a list of IAM policies: ``` aws iam list-policies --only-attached --output text ``` 2. For each policy returned, run the following command to determine if any policies is allowing full administrative privileges on the account: ``` aws iam get-policy-version --policy-arn <policy_arn> --version-id <version> ``` 3. In output ensure policy should not have any Statement block with `\"Effect\": \"Allow\"` and `Action` set to `\"*\"` and `Resource` set to `\"*\"`",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html:https://docs.aws.amazon.com/cli/latest/reference/iam/index.html#cli-aws-iam"
}
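The per-policy loop in the audit procedure above can be automated. A sketch, assuming the AWS CLI and `jq` are installed; it treats any `Allow` statement with a `*` action on a `*` resource as non-compliant:

```bash
#!/usr/bin/env bash
# Flag attached managed policies that allow Action "*" on Resource "*" (requires jq).
aws iam list-policies --only-attached \
  --query 'Policies[*].[Arn,DefaultVersionId]' --output text |
while read -r arn version; do
  if aws iam get-policy-version --policy-arn "$arn" --version-id "$version" \
       --query 'PolicyVersion.Document' --output json |
     jq -e '[.Statement] | flatten | .[]
            | select(.Effect == "Allow")
            | select(([.Action]   | flatten | index("*")) != null)
            | select(([.Resource] | flatten | index("*")) != null)' > /dev/null
  then
    echo "NON-COMPLIANT: $arn"
  fi
done
```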
@@ -186,8 +188,8 @@
"Description": "AWS provides a support center that can be used for incident notification and response, as well as technical support and customer services. Create an IAM Role to allow authorized users to manage incidents with AWS Support.",
"RationaleStatement": "By implementing least privilege for access control, an IAM Role will require an appropriate IAM Policy to allow Support Center Access in order to manage Incidents with AWS Support.",
"ImpactStatement": "All AWS Support plans include an unlimited number of account and billing support cases, with no long-term contracts. Support billing calculations are performed on a per-account basis for all plans. Enterprise Support plan customers have the option to include multiple enabled accounts in an aggregated monthly billing calculation. Monthly charges for the Business and Enterprise support plans are based on each month's AWS usage charges, subject to a monthly minimum, billed in advance.",
"RemediationProcedure": "**From Command Line:**\n\n1. Create an IAM role for managing incidents with AWS:\n - Create a trust relationship policy document that allows <iam_user> to manage AWS incidents, and save it locally as /tmp/TrustPolicy.json:\n```\n {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"AWS\": \"<iam_user>\"\n },\n \"Action\": \"sts:AssumeRole\"\n }\n ]\n }\n```\n2. Create the IAM role using the above trust policy:\n```\naws iam create-role --role-name <aws_support_iam_role> --assume-role-policy-document file:///tmp/TrustPolicy.json\n```\n3. Attach 'AWSSupportAccess' managed policy to the created IAM role:\n```\naws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess --role-name <aws_support_iam_role>\n```",
"AuditProcedure": "**From Command Line:**\n\n1. List IAM policies, filter for the 'AWSSupportAccess' managed policy, and note the \"Arn\" element value:\n```\naws iam list-policies --query \"Policies[?PolicyName == 'AWSSupportAccess']\"\n```\n2. Check if the 'AWSSupportAccess' policy is attached to any role:\n\n```\naws iam list-entities-for-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess\n```\n\n3. In Output, Ensure `PolicyRoles` does not return empty. 'Example: Example: PolicyRoles: [ ]'\n\nIf it returns empty refer to the remediation below.",
"RemediationProcedure": "**From Command Line:** 1. Create an IAM role for managing incidents with AWS: - Create a trust relationship policy document that allows <iam_user> to manage AWS incidents, and save it locally as /tmp/TrustPolicy.json: ``` { \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Principal\": { \"AWS\": \"<iam_user>\" }, \"Action\": \"sts:AssumeRole\" } ] } ``` 2. Create the IAM role using the above trust policy: ``` aws iam create-role --role-name <aws_support_iam_role> --assume-role-policy-document file:///tmp/TrustPolicy.json ``` 3. Attach 'AWSSupportAccess' managed policy to the created IAM role: ``` aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess --role-name <aws_support_iam_role> ```",
"AuditProcedure": "**From Command Line:** 1. List IAM policies, filter for the 'AWSSupportAccess' managed policy, and note the \"Arn\" element value: ``` aws iam list-policies --query \"Policies[?PolicyName == 'AWSSupportAccess']\" ``` 2. Check if the 'AWSSupportAccess' policy is attached to any role: ``` aws iam list-entities-for-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess ``` 3. In Output, Ensure `PolicyRoles` does not return empty. 'Example: Example: PolicyRoles: [ ]' If it returns empty refer to the remediation below.",
"AdditionalInformation": "AWSSupportAccess policy is a global AWS resource. It has same ARN as `arn:aws:iam::aws:policy/AWSSupportAccess` for every account.",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html:https://aws.amazon.com/premiumsupport/pricing/:https://docs.aws.amazon.com/cli/latest/reference/iam/list-policies.html:https://docs.aws.amazon.com/cli/latest/reference/iam/attach-role-policy.html:https://docs.aws.amazon.com/cli/latest/reference/iam/list-entities-for-policy.html"
}
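Putting the remediation commands above together, here is a sketch of the full flow; the account ID `111122223333`, the principal `security-admin`, and the role name `support-access` are illustrative placeholders, not values from the benchmark:

```bash
#!/usr/bin/env bash
# Create a support-access role and verify the AWSSupportAccess policy is attached.
cat > /tmp/TrustPolicy.json <<'EOF'
{
  "Version": "2012-10-17",
  "Statement": [
    { "Effect": "Allow",
      "Principal": { "AWS": "arn:aws:iam::111122223333:user/security-admin" },
      "Action": "sts:AssumeRole" }
  ]
}
EOF
aws iam create-role --role-name support-access \
  --assume-role-policy-document file:///tmp/TrustPolicy.json
aws iam attach-role-policy --role-name support-access \
  --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess
# Audit: PolicyRoles should now be non-empty.
aws iam list-entities-for-policy \
  --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess --query 'PolicyRoles'
```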
@@ -205,10 +207,10 @@
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "AWS access from within AWS instances can be done by either encoding AWS keys into AWS API calls or by assigning the instance to a role which has an appropriate permissions policy for the required access. \"AWS Access\" means accessing the APIs of AWS in order to access AWS resources or manage AWS account resources.",
"RationaleStatement": "AWS IAM roles reduce the risks associated with sharing and rotating credentials that can be used outside of AWS itself. If credentials are compromised, they can be used from outside of the AWS account they give access to. In contrast, in order to leverage role permissions an attacker would need to gain and maintain access to a specific instance to use the privileges associated with it.\n\nAdditionally, if credentials are encoded into compiled applications or other hard to change mechanisms, then they are even more unlikely to be properly rotated due to service disruption risks. As time goes on, credentials that cannot be rotated are more likely to be known by an increasing number of individuals who no longer work for the organization owning the credentials.",
"RationaleStatement": "AWS IAM roles reduce the risks associated with sharing and rotating credentials that can be used outside of AWS itself. If credentials are compromised, they can be used from outside of the AWS account they give access to. In contrast, in order to leverage role permissions an attacker would need to gain and maintain access to a specific instance to use the privileges associated with it. Additionally, if credentials are encoded into compiled applications or other hard to change mechanisms, then they are even more unlikely to be properly rotated due to service disruption risks. As time goes on, credentials that cannot be rotated are more likely to be known by an increasing number of individuals who no longer work for the organization owning the credentials.",
"ImpactStatement": "",
"RemediationProcedure": "IAM roles can only be associated at the launch of an instance. To remediate an instance to add it to a role you must create a new instance.\n\nIf the instance has no external dependencies on its current private ip or public addresses are elastic IPs:\n\n1. In AWS IAM create a new role. Assign a permissions policy if needed permissions are already known.\n2. In the AWS console launch a new instance with identical settings to the existing instance, and ensure that the newly created role is selected.\n3. Shutdown both the existing instance and the new instance.\n4. Detach disks from both instances.\n5. Attach the existing instance disks to the new instance.\n6. Boot the new instance and you should have the same machine, but with the associated role.\n\n**Note:** if your environment has dependencies on a dynamically assigned PRIVATE IP address you can create an AMI from the existing instance, destroy the old one and then when launching from the AMI, manually assign the previous private IP address.\n\n**Note: **if your environment has dependencies on a dynamically assigned PUBLIC IP address there is not a way ensure the address is retained and assign an instance role. Dependencies on dynamically assigned public IP addresses are a bad practice and, if possible, you may wish to rebuild the instance with a new elastic IP address and make the investment to remediate affected systems while assigning the system to a role.",
"AuditProcedure": "Where an instance is associated with a Role:\n\nFor instances that are known to perform AWS actions, ensure that they belong to an instance role that has the necessary permissions:\n\n1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings)\n2. Open the EC2 Dashboard and choose \"Instances\"\n3. Click the EC2 instance that performs AWS actions, in the lower pane details find \"IAM Role\"\n4. If the Role is blank, the instance is not assigned to one.\n5. If the Role is filled in, it does not mean the instance might not \\*also\\* have credentials encoded on it for some activities.\n\nWhere an Instance Contains Embedded Credentials:\n\n- On the instance that is known to perform AWS actions, audit all scripts and environment variables to ensure that none of them contain AWS credentials.\n\nWhere an Instance Application Contains Embedded Credentials:\n\n- Applications that run on an instance may also have credentials embedded. This is a bad practice, but even worse if the source code is stored in a public code repository such as github. When an application contains credentials can be determined by eliminating all other sources of credentials and if the application can still access AWS resources - it likely contains embedded credentials. Another method is to examine all source code and configuration files of the application.",
"RemediationProcedure": "IAM roles can only be associated at the launch of an instance. To remediate an instance to add it to a role you must create a new instance. If the instance has no external dependencies on its current private ip or public addresses are elastic IPs: 1. In AWS IAM create a new role. Assign a permissions policy if needed permissions are already known. 2. In the AWS console launch a new instance with identical settings to the existing instance, and ensure that the newly created role is selected. 3. Shutdown both the existing instance and the new instance. 4. Detach disks from both instances. 5. Attach the existing instance disks to the new instance. 6. Boot the new instance and you should have the same machine, but with the associated role. **Note:** if your environment has dependencies on a dynamically assigned PRIVATE IP address you can create an AMI from the existing instance, destroy the old one and then when launching from the AMI, manually assign the previous private IP address. **Note: **if your environment has dependencies on a dynamically assigned PUBLIC IP address there is not a way ensure the address is retained and assign an instance role. Dependencies on dynamically assigned public IP addresses are a bad practice and, if possible, you may wish to rebuild the instance with a new elastic IP address and make the investment to remediate affected systems while assigning the system to a role.",
"AuditProcedure": "Where an instance is associated with a Role: For instances that are known to perform AWS actions, ensure that they belong to an instance role that has the necessary permissions: 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Open the EC2 Dashboard and choose \"Instances\" 3. Click the EC2 instance that performs AWS actions, in the lower pane details find \"IAM Role\" 4. If the Role is blank, the instance is not assigned to one. 5. If the Role is filled in, it does not mean the instance might not \\*also\\* have credentials encoded on it for some activities. Where an Instance Contains Embedded Credentials: - On the instance that is known to perform AWS actions, audit all scripts and environment variables to ensure that none of them contain AWS credentials. Where an Instance Application Contains Embedded Credentials: - Applications that run on an instance may also have credentials embedded. This is a bad practice, but even worse if the source code is stored in a public code repository such as github. When an application contains credentials can be determined by eliminating all other sources of credentials and if the application can still access AWS resources - it likely contains embedded credentials. Another method is to examine all source code and configuration files of the application.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html"
}
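The benchmark text gives no CLI snippet for this check, but the console walk-through for attached roles can be approximated with `describe-instances`. A sketch, assuming default credentials; instances with an empty second column have no instance profile and need review:

```bash
# List each EC2 instance alongside its IAM instance profile ARN (blank = none).
aws ec2 describe-instances \
  --query 'Reservations[*].Instances[*].[InstanceId,IamInstanceProfile.Arn]' \
  --output text
```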
@@ -225,11 +227,11 @@
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "To enable HTTPS connections to your website or application in AWS, you need an SSL/TLS server certificate. You can use ACM or IAM to store and deploy server certificates. \nUse IAM as a certificate manager only when you must support HTTPS connections in a region that is not supported by ACM. IAM securely encrypts your private keys and stores the encrypted version in IAM SSL certificate storage. IAM supports deploying server certificates in all regions, but you must obtain your certificate from an external provider for use with AWS. You cannot upload an ACM certificate to IAM. Additionally, you cannot manage your certificates from the IAM Console.",
"Description": "To enable HTTPS connections to your website or application in AWS, you need an SSL/TLS server certificate. You can use ACM or IAM to store and deploy server certificates. Use IAM as a certificate manager only when you must support HTTPS connections in a region that is not supported by ACM. IAM securely encrypts your private keys and stores the encrypted version in IAM SSL certificate storage. IAM supports deploying server certificates in all regions, but you must obtain your certificate from an external provider for use with AWS. You cannot upload an ACM certificate to IAM. Additionally, you cannot manage your certificates from the IAM Console.",
"RationaleStatement": "Removing expired SSL/TLS certificates eliminates the risk that an invalid certificate will be deployed accidentally to a resource such as AWS Elastic Load Balancer (ELB), which can damage the credibility of the application/website behind the ELB. As a best practice, it is recommended to delete expired certificates.",
"ImpactStatement": "Deleting the certificate could have implications for your application if you are using an expired server certificate with Elastic Load Balancing, CloudFront, etc.\nOne has to make configurations at respective services to ensure there is no interruption in application functionality.",
"RemediationProcedure": "**From Console:**\n\nRemoving expired certificates via AWS Management Console is not currently supported. To delete SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI).\n\n**From Command Line:**\n\nTo delete Expired Certificate run following command by replacing <CERTIFICATE_NAME> with the name of the certificate to delete:\n\n```\naws iam delete-server-certificate --server-certificate-name <CERTIFICATE_NAME>\n```\n\nWhen the preceding command is successful, it does not return any output.",
"AuditProcedure": "**From Console:**\n\nGetting the certificates expiration information via AWS Management Console is not currently supported. \nTo request information about the SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI).\n\n**From Command Line:**\n\nRun list-server-certificates command to list all the IAM-stored server certificates:\n\n```\naws iam list-server-certificates\n```\n\nThe command output should return an array that contains all the SSL/TLS certificates currently stored in IAM and their metadata (name, ID, expiration date, etc):\n\n```\n{\n \"ServerCertificateMetadataList\": [\n {\n \"ServerCertificateId\": \"EHDGFRW7EJFYTE88D\",\n \"ServerCertificateName\": \"MyServerCertificate\",\n \"Expiration\": \"2018-07-10T23:59:59Z\",\n \"Path\": \"/\",\n \"Arn\": \"arn:aws:iam::012345678910:server-certificate/MySSLCertificate\",\n \"UploadDate\": \"2018-06-10T11:56:08Z\"\n }\n ]\n}\n```\n\nVerify the `ServerCertificateName` and `Expiration` parameter value (expiration date) for each SSL/TLS certificate returned by the list-server-certificates command and determine if there are any expired server certificates currently stored in AWS IAM. If so, use the AWS API to remove them.\n\nIf this command returns:\n```\n{ { \"ServerCertificateMetadataList\": [] }\n```\nThis means that there are no expired certificates, It DOES NOT mean that no certificates exist.",
"ImpactStatement": "Deleting the certificate could have implications for your application if you are using an expired server certificate with Elastic Load Balancing, CloudFront, etc. One has to make configurations at respective services to ensure there is no interruption in application functionality.",
"RemediationProcedure": "**From Console:** Removing expired certificates via AWS Management Console is not currently supported. To delete SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI). **From Command Line:** To delete Expired Certificate run following command by replacing <CERTIFICATE_NAME> with the name of the certificate to delete: ``` aws iam delete-server-certificate --server-certificate-name <CERTIFICATE_NAME> ``` When the preceding command is successful, it does not return any output.",
"AuditProcedure": "**From Console:** Getting the certificates expiration information via AWS Management Console is not currently supported. To request information about the SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI). **From Command Line:** Run list-server-certificates command to list all the IAM-stored server certificates: ``` aws iam list-server-certificates ``` The command output should return an array that contains all the SSL/TLS certificates currently stored in IAM and their metadata (name, ID, expiration date, etc): ``` { \"ServerCertificateMetadataList\": [ { \"ServerCertificateId\": \"EHDGFRW7EJFYTE88D\", \"ServerCertificateName\": \"MyServerCertificate\", \"Expiration\": \"2018-07-10T23:59:59Z\", \"Path\": \"/\", \"Arn\": \"arn:aws:iam::012345678910:server-certificate/MySSLCertificate\", \"UploadDate\": \"2018-06-10T11:56:08Z\" } ] } ``` Verify the `ServerCertificateName` and `Expiration` parameter value (expiration date) for each SSL/TLS certificate returned by the list-server-certificates command and determine if there are any expired server certificates currently stored in AWS IAM. If so, use the AWS API to remove them. If this command returns: ``` { { \"ServerCertificateMetadataList\": [] } ``` This means that there are no expired certificates, It DOES NOT mean that no certificates exist.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html:https://docs.aws.amazon.com/cli/latest/reference/iam/delete-server-certificate.html"
}
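The expiration check above can be scripted rather than eyeballed. A sketch, assuming `jq` is available and that the CLI returns `Expiration` as a UTC ISO 8601 timestamp, which makes a lexicographic string comparison safe:

```bash
#!/usr/bin/env bash
# Report IAM server certificates whose expiration date is already in the past.
now=$(date -u +%Y-%m-%dT%H:%M:%SZ)
aws iam list-server-certificates --output json |
jq -r --arg now "$now" \
  '.ServerCertificateMetadataList[]
   | select(.Expiration < $now)
   | "EXPIRED: \(.ServerCertificateName) (expired \(.Expiration))"'
# Remove each expired certificate once dependent services are reconfigured:
# aws iam delete-server-certificate --server-certificate-name <CERTIFICATE_NAME>
```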
@@ -249,8 +251,8 @@
"Description": "AWS provides customers with the option of specifying the contact information for account's security team. It is recommended that this information be provided.",
"RationaleStatement": "Specifying security-specific contact information will help ensure that security advisories sent by AWS reach the team in your organization that is best equipped to respond to them.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to establish security contact information:\n\n**From Console:**\n\n1. Click on your account name at the top right corner of the console.\n2. From the drop-down menu Click `My Account` \n3. Scroll down to the `Alternate Contacts` section\n4. Enter contact information in the `Security` section\n\n**Note:** Consider specifying an internal email distribution list to ensure emails are regularly monitored by more than one individual.",
"AuditProcedure": "Perform the following to determine if security contact information is present:\n\n**From Console:**\n\n1. Click on your account name at the top right corner of the console\n2. From the drop-down menu Click `My Account` \n3. Scroll down to the `Alternate Contacts` section\n4. Ensure contact information is specified in the `Security` section",
"RemediationProcedure": "Perform the following to establish security contact information: **From Console:** 1. Click on your account name at the top right corner of the console. 2. From the drop-down menu Click `My Account` 3. Scroll down to the `Alternate Contacts` section 4. Enter contact information in the `Security` section **Note:** Consider specifying an internal email distribution list to ensure emails are regularly monitored by more than one individual.",
"AuditProcedure": "Perform the following to determine if security contact information is present: **From Console:** 1. Click on your account name at the top right corner of the console 2. From the drop-down menu Click `My Account` 3. Scroll down to the `Alternate Contacts` section 4. Ensure contact information is specified in the `Security` section",
"AdditionalInformation": "",
"References": ""
}
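The benchmark describes this check as console-only, but the newer AWS Account Management API exposes the same data; a sketch, assuming a recent AWS CLI and the `account:GetAlternateContact` permission:

```bash
# Prints the configured security contact; errors if none has been registered.
aws account get-alternate-contact --alternate-contact-type SECURITY
```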
@@ -267,11 +269,11 @@
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable IAM Access analyzer for IAM policies about all resources in each region.\n\nIAM Access Analyzer is a technology introduced at AWS reinvent 2019. After the Analyzer is enabled in IAM, scan results are displayed on the console showing the accessible resources. Scans show resources that other accounts and federated users can access, such as KMS keys and IAM roles. So the results allow you to determine if an unintended user is allowed, making it easier for administrators to monitor least privileges access.\nAccess Analyzer analyzes only policies that are applied to resources in the same AWS Region.",
"Description": "Enable IAM Access analyzer for IAM policies about all resources in each region. IAM Access Analyzer is a technology introduced at AWS reinvent 2019. After the Analyzer is enabled in IAM, scan results are displayed on the console showing the accessible resources. Scans show resources that other accounts and federated users can access, such as KMS keys and IAM roles. So the results allow you to determine if an unintended user is allowed, making it easier for administrators to monitor least privileges access. Access Analyzer analyzes only policies that are applied to resources in the same AWS Region.",
"RationaleStatement": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data. Access Analyzer identifies resources that are shared with external principals by using logic-based reasoning to analyze the resource-based policies in your AWS environment. IAM Access Analyzer continuously monitors all policies for S3 bucket, IAM roles, KMS(Key Management Service) keys, AWS Lambda functions, and Amazon SQS(Simple Queue Service) queues.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\nPerform the following to enable IAM Access analyzer for IAM policies:\n\n1. Open the IAM console at `https://console.aws.amazon.com/iam/.`\n2. Choose `Access analyzer`.\n3. Choose `Create analyzer`.\n4. On the `Create analyzer` page, confirm that the `Region` displayed is the Region where you want to enable Access Analyzer.\n5. Enter a name for the analyzer. `Optional as it will generate a name for you automatically`.\n6. Add any tags that you want to apply to the analyzer. `Optional`. \n7. Choose `Create Analyzer`.\n8. Repeat these step for each active region\n\n**From Command Line:**\n\nRun the following command:\n```\naws accessanalyzer create-analyzer --analyzer-name <NAME> --type <ACCOUNT|ORGANIZATION>\n```\nRepeat this command above for each active region.\n\n**Note:** The IAM Access Analyzer is successfully configured only when the account you use has the necessary permissions.",
"AuditProcedure": "**From Console:**\n\n1. Open the IAM console at `https://console.aws.amazon.com/iam/`\n2. Choose `Access analyzer`\n3. Click 'Analyzers'\n4. Ensure that at least one analyzer is present\n5. Ensure that the `STATUS` is set to `Active`\n6. Repeat these step for each active region\n\n**From Command Line:**\n\n1. Run the following command:\n```\naws accessanalyzer list-analyzers | grep status\n```\n2. Ensure that at least one Analyzer the `status` is set to `ACTIVE`\n\n3. Repeat the steps above for each active region.\n\nIf an Access analyzer is not listed for each region or the status is not set to active refer to the remediation procedure below.",
"RemediationProcedure": "**From Console:** Perform the following to enable IAM Access analyzer for IAM policies: 1. Open the IAM console at `https://console.aws.amazon.com/iam/.` 2. Choose `Access analyzer`. 3. Choose `Create analyzer`. 4. On the `Create analyzer` page, confirm that the `Region` displayed is the Region where you want to enable Access Analyzer. 5. Enter a name for the analyzer. `Optional as it will generate a name for you automatically`. 6. Add any tags that you want to apply to the analyzer. `Optional`. 7. Choose `Create Analyzer`. 8. Repeat these step for each active region **From Command Line:** Run the following command: ``` aws accessanalyzer create-analyzer --analyzer-name <NAME> --type <ACCOUNT|ORGANIZATION> ``` Repeat this command above for each active region. **Note:** The IAM Access Analyzer is successfully configured only when the account you use has the necessary permissions.",
"AuditProcedure": "**From Console:** 1. Open the IAM console at `https://console.aws.amazon.com/iam/` 2. Choose `Access analyzer` 3. Click 'Analyzers' 4. Ensure that at least one analyzer is present 5. Ensure that the `STATUS` is set to `Active` 6. Repeat these step for each active region **From Command Line:** 1. Run the following command: ``` aws accessanalyzer list-analyzers | grep status ``` 2. Ensure that at least one Analyzer the `status` is set to `ACTIVE` 3. Repeat the steps above for each active region. If an Access analyzer is not listed for each region or the status is not set to active refer to the remediation procedure below.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-getting-started.html:https://docs.aws.amazon.com/cli/latest/reference/accessanalyzer/get-analyzer.html:https://docs.aws.amazon.com/cli/latest/reference/accessanalyzer/create-analyzer.html"
}
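The per-region repetition above is a natural fit for a loop. A sketch, assuming default credentials; the analyzer name `account-analyzer-$region` is an illustrative choice, and only the presence of one ACTIVE analyzer per region is checked:

```bash
#!/usr/bin/env bash
# Verify an ACTIVE analyzer exists in every enabled region; create one where missing.
for region in $(aws ec2 describe-regions --query 'Regions[*].RegionName' --output text); do
  status=$(aws accessanalyzer list-analyzers --region "$region" \
    --query 'analyzers[?status==`ACTIVE`] | [0].status' --output text 2>/dev/null)
  if [ "$status" != "ACTIVE" ]; then
    echo "$region: no active analyzer, creating one"
    aws accessanalyzer create-analyzer --region "$region" \
      --analyzer-name "account-analyzer-$region" --type ACCOUNT
  fi
done
```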
@@ -292,7 +294,7 @@
"RationaleStatement": "Centralizing IAM user management to a single identity store reduces complexity and thus the likelihood of access management errors.",
"ImpactStatement": "",
"RemediationProcedure": "The remediation procedure will vary based on the individual organization's implementation of identity federation and/or AWS Organizations with the acceptance criteria that no non-service IAM users, and non-root accounts, are present outside the account providing centralized IAM user management.",
"AuditProcedure": "For multi-account AWS environments with an external identity provider... \n\n1. Determine the master account for identity federation or IAM user management\n2. Login to that account through the AWS Management Console\n3. Click `Services` \n4. Click `IAM` \n5. Click `Identity providers`\n6. Verify the configuration\n\nThen..., determine all accounts that should not have local users present. For each account...\n\n1. Determine all accounts that should not have local users present\n2. Log into the AWS Management Console\n3. Switch role into each identified account\n4. Click `Services` \n5. Click `IAM` \n6. Click `Users`\n7. Confirm that no IAM users representing individuals are present\n\nFor multi-account AWS environments implementing AWS Organizations without an external identity provider... \n\n1. Determine all accounts that should not have local users present\n2. Log into the AWS Management Console\n3. Switch role into each identified account\n4. Click `Services` \n5. Click `IAM` \n6. Click `Users`\n7. Confirm that no IAM users representing individuals are present",
"AuditProcedure": "For multi-account AWS environments with an external identity provider... 1. Determine the master account for identity federation or IAM user management 2. Login to that account through the AWS Management Console 3. Click `Services` 4. Click `IAM` 5. Click `Identity providers` 6. Verify the configuration Then..., determine all accounts that should not have local users present. For each account... 1. Determine all accounts that should not have local users present 2. Log into the AWS Management Console 3. Switch role into each identified account 4. Click `Services` 5. Click `IAM` 6. Click `Users` 7. Confirm that no IAM users representing individuals are present For multi-account AWS environments implementing AWS Organizations without an external identity provider... 1. Determine all accounts that should not have local users present 2. Log into the AWS Management Console 3. Switch role into each identified account 4. Click `Services` 5. Click `IAM` 6. Click `Users` 7. Confirm that no IAM users representing individuals are present",
"AdditionalInformation": "",
"References": ""
}
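The console-based audit above can be partially automated per account. A sketch of the two checks, to be run in each member account (via an assumed role) and in the central identity account respectively:

```bash
#!/usr/bin/env bash
# In each account that should have no local users, expect no output here
# (apart from known service accounts):
aws iam list-users --query 'Users[*].UserName' --output text
# In the central identity account, expect the configured provider(s) here:
aws iam list-saml-providers --query 'SAMLProviderList[*].Arn' --output text
```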
@@ -312,8 +314,8 @@
"Description": "The AWS support portal allows account owners to establish security questions that can be used to authenticate individuals calling AWS customer service for support. It is recommended that security questions be established.",
"RationaleStatement": "When creating a new AWS account, a default super user is automatically created. This account is referred to as the 'root user' or 'root' account. It is recommended that the use of this account be limited and highly controlled. During events in which the 'root' password is no longer accessible or the MFA token associated with 'root' is lost/destroyed it is possible, through authentication using secret questions and associated answers, to recover 'root' user login access.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\n1. Login to the AWS Account as the 'root' user\n2. Click on the _<Root\\_Account\\_Name>_ from the top right of the console\n3. From the drop-down menu Click _My Account_\n4. Scroll down to the `Configure Security Questions` section\n5. Click on `Edit` \n6. Click on each `Question` \n - From the drop-down select an appropriate question\n - Click on the `Answer` section\n - Enter an appropriate answer \n - Follow process for all 3 questions\n7. Click `Update` when complete\n8. Save Questions and Answers and place in a secure physical location",
"AuditProcedure": "**From Console:**\n\n1. Login to the AWS account as the 'root' user\n2. On the top right you will see the _<Root\\_Account\\_Name>_\n3. Click on the _<Root\\_Account\\_Name>_\n4. From the drop-down menu Click `My Account` \n5. In the `Configure Security Challenge Questions` section on the `Personal Information` page, configure three security challenge questions.\n6. Click `Save questions` .",
"RemediationProcedure": "**From Console:** 1. Login to the AWS Account as the 'root' user 2. Click on the _<Root\\_Account\\_Name>_ from the top right of the console 3. From the drop-down menu Click _My Account_ 4. Scroll down to the `Configure Security Questions` section 5. Click on `Edit` 6. Click on each `Question` - From the drop-down select an appropriate question - Click on the `Answer` section - Enter an appropriate answer - Follow process for all 3 questions 7. Click `Update` when complete 8. Save Questions and Answers and place in a secure physical location",
"AuditProcedure": "**From Console:** 1. Login to the AWS account as the 'root' user 2. On the top right you will see the _<Root\\_Account\\_Name>_ 3. Click on the _<Root\\_Account\\_Name>_ 4. From the drop-down menu Click `My Account` 5. In the `Configure Security Challenge Questions` section on the `Personal Information` page, configure three security challenge questions. 6. Click `Save questions` .",
"AdditionalInformation": "",
"References": ""
}
@@ -333,8 +335,8 @@
"Description": "The 'root' user account is the most privileged user in an AWS account. AWS Access Keys provide programmatic access to a given AWS account. It is recommended that all access keys associated with the 'root' user account be removed.",
"RationaleStatement": "Removing access keys associated with the 'root' user account limits vectors by which the account can be compromised. Additionally, removing the 'root' access keys encourages the creation and use of role based accounts that are least privileged.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to delete or disable active 'root' user access keys\n\n**From Console:**\n\n1. Sign in to the AWS Management Console as 'root' and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\n2. Click on _<Root\\_Account\\_Name>_ at the top right and select `My Security Credentials` from the drop down list\n3. On the pop out screen Click on `Continue to Security Credentials` \n4. Click on `Access Keys` _(Access Key ID and Secret Access Key)_\n5. Under the `Status` column if there are any Keys which are Active\n - Click on `Make Inactive` - (Temporarily disable Key - may be needed again)\n - Click `Delete` - (Deleted keys cannot be recovered)",
"AuditProcedure": "Perform the following to determine if the 'root' user account has access keys:\n\n**From Console:**\n\n1. Login to the AWS Management Console\n2. Click `Services` \n3. Click `IAM` \n4. Click on `Credential Report` \n5. This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file\n6. For the `<root_account>` user, ensure the `access_key_1_active` and `access_key_2_active` fields are set to `FALSE` .\n\n**From Command Line:**\n\nRun the following command:\n```\n aws iam get-account-summary | grep \"AccountAccessKeysPresent\" \n```\nIf no 'root' access keys exist the output will show \"AccountAccessKeysPresent\": 0,. \n\nIf the output shows a \"1\" than 'root' keys exist, refer to the remediation procedure below.",
"RemediationProcedure": "Perform the following to delete or disable active 'root' user access keys **From Console:** 1. Sign in to the AWS Management Console as 'root' and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. Click on _<Root\\_Account\\_Name>_ at the top right and select `My Security Credentials` from the drop down list 3. On the pop out screen Click on `Continue to Security Credentials` 4. Click on `Access Keys` _(Access Key ID and Secret Access Key)_ 5. Under the `Status` column if there are any Keys which are Active - Click on `Make Inactive` - (Temporarily disable Key - may be needed again) - Click `Delete` - (Deleted keys cannot be recovered)",
"AuditProcedure": "Perform the following to determine if the 'root' user account has access keys: **From Console:** 1. Login to the AWS Management Console 2. Click `Services` 3. Click `IAM` 4. Click on `Credential Report` 5. This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file 6. For the `<root_account>` user, ensure the `access_key_1_active` and `access_key_2_active` fields are set to `FALSE` . **From Command Line:** Run the following command: ``` aws iam get-account-summary | grep \"AccountAccessKeysPresent\" ``` If no 'root' access keys exist the output will show \"AccountAccessKeysPresent\": 0,. If the output shows a \"1\" than 'root' keys exist, refer to the remediation procedure below.",
"AdditionalInformation": "IAM User account \"root\" for us-gov cloud regions is not enabled by default. However, on request to AWS support enables 'root' access only through access-keys (CLI, API methods) for us-gov cloud region.",
"References": "http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html:http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html:http://docs.aws.amazon.com/IAM/latest/APIReference/API_GetAccountSummary.html:https://aws.amazon.com/blogs/security/an-easier-way-to-determine-the-presence-of-aws-account-access-keys/"
}
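The grep-based check above can be made exact with a JMESPath query; a minimal sketch:

```bash
# 0 means no root access keys exist; 1 means they do and should be removed.
aws iam get-account-summary \
  --query 'SummaryMap.AccountAccessKeysPresent' --output text
```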
@@ -351,11 +353,11 @@
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The 'root' user account is the most privileged user in an AWS account. Multi-factor Authentication (MFA) adds an extra layer of protection on top of a username and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their username and password as well as for an authentication code from their AWS MFA device.\n\n**Note:** When virtual MFA is used for 'root' accounts, it is recommended that the device used is NOT a personal device, but rather a dedicated mobile device (tablet or phone) that is managed to be kept charged and secured independent of any individual personal devices. (\"non-personal virtual MFA\") This lessens the risks of losing access to the MFA due to device loss, device trade-in or if the individual owning the device is no longer employed at the company.",
"Description": "The 'root' user account is the most privileged user in an AWS account. Multi-factor Authentication (MFA) adds an extra layer of protection on top of a username and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their username and password as well as for an authentication code from their AWS MFA device. **Note:** When virtual MFA is used for 'root' accounts, it is recommended that the device used is NOT a personal device, but rather a dedicated mobile device (tablet or phone) that is managed to be kept charged and secured independent of any individual personal devices. (\"non-personal virtual MFA\") This lessens the risks of losing access to the MFA due to device loss, device trade-in or if the individual owning the device is no longer employed at the company.",
"RationaleStatement": "Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that emits a time-sensitive key and have knowledge of a credential.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to establish MFA for the 'root' user account:\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\n\n Note: to manage MFA devices for the 'root' AWS account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials.\n\n2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account.\n3. Choose `Activate MFA` \n4. In the wizard, choose `A virtual MFA` device and then choose `Next Step` .\n5. IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes.\n6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see [Virtual MFA Applications](http://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications).) If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device).\n7. Determine whether the MFA app supports QR codes, and then do one of the following:\n\n - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code.\n - In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application.\n\nWhen you are finished, the virtual MFA device starts generating one-time passwords.\n\nIn the Manage MFA Device wizard, in the Authentication Code 1 box, type the one-time password that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second one-time password into the Authentication Code 2 box. Choose Assign Virtual MFA.",
"AuditProcedure": "Perform the following to determine if the 'root' user account has MFA setup:\n\n**From Console:**\n\n1. Login to the AWS Management Console\n2. Click `Services` \n3. Click `IAM` \n4. Click on `Credential Report` \n5. This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file\n6. For the `<root_account>` user, ensure the `mfa_active` field is set to `TRUE` .\n\n**From Command Line:**\n\n1. Run the following command:\n```\n aws iam get-account-summary | grep \"AccountMFAEnabled\"\n```\n2. Ensure the AccountMFAEnabled property is set to 1",
"RemediationProcedure": "Perform the following to establish MFA for the 'root' user account: 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). Note: to manage MFA devices for the 'root' AWS account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials. 2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account. 3. Choose `Activate MFA` 4. In the wizard, choose `A virtual MFA` device and then choose `Next Step` . 5. IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes. 6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see [Virtual MFA Applications](http://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications).) If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device). 7. Determine whether the MFA app supports QR codes, and then do one of the following: - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code. - In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application. When you are finished, the virtual MFA device starts generating one-time passwords. In the Manage MFA Device wizard, in the Authentication Code 1 box, type the one-time password that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second one-time password into the Authentication Code 2 box. Choose Assign Virtual MFA.",
"AuditProcedure": "Perform the following to determine if the 'root' user account has MFA setup: **From Console:** 1. Login to the AWS Management Console 2. Click `Services` 3. Click `IAM` 4. Click on `Credential Report` 5. This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file 6. For the `<root_account>` user, ensure the `mfa_active` field is set to `TRUE` . **From Command Line:** 1. Run the following command: ``` aws iam get-account-summary | grep \"AccountMFAEnabled\" ``` 2. Ensure the AccountMFAEnabled property is set to 1",
"AdditionalInformation": "IAM User account \"root\" for us-gov cloud regions does not have console access. This recommendation is not applicable for us-gov cloud regions.",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html#id_root-user_manage_mfa:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html#enable-virt-mfa-for-root"
}
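The same account-summary call, narrowed with a JMESPath query; a minimal sketch of the audit step above:

```bash
# 1 means the root account has MFA (virtual or hardware) enabled; 0 means it does not.
aws iam get-account-summary --query 'SummaryMap.AccountMFAEnabled' --output text
```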
@@ -373,10 +375,10 @@
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The 'root' user account is the most privileged user in an AWS account. MFA adds an extra layer of protection on top of a user name and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their user name and password as well as for an authentication code from their AWS MFA device. For Level 2, it is recommended that the 'root' user account be protected with a hardware MFA.",
"RationaleStatement": "A hardware MFA has a smaller attack surface than a virtual MFA. For example, a hardware MFA does not suffer the attack surface introduced by the mobile smartphone on which a virtual MFA resides.\n\n**Note**: Using hardware MFA for many, many AWS accounts may create a logistical device management issue. If this is the case, consider implementing this Level 2 recommendation selectively to the highest security AWS accounts and the Level 1 recommendation applied to the remaining accounts.",
"RationaleStatement": "A hardware MFA has a smaller attack surface than a virtual MFA. For example, a hardware MFA does not suffer the attack surface introduced by the mobile smartphone on which a virtual MFA resides. **Note**: Using hardware MFA for many, many AWS accounts may create a logistical device management issue. If this is the case, consider implementing this Level 2 recommendation selectively to the highest security AWS accounts and the Level 1 recommendation applied to the remaining accounts.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to establish a hardware MFA for the 'root' user account:\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\nNote: to manage MFA devices for the AWS 'root' user account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials.\n2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account.\n3. Choose `Activate MFA` \n4. In the wizard, choose `A hardware MFA` device and then choose `Next Step` .\n5. In the `Serial Number` box, enter the serial number that is found on the back of the MFA device.\n6. In the `Authentication Code 1` box, enter the six-digit number displayed by the MFA device. You might need to press the button on the front of the device to display the number.\n7. Wait 30 seconds while the device refreshes the code, and then enter the next six-digit number into the `Authentication Code 2` box. You might need to press the button on the front of the device again to display the second number.\n8. Choose `Next Step` . The MFA device is now associated with the AWS account. The next time you use your AWS account credentials to sign in, you must type a code from the hardware MFA device.\n\nRemediation for this recommendation is not available through AWS CLI.",
"AuditProcedure": "Perform the following to determine if the 'root' user account has a hardware MFA setup:\n\n1. Run the following command to determine if the 'root' account has MFA setup:\n```\n aws iam get-account-summary | grep \"AccountMFAEnabled\"\n```\n\nThe `AccountMFAEnabled` property is set to `1` will ensure that the 'root' user account has MFA (Virtual or Hardware) Enabled.\nIf `AccountMFAEnabled` property is set to `0` the account is not compliant with this recommendation.\n\n2. If `AccountMFAEnabled` property is set to `1`, determine 'root' account has Hardware MFA enabled.\nRun the following command to list all virtual MFA devices:\n```\n aws iam list-virtual-mfa-devices \n```\nIf the output contains one MFA with the following Serial Number, it means the MFA is virtual, not hardware and the account is not compliant with this recommendation:\n\n `\"SerialNumber\": \"arn:aws:iam::_<aws_account_number>_:mfa/root-account-mfa-device\"`",
"RemediationProcedure": "Perform the following to establish a hardware MFA for the 'root' user account: 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). Note: to manage MFA devices for the AWS 'root' user account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials. 2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account. 3. Choose `Activate MFA` 4. In the wizard, choose `A hardware MFA` device and then choose `Next Step` . 5. In the `Serial Number` box, enter the serial number that is found on the back of the MFA device. 6. In the `Authentication Code 1` box, enter the six-digit number displayed by the MFA device. You might need to press the button on the front of the device to display the number. 7. Wait 30 seconds while the device refreshes the code, and then enter the next six-digit number into the `Authentication Code 2` box. You might need to press the button on the front of the device again to display the second number. 8. Choose `Next Step` . The MFA device is now associated with the AWS account. The next time you use your AWS account credentials to sign in, you must type a code from the hardware MFA device. Remediation for this recommendation is not available through AWS CLI.",
"AuditProcedure": "Perform the following to determine if the 'root' user account has a hardware MFA setup: 1. Run the following command to determine if the 'root' account has MFA setup: ``` aws iam get-account-summary | grep \"AccountMFAEnabled\" ``` The `AccountMFAEnabled` property is set to `1` will ensure that the 'root' user account has MFA (Virtual or Hardware) Enabled. If `AccountMFAEnabled` property is set to `0` the account is not compliant with this recommendation. 2. If `AccountMFAEnabled` property is set to `1`, determine 'root' account has Hardware MFA enabled. Run the following command to list all virtual MFA devices: ``` aws iam list-virtual-mfa-devices ``` If the output contains one MFA with the following Serial Number, it means the MFA is virtual, not hardware and the account is not compliant with this recommendation: `\"SerialNumber\": \"arn:aws:iam::_<aws_account_number>_:mfa/root-account-mfa-device\"`",
"AdditionalInformation": "IAM User account 'root' for us-gov cloud regions does not have console access. This control is not applicable for us-gov cloud regions.",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_physical.html#enable-hw-mfa-for-root"
}
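For a scripted version of the hardware-MFA audit above, a minimal boto3 sketch (not part of the benchmark text), assuming default AWS credentials with `iam:GetAccountSummary` and `iam:ListVirtualMFADevices` permissions; the PASS/FAIL strings are illustrative:

```
import boto3

iam = boto3.client("iam")

# AccountMFAEnabled == 1 means the root user has some MFA (virtual or hardware).
summary = iam.get_account_summary()["SummaryMap"]
if summary.get("AccountMFAEnabled") != 1:
    print("FAIL: root user has no MFA enabled")
else:
    # A virtual device with this serial suffix means the root MFA is not hardware.
    devices = iam.list_virtual_mfa_devices(AssignmentStatus="Any")["VirtualMFADevices"]
    root_virtual = any(
        d["SerialNumber"].endswith(":mfa/root-account-mfa-device") for d in devices
    )
    print("FAIL: root MFA is virtual" if root_virtual else "PASS: root has hardware MFA")
```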
@@ -396,9 +398,9 @@
"Description": "With the creation of an AWS account, a 'root user' is created that cannot be disabled or deleted. That user has unrestricted access to and control over all resources in the AWS account. It is highly recommended that the use of this account be avoided for everyday tasks.",
"RationaleStatement": "The 'root user' has unrestricted access to and control over all account resources. Use of it is inconsistent with the principles of least privilege and separation of duties, and can lead to unnecessary harm due to error or account compromise.",
"ImpactStatement": "",
"RemediationProcedure": "If you find that the 'root' user account is being used for daily activity to include administrative tasks that do not require the 'root' user:\n\n1. Change the 'root' user password.\n2. Deactivate or delete any access keys associate with the 'root' user.\n\n**Remember, anyone who has 'root' user credentials for your AWS account has unrestricted access to and control of all the resources in your account, including billing information.",
"AuditProcedure": "**From Console:**\n\n1. Login to the AWS Management Console at `https://console.aws.amazon.com/iam/`\n2. In the left pane, click `Credential Report`\n3. Click on `Download Report`\n4. Open of Save the file locally\n5. Locate the `<root account>` under the user column\n6. Review `password_last_used, access_key_1_last_used_date, access_key_2_last_used_date` to determine when the 'root user' was last used.\n\n**From Command Line:**\n\nRun the following CLI commands to provide a credential report for determining the last time the 'root user' was used:\n```\naws iam generate-credential-report\n```\n```\naws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,5,11,16 | grep -B1 '<root_account>'\n```\n\nReview `password_last_used`, `access_key_1_last_used_date`, `access_key_2_last_used_date` to determine when the _root user_ was last used.\n\n**Note:** There are a few conditions under which the use of the 'root' user account is required. Please see the reference links for all of the tasks that require use of the 'root' user.",
"AdditionalInformation": "The 'root' user for us-gov cloud regions is not enabled by default. However, on request to AWS support, they can enable the 'root' user and grant access only through access-keys (CLI, API methods) for us-gov cloud region. If the 'root' user for us-gov cloud regions is enabled, this recommendation is applicable.\n\nMonitoring usage of the 'root' user can be accomplished by implementing recommendation 3.3 Ensure a log metric filter and alarm exist for usage of the 'root' user.",
"RemediationProcedure": "If you find that the 'root' user account is being used for daily activity to include administrative tasks that do not require the 'root' user: 1. Change the 'root' user password. 2. Deactivate or delete any access keys associate with the 'root' user. **Remember, anyone who has 'root' user credentials for your AWS account has unrestricted access to and control of all the resources in your account, including billing information.",
"AuditProcedure": "**From Console:** 1. Login to the AWS Management Console at `https://console.aws.amazon.com/iam/` 2. In the left pane, click `Credential Report` 3. Click on `Download Report` 4. Open of Save the file locally 5. Locate the `<root account>` under the user column 6. Review `password_last_used, access_key_1_last_used_date, access_key_2_last_used_date` to determine when the 'root user' was last used. **From Command Line:** Run the following CLI commands to provide a credential report for determining the last time the 'root user' was used: ``` aws iam generate-credential-report ``` ``` aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,5,11,16 | grep -B1 '<root_account>' ``` Review `password_last_used`, `access_key_1_last_used_date`, `access_key_2_last_used_date` to determine when the _root user_ was last used. **Note:** There are a few conditions under which the use of the 'root' user account is required. Please see the reference links for all of the tasks that require use of the 'root' user.",
"AdditionalInformation": "The 'root' user for us-gov cloud regions is not enabled by default. However, on request to AWS support, they can enable the 'root' user and grant access only through access-keys (CLI, API methods) for us-gov cloud region. If the 'root' user for us-gov cloud regions is enabled, this recommendation is applicable. Monitoring usage of the 'root' user can be accomplished by implementing recommendation 3.3 Ensure a log metric filter and alarm exist for usage of the 'root' user.",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html:https://docs.aws.amazon.com/general/latest/gr/aws_tasks-that-require-root.html"
}
]
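The credential-report audit above can be scripted as well. A minimal boto3 sketch (not part of the benchmark text), assuming default credentials; the polling loop reflects that report generation is asynchronous, and the column names follow the credential-report CSV format:

```
import csv
import io
import time

import boto3

iam = boto3.client("iam")

# generate_credential_report is asynchronous; poll until the report is ready.
while iam.generate_credential_report()["State"] != "COMPLETE":
    time.sleep(2)

report = iam.get_credential_report()["Content"].decode("utf-8")
for row in csv.DictReader(io.StringIO(report)):
    # The root user's row is literally named "<root_account>" in the report.
    if row["user"] == "<root_account>":
        print("password_last_used:", row["password_last_used"])
        print("access_key_1_last_used_date:", row["access_key_1_last_used_date"])
        print("access_key_2_last_used_date:", row["access_key_2_last_used_date"])
```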
@@ -417,8 +419,8 @@
"Description": "Password policies are, in part, used to enforce password complexity requirements. IAM password policies can be used to ensure password are at least a given length. It is recommended that the password policy require a minimum password length 14.",
"RationaleStatement": "Setting a password complexity policy increases account resiliency against brute force login attempts.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to set the password policy as prescribed:\n\n**From Console:**\n\n1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings)\n2. Go to IAM Service on the AWS Console\n3. Click on Account Settings on the Left Pane\n4. Set \"Minimum password length\" to `14` or greater.\n5. Click \"Apply password policy\"\n\n**From Command Line:**\n```\n aws iam update-account-password-policy --minimum-password-length 14\n```\nNote: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.",
"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed:\n\n**From Console:**\n\n1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings)\n2. Go to IAM Service on the AWS Console\n3. Click on Account Settings on the Left Pane\n4. Ensure \"Minimum password length\" is set to 14 or greater.\n\n**From Command Line:**\n```\naws iam get-account-password-policy\n```\nEnsure the output of the above command includes \"MinimumPasswordLength\": 14 (or higher)",
"RemediationProcedure": "Perform the following to set the password policy as prescribed: **From Console:** 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Set \"Minimum password length\" to `14` or greater. 5. Click \"Apply password policy\" **From Command Line:** ``` aws iam update-account-password-policy --minimum-password-length 14 ``` Note: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.",
"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed: **From Console:** 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Ensure \"Minimum password length\" is set to 14 or greater. **From Command Line:** ``` aws iam get-account-password-policy ``` Ensure the output of the above command includes \"MinimumPasswordLength\": 14 (or higher)",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy"
}
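A minimal boto3 sketch of the password-length audit above (not part of the benchmark text), assuming default credentials; a missing password policy is treated as non-compliant:

```
import boto3
from botocore.exceptions import ClientError

iam = boto3.client("iam")

try:
    policy = iam.get_account_password_policy()["PasswordPolicy"]
except ClientError as err:
    # NoSuchEntity means no custom password policy is set at all.
    if err.response["Error"]["Code"] == "NoSuchEntity":
        policy = {}
    else:
        raise

length = policy.get("MinimumPasswordLength", 0)
print("PASS" if length >= 14 else "FAIL", "- MinimumPasswordLength:", length)
```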
@@ -438,8 +440,8 @@
"Description": "IAM password policies can prevent the reuse of a given password by the same user. It is recommended that the password policy prevent the reuse of passwords.",
"RationaleStatement": "Preventing password reuse increases account resiliency against brute force login attempts.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to set the password policy as prescribed:\n\n**From Console:**\n\n1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings)\n2. Go to IAM Service on the AWS Console\n3. Click on Account Settings on the Left Pane\n4. Check \"Prevent password reuse\"\n5. Set \"Number of passwords to remember\" is set to `24` \n\n**From Command Line:**\n```\n aws iam update-account-password-policy --password-reuse-prevention 24\n```\nNote: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.",
"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed:\n\n**From Console:**\n\n1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings)\n2. Go to IAM Service on the AWS Console\n3. Click on Account Settings on the Left Pane\n4. Ensure \"Prevent password reuse\" is checked\n5. Ensure \"Number of passwords to remember\" is set to 24\n\n**From Command Line:**\n```\naws iam get-account-password-policy \n```\nEnsure the output of the above command includes \"PasswordReusePrevention\": 24",
"RemediationProcedure": "Perform the following to set the password policy as prescribed: **From Console:** 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Check \"Prevent password reuse\" 5. Set \"Number of passwords to remember\" is set to `24` **From Command Line:** ``` aws iam update-account-password-policy --password-reuse-prevention 24 ``` Note: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.",
"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed: **From Console:** 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Ensure \"Prevent password reuse\" is checked 5. Ensure \"Number of passwords to remember\" is set to 24 **From Command Line:** ``` aws iam get-account-password-policy ``` Ensure the output of the above command includes \"PasswordReusePrevention\": 24",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy"
}
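The reuse-prevention audit above follows the same pattern as the length check; a minimal boto3 sketch under the same assumptions (default credentials; absence of a policy is a FAIL):

```
import boto3

iam = boto3.client("iam")

# Raises a NoSuchEntity ClientError if no custom password policy exists (FAIL case).
policy = iam.get_account_password_policy()["PasswordPolicy"]
reuse = policy.get("PasswordReusePrevention", 0)
print("PASS" if reuse >= 24 else "FAIL", "- PasswordReusePrevention:", reuse)
```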
@@ -459,8 +461,8 @@
"Description": "Amazon S3 provides a variety of no, or low, cost encryption options to protect data at rest.",
"RationaleStatement": "Encrypting data at rest reduces the likelihood that it is unintentionally exposed and can nullify the impact of disclosure if the encryption remains unbroken.",
"ImpactStatement": "Amazon S3 buckets with default bucket encryption using SSE-KMS cannot be used as destination buckets for Amazon S3 server access logging. Only SSE-S3 default encryption is supported for server access log destination buckets.",
"RemediationProcedure": "**From Console:**\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ \n2. Select a Bucket.\n3. Click on 'Properties'.\n4. Click edit on `Default Encryption`.\n5. Select either `AES-256`, `AWS-KMS`, `SSE-KMS` or `SSE-S3`.\n6. Click `Save`\n7. Repeat for all the buckets in your AWS account lacking encryption.\n\n**From Command Line:**\n\nRun either \n```\naws s3api put-bucket-encryption --bucket <bucket name> --server-side-encryption-configuration '{\"Rules\": [{\"ApplyServerSideEncryptionByDefault\": {\"SSEAlgorithm\": \"AES256\"}}]}'\n```\n or \n```\naws s3api put-bucket-encryption --bucket <bucket name> --server-side-encryption-configuration '{\"Rules\": [{\"ApplyServerSideEncryptionByDefault\": {\"SSEAlgorithm\": \"aws:kms\",\"KMSMasterKeyID\": \"aws/s3\"}}]}'\n```\n\n**Note:** the KMSMasterKeyID can be set to the master key of your choosing; aws/s3 is an AWS preconfigured default.",
"AuditProcedure": "**From Console:**\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ \n2. Select a Bucket.\n3. Click on 'Properties'.\n4. Verify that `Default Encryption` is enabled, and displays either `AES-256`, `AWS-KMS`, `SSE-KMS` or `SSE-S3`.\n5. Repeat for all the buckets in your AWS account.\n\n**From Command Line:**\n\n1. Run command to list buckets\n```\naws s3 ls\n```\n2. For each bucket, run \n```\naws s3api get-bucket-encryption --bucket <bucket name>\n```\n3. Verify that either \n```\n\"SSEAlgorithm\": \"AES256\"\n```\n or \n```\n\"SSEAlgorithm\": \"aws:kms\"```\n is displayed.",
"RemediationProcedure": "**From Console:** 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select a Bucket. 3. Click on 'Properties'. 4. Click edit on `Default Encryption`. 5. Select either `AES-256`, `AWS-KMS`, `SSE-KMS` or `SSE-S3`. 6. Click `Save` 7. Repeat for all the buckets in your AWS account lacking encryption. **From Command Line:** Run either ``` aws s3api put-bucket-encryption --bucket <bucket name> --server-side-encryption-configuration '{\"Rules\": [{\"ApplyServerSideEncryptionByDefault\": {\"SSEAlgorithm\": \"AES256\"}}]}' ``` or ``` aws s3api put-bucket-encryption --bucket <bucket name> --server-side-encryption-configuration '{\"Rules\": [{\"ApplyServerSideEncryptionByDefault\": {\"SSEAlgorithm\": \"aws:kms\",\"KMSMasterKeyID\": \"aws/s3\"}}]}' ``` **Note:** the KMSMasterKeyID can be set to the master key of your choosing; aws/s3 is an AWS preconfigured default.",
"AuditProcedure": "**From Console:** 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select a Bucket. 3. Click on 'Properties'. 4. Verify that `Default Encryption` is enabled, and displays either `AES-256`, `AWS-KMS`, `SSE-KMS` or `SSE-S3`. 5. Repeat for all the buckets in your AWS account. **From Command Line:** 1. Run command to list buckets ``` aws s3 ls ``` 2. For each bucket, run ``` aws s3api get-bucket-encryption --bucket <bucket name> ``` 3. Verify that either ``` \"SSEAlgorithm\": \"AES256\" ``` or ``` \"SSEAlgorithm\": \"aws:kms\"``` is displayed.",
"AdditionalInformation": "S3 bucket encryption only applies to objects as they are placed in the bucket. Enabling S3 bucket encryption does **not** encrypt objects previously stored within the bucket.",
"References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/default-bucket-encryption.html:https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-related-resources"
}
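A minimal boto3 sketch of the per-bucket encryption audit above (not part of the benchmark text), assuming default credentials; any `ClientError` from `get_bucket_encryption` is treated here as a missing default-encryption configuration:

```
import boto3
from botocore.exceptions import ClientError

s3 = boto3.client("s3")

for bucket in s3.list_buckets()["Buckets"]:
    name = bucket["Name"]
    try:
        rules = s3.get_bucket_encryption(Bucket=name)[
            "ServerSideEncryptionConfiguration"
        ]["Rules"]
        # Collect the default SSE algorithms configured on the bucket.
        algorithms = {
            r["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] for r in rules
        }
        ok = bool(algorithms & {"AES256", "aws:kms"})
        print(name, "PASS" if ok else "FAIL", sorted(algorithms))
    except ClientError:
        print(name, "FAIL - no default encryption configured")
```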
@@ -480,8 +482,8 @@
"Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
"RationaleStatement": "By default, Amazon S3 allows both HTTP and HTTPS requests. To achieve only allowing access to Amazon S3 objects through HTTPS you also have to explicitly deny access to HTTP requests. Bucket policies that allow HTTPS requests without explicitly denying HTTP requests will not comply with this recommendation.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/\n2. Select the Check box next to the Bucket.\n3. Click on 'Permissions'.\n4. Click 'Bucket Policy'\n5. Add this to the existing policy filling in the required information\n```\n{\n \"Sid\": <optional>\",\n \"Effect\": \"Deny\",\n \"Principal\": \"*\",\n \"Action\": \"s3:*\",\n \"Resource\": \"arn:aws:s3:::<bucket_name>/*\",\n \"Condition\": {\n \"Bool\": {\n \"aws:SecureTransport\": \"false\"\n }\n }\n }\n```\n6. Save\n7. Repeat for all the buckets in your AWS account that contain sensitive data.\n\n**From Console** \n\nusing AWS Policy Generator:\n\n1. Repeat steps 1-4 above.\n2. Click on `Policy Generator` at the bottom of the Bucket Policy Editor\n3. Select Policy Type\n`S3 Bucket Policy`\n4. Add Statements\n- `Effect` = Deny\n- `Principal` = *\n- `AWS Service` = Amazon S3\n- `Actions` = *\n- `Amazon Resource Name` = <ARN of the S3 Bucket>\n5. Generate Policy\n6. Copy the text and add it to the Bucket Policy.\n\n**From Command Line:**\n\n1. Export the bucket policy to a json file.\n```\naws s3api get-bucket-policy --bucket <bucket_name> --query Policy --output text > policy.json\n```\n\n2. Modify the policy.json file by adding in this statement:\n```\n{\n \"Sid\": <optional>\",\n \"Effect\": \"Deny\",\n \"Principal\": \"*\",\n \"Action\": \"s3:*\",\n \"Resource\": \"arn:aws:s3:::<bucket_name>/*\",\n \"Condition\": {\n \"Bool\": {\n \"aws:SecureTransport\": \"false\"\n }\n }\n }\n```\n3. Apply this modified policy back to the S3 bucket:\n```\naws s3api put-bucket-policy --bucket <bucket_name> --policy file://policy.json\n```",
"AuditProcedure": "To allow access to HTTPS you can use a condition that checks for the key `\"aws:SecureTransport: true\"`. This means that the request is sent through HTTPS but that HTTP can still be used. So to make sure you do not allow HTTP access confirm that there is a bucket policy that explicitly denies access for HTTP requests and that it contains the key \"aws:SecureTransport\": \"false\".\n\n**From Console:**\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/\n2. Select the Check box next to the Bucket.\n3. Click on 'Permissions', then Click on `Bucket Policy`.\n4. Ensure that a policy is listed that matches:\n```\n'{\n \"Sid\": <optional>,\n \"Effect\": \"Deny\",\n \"Principal\": \"*\",\n \"Action\": \"s3:*\",\n \"Resource\": \"arn:aws:s3:::<bucket_name>/*\",\n \"Condition\": {\n \"Bool\": {\n \"aws:SecureTransport\": \"false\"\n }'\n```\n`<optional>` and `<bucket_name>` will be specific to your account\n\n5. Repeat for all the buckets in your AWS account.\n\n**From Command Line:**\n\n1. List all of the S3 Buckets \n```\naws s3 ls\n```\n2. Using the list of buckets run this command on each of them:\n```\naws s3api get-bucket-policy --bucket <bucket_name> | grep aws:SecureTransport\n```\n3. Confirm that `aws:SecureTransport` is set to false `aws:SecureTransport:false`\n4. Confirm that the policy line has Effect set to Deny 'Effect:Deny'",
"RemediationProcedure": "**From Console:** 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Permissions'. 4. Click 'Bucket Policy' 5. Add this to the existing policy filling in the required information ``` { \"Sid\": <optional>\", \"Effect\": \"Deny\", \"Principal\": \"*\", \"Action\": \"s3:*\", \"Resource\": \"arn:aws:s3:::<bucket_name>/*\", \"Condition\": { \"Bool\": { \"aws:SecureTransport\": \"false\" } } } ``` 6. Save 7. Repeat for all the buckets in your AWS account that contain sensitive data. **From Console** using AWS Policy Generator: 1. Repeat steps 1-4 above. 2. Click on `Policy Generator` at the bottom of the Bucket Policy Editor 3. Select Policy Type `S3 Bucket Policy` 4. Add Statements - `Effect` = Deny - `Principal` = * - `AWS Service` = Amazon S3 - `Actions` = * - `Amazon Resource Name` = <ARN of the S3 Bucket> 5. Generate Policy 6. Copy the text and add it to the Bucket Policy. **From Command Line:** 1. Export the bucket policy to a json file. ``` aws s3api get-bucket-policy --bucket <bucket_name> --query Policy --output text > policy.json ``` 2. Modify the policy.json file by adding in this statement: ``` { \"Sid\": <optional>\", \"Effect\": \"Deny\", \"Principal\": \"*\", \"Action\": \"s3:*\", \"Resource\": \"arn:aws:s3:::<bucket_name>/*\", \"Condition\": { \"Bool\": { \"aws:SecureTransport\": \"false\" } } } ``` 3. Apply this modified policy back to the S3 bucket: ``` aws s3api put-bucket-policy --bucket <bucket_name> --policy file://policy.json ```",
"AuditProcedure": "To allow access to HTTPS you can use a condition that checks for the key `\"aws:SecureTransport: true\"`. This means that the request is sent through HTTPS but that HTTP can still be used. So to make sure you do not allow HTTP access confirm that there is a bucket policy that explicitly denies access for HTTP requests and that it contains the key \"aws:SecureTransport\": \"false\". **From Console:** 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Permissions', then Click on `Bucket Policy`. 4. Ensure that a policy is listed that matches: ``` '{ \"Sid\": <optional>, \"Effect\": \"Deny\", \"Principal\": \"*\", \"Action\": \"s3:*\", \"Resource\": \"arn:aws:s3:::<bucket_name>/*\", \"Condition\": { \"Bool\": { \"aws:SecureTransport\": \"false\" }' ``` `<optional>` and `<bucket_name>` will be specific to your account 5. Repeat for all the buckets in your AWS account. **From Command Line:** 1. List all of the S3 Buckets ``` aws s3 ls ``` 2. Using the list of buckets run this command on each of them: ``` aws s3api get-bucket-policy --bucket <bucket_name> | grep aws:SecureTransport ``` 3. Confirm that `aws:SecureTransport` is set to false `aws:SecureTransport:false` 4. Confirm that the policy line has Effect set to Deny 'Effect:Deny'",
"AdditionalInformation": "",
"References": "https://aws.amazon.com/premiumsupport/knowledge-center/s3-bucket-policy-for-config-rule/:https://aws.amazon.com/blogs/security/how-to-use-bucket-policies-and-apply-defense-in-depth-to-help-secure-your-amazon-s3-data/:https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/get-bucket-policy.html"
}
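The policy check above can also be scripted by parsing each bucket policy for a matching Deny statement. A minimal boto3 sketch (not part of the benchmark text), assuming default credentials; it only looks for the `aws:SecureTransport` Bool condition and does not validate the Principal/Action scope:

```
import json

import boto3
from botocore.exceptions import ClientError

s3 = boto3.client("s3")


def denies_http(bucket: str) -> bool:
    """True if some statement denies requests when aws:SecureTransport is false."""
    try:
        policy = json.loads(s3.get_bucket_policy(Bucket=bucket)["Policy"])
    except ClientError:
        return False  # no bucket policy at all
    for stmt in policy.get("Statement", []):
        secure = stmt.get("Condition", {}).get("Bool", {}).get("aws:SecureTransport")
        if stmt.get("Effect") == "Deny" and str(secure).lower() == "false":
            return True
    return False


for bucket in s3.list_buckets()["Buckets"]:
    print(bucket["Name"], "PASS" if denies_http(bucket["Name"]) else "FAIL")
```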
@@ -501,8 +503,8 @@
"Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
"RationaleStatement": "Adding MFA delete to an S3 bucket, requires additional authentication when you change the version state of your bucket or you delete and object version adding another layer of security in the event your security credentials are compromised or unauthorized access is granted.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the steps below to enable MFA delete on an S3 bucket.\n\nNote:\n-You cannot enable MFA Delete using the AWS Management Console. You must use the AWS CLI or API.\n-You must use your 'root' account to enable MFA Delete on S3 buckets.\n\n**From Command line:**\n\n1. Run the s3api put-bucket-versioning command\n\n```\naws s3api put-bucket-versioning --profile my-root-profile --bucket Bucket_Name --versioning-configuration Status=Enabled,MFADelete=Enabled --mfa “arn:aws:iam::aws_account_id:mfa/root-account-mfa-device passcode”\n```",
"AuditProcedure": "Perform the steps below to confirm MFA delete is configured on an S3 Bucket\n\n**From Console:**\n\n1. Login to the S3 console at `https://console.aws.amazon.com/s3/`\n\n2. Click the `Check` box next to the Bucket name you want to confirm\n\n3. In the window under `Properties`\n\n4. Confirm that Versioning is `Enabled`\n\n5. Confirm that MFA Delete is `Enabled`\n\n**From Command Line:**\n\n1. Run the `get-bucket-versioning`\n```\naws s3api get-bucket-versioning --bucket my-bucket\n```\n\nOutput example:\n```\n<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"> \n <Status>Enabled</Status>\n <MfaDelete>Enabled</MfaDelete> \n</VersioningConfiguration>\n```\n\nIf the Console or the CLI output does not show Versioning and MFA Delete `enabled` refer to the remediation below.",
"RemediationProcedure": "Perform the steps below to enable MFA delete on an S3 bucket. Note: -You cannot enable MFA Delete using the AWS Management Console. You must use the AWS CLI or API. -You must use your 'root' account to enable MFA Delete on S3 buckets. **From Command line:** 1. Run the s3api put-bucket-versioning command ``` aws s3api put-bucket-versioning --profile my-root-profile --bucket Bucket_Name --versioning-configuration Status=Enabled,MFADelete=Enabled --mfa “arn:aws:iam::aws_account_id:mfa/root-account-mfa-device passcode” ```",
"AuditProcedure": "Perform the steps below to confirm MFA delete is configured on an S3 Bucket **From Console:** 1. Login to the S3 console at `https://console.aws.amazon.com/s3/` 2. Click the `Check` box next to the Bucket name you want to confirm 3. In the window under `Properties` 4. Confirm that Versioning is `Enabled` 5. Confirm that MFA Delete is `Enabled` **From Command Line:** 1. Run the `get-bucket-versioning` ``` aws s3api get-bucket-versioning --bucket my-bucket ``` Output example: ``` <VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"> <Status>Enabled</Status> <MfaDelete>Enabled</MfaDelete> </VersioningConfiguration> ``` If the Console or the CLI output does not show Versioning and MFA Delete `enabled` refer to the remediation below.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete:https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html:https://aws.amazon.com/blogs/security/securing-access-to-aws-using-mfa-part-3/:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_lost-or-broken.html"
}
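A minimal boto3 sketch of the MFA-delete audit above (not part of the benchmark text), assuming default credentials; both keys are absent from the response when versioning was never configured:

```
import boto3

s3 = boto3.client("s3")

for bucket in s3.list_buckets()["Buckets"]:
    # Status/MFADelete are omitted entirely if versioning was never configured.
    versioning = s3.get_bucket_versioning(Bucket=bucket["Name"])
    ok = (
        versioning.get("Status") == "Enabled"
        and versioning.get("MFADelete") == "Enabled"
    )
    print(bucket["Name"], "PASS" if ok else "FAIL")
```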
@@ -520,10 +522,10 @@
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
"RationaleStatement": "Using a Cloud service or 3rd Party software to continuously monitor and automate the process of data discovery and classification for S3 buckets using machine learning and pattern matching is a strong defense in protecting that information.\n\nAmazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to discover and protect your sensitive data in AWS.",
"RationaleStatement": "Using a Cloud service or 3rd Party software to continuously monitor and automate the process of data discovery and classification for S3 buckets using machine learning and pattern matching is a strong defense in protecting that information. Amazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to discover and protect your sensitive data in AWS.",
"ImpactStatement": "There is a cost associated with using Amazon Macie. There is also typically a cost associated with 3rd Party tools that perform similar processes and protection.",
"RemediationProcedure": "Perform the steps below to enable and configure Amazon Macie\n\n**From Console:**\n\n1. Log on to the Macie console at `https://console.aws.amazon.com/macie/`\n\n2. Click `Get started`.\n\n3. Click `Enable Macie`.\n\nSetup a repository for sensitive data discovery results\n\n1. In the Left pane, under Settings, click `Discovery results`.\n\n2. Make sure `Create bucket` is selected.\n\n3. Create a bucket, enter a name for the bucket. The name must be unique across all S3 buckets. In addition, the name must start with a lowercase letter or a number.\n\n4. Click on `Advanced`.\n\n5. Block all public access, make sure `Yes` is selected.\n\n6. KMS encryption, specify the AWS KMS key that you want to use to encrypt the results. The key must be a symmetric, customer master key (CMK) that's in the same Region as the S3 bucket.\n\n7. Click on `Save`\n\nCreate a job to discover sensitive data\n\n1. In the left pane, click `S3 buckets`. Macie displays a list of all the S3 buckets for your account.\n\n2. Select the `check box` for each bucket that you want Macie to analyze as part of the job\n\n3. Click `Create job`.\n\n3. Click `Quick create`.\n\n4. For the Name and description step, enter a name and, optionally, a description of the job.\n\n5. Then click `Next`.\n\n6. For the Review and create step, click `Submit`.\n\nReview your findings\n\n1. In the left pane, click `Findings`.\n\n2. To view the details of a specific finding, choose any field other than the check box for the finding.\n\nIf you are using a 3rd Party tool to manage and protect your s3 data, follow the Vendor documentation for implementing and configuring that tool.",
"AuditProcedure": "Perform the following steps to determine if Macie is running:\n\n**From Console:**\n\n 1. Login to the Macie console at https://console.aws.amazon.com/macie/\n\n 2. In the left hand pane click on By job under findings.\n\n 3. Confirm that you have a Job setup for your S3 Buckets\n\nWhen you log into the Macie console if you aren't taken to the summary page and you don't have a job setup and running then refer to the remediation procedure below.\n\nIf you are using a 3rd Party tool to manage and protect your s3 data you meet this recommendation.",
"RemediationProcedure": "Perform the steps below to enable and configure Amazon Macie **From Console:** 1. Log on to the Macie console at `https://console.aws.amazon.com/macie/` 2. Click `Get started`. 3. Click `Enable Macie`. Setup a repository for sensitive data discovery results 1. In the Left pane, under Settings, click `Discovery results`. 2. Make sure `Create bucket` is selected. 3. Create a bucket, enter a name for the bucket. The name must be unique across all S3 buckets. In addition, the name must start with a lowercase letter or a number. 4. Click on `Advanced`. 5. Block all public access, make sure `Yes` is selected. 6. KMS encryption, specify the AWS KMS key that you want to use to encrypt the results. The key must be a symmetric, customer master key (CMK) that's in the same Region as the S3 bucket. 7. Click on `Save` Create a job to discover sensitive data 1. In the left pane, click `S3 buckets`. Macie displays a list of all the S3 buckets for your account. 2. Select the `check box` for each bucket that you want Macie to analyze as part of the job 3. Click `Create job`. 3. Click `Quick create`. 4. For the Name and description step, enter a name and, optionally, a description of the job. 5. Then click `Next`. 6. For the Review and create step, click `Submit`. Review your findings 1. In the left pane, click `Findings`. 2. To view the details of a specific finding, choose any field other than the check box for the finding. If you are using a 3rd Party tool to manage and protect your s3 data, follow the Vendor documentation for implementing and configuring that tool.",
"AuditProcedure": "Perform the following steps to determine if Macie is running: **From Console:** 1. Login to the Macie console at https://console.aws.amazon.com/macie/ 2. In the left hand pane click on By job under findings. 3. Confirm that you have a Job setup for your S3 Buckets When you log into the Macie console if you aren't taken to the summary page and you don't have a job setup and running then refer to the remediation procedure below. If you are using a 3rd Party tool to manage and protect your s3 data you meet this recommendation.",
"AdditionalInformation": "",
"References": "https://aws.amazon.com/macie/getting-started/:https://docs.aws.amazon.com/workspaces/latest/adminguide/data-protection.html:https://docs.aws.amazon.com/macie/latest/user/data-classification.html"
}
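A minimal boto3 sketch of the Macie audit above (not part of the benchmark text), assuming default credentials in the region being audited; `get_macie_session` raises a `ClientError` when Macie has never been enabled there:

```
import boto3
from botocore.exceptions import ClientError

macie = boto3.client("macie2")

try:
    session = macie.get_macie_session()  # errors out if Macie was never enabled
    jobs = macie.list_classification_jobs()["items"]
    if session["status"] == "ENABLED" and jobs:
        print("PASS - Macie enabled with classification jobs")
    else:
        print("FAIL - Macie enabled but no classification jobs configured")
except ClientError:
    print("FAIL - Macie is not enabled in this region")
```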
@@ -533,6 +535,7 @@
"Id": "2.1.5",
"Description": "Ensure that S3 Buckets are configured with 'Block public access (bucket settings)'",
"Checks": [
"s3_bucket_level_public_access_block",
"s3_account_level_public_access_blocks"
],
"Attributes": [
@@ -541,10 +544,10 @@
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
"RationaleStatement": "Amazon S3 `Block public access (bucket settings)` prevents the accidental or malicious public exposure of data contained within the respective bucket(s). \n\nAmazon S3 `Block public access (account settings)` prevents the accidental or malicious public exposure of data contained within all buckets of the respective AWS account.\n\nWhether blocking public access to all or some buckets is an organizational decision that should be based on data sensitivity, least privilege, and use case.",
"RationaleStatement": "Amazon S3 `Block public access (bucket settings)` prevents the accidental or malicious public exposure of data contained within the respective bucket(s). Amazon S3 `Block public access (account settings)` prevents the accidental or malicious public exposure of data contained within all buckets of the respective AWS account. Whether blocking public access to all or some buckets is an organizational decision that should be based on data sensitivity, least privilege, and use case.",
"ImpactStatement": "When you apply Block Public Access settings to an account, the settings apply to all AWS Regions globally. The settings might not take effect in all Regions immediately or simultaneously, but they eventually propagate to all Regions.",
"RemediationProcedure": "**If utilizing Block Public Access (bucket settings)**\n\n**From Console:**\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ \n2. Select the Check box next to the Bucket.\n3. Click on 'Edit public access settings'.\n4. Click 'Block all public access'\n5. Repeat for all the buckets in your AWS account that contain sensitive data.\n\n**From Command Line:**\n\n1. List all of the S3 Buckets\n```\naws s3 ls\n```\n2. Set the Block Public Access to true on that bucket\n```\naws s3api put-public-access-block --bucket <name-of-bucket> --public-access-block-configuration \"BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true\"\n```\n\n**If utilizing Block Public Access (account settings)**\n\n**From Console:**\n\nIf the output reads `true` for the separate configuration settings then it is set on the account.\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ \n2. Choose `Block Public Access (account settings)`\n3. Choose `Edit` to change the block public access settings for all the buckets in your AWS account\n4. Choose the settings you want to change, and then choose `Save`. For details about each setting, pause on the `i` icons.\n5. When you're asked for confirmation, enter `confirm`. Then Click `Confirm` to save your changes.\n\n**From Command Line:**\n\nTo set Block Public access settings for this account, run the following command:\n```\naws s3control put-public-access-block\n--public-access-block-configuration BlockPublicAcls=true, IgnorePublicAcls=true, BlockPublicPolicy=true, RestrictPublicBuckets=true\n--account-id <value>\n```",
"AuditProcedure": "**If utilizing Block Public Access (bucket settings)**\n\n**From Console:**\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ \n2. Select the Check box next to the Bucket.\n3. Click on 'Edit public access settings'.\n4. Ensure that block public access settings are set appropriately for this bucket\n5. Repeat for all the buckets in your AWS account.\n\n**From Command Line:**\n\n1. List all of the S3 Buckets\n```\naws s3 ls\n```\n2. Find the public access setting on that bucket\n```\naws s3api get-public-access-block --bucket <name-of-the-bucket>\n```\nOutput if Block Public access is enabled:\n\n```\n{\n \"PublicAccessBlockConfiguration\": {\n \"BlockPublicAcls\": true,\n \"IgnorePublicAcls\": true,\n \"BlockPublicPolicy\": true,\n \"RestrictPublicBuckets\": true\n }\n}\n```\n\nIf the output reads `false` for the separate configuration settings then proceed to the remediation.\n\n**If utilizing Block Public Access (account settings)**\n\n**From Console:**\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ \n2. Choose `Block public access (account settings)`\n3. Ensure that block public access settings are set appropriately for your AWS account.\n\n**From Command Line:**\n\nTo check Public access settings for this account status, run the following command,\n`aws s3control get-public-access-block --account-id <ACCT_ID> --region <REGION_NAME>`\n\nOutput if Block Public access is enabled:\n\n```\n{\n \"PublicAccessBlockConfiguration\": {\n \"IgnorePublicAcls\": true, \n \"BlockPublicPolicy\": true, \n \"BlockPublicAcls\": true, \n \"RestrictPublicBuckets\": true\n }\n}\n```\n\nIf the output reads `false` for the separate configuration settings then proceed to the remediation.",
"RemediationProcedure": "**If utilizing Block Public Access (bucket settings)** **From Console:** 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Edit public access settings'. 4. Click 'Block all public access' 5. Repeat for all the buckets in your AWS account that contain sensitive data. **From Command Line:** 1. List all of the S3 Buckets ``` aws s3 ls ``` 2. Set the Block Public Access to true on that bucket ``` aws s3api put-public-access-block --bucket <name-of-bucket> --public-access-block-configuration \"BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true\" ``` **If utilizing Block Public Access (account settings)** **From Console:** If the output reads `true` for the separate configuration settings then it is set on the account. 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Choose `Block Public Access (account settings)` 3. Choose `Edit` to change the block public access settings for all the buckets in your AWS account 4. Choose the settings you want to change, and then choose `Save`. For details about each setting, pause on the `i` icons. 5. When you're asked for confirmation, enter `confirm`. Then Click `Confirm` to save your changes. **From Command Line:** To set Block Public access settings for this account, run the following command: ``` aws s3control put-public-access-block --public-access-block-configuration BlockPublicAcls=true, IgnorePublicAcls=true, BlockPublicPolicy=true, RestrictPublicBuckets=true --account-id <value> ```",
"AuditProcedure": "**If utilizing Block Public Access (bucket settings)** **From Console:** 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Edit public access settings'. 4. Ensure that block public access settings are set appropriately for this bucket 5. Repeat for all the buckets in your AWS account. **From Command Line:** 1. List all of the S3 Buckets ``` aws s3 ls ``` 2. Find the public access setting on that bucket ``` aws s3api get-public-access-block --bucket <name-of-the-bucket> ``` Output if Block Public access is enabled: ``` { \"PublicAccessBlockConfiguration\": { \"BlockPublicAcls\": true, \"IgnorePublicAcls\": true, \"BlockPublicPolicy\": true, \"RestrictPublicBuckets\": true } } ``` If the output reads `false` for the separate configuration settings then proceed to the remediation. **If utilizing Block Public Access (account settings)** **From Console:** 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Choose `Block public access (account settings)` 3. Ensure that block public access settings are set appropriately for your AWS account. **From Command Line:** To check Public access settings for this account status, run the following command, `aws s3control get-public-access-block --account-id <ACCT_ID> --region <REGION_NAME>` Output if Block Public access is enabled: ``` { \"PublicAccessBlockConfiguration\": { \"IgnorePublicAcls\": true, \"BlockPublicPolicy\": true, \"BlockPublicAcls\": true, \"RestrictPublicBuckets\": true } } ``` If the output reads `false` for the separate configuration settings then proceed to the remediation.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/block-public-access-account.html"
}
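A minimal boto3 sketch combining the bucket-level and account-level audits above (not part of the benchmark text), assuming default credentials; a missing public-access-block configuration is treated as a failure:

```
import boto3
from botocore.exceptions import ClientError

s3 = boto3.client("s3")
sts = boto3.client("sts")
s3control = boto3.client("s3control")

REQUIRED = ("BlockPublicAcls", "IgnorePublicAcls", "BlockPublicPolicy", "RestrictPublicBuckets")


def all_blocked(config: dict) -> bool:
    # All four settings must be true for full Block Public Access.
    return all(config.get(key) for key in REQUIRED)


# Account-level settings.
account_id = sts.get_caller_identity()["Account"]
try:
    account_cfg = s3control.get_public_access_block(AccountId=account_id)[
        "PublicAccessBlockConfiguration"
    ]
except ClientError:
    account_cfg = {}  # no account-level configuration at all
print("account:", "PASS" if all_blocked(account_cfg) else "FAIL")

# Bucket-level settings.
for bucket in s3.list_buckets()["Buckets"]:
    try:
        cfg = s3.get_public_access_block(Bucket=bucket["Name"])[
            "PublicAccessBlockConfiguration"
        ]
    except ClientError:
        cfg = {}
    print(bucket["Name"], "PASS" if all_blocked(cfg) else "FAIL")
```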
@@ -564,8 +567,8 @@
"Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
"RationaleStatement": "Encrypting data at rest reduces the likelihood that it is unintentionally exposed and can nullify the impact of disclosure if the encryption remains unbroken.",
"ImpactStatement": "Losing access or removing the KMS key in use by the EBS volumes will result in no longer being able to access the volumes.",
"RemediationProcedure": "**From Console:**\n\n1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/ \n2. Under `Account attributes`, click `EBS encryption`.\n3. Click `Manage`.\n4. Click the `Enable` checkbox.\n5. Click `Update EBS encryption`\n6. Repeat for every region requiring the change.\n\n**Note:** EBS volume encryption is configured per region.\n\n**From Command Line:**\n\n1. Run \n```\naws --region <region> ec2 enable-ebs-encryption-by-default\n```\n2. Verify that `\"EbsEncryptionByDefault\": true` is displayed.\n3. Repeat every region requiring the change.\n\n**Note:** EBS volume encryption is configured per region.",
"AuditProcedure": "**From Console:**\n\n1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/ \n2. Under `Account attributes`, click `EBS encryption`.\n3. Verify `Always encrypt new EBS volumes` displays `Enabled`.\n4. Review every region in-use.\n\n**Note:** EBS volume encryption is configured per region.\n\n**From Command Line:**\n\n1. Run \n```\naws --region <region> ec2 get-ebs-encryption-by-default\n```\n2. Verify that `\"EbsEncryptionByDefault\": true` is displayed.\n3. Review every region in-use.\n\n**Note:** EBS volume encryption is configured per region.",
"RemediationProcedure": "**From Console:** 1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/ 2. Under `Account attributes`, click `EBS encryption`. 3. Click `Manage`. 4. Click the `Enable` checkbox. 5. Click `Update EBS encryption` 6. Repeat for every region requiring the change. **Note:** EBS volume encryption is configured per region. **From Command Line:** 1. Run ``` aws --region <region> ec2 enable-ebs-encryption-by-default ``` 2. Verify that `\"EbsEncryptionByDefault\": true` is displayed. 3. Repeat every region requiring the change. **Note:** EBS volume encryption is configured per region.",
"AuditProcedure": "**From Console:** 1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/ 2. Under `Account attributes`, click `EBS encryption`. 3. Verify `Always encrypt new EBS volumes` displays `Enabled`. 4. Review every region in-use. **Note:** EBS volume encryption is configured per region. **From Command Line:** 1. Run ``` aws --region <region> ec2 get-ebs-encryption-by-default ``` 2. Verify that `\"EbsEncryptionByDefault\": true` is displayed. 3. Review every region in-use. **Note:** EBS volume encryption is configured per region.",
"AdditionalInformation": "Default EBS volume encryption only applies to newly created EBS volumes. Existing EBS volumes are **not** converted automatically.",
"References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html:https://aws.amazon.com/blogs/aws/new-opt-in-to-default-encryption-for-new-ebs-volumes/"
}
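Because the setting is per region, the audit above has to loop over regions. A minimal boto3 sketch (not part of the benchmark text), assuming default credentials and that `us-east-1` is reachable for the initial `describe-regions` call:

```
import boto3

# EBS default encryption is a per-region setting, so check every enabled region.
regions = [
    r["RegionName"]
    for r in boto3.client("ec2", region_name="us-east-1").describe_regions()["Regions"]
]

for region in regions:
    ec2 = boto3.client("ec2", region_name=region)
    enabled = ec2.get_ebs_encryption_by_default()["EbsEncryptionByDefault"]
    print(region, "PASS" if enabled else "FAIL")
```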
@@ -585,8 +588,8 @@
"Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
"RationaleStatement": "Databases are likely to hold sensitive and critical data, it is highly recommended to implement encryption in order to protect your data from unauthorized access or disclosure. With RDS encryption enabled, the data stored on the instance's underlying storage, the automated backups, read replicas, and snapshots, are all encrypted.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\n1. Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/.\n2. In the left navigation panel, click on `Databases`\n3. Select the Database instance that needs to be encrypted.\n4. Click on `Actions` button placed at the top right and select `Take Snapshot`.\n5. On the Take Snapshot page, enter a database name of which you want to take a snapshot in the `Snapshot Name` field and click on `Take Snapshot`.\n6. Select the newly created snapshot and click on the `Action` button placed at the top right and select `Copy snapshot` from the Action menu.\n7. On the Make Copy of DB Snapshot page, perform the following:\n\n- In the New DB Snapshot Identifier field, Enter a name for the `new snapshot`.\n- Check `Copy Tags`, New snapshot must have the same tags as the source snapshot.\n- Select `Yes` from the `Enable Encryption` dropdown list to enable encryption, You can choose to use the AWS default encryption key or custom key from Master Key dropdown list.\n\n8. Click `Copy Snapshot` to create an encrypted copy of the selected instance snapshot.\n9. Select the new Snapshot Encrypted Copy and click on the `Action` button placed at the top right and select `Restore Snapshot` button from the Action menu, This will restore the encrypted snapshot to a new database instance.\n10. On the Restore DB Instance page, enter a unique name for the new database instance in the DB Instance Identifier field.\n11. Review the instance configuration details and click `Restore DB Instance`.\n12. As the new instance provisioning process is completed can update application configuration to refer to the endpoint of the new Encrypted database instance Once the database endpoint is changed at the application level, can remove the unencrypted instance.\n\n**From Command Line:**\n\n1. Run `describe-db-instances` command to list all RDS database names available in the selected AWS region, The command output should return the database instance identifier.\n```\naws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier'\n```\n2. Run `create-db-snapshot` command to create a snapshot for the selected database instance, The command output will return the `new snapshot` with name DB Snapshot Name.\n```\naws rds create-db-snapshot --region <region-name> --db-snapshot-identifier <DB-Snapshot-Name> --db-instance-identifier <DB-Name>\n```\n3. Now run `list-aliases` command to list the KMS keys aliases available in a specified region, The command output should return each `key alias currently available`. For our RDS encryption activation process, locate the ID of the AWS default KMS key.\n```\naws kms list-aliases --region <region-name>\n```\n4. Run `copy-db-snapshot` command using the default KMS key ID for RDS instances returned earlier to create an encrypted copy of the database instance snapshot, The command output will return the `encrypted instance snapshot configuration`.\n```\naws rds copy-db-snapshot --region <region-name> --source-db-snapshot-identifier <DB-Snapshot-Name> --target-db-snapshot-identifier <DB-Snapshot-Name-Encrypted> --copy-tags --kms-key-id <KMS-ID-For-RDS>\n```\n5. 
Run `restore-db-instance-from-db-snapshot` command to restore the encrypted snapshot created at the previous step to a new database instance, If successful, the command output should return the new encrypted database instance configuration.\n```\naws rds restore-db-instance-from-db-snapshot --region <region-name> --db-instance-identifier <DB-Name-Encrypted> --db-snapshot-identifier <DB-Snapshot-Name-Encrypted>\n```\n6. Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region, Output will return database instance identifier name Select encrypted database name that we just created DB-Name-Encrypted.\n```\naws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier'\n```\n7. Run again `describe-db-instances` command using the RDS instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True`.\n```\naws rds describe-db-instances --region <region-name> --db-instance-identifier <DB-Name-Encrypted> --query 'DBInstances[*].StorageEncrypted'\n```",
"AuditProcedure": "**From Console:**\n\n1. Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/\n2. In the navigation pane, under RDS dashboard, click `Databases`.\n3. Select the RDS Instance that you want to examine\n4. Click `Instance Name` to see details, then click on `Configuration` tab.\n5. Under Configuration Details section, In Storage pane search for the `Encryption Enabled` Status.\n6. If the current status is set to `Disabled`, Encryption is not enabled for the selected RDS Instance database instance.\n7. Repeat steps 3 to 7 to verify encryption status of other RDS Instance in same region.\n8. Change region from the top of the navigation bar and repeat audit for other regions.\n\n**From Command Line:**\n\n1. Run `describe-db-instances` command to list all RDS Instance database names, available in the selected AWS region, Output will return each Instance database identifier-name.\n ```\naws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier'\n```\n2. Run again `describe-db-instances` command using the RDS Instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True` Or `False`.\n```\naws rds describe-db-instances --region <region-name> --db-instance-identifier <DB-Name> --query 'DBInstances[*].StorageEncrypted'\n```\n3. If the StorageEncrypted parameter value is `False`, Encryption is not enabled for the selected RDS database instance.\n4. Repeat steps 1 to 3 for auditing each RDS Instance and change Region to verify for other regions",
"RemediationProcedure": "**From Console:** 1. Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/. 2. In the left navigation panel, click on `Databases` 3. Select the Database instance that needs to be encrypted. 4. Click on `Actions` button placed at the top right and select `Take Snapshot`. 5. On the Take Snapshot page, enter a database name of which you want to take a snapshot in the `Snapshot Name` field and click on `Take Snapshot`. 6. Select the newly created snapshot and click on the `Action` button placed at the top right and select `Copy snapshot` from the Action menu. 7. On the Make Copy of DB Snapshot page, perform the following: - In the New DB Snapshot Identifier field, Enter a name for the `new snapshot`. - Check `Copy Tags`, New snapshot must have the same tags as the source snapshot. - Select `Yes` from the `Enable Encryption` dropdown list to enable encryption, You can choose to use the AWS default encryption key or custom key from Master Key dropdown list. 8. Click `Copy Snapshot` to create an encrypted copy of the selected instance snapshot. 9. Select the new Snapshot Encrypted Copy and click on the `Action` button placed at the top right and select `Restore Snapshot` button from the Action menu, This will restore the encrypted snapshot to a new database instance. 10. On the Restore DB Instance page, enter a unique name for the new database instance in the DB Instance Identifier field. 11. Review the instance configuration details and click `Restore DB Instance`. 12. As the new instance provisioning process is completed can update application configuration to refer to the endpoint of the new Encrypted database instance Once the database endpoint is changed at the application level, can remove the unencrypted instance. **From Command Line:** 1. Run `describe-db-instances` command to list all RDS database names available in the selected AWS region, The command output should return the database instance identifier. ``` aws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. Run `create-db-snapshot` command to create a snapshot for the selected database instance, The command output will return the `new snapshot` with name DB Snapshot Name. ``` aws rds create-db-snapshot --region <region-name> --db-snapshot-identifier <DB-Snapshot-Name> --db-instance-identifier <DB-Name> ``` 3. Now run `list-aliases` command to list the KMS keys aliases available in a specified region, The command output should return each `key alias currently available`. For our RDS encryption activation process, locate the ID of the AWS default KMS key. ``` aws kms list-aliases --region <region-name> ``` 4. Run `copy-db-snapshot` command using the default KMS key ID for RDS instances returned earlier to create an encrypted copy of the database instance snapshot, The command output will return the `encrypted instance snapshot configuration`. ``` aws rds copy-db-snapshot --region <region-name> --source-db-snapshot-identifier <DB-Snapshot-Name> --target-db-snapshot-identifier <DB-Snapshot-Name-Encrypted> --copy-tags --kms-key-id <KMS-ID-For-RDS> ``` 5. Run `restore-db-instance-from-db-snapshot` command to restore the encrypted snapshot created at the previous step to a new database instance, If successful, the command output should return the new encrypted database instance configuration. 
``` aws rds restore-db-instance-from-db-snapshot --region <region-name> --db-instance-identifier <DB-Name-Encrypted> --db-snapshot-identifier <DB-Snapshot-Name-Encrypted> ``` 6. Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region, Output will return database instance identifier name Select encrypted database name that we just created DB-Name-Encrypted. ``` aws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier' ``` 7. Run again `describe-db-instances` command using the RDS instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True`. ``` aws rds describe-db-instances --region <region-name> --db-instance-identifier <DB-Name-Encrypted> --query 'DBInstances[*].StorageEncrypted' ```",
"AuditProcedure": "**From Console:** 1. Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/ 2. In the navigation pane, under RDS dashboard, click `Databases`. 3. Select the RDS Instance that you want to examine 4. Click `Instance Name` to see details, then click on `Configuration` tab. 5. Under Configuration Details section, In Storage pane search for the `Encryption Enabled` Status. 6. If the current status is set to `Disabled`, Encryption is not enabled for the selected RDS Instance database instance. 7. Repeat steps 3 to 7 to verify encryption status of other RDS Instance in same region. 8. Change region from the top of the navigation bar and repeat audit for other regions. **From Command Line:** 1. Run `describe-db-instances` command to list all RDS Instance database names, available in the selected AWS region, Output will return each Instance database identifier-name. ``` aws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. Run again `describe-db-instances` command using the RDS Instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True` Or `False`. ``` aws rds describe-db-instances --region <region-name> --db-instance-identifier <DB-Name> --query 'DBInstances[*].StorageEncrypted' ``` 3. If the StorageEncrypted parameter value is `False`, Encryption is not enabled for the selected RDS database instance. 4. Repeat steps 1 to 3 for auditing each RDS Instance and change Region to verify for other regions",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encryption.html:https://aws.amazon.com/blogs/database/selecting-the-right-encryption-options-for-amazon-rds-and-amazon-aurora-database-engines/#:~:text=With%20RDS%2Dencrypted%20resources%2C%20data,transparent%20to%20your%20database%20engine.:https://aws.amazon.com/rds/features/security/"
}
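The CLI audit above loops naturally over every instance in a region. Below is a minimal sketch, assuming the AWS CLI is installed with read-only credentials configured; the region value is illustrative and the PASS/FAIL labeling is ours, not part of the benchmark.

```
#!/usr/bin/env bash
# Flag RDS instances whose storage is not encrypted (StorageEncrypted != True).
region="us-east-1"  # illustrative region; adjust as needed

aws rds describe-db-instances --region "$region" \
  --query 'DBInstances[].[DBInstanceIdentifier,StorageEncrypted]' \
  --output text |
while read -r identifier encrypted; do
  if [ "$encrypted" != "True" ]; then
    echo "FAIL: $identifier (StorageEncrypted=$encrypted)"
  fi
done
```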
@@ -604,10 +607,10 @@
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "AWS CloudTrail is a web service that records AWS API calls for your account and delivers log files to you. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail provides a history of AWS API calls for an account, including API calls made via the Management Console, SDKs, command line tools, and higher-level AWS services (such as CloudFormation).",
"RationaleStatement": "The AWS API call history produced by CloudTrail enables security analysis, resource change tracking, and compliance auditing. Additionally, \n\n- ensuring that a multi-regions trail exists will ensure that unexpected activity occurring in otherwise unused regions is detected\n\n- ensuring that a multi-regions trail exists will ensure that `Global Service Logging` is enabled for a trail by default to capture recording of events generated on \nAWS global services\n\n- for a multi-regions trail, ensuring that management events configured for all type of Read/Writes ensures recording of management operations that are performed on all resources in an AWS account",
"ImpactStatement": "S3 lifecycle features can be used to manage the accumulation and management of logs over time. See the following AWS resource for more information on these features:\n\n1. https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html",
"RemediationProcedure": "Perform the following to enable global (Multi-region) CloudTrail logging:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail)\n2. Click on _Trails_ on the left navigation pane\n3. Click `Get Started Now` , if presented\n - Click `Add new trail` \n - Enter a trail name in the `Trail name` box\n - Set the `Apply trail to all regions` option to `Yes` \n - Specify an S3 bucket name in the `S3 bucket` box\n - Click `Create` \n4. If 1 or more trails already exist, select the target trail to enable for global logging\n5. Click the edit icon (pencil) next to `Apply trail to all regions` , Click `Yes` and Click `Save`.\n6. Click the edit icon (pencil) next to `Management Events` click `All` for setting `Read/Write Events` and Click `Save`.\n\n**From Command Line:**\n```\naws cloudtrail create-trail --name <trail_name> --bucket-name <s3_bucket_for_cloudtrail> --is-multi-region-trail \naws cloudtrail update-trail --name <trail_name> --is-multi-region-trail\n```\n\nNote: Creating CloudTrail via CLI without providing any overriding options configures `Management Events` to set `All` type of `Read/Writes` by default.",
"AuditProcedure": "Perform the following to determine if CloudTrail is enabled for all regions:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail)\n2. Click on `Trails` on the left navigation pane\n - You will be presented with a list of trails across all regions\n3. Ensure at least one Trail has `All` specified in the `Region` column\n4. Click on a trail via the link in the _Name_ column\n5. Ensure `Logging` is set to `ON` \n6. Ensure `Apply trail to all regions` is set to `Yes`\n7. In section `Management Events` ensure `Read/Write Events` set to `ALL`\n\n**From Command Line:**\n```\n aws cloudtrail describe-trails\n```\nEnsure `IsMultiRegionTrail` is set to `true` \n```\naws cloudtrail get-trail-status --name <trailname shown in describe-trails>\n```\nEnsure `IsLogging` is set to `true`\n```\naws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>\n```\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`",
"RationaleStatement": "The AWS API call history produced by CloudTrail enables security analysis, resource change tracking, and compliance auditing. Additionally, - ensuring that a multi-regions trail exists will ensure that unexpected activity occurring in otherwise unused regions is detected - ensuring that a multi-regions trail exists will ensure that `Global Service Logging` is enabled for a trail by default to capture recording of events generated on AWS global services - for a multi-regions trail, ensuring that management events configured for all type of Read/Writes ensures recording of management operations that are performed on all resources in an AWS account",
"ImpactStatement": "S3 lifecycle features can be used to manage the accumulation and management of logs over time. See the following AWS resource for more information on these features: 1. https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html",
"RemediationProcedure": "Perform the following to enable global (Multi-region) CloudTrail logging: **From Console:** 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on _Trails_ on the left navigation pane 3. Click `Get Started Now` , if presented - Click `Add new trail` - Enter a trail name in the `Trail name` box - Set the `Apply trail to all regions` option to `Yes` - Specify an S3 bucket name in the `S3 bucket` box - Click `Create` 4. If 1 or more trails already exist, select the target trail to enable for global logging 5. Click the edit icon (pencil) next to `Apply trail to all regions` , Click `Yes` and Click `Save`. 6. Click the edit icon (pencil) next to `Management Events` click `All` for setting `Read/Write Events` and Click `Save`. **From Command Line:** ``` aws cloudtrail create-trail --name <trail_name> --bucket-name <s3_bucket_for_cloudtrail> --is-multi-region-trail aws cloudtrail update-trail --name <trail_name> --is-multi-region-trail ``` Note: Creating CloudTrail via CLI without providing any overriding options configures `Management Events` to set `All` type of `Read/Writes` by default.",
"AuditProcedure": "Perform the following to determine if CloudTrail is enabled for all regions: **From Console:** 1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane - You will be presented with a list of trails across all regions 3. Ensure at least one Trail has `All` specified in the `Region` column 4. Click on a trail via the link in the _Name_ column 5. Ensure `Logging` is set to `ON` 6. Ensure `Apply trail to all regions` is set to `Yes` 7. In section `Management Events` ensure `Read/Write Events` set to `ALL` **From Command Line:** ``` aws cloudtrail describe-trails ``` Ensure `IsMultiRegionTrail` is set to `true` ``` aws cloudtrail get-trail-status --name <trailname shown in describe-trails> ``` Ensure `IsLogging` is set to `true` ``` aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails> ``` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-management-events:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html?icmpid=docs_cloudtrail_console#logging-management-events:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-services.html#cloud-trail-supported-services-data-events"
}
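The three CLI checks above can be chained into a single pass. A minimal sketch, assuming the AWS CLI is configured and the trails use basic (not advanced) event selectors; the PASS/FAIL labels are ours:

```
#!/usr/bin/env bash
# For each multi-region trail, confirm it is logging and that management
# events are recorded for all Read/Write types.
for arn in $(aws cloudtrail describe-trails \
    --query 'trailList[?IsMultiRegionTrail].TrailARN' --output text); do
  logging=$(aws cloudtrail get-trail-status --name "$arn" \
    --query 'IsLogging' --output text)
  rw_types=$(aws cloudtrail get-event-selectors --trail-name "$arn" \
    --query 'EventSelectors[?IncludeManagementEvents].ReadWriteType' \
    --output text)
  case "$logging/$rw_types" in
    True/*All*) echo "PASS: $arn" ;;
    *)          echo "FAIL: $arn (IsLogging=$logging, ReadWriteType=${rw_types:-none})" ;;
  esac
done
```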
@@ -627,8 +630,8 @@
"Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.",
"RationaleStatement": "Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity within your S3 Buckets using Amazon CloudWatch Events.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\n1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/`\n2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine.\n3. Click `Properties` tab to see in detail bucket configuration.\n4. Click on the `Object-level` logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link `https://console.aws.amazon.com/cloudtrail/`\n5. Once the Cloudtrail is selected, check the `Write` event checkbox, so that `object-level` logging for Write events is enabled.\n6. Repeat steps 2 to 5 to enable object-level logging of write events for other S3 buckets.\n\n**From Command Line:**\n\n1. To enable `object-level` data events logging for S3 buckets within your AWS account, run `put-event-selectors` command using the name of the trail that you want to reconfigure as identifier:\n```\naws cloudtrail put-event-selectors --region <region-name> --trail-name <trail-name> --event-selectors '[{ \"ReadWriteType\": \"WriteOnly\", \"IncludeManagementEvents\":true, \"DataResources\": [{ \"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::<s3-bucket-name>/\"] }] }]'\n```\n2. The command output will be `object-level` event trail configuration.\n3. If you want to enable it for all buckets at once then change Values parameter to `[\"arn:aws:s3\"]` in command given above.\n4. Repeat step 1 for each s3 bucket to update `object-level` logging of write events.\n5. Change the AWS region by updating the `--region` command parameter and perform the process for other regions.",
"AuditProcedure": "**From Console:**\n\n1. Login to the AWS Management Console and navigate to CloudTrail dashboard at `https://console.aws.amazon.com/cloudtrail/`\n2. In the left panel, click `Trails` and then click on the CloudTrail Name that you want to examine.\n3. Review `General details`\n4. Confirm that `Multi-region trail` is set to `Yes`\n5. Scroll down to `Data events`\n6. Confirm that it reads:\nData events: S3\nBucket Name: All current and future S3 buckets\nRead: Enabled\nWrite: Enabled\n7. Repeat steps 2 to 6 to verify that Multi-region trail and Data events logging of S3 buckets in CloudTrail.\nIf the CloudTrails do not have multi-region and data events configured for S3 refer to the remediation below.\n\n**From Command Line:**\n\n1. Run `list-trails` command to list the names of all Amazon CloudTrail trails currently available in all AWS regions:\n```\naws cloudtrail list-trails\n```\n2. The command output will be a list of all the trail names to include.\n\"TrailARN\": \"arn:aws:cloudtrail:<region>:<account#>:trail/<trailname>\",\n\"Name\": \"<trailname>\",\n\"HomeRegion\": \"<region>\"\n3. Next run 'get-trail- command to determine Multi-region.\n```\naws cloudtrail get-trail --name <trailname> --region <region_name>\n```\n4. The command output should include:\n\"IsMultiRegionTrail\": true,\n5. Next run `get-event-selectors` command using the `Name` of the trail and the `region` returned in step 2 to determine if Data events logging feature is enabled within the selected CloudTrail trail for all S3 buckets:\n```\naws cloudtrail get-event-selectors --region <HomeRegion> --trail-name <trailname> --query EventSelectors[*].DataResources[]\n```\n6. The command output should be an array that contains the configuration of the AWS resource(S3 bucket) defined for the Data events selector.\n\"Type\": \"AWS::S3::Object\",\n \"Values\": [\n \"arn:aws:s3\"\n7. If the `get-event-selectors` command returns an empty array '[]', the Data events are not included in the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded.\n8. Repeat steps 1 to 5 for auditing each CloudTrail to determine if Data events for S3 are covered.\nIf Multi-region is not set to true and the Data events does not show S3 defined as shown refer to the remediation procedure below.",
"RemediationProcedure": "**From Console:** 1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click `Properties` tab to see in detail bucket configuration. 4. Click on the `Object-level` logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link `https://console.aws.amazon.com/cloudtrail/` 5. Once the Cloudtrail is selected, check the `Write` event checkbox, so that `object-level` logging for Write events is enabled. 6. Repeat steps 2 to 5 to enable object-level logging of write events for other S3 buckets. **From Command Line:** 1. To enable `object-level` data events logging for S3 buckets within your AWS account, run `put-event-selectors` command using the name of the trail that you want to reconfigure as identifier: ``` aws cloudtrail put-event-selectors --region <region-name> --trail-name <trail-name> --event-selectors '[{ \"ReadWriteType\": \"WriteOnly\", \"IncludeManagementEvents\":true, \"DataResources\": [{ \"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::<s3-bucket-name>/\"] }] }]' ``` 2. The command output will be `object-level` event trail configuration. 3. If you want to enable it for all buckets at once then change Values parameter to `[\"arn:aws:s3\"]` in command given above. 4. Repeat step 1 for each s3 bucket to update `object-level` logging of write events. 5. Change the AWS region by updating the `--region` command parameter and perform the process for other regions.",
"AuditProcedure": "**From Console:** 1. Login to the AWS Management Console and navigate to CloudTrail dashboard at `https://console.aws.amazon.com/cloudtrail/` 2. In the left panel, click `Trails` and then click on the CloudTrail Name that you want to examine. 3. Review `General details` 4. Confirm that `Multi-region trail` is set to `Yes` 5. Scroll down to `Data events` 6. Confirm that it reads: Data events: S3 Bucket Name: All current and future S3 buckets Read: Enabled Write: Enabled 7. Repeat steps 2 to 6 to verify that Multi-region trail and Data events logging of S3 buckets in CloudTrail. If the CloudTrails do not have multi-region and data events configured for S3 refer to the remediation below. **From Command Line:** 1. Run `list-trails` command to list the names of all Amazon CloudTrail trails currently available in all AWS regions: ``` aws cloudtrail list-trails ``` 2. The command output will be a list of all the trail names to include. \"TrailARN\": \"arn:aws:cloudtrail:<region>:<account#>:trail/<trailname>\", \"Name\": \"<trailname>\", \"HomeRegion\": \"<region>\" 3. Next run 'get-trail- command to determine Multi-region. ``` aws cloudtrail get-trail --name <trailname> --region <region_name> ``` 4. The command output should include: \"IsMultiRegionTrail\": true, 5. Next run `get-event-selectors` command using the `Name` of the trail and the `region` returned in step 2 to determine if Data events logging feature is enabled within the selected CloudTrail trail for all S3 buckets: ``` aws cloudtrail get-event-selectors --region <HomeRegion> --trail-name <trailname> --query EventSelectors[*].DataResources[] ``` 6. The command output should be an array that contains the configuration of the AWS resource(S3 bucket) defined for the Data events selector. \"Type\": \"AWS::S3::Object\", \"Values\": [ \"arn:aws:s3\" 7. If the `get-event-selectors` command returns an empty array '[]', the Data events are not included in the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded. 8. Repeat steps 1 to 5 for auditing each CloudTrail to determine if Data events for S3 are covered. If Multi-region is not set to true and the Data events does not show S3 defined as shown refer to the remediation procedure below.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-cloudtrail-events.html"
}
@@ -648,8 +651,8 @@
"Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.",
"RationaleStatement": "Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity using Amazon CloudWatch Events.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\n1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/`\n2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine.\n3. Click `Properties` tab to see in detail bucket configuration.\n4. Click on the `Object-level` logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link `https://console.aws.amazon.com/cloudtrail/`\n5. Once the Cloudtrail is selected, check the Read event checkbox, so that `object-level` logging for `Read` events is enabled.\n6. Repeat steps 2 to 5 to enable `object-level` logging of read events for other S3 buckets.\n\n**From Command Line:**\n1. To enable `object-level` data events logging for S3 buckets within your AWS account, run `put-event-selectors` command using the name of the trail that you want to reconfigure as identifier:\n```\naws cloudtrail put-event-selectors --region <region-name> --trail-name <trail-name> --event-selectors '[{ \"ReadWriteType\": \"ReadOnly\", \"IncludeManagementEvents\":true, \"DataResources\": [{ \"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::<s3-bucket-name>/\"] }] }]'\n```\n2. The command output will be `object-level` event trail configuration.\n3. If you want to enable it for all buckets at ones then change Values parameter to `[\"arn:aws:s3\"]` in command given above.\n4. Repeat step 1 for each s3 bucket to update `object-level` logging of read events.\n5. Change the AWS region by updating the `--region` command parameter and perform the process for other regions.",
"AuditProcedure": "**From Console:**\n\n1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/`\n2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine.\n3. Click `Properties` tab to see in detail bucket configuration.\n4. If the current status for `Object-level` logging is set to `Disabled`, then object-level logging of read events for the selected s3 bucket is not set.\n5. If the current status for `Object-level` logging is set to `Enabled`, but the Read event check-box is unchecked, then object-level logging of read events for the selected s3 bucket is not set.\n6. Repeat steps 2 to 5 to verify `object-level` logging for `read` events of your other S3 buckets.\n\n**From Command Line:**\n1. Run `describe-trails` command to list the names of all Amazon CloudTrail trails currently available in the selected AWS region:\n```\naws cloudtrail describe-trails --region <region-name> --output table --query trailList[*].Name\n```\n2. The command output will be table of the requested trail names.\n3. Run `get-event-selectors` command using the name of the trail returned at the previous step and custom query filters to determine if Data events logging feature is enabled within the selected CloudTrail trail configuration for s3 bucket resources:\n```\naws cloudtrail get-event-selectors --region <region-name> --trail-name <trail-name> --query EventSelectors[*].DataResources[]\n```\n4. The command output should be an array that contains the configuration of the AWS resource(S3 bucket) defined for the Data events selector.\n5. If the `get-event-selectors` command returns an empty array, the Data events are not included into the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded.\n6. Repeat steps 1 to 5 for auditing each s3 bucket to identify other trails that are missing the capability to log Data events.\n7. Change the AWS region by updating the `--region` command parameter and perform the audit process for other regions.",
"RemediationProcedure": "**From Console:** 1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click `Properties` tab to see in detail bucket configuration. 4. Click on the `Object-level` logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link `https://console.aws.amazon.com/cloudtrail/` 5. Once the Cloudtrail is selected, check the Read event checkbox, so that `object-level` logging for `Read` events is enabled. 6. Repeat steps 2 to 5 to enable `object-level` logging of read events for other S3 buckets. **From Command Line:** 1. To enable `object-level` data events logging for S3 buckets within your AWS account, run `put-event-selectors` command using the name of the trail that you want to reconfigure as identifier: ``` aws cloudtrail put-event-selectors --region <region-name> --trail-name <trail-name> --event-selectors '[{ \"ReadWriteType\": \"ReadOnly\", \"IncludeManagementEvents\":true, \"DataResources\": [{ \"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::<s3-bucket-name>/\"] }] }]' ``` 2. The command output will be `object-level` event trail configuration. 3. If you want to enable it for all buckets at ones then change Values parameter to `[\"arn:aws:s3\"]` in command given above. 4. Repeat step 1 for each s3 bucket to update `object-level` logging of read events. 5. Change the AWS region by updating the `--region` command parameter and perform the process for other regions.",
"AuditProcedure": "**From Console:** 1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click `Properties` tab to see in detail bucket configuration. 4. If the current status for `Object-level` logging is set to `Disabled`, then object-level logging of read events for the selected s3 bucket is not set. 5. If the current status for `Object-level` logging is set to `Enabled`, but the Read event check-box is unchecked, then object-level logging of read events for the selected s3 bucket is not set. 6. Repeat steps 2 to 5 to verify `object-level` logging for `read` events of your other S3 buckets. **From Command Line:** 1. Run `describe-trails` command to list the names of all Amazon CloudTrail trails currently available in the selected AWS region: ``` aws cloudtrail describe-trails --region <region-name> --output table --query trailList[*].Name ``` 2. The command output will be table of the requested trail names. 3. Run `get-event-selectors` command using the name of the trail returned at the previous step and custom query filters to determine if Data events logging feature is enabled within the selected CloudTrail trail configuration for s3 bucket resources: ``` aws cloudtrail get-event-selectors --region <region-name> --trail-name <trail-name> --query EventSelectors[*].DataResources[] ``` 4. The command output should be an array that contains the configuration of the AWS resource(S3 bucket) defined for the Data events selector. 5. If the `get-event-selectors` command returns an empty array, the Data events are not included into the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded. 6. Repeat steps 1 to 5 for auditing each s3 bucket to identify other trails that are missing the capability to log Data events. 7. Change the AWS region by updating the `--region` command parameter and perform the audit process for other regions.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-cloudtrail-events.html"
}
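The audits for this recommendation and the preceding write-events one differ only in the `ReadWriteType` value they look for, so a single sweep can report both. A minimal sketch, assuming basic event selectors, a configured AWS CLI, and an illustrative region:

```
#!/usr/bin/env bash
# Report, per trail, which ReadWriteType values (ReadOnly/WriteOnly/All)
# its S3 object-level data event selectors cover.
region="us-east-1"  # illustrative
for trail in $(aws cloudtrail describe-trails --region "$region" \
    --query 'trailList[].Name' --output text); do
  types=$(aws cloudtrail get-event-selectors --region "$region" \
    --trail-name "$trail" \
    --query "EventSelectors[?DataResources[?Type=='AWS::S3::Object']].ReadWriteType" \
    --output text)
  echo "$trail: S3 data events ReadWriteType=${types:-none}"
done
```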
@@ -669,8 +672,8 @@
"Description": "CloudTrail log file validation creates a digitally signed digest file containing a hash of each log that CloudTrail writes to S3. These digest files can be used to determine whether a log file was changed, deleted, or unchanged after CloudTrail delivered the log. It is recommended that file validation be enabled on all CloudTrails.",
"RationaleStatement": "Enabling log file validation will provide additional integrity checking of CloudTrail logs.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to enable log file validation on a given trail:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail)\n2. Click on `Trails` on the left navigation pane\n3. Click on target trail\n4. Within the `General details` section click `edit`\n5. Under the `Advanced settings` section\n6. Check the enable box under `Log file validation` \n7. Click `Save changes` \n\n**From Command Line:**\n```\naws cloudtrail update-trail --name <trail_name> --enable-log-file-validation\n```\nNote that periodic validation of logs using these digests can be performed by running the following command:\n```\naws cloudtrail validate-logs --trail-arn <trail_arn> --start-time <start_time> --end-time <end_time>\n```",
"AuditProcedure": "Perform the following on each trail to determine if log file validation is enabled:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail)\n2. Click on `Trails` on the left navigation pane\n3. For Every Trail:\n- Click on a trail via the link in the _Name_ column\n- Under the `General details` section, ensure `Log file validation` is set to `Enabled` \n\n**From Command Line:**\n```\naws cloudtrail describe-trails\n```\nEnsure `LogFileValidationEnabled` is set to `true` for each trail",
"RemediationProcedure": "Perform the following to enable log file validation on a given trail: **From Console:** 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane 3. Click on target trail 4. Within the `General details` section click `edit` 5. Under the `Advanced settings` section 6. Check the enable box under `Log file validation` 7. Click `Save changes` **From Command Line:** ``` aws cloudtrail update-trail --name <trail_name> --enable-log-file-validation ``` Note that periodic validation of logs using these digests can be performed by running the following command: ``` aws cloudtrail validate-logs --trail-arn <trail_arn> --start-time <start_time> --end-time <end_time> ```",
"AuditProcedure": "Perform the following on each trail to determine if log file validation is enabled: **From Console:** 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane 3. For Every Trail: - Click on a trail via the link in the _Name_ column - Under the `General details` section, ensure `Log file validation` is set to `Enabled` **From Command Line:** ``` aws cloudtrail describe-trails ``` Ensure `LogFileValidationEnabled` is set to `true` for each trail",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-enabling.html"
}
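A minimal sketch of the CLI audit above, assuming the AWS CLI is configured; it prints only the trails that fail:

```
#!/usr/bin/env bash
# List CloudTrail trails that do not have log file validation enabled.
aws cloudtrail describe-trails \
  --query 'trailList[?!LogFileValidationEnabled].[Name,HomeRegion]' \
  --output text |
while read -r name region; do
  echo "FAIL: trail $name ($region) has LogFileValidationEnabled=false"
done
```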
@@ -690,8 +693,8 @@
"Description": "CloudTrail logs a record of every API call made in your AWS account. These logs file are stored in an S3 bucket. It is recommended that the bucket policy or access control list (ACL) applied to the S3 bucket that CloudTrail logs to prevent public access to the CloudTrail logs.",
"RationaleStatement": "Allowing public access to CloudTrail log content may aid an adversary in identifying weaknesses in the affected account's use or configuration.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to remove any public access that has been granted to the bucket via an ACL or S3 bucket policy:\n\n1. Go to Amazon S3 console at [https://console.aws.amazon.com/s3/home](https://console.aws.amazon.com/s3/home)\n2. Right-click on the bucket and click Properties\n3. In the `Properties` pane, click the `Permissions` tab.\n4. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted.\n5. Select the row that grants permission to `Everyone` or `Any Authenticated User` \n6. Uncheck all the permissions granted to `Everyone` or `Any Authenticated User` (click `x` to delete the row).\n7. Click `Save` to save the ACL.\n8. If the `Edit bucket policy` button is present, click it.\n9. Remove any `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}.",
"AuditProcedure": "Perform the following to determine if any public access is granted to an S3 bucket via an ACL or S3 bucket policy:\n\n**From Console:**\n\n1. Go to the Amazon CloudTrail console at [https://console.aws.amazon.com/cloudtrail/home](https://console.aws.amazon.com/cloudtrail/home)\n2. In the `API activity history` pane on the left, click `Trails` \n3. In the `Trails` pane, note the bucket names in the `S3 bucket` column\n4. Go to Amazon S3 console at [https://console.aws.amazon.com/s3/home](https://console.aws.amazon.com/s3/home)\n5. For each bucket noted in step 3, right-click on the bucket and click `Properties` \n6. In the `Properties` pane, click the `Permissions` tab.\n7. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted.\n8. Ensure no rows exists that have the `Grantee` set to `Everyone` or the `Grantee` set to `Any Authenticated User.` \n9. If the `Edit bucket policy` button is present, click it to review the bucket policy.\n10. Ensure the policy does not contain a `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}\n\n**From Command Line:**\n\n1. Get the name of the S3 bucket that CloudTrail is logging to:\n```\n aws cloudtrail describe-trails --query 'trailList[*].S3BucketName'\n```\n2. Ensure the `AllUsers` principal is not granted privileges to that `<bucket>` :\n```\n aws s3api get-bucket-acl --bucket <s3_bucket_for_cloudtrail> --query 'Grants[?Grantee.URI== `https://acs.amazonaws.com/groups/global/AllUsers` ]'\n```\n3. Ensure the `AuthenticatedUsers` principal is not granted privileges to that `<bucket>`:\n```\n aws s3api get-bucket-acl --bucket <s3_bucket_for_cloudtrail> --query 'Grants[?Grantee.URI== `https://acs.amazonaws.com/groups/global/Authenticated Users` ]'\n```\n4. Get the S3 Bucket Policy\n```\n aws s3api get-bucket-policy --bucket <s3_bucket_for_cloudtrail> \n```\n5. Ensure the policy does not contain a `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}\n\n**Note:** Principal set to \"\\*\" or {\"AWS\" : \"\\*\"} allows anonymous access.",
"RemediationProcedure": "Perform the following to remove any public access that has been granted to the bucket via an ACL or S3 bucket policy: 1. Go to Amazon S3 console at [https://console.aws.amazon.com/s3/home](https://console.aws.amazon.com/s3/home) 2. Right-click on the bucket and click Properties 3. In the `Properties` pane, click the `Permissions` tab. 4. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted. 5. Select the row that grants permission to `Everyone` or `Any Authenticated User` 6. Uncheck all the permissions granted to `Everyone` or `Any Authenticated User` (click `x` to delete the row). 7. Click `Save` to save the ACL. 8. If the `Edit bucket policy` button is present, click it. 9. Remove any `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}.",
"AuditProcedure": "Perform the following to determine if any public access is granted to an S3 bucket via an ACL or S3 bucket policy: **From Console:** 1. Go to the Amazon CloudTrail console at [https://console.aws.amazon.com/cloudtrail/home](https://console.aws.amazon.com/cloudtrail/home) 2. In the `API activity history` pane on the left, click `Trails` 3. In the `Trails` pane, note the bucket names in the `S3 bucket` column 4. Go to Amazon S3 console at [https://console.aws.amazon.com/s3/home](https://console.aws.amazon.com/s3/home) 5. For each bucket noted in step 3, right-click on the bucket and click `Properties` 6. In the `Properties` pane, click the `Permissions` tab. 7. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted. 8. Ensure no rows exists that have the `Grantee` set to `Everyone` or the `Grantee` set to `Any Authenticated User.` 9. If the `Edit bucket policy` button is present, click it to review the bucket policy. 10. Ensure the policy does not contain a `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"} **From Command Line:** 1. Get the name of the S3 bucket that CloudTrail is logging to: ``` aws cloudtrail describe-trails --query 'trailList[*].S3BucketName' ``` 2. Ensure the `AllUsers` principal is not granted privileges to that `<bucket>` : ``` aws s3api get-bucket-acl --bucket <s3_bucket_for_cloudtrail> --query 'Grants[?Grantee.URI== `https://acs.amazonaws.com/groups/global/AllUsers` ]' ``` 3. Ensure the `AuthenticatedUsers` principal is not granted privileges to that `<bucket>`: ``` aws s3api get-bucket-acl --bucket <s3_bucket_for_cloudtrail> --query 'Grants[?Grantee.URI== `https://acs.amazonaws.com/groups/global/Authenticated Users` ]' ``` 4. Get the S3 Bucket Policy ``` aws s3api get-bucket-policy --bucket <s3_bucket_for_cloudtrail> ``` 5. Ensure the policy does not contain a `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"} **Note:** Principal set to \"\\*\" or {\"AWS\" : \"\\*\"} allows anonymous access.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html"
}
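A minimal sketch of the CLI audit above, assuming the AWS CLI is configured; the grep on the policy document is a rough heuristic of ours, not a full policy evaluation:

```
#!/usr/bin/env bash
# Check each CloudTrail logging bucket for public ACL grants and for
# bucket policies that allow a wildcard principal.
for bucket in $(aws cloudtrail describe-trails \
    --query 'trailList[].S3BucketName' --output text); do
  uris=$(aws s3api get-bucket-acl --bucket "$bucket" \
    --query 'Grants[].Grantee.URI' --output text)
  case "$uris" in
    *AllUsers*|*AuthenticatedUsers*) echo "FAIL: $bucket has a public ACL grant" ;;
  esac
  # get-bucket-policy errors when no policy exists, so suppress stderr.
  policy=$(aws s3api get-bucket-policy --bucket "$bucket" \
    --query Policy --output text 2>/dev/null)
  if echo "$policy" | grep -Eq '"Principal" *: *"\*"'; then
    echo "FAIL: $bucket policy allows a wildcard principal"
  fi
done
```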
@@ -708,11 +711,11 @@
"Section": "3. Logging",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "AWS CloudTrail is a web service that records AWS API calls made in a given AWS account. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail uses Amazon S3 for log file storage and delivery, so log files are stored durably. In addition to capturing CloudTrail logs within a specified S3 bucket for long term analysis, realtime analysis can be performed by configuring CloudTrail to send logs to CloudWatch Logs. For a trail that is enabled in all regions in an account, CloudTrail sends log files from all those regions to a CloudWatch Logs log group. It is recommended that CloudTrail logs be sent to CloudWatch Logs.\n\nNote: The intent of this recommendation is to ensure AWS account activity is being captured, monitored, and appropriately alarmed on. CloudWatch Logs is a native way to accomplish this using AWS services but does not preclude the use of an alternate solution.",
"Description": "AWS CloudTrail is a web service that records AWS API calls made in a given AWS account. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail uses Amazon S3 for log file storage and delivery, so log files are stored durably. In addition to capturing CloudTrail logs within a specified S3 bucket for long term analysis, realtime analysis can be performed by configuring CloudTrail to send logs to CloudWatch Logs. For a trail that is enabled in all regions in an account, CloudTrail sends log files from all those regions to a CloudWatch Logs log group. It is recommended that CloudTrail logs be sent to CloudWatch Logs. Note: The intent of this recommendation is to ensure AWS account activity is being captured, monitored, and appropriately alarmed on. CloudWatch Logs is a native way to accomplish this using AWS services but does not preclude the use of an alternate solution.",
"RationaleStatement": "Sending CloudTrail logs to CloudWatch Logs will facilitate real-time and historic activity logging based on user, API, resource, and IP address, and provides opportunity to establish alarms and notifications for anomalous or sensitivity account activity.",
"ImpactStatement": "Note: By default, CloudWatch Logs will store Logs indefinitely unless a specific retention period is defined for the log group. When choosing the number of days to retain, keep in mind the average days it takes an organization to realize they have been breached is 210 days (at the time of this writing). Since additional time is required to research a breach, a minimum 365 day retention policy allows time for detection and research. You may also wish to archive the logs to a cheaper storage service rather than simply deleting them. See the following AWS resource to manage CloudWatch Logs retention periods:\n\n1. https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/SettingLogRetention.html",
"RemediationProcedure": "Perform the following to establish the prescribed state:\n\n**From Console:**\n\n1. Login to the CloudTrail console at `https://console.aws.amazon.com/cloudtrail/`\n2. Select the `Trail` the needs to be updated.\n3. Scroll down to `CloudWatch Logs`\n4. Click `Edit`\n5. Under `CloudWatch Logs` click the box `Enabled`\n6. Under `Log Group` pick new or select an existing log group\n7. Edit the `Log group name` to match the CloudTrail or pick the existing CloudWatch Group.\n8. Under `IAM Role` pick new or select an existing.\n9. Edit the `Role name` to match the CloudTrail or pick the existing IAM Role.\n10. Click `Save changes.\n\n**From Command Line:**\n```\naws cloudtrail update-trail --name <trail_name> --cloudwatch-logs-log-group-arn <cloudtrail_log_group_arn> --cloudwatch-logs-role-arn <cloudtrail_cloudwatchLogs_role_arn>\n```",
"AuditProcedure": "Perform the following to ensure CloudTrail is configured as prescribed:\n\n**From Console:**\n\n1. Login to the CloudTrail console at `https://console.aws.amazon.com/cloudtrail/`\n2. Under `Trails` , click on the CloudTrail you wish to evaluate\n3. Under the `CloudWatch Logs` section.\n4. Ensure a `CloudWatch Logs` log group is configured and listed.\n5. Under `General details` confirm `Last log file delivered` has a recent (~one day old) timestamp.\n\n**From Command Line:**\n\n1. Run the following command to get a listing of existing trails:\n```\n aws cloudtrail describe-trails\n```\n2. Ensure `CloudWatchLogsLogGroupArn` is not empty and note the value of the `Name` property.\n3. Using the noted value of the `Name` property, run the following command:\n```\n aws cloudtrail get-trail-status --name <trail_name>\n```\n4. Ensure the `LatestcloudwatchLogdDeliveryTime` property is set to a recent (~one day old) timestamp.\n\nIf the `CloudWatch Logs` log group is not setup and the delivery time is not recent refer to the remediation below.",
"ImpactStatement": "Note: By default, CloudWatch Logs will store Logs indefinitely unless a specific retention period is defined for the log group. When choosing the number of days to retain, keep in mind the average days it takes an organization to realize they have been breached is 210 days (at the time of this writing). Since additional time is required to research a breach, a minimum 365 day retention policy allows time for detection and research. You may also wish to archive the logs to a cheaper storage service rather than simply deleting them. See the following AWS resource to manage CloudWatch Logs retention periods: 1. https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/SettingLogRetention.html",
"RemediationProcedure": "Perform the following to establish the prescribed state: **From Console:** 1. Login to the CloudTrail console at `https://console.aws.amazon.com/cloudtrail/` 2. Select the `Trail` the needs to be updated. 3. Scroll down to `CloudWatch Logs` 4. Click `Edit` 5. Under `CloudWatch Logs` click the box `Enabled` 6. Under `Log Group` pick new or select an existing log group 7. Edit the `Log group name` to match the CloudTrail or pick the existing CloudWatch Group. 8. Under `IAM Role` pick new or select an existing. 9. Edit the `Role name` to match the CloudTrail or pick the existing IAM Role. 10. Click `Save changes. **From Command Line:** ``` aws cloudtrail update-trail --name <trail_name> --cloudwatch-logs-log-group-arn <cloudtrail_log_group_arn> --cloudwatch-logs-role-arn <cloudtrail_cloudwatchLogs_role_arn> ```",
"AuditProcedure": "Perform the following to ensure CloudTrail is configured as prescribed: **From Console:** 1. Login to the CloudTrail console at `https://console.aws.amazon.com/cloudtrail/` 2. Under `Trails` , click on the CloudTrail you wish to evaluate 3. Under the `CloudWatch Logs` section. 4. Ensure a `CloudWatch Logs` log group is configured and listed. 5. Under `General details` confirm `Last log file delivered` has a recent (~one day old) timestamp. **From Command Line:** 1. Run the following command to get a listing of existing trails: ``` aws cloudtrail describe-trails ``` 2. Ensure `CloudWatchLogsLogGroupArn` is not empty and note the value of the `Name` property. 3. Using the noted value of the `Name` property, run the following command: ``` aws cloudtrail get-trail-status --name <trail_name> ``` 4. Ensure the `LatestcloudwatchLogdDeliveryTime` property is set to a recent (~one day old) timestamp. If the `CloudWatch Logs` log group is not setup and the delivery time is not recent refer to the remediation below.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-user-guide.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/how-cloudtrail-works.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-aws-service-specific-topics.html"
}
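A minimal sketch of the CLI audit, assuming the AWS CLI is configured; it reports the last delivery time for manual review rather than computing the one-day threshold:

```
#!/usr/bin/env bash
# Flag trails with no CloudWatch Logs log group, and print the last
# CloudWatch Logs delivery time for the rest.
aws cloudtrail describe-trails \
  --query 'trailList[].[Name,CloudWatchLogsLogGroupArn]' --output text |
while read -r name group; do
  if [ "$group" = "None" ] || [ -z "$group" ]; then
    echo "FAIL: $name has no CloudWatch Logs log group"
    continue
  fi
  last=$(aws cloudtrail get-trail-status --name "$name" \
    --query 'LatestCloudWatchLogsDeliveryTime' --output text)
  echo "$name -> $group (last delivery: $last)"
done
```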
@@ -732,8 +735,8 @@
"Description": "AWS Config is a web service that performs configuration management of supported AWS resources within your account and delivers log files to you. The recorded information includes the configuration item (AWS resource), relationships between configuration items (AWS resources), any configuration changes between resources. It is recommended AWS Config be enabled in all regions.",
"RationaleStatement": "The AWS configuration item history captured by AWS Config enables security analysis, resource change tracking, and compliance auditing.",
"ImpactStatement": "It is recommended AWS Config be enabled in all regions.",
"RemediationProcedure": "To implement AWS Config configuration:\n\n**From Console:**\n\n1. Select the region you want to focus on in the top right of the console\n2. Click `Services` \n3. Click `Config` \n4. Define which resources you want to record in the selected region\n5. Choose to include global resources (IAM resources)\n6. Specify an S3 bucket in the same account or in another managed AWS account\n7. Create an SNS Topic from the same AWS account or another managed AWS account\n\n**From Command Line:**\n\n1. Ensure there is an appropriate S3 bucket, SNS topic, and IAM role per the [AWS Config Service prerequisites](http://docs.aws.amazon.com/config/latest/developerguide/gs-cli-prereq.html).\n2. Run this command to set up the configuration recorder\n```\naws configservice subscribe --s3-bucket my-config-bucket --sns-topic arn:aws:sns:us-east-1:012345678912:my-config-notice --iam-role arn:aws:iam::012345678912:role/myConfigRole\n```\n3. Run this command to start the configuration recorder:\n```\nstart-configuration-recorder --configuration-recorder-name <value>\n```",
"AuditProcedure": "Process to evaluate AWS Config configuration per region\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the AWS Config console at [https://console.aws.amazon.com/config/](https://console.aws.amazon.com/config/).\n2. On the top right of the console select target Region.\n3. If presented with Setup AWS Config - follow remediation procedure:\n4. On the Resource inventory page, Click on edit (the gear icon). The Set Up AWS Config page appears.\n5. Ensure 1 or both check-boxes under \"All Resources\" is checked.\n - Include global resources related to IAM resources - which needs to be enabled in 1 region only\n6. Ensure the correct S3 bucket has been defined.\n7. Ensure the correct SNS topic has been defined.\n8. Repeat steps 2 to 7 for each region.\n\n**From Command Line:**\n\n1. Run this command to show all AWS Config recorders and their properties:\n```\naws configservice describe-configuration-recorders\n```\n2. Evaluate the output to ensure that there's at least one recorder for which `recordingGroup` object includes `\"allSupported\": true` AND `\"includeGlobalResourceTypes\": true`\n\nNote: There is one more parameter \"ResourceTypes\" in recordingGroup object. We don't need to check the same as whenever we set \"allSupported\": true, AWS enforces resource types to be empty (\"ResourceTypes\":[])\n\nSample Output:\n\n```\n{\n \"ConfigurationRecorders\": [\n {\n \"recordingGroup\": {\n \"allSupported\": true,\n \"resourceTypes\": [],\n \"includeGlobalResourceTypes\": true\n },\n \"roleARN\": \"arn:aws:iam::<AWS_Account_ID>:role/service-role/<config-role-name>\",\n \"name\": \"default\"\n }\n ]\n}\n```\n\n3. Run this command to show the status for all AWS Config recorders:\n```\naws configservice describe-configuration-recorder-status\n```\n4. In the output, find recorders with `name` key matching the recorders that met criteria in step 2. Ensure that at least one of them includes `\"recording\": true` and `\"lastStatus\": \"SUCCESS\"`",
"RemediationProcedure": "To implement AWS Config configuration: **From Console:** 1. Select the region you want to focus on in the top right of the console 2. Click `Services` 3. Click `Config` 4. Define which resources you want to record in the selected region 5. Choose to include global resources (IAM resources) 6. Specify an S3 bucket in the same account or in another managed AWS account 7. Create an SNS Topic from the same AWS account or another managed AWS account **From Command Line:** 1. Ensure there is an appropriate S3 bucket, SNS topic, and IAM role per the [AWS Config Service prerequisites](http://docs.aws.amazon.com/config/latest/developerguide/gs-cli-prereq.html). 2. Run this command to set up the configuration recorder ``` aws configservice subscribe --s3-bucket my-config-bucket --sns-topic arn:aws:sns:us-east-1:012345678912:my-config-notice --iam-role arn:aws:iam::012345678912:role/myConfigRole ``` 3. Run this command to start the configuration recorder: ``` start-configuration-recorder --configuration-recorder-name <value> ```",
"AuditProcedure": "Process to evaluate AWS Config configuration per region **From Console:** 1. Sign in to the AWS Management Console and open the AWS Config console at [https://console.aws.amazon.com/config/](https://console.aws.amazon.com/config/). 2. On the top right of the console select target Region. 3. If presented with Setup AWS Config - follow remediation procedure: 4. On the Resource inventory page, Click on edit (the gear icon). The Set Up AWS Config page appears. 5. Ensure 1 or both check-boxes under \"All Resources\" is checked. - Include global resources related to IAM resources - which needs to be enabled in 1 region only 6. Ensure the correct S3 bucket has been defined. 7. Ensure the correct SNS topic has been defined. 8. Repeat steps 2 to 7 for each region. **From Command Line:** 1. Run this command to show all AWS Config recorders and their properties: ``` aws configservice describe-configuration-recorders ``` 2. Evaluate the output to ensure that there's at least one recorder for which `recordingGroup` object includes `\"allSupported\": true` AND `\"includeGlobalResourceTypes\": true` Note: There is one more parameter \"ResourceTypes\" in recordingGroup object. We don't need to check the same as whenever we set \"allSupported\": true, AWS enforces resource types to be empty (\"ResourceTypes\":[]) Sample Output: ``` { \"ConfigurationRecorders\": [ { \"recordingGroup\": { \"allSupported\": true, \"resourceTypes\": [], \"includeGlobalResourceTypes\": true }, \"roleARN\": \"arn:aws:iam::<AWS_Account_ID>:role/service-role/<config-role-name>\", \"name\": \"default\" } ] } ``` 3. Run this command to show the status for all AWS Config recorders: ``` aws configservice describe-configuration-recorder-status ``` 4. In the output, find recorders with `name` key matching the recorders that met criteria in step 2. Ensure that at least one of them includes `\"recording\": true` and `\"lastStatus\": \"SUCCESS\"`",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/cli/latest/reference/configservice/describe-configuration-recorder-status.html"
}
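A minimal sketch of the CLI audit for one region, assuming the AWS CLI is configured; the region value is illustrative:

```
#!/usr/bin/env bash
# Find recorders that cover all supported plus global resources, then
# print each one's name, recording flag, and last status.
region="us-east-1"  # illustrative
for name in $(aws configservice describe-configuration-recorders --region "$region" \
    --query 'ConfigurationRecorders[?recordingGroup.allSupported && recordingGroup.includeGlobalResourceTypes].name' \
    --output text); do
  aws configservice describe-configuration-recorder-status --region "$region" \
    --configuration-recorder-names "$name" \
    --query 'ConfigurationRecordersStatus[0].[name,recording,lastStatus]' \
    --output text
done
```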
@@ -753,8 +756,8 @@
"Description": "S3 Bucket Access Logging generates a log that contains access records for each request made to your S3 bucket. An access log record contains details about the request, such as the request type, the resources specified in the request worked, and the time and date the request was processed. It is recommended that bucket access logging be enabled on the CloudTrail S3 bucket.",
"RationaleStatement": "By enabling S3 bucket logging on target S3 buckets, it is possible to capture all events which may affect objects within any target buckets. Configuring logs to be placed in a separate bucket allows access to log information which can be useful in security and incident response workflows.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to enable S3 bucket logging:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3).\n2. Under `All Buckets` click on the target S3 bucket\n3. Click on `Properties` in the top right of the console\n4. Under `Bucket:` <s3\\_bucket\\_for\\_cloudtrail> click on `Logging` \n5. Configure bucket logging\n - Click on the `Enabled` checkbox\n - Select Target Bucket from list\n - Enter a Target Prefix\n6. Click `Save`.\n\n**From Command Line:**\n\n1. Get the name of the S3 bucket that CloudTrail is logging to:\n```\naws cloudtrail describe-trails --region <region-name> --query trailList[*].S3BucketName\n```\n2. Copy and add target bucket name at `<Logging_BucketName>`, Prefix for logfile at `<LogFilePrefix>` and optionally add an email address in the following template and save it as `<FileName.Json>`:\n```\n{\n \"LoggingEnabled\": {\n \"TargetBucket\": \"<Logging_BucketName>\",\n \"TargetPrefix\": \"<LogFilePrefix>\",\n \"TargetGrants\": [\n {\n \"Grantee\": {\n \"Type\": \"AmazonCustomerByEmail\",\n \"EmailAddress\": \"<EmailID>\"\n },\n \"Permission\": \"FULL_CONTROL\"\n }\n ]\n } \n}\n```\n3. Run the `put-bucket-logging` command with bucket name and `<FileName.Json>` as input, for more information refer at [put-bucket-logging](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-bucket-logging.html):\n```\naws s3api put-bucket-logging --bucket <BucketName> --bucket-logging-status file://<FileName.Json>\n```",
"AuditProcedure": "Perform the following ensure the CloudTrail S3 bucket has access logging is enabled:\n\n**From Console:**\n\n1. Go to the Amazon CloudTrail console at [https://console.aws.amazon.com/cloudtrail/home](https://console.aws.amazon.com/cloudtrail/home)\n2. In the API activity history pane on the left, click Trails\n3. In the Trails pane, note the bucket names in the S3 bucket column\n4. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3).\n5. Under `All Buckets` click on a target S3 bucket\n6. Click on `Properties` in the top right of the console\n7. Under `Bucket:` _ `<bucket_name>` _ click on `Logging` \n8. Ensure `Enabled` is checked.\n\n**From Command Line:**\n\n1. Get the name of the S3 bucket that CloudTrail is logging to:\n``` \naws cloudtrail describe-trails --query 'trailList[*].S3BucketName' \n```\n2. Ensure Bucket Logging is enabled:\n```\naws s3api get-bucket-logging --bucket <s3_bucket_for_cloudtrail>\n```\nEnsure command does not returns empty output.\n\nSample Output for a bucket with logging enabled:\n\n```\n{\n \"LoggingEnabled\": {\n \"TargetPrefix\": \"<Prefix_Test>\",\n \"TargetBucket\": \"<Bucket_name_for_Storing_Logs>\"\n }\n}\n```",
"RemediationProcedure": "Perform the following to enable S3 bucket logging: **From Console:** 1. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3). 2. Under `All Buckets` click on the target S3 bucket 3. Click on `Properties` in the top right of the console 4. Under `Bucket:` <s3\\_bucket\\_for\\_cloudtrail> click on `Logging` 5. Configure bucket logging - Click on the `Enabled` checkbox - Select Target Bucket from list - Enter a Target Prefix 6. Click `Save`. **From Command Line:** 1. Get the name of the S3 bucket that CloudTrail is logging to: ``` aws cloudtrail describe-trails --region <region-name> --query trailList[*].S3BucketName ``` 2. Copy and add target bucket name at `<Logging_BucketName>`, Prefix for logfile at `<LogFilePrefix>` and optionally add an email address in the following template and save it as `<FileName.Json>`: ``` { \"LoggingEnabled\": { \"TargetBucket\": \"<Logging_BucketName>\", \"TargetPrefix\": \"<LogFilePrefix>\", \"TargetGrants\": [ { \"Grantee\": { \"Type\": \"AmazonCustomerByEmail\", \"EmailAddress\": \"<EmailID>\" }, \"Permission\": \"FULL_CONTROL\" } ] } } ``` 3. Run the `put-bucket-logging` command with bucket name and `<FileName.Json>` as input, for more information refer at [put-bucket-logging](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-bucket-logging.html): ``` aws s3api put-bucket-logging --bucket <BucketName> --bucket-logging-status file://<FileName.Json> ```",
"AuditProcedure": "Perform the following ensure the CloudTrail S3 bucket has access logging is enabled: **From Console:** 1. Go to the Amazon CloudTrail console at [https://console.aws.amazon.com/cloudtrail/home](https://console.aws.amazon.com/cloudtrail/home) 2. In the API activity history pane on the left, click Trails 3. In the Trails pane, note the bucket names in the S3 bucket column 4. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3). 5. Under `All Buckets` click on a target S3 bucket 6. Click on `Properties` in the top right of the console 7. Under `Bucket:` _ `<bucket_name>` _ click on `Logging` 8. Ensure `Enabled` is checked. **From Command Line:** 1. Get the name of the S3 bucket that CloudTrail is logging to: ``` aws cloudtrail describe-trails --query 'trailList[*].S3BucketName' ``` 2. Ensure Bucket Logging is enabled: ``` aws s3api get-bucket-logging --bucket <s3_bucket_for_cloudtrail> ``` Ensure command does not returns empty output. Sample Output for a bucket with logging enabled: ``` { \"LoggingEnabled\": { \"TargetPrefix\": \"<Prefix_Test>\", \"TargetBucket\": \"<Bucket_name_for_Storing_Logs>\" } } ```",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html"
}
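The command-line audit above loops naturally over every trail destination. A minimal shell sketch, assuming the AWS CLI is configured with read access to CloudTrail and S3; the loop and the FAIL output format are illustrative, not part of the benchmark text:

```
# Flag any CloudTrail destination bucket without server access logging.
for bucket in $(aws cloudtrail describe-trails --query 'trailList[*].S3BucketName' --output text); do
  logging=$(aws s3api get-bucket-logging --bucket "$bucket" --output text)
  # get-bucket-logging prints nothing when access logging is disabled
  [ -z "$logging" ] && echo "FAIL: $bucket has no access logging"
done
```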
@@ -774,9 +777,9 @@
"Description": "AWS CloudTrail is a web service that records AWS API calls for an account and makes those logs available to users and resources in accordance with IAM policies. AWS Key Management Service (KMS) is a managed service that helps create and control the encryption keys used to encrypt account data, and uses Hardware Security Modules (HSMs) to protect the security of encryption keys. CloudTrail logs can be configured to leverage server side encryption (SSE) and KMS customer created master keys (CMK) to further protect CloudTrail logs. It is recommended that CloudTrail be configured to use SSE-KMS.",
"RationaleStatement": "Configuring CloudTrail to use SSE-KMS provides additional confidentiality controls on log data as a given user must have S3 read permission on the corresponding log bucket and must be granted decrypt permission by the CMK policy.",
"ImpactStatement": "Customer created keys incur an additional cost. See https://aws.amazon.com/kms/pricing/ for more information.",
"RemediationProcedure": "Perform the following to configure CloudTrail to use SSE-KMS:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail)\n2. In the left navigation pane, choose `Trails` .\n3. Click on a Trail\n4. Under the `S3` section click on the edit button (pencil icon)\n5. Click `Advanced` \n6. Select an existing CMK from the `KMS key Id` drop-down menu\n - Note: Ensure the CMK is located in the same region as the S3 bucket\n - Note: You will need to apply a KMS Key policy on the selected CMK in order for CloudTrail as a service to encrypt and decrypt log files using the CMK provided. Steps are provided [here](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-kms-key-policy-for-cloudtrail.html) for editing the selected CMK Key policy\n7. Click `Save` \n8. You will see a notification message stating that you need to have decrypt permissions on the specified KMS key to decrypt log files.\n9. Click `Yes` \n\n**From Command Line:**\n```\naws cloudtrail update-trail --name <trail_name> --kms-id <cloudtrail_kms_key>\naws kms put-key-policy --key-id <cloudtrail_kms_key> --policy <cloudtrail_kms_key_policy>\n```",
"AuditProcedure": "Perform the following to determine if CloudTrail is configured to use SSE-KMS:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail)\n2. In the left navigation pane, choose `Trails` .\n3. Select a Trail\n4. Under the `S3` section, ensure `Encrypt log files` is set to `Yes` and a KMS key ID is specified in the `KSM Key Id` field.\n\n**From Command Line:**\n\n1. Run the following command:\n```\n aws cloudtrail describe-trails \n```\n2. For each trail listed, SSE-KMS is enabled if the trail has a `KmsKeyId` property defined.",
"AdditionalInformation": "3 statements which need to be added to the CMK policy:\n\n1\\. Enable Cloudtrail to describe CMK properties\n```\n<pre class=\"programlisting\" style=\"font-style: normal;\">{\n \"Sid\": \"Allow CloudTrail access\",\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"cloudtrail.amazonaws.com\"\n },\n \"Action\": \"kms:DescribeKey\",\n \"Resource\": \"*\"\n}\n```\n2\\. Granting encrypt permissions\n```\n<pre class=\"programlisting\" style=\"font-style: normal;\">{\n \"Sid\": \"Allow CloudTrail to encrypt logs\",\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"cloudtrail.amazonaws.com\"\n },\n \"Action\": \"kms:GenerateDataKey*\",\n \"Resource\": \"*\",\n \"Condition\": {\n \"StringLike\": {\n \"kms:EncryptionContext:aws:cloudtrail:arn\": [\n \"arn:aws:cloudtrail:*:aws-account-id:trail/*\"\n ]\n }\n }\n}\n```\n3\\. Granting decrypt permissions\n```\n<pre class=\"programlisting\" style=\"font-style: normal;\">{\n \"Sid\": \"Enable CloudTrail log decrypt permissions\",\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"AWS\": \"arn:aws:iam::aws-account-id:user/username\"\n },\n \"Action\": \"kms:Decrypt\",\n \"Resource\": \"*\",\n \"Condition\": {\n \"Null\": {\n \"kms:EncryptionContext:aws:cloudtrail:arn\": \"false\"\n }\n }\n}\n```",
"RemediationProcedure": "Perform the following to configure CloudTrail to use SSE-KMS: **From Console:** 1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. In the left navigation pane, choose `Trails` . 3. Click on a Trail 4. Under the `S3` section click on the edit button (pencil icon) 5. Click `Advanced` 6. Select an existing CMK from the `KMS key Id` drop-down menu - Note: Ensure the CMK is located in the same region as the S3 bucket - Note: You will need to apply a KMS Key policy on the selected CMK in order for CloudTrail as a service to encrypt and decrypt log files using the CMK provided. Steps are provided [here](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-kms-key-policy-for-cloudtrail.html) for editing the selected CMK Key policy 7. Click `Save` 8. You will see a notification message stating that you need to have decrypt permissions on the specified KMS key to decrypt log files. 9. Click `Yes` **From Command Line:** ``` aws cloudtrail update-trail --name <trail_name> --kms-id <cloudtrail_kms_key> aws kms put-key-policy --key-id <cloudtrail_kms_key> --policy <cloudtrail_kms_key_policy> ```",
"AuditProcedure": "Perform the following to determine if CloudTrail is configured to use SSE-KMS: **From Console:** 1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. In the left navigation pane, choose `Trails` . 3. Select a Trail 4. Under the `S3` section, ensure `Encrypt log files` is set to `Yes` and a KMS key ID is specified in the `KSM Key Id` field. **From Command Line:** 1. Run the following command: ``` aws cloudtrail describe-trails ``` 2. For each trail listed, SSE-KMS is enabled if the trail has a `KmsKeyId` property defined.",
"AdditionalInformation": "3 statements which need to be added to the CMK policy: 1\\. Enable Cloudtrail to describe CMK properties ``` <pre class=\"programlisting\" style=\"font-style: normal;\">{ \"Sid\": \"Allow CloudTrail access\", \"Effect\": \"Allow\", \"Principal\": { \"Service\": \"cloudtrail.amazonaws.com\" }, \"Action\": \"kms:DescribeKey\", \"Resource\": \"*\" } ``` 2\\. Granting encrypt permissions ``` <pre class=\"programlisting\" style=\"font-style: normal;\">{ \"Sid\": \"Allow CloudTrail to encrypt logs\", \"Effect\": \"Allow\", \"Principal\": { \"Service\": \"cloudtrail.amazonaws.com\" }, \"Action\": \"kms:GenerateDataKey*\", \"Resource\": \"*\", \"Condition\": { \"StringLike\": { \"kms:EncryptionContext:aws:cloudtrail:arn\": [ \"arn:aws:cloudtrail:*:aws-account-id:trail/*\" ] } } } ``` 3\\. Granting decrypt permissions ``` <pre class=\"programlisting\" style=\"font-style: normal;\">{ \"Sid\": \"Enable CloudTrail log decrypt permissions\", \"Effect\": \"Allow\", \"Principal\": { \"AWS\": \"arn:aws:iam::aws-account-id:user/username\" }, \"Action\": \"kms:Decrypt\", \"Resource\": \"*\", \"Condition\": { \"Null\": { \"kms:EncryptionContext:aws:cloudtrail:arn\": \"false\" } } } ```",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html:https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html"
}
]
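Since the command-line audit hinges on whether each trail has a `KmsKeyId`, the per-trail inspection can be collapsed into one query. A minimal sketch, assuming the AWS CLI is configured; the JMESPath filter is an assumption beyond the benchmark text:

```
# List trails that have no KmsKeyId, i.e. are not using SSE-KMS.
aws cloudtrail describe-trails \
  --query 'trailList[?KmsKeyId==`null`].Name' --output text
```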
@@ -793,10 +796,10 @@
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "AWS Key Management Service (KMS) allows customers to rotate the backing key which is key material stored within the KMS which is tied to the key ID of the Customer Created customer master key (CMK). It is the backing key that is used to perform cryptographic operations such as encryption and decryption. Automated key rotation currently retains all prior backing keys so that decryption of encrypted data can take place transparently. It is recommended that CMK key rotation be enabled for symmetric keys. Key rotation can not be enabled for any asymmetric CMK.",
"RationaleStatement": "Rotating encryption keys helps reduce the potential impact of a compromised key as data encrypted with a new key cannot be accessed with a previous key that may have been exposed.\nKeys should be rotated every year, or upon event that would result in the compromise of that key.",
"RationaleStatement": "Rotating encryption keys helps reduce the potential impact of a compromised key as data encrypted with a new key cannot be accessed with a previous key that may have been exposed. Keys should be rotated every year, or upon event that would result in the compromise of that key.",
"ImpactStatement": "Creation, management, and storage of CMKs may require additional time from and administrator.",
"RemediationProcedure": "**From Console:**\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam](https://console.aws.amazon.com/iam).\n2. In the left navigation pane, choose `Customer managed keys` .\n3. Select a customer managed CMK where `Key spec = SYMMETRIC_DEFAULT`\n4. Underneath the \"General configuration\" panel open the tab \"Key rotation\"\n5. Check the \"Automatically rotate this KMS key every year.\" checkbox\n\n**From Command Line:**\n\n1. Run the following command to enable key rotation:\n```\n aws kms enable-key-rotation --key-id <kms_key_id>\n```",
"AuditProcedure": "**From Console:**\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam](https://console.aws.amazon.com/iam).\n2. In the left navigation pane, choose `Customer managed keys`\n3. Select a customer managed CMK where `Key spec = SYMMETRIC_DEFAULT`\n4. Underneath the `General configuration` panel open the tab `Key rotation`\n5. Ensure that the checkbox `Automatically rotate this KMS key every year.` is activated\n6. Repeat steps 3 - 5 for all customer managed CMKs where \"Key spec = SYMMETRIC_DEFAULT\"\n\n**From Command Line:**\n\n1. Run the following command to get a list of all keys and their associated `KeyIds` \n```\n aws kms list-keys\n```\n2. For each key, note the KeyId and run the following command\n```\ndescribe-key --key-id <kms_key_id>\n```\n3. If the response contains \"KeySpec = SYMMETRIC_DEFAULT\" run the following command\n```\n aws kms get-key-rotation-status --key-id <kms_key_id>\n```\n4. Ensure `KeyRotationEnabled` is set to `true`\n5. Repeat steps 2 - 4 for all remaining CMKs",
"RemediationProcedure": "**From Console:** 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam](https://console.aws.amazon.com/iam). 2. In the left navigation pane, choose `Customer managed keys` . 3. Select a customer managed CMK where `Key spec = SYMMETRIC_DEFAULT` 4. Underneath the \"General configuration\" panel open the tab \"Key rotation\" 5. Check the \"Automatically rotate this KMS key every year.\" checkbox **From Command Line:** 1. Run the following command to enable key rotation: ``` aws kms enable-key-rotation --key-id <kms_key_id> ```",
"AuditProcedure": "**From Console:** 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam](https://console.aws.amazon.com/iam). 2. In the left navigation pane, choose `Customer managed keys` 3. Select a customer managed CMK where `Key spec = SYMMETRIC_DEFAULT` 4. Underneath the `General configuration` panel open the tab `Key rotation` 5. Ensure that the checkbox `Automatically rotate this KMS key every year.` is activated 6. Repeat steps 3 - 5 for all customer managed CMKs where \"Key spec = SYMMETRIC_DEFAULT\" **From Command Line:** 1. Run the following command to get a list of all keys and their associated `KeyIds` ``` aws kms list-keys ``` 2. For each key, note the KeyId and run the following command ``` describe-key --key-id <kms_key_id> ``` 3. If the response contains \"KeySpec = SYMMETRIC_DEFAULT\" run the following command ``` aws kms get-key-rotation-status --key-id <kms_key_id> ``` 4. Ensure `KeyRotationEnabled` is set to `true` 5. Repeat steps 2 - 4 for all remaining CMKs",
"AdditionalInformation": "",
"References": "https://aws.amazon.com/kms/pricing/:https://csrc.nist.gov/publications/detail/sp/800-57-part-1/rev-5/final"
}
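Command-line audit steps 1-5 chain together cleanly. A minimal sketch, assuming the AWS CLI is configured; the `KeyManager` filter (to skip AWS managed keys) and the FAIL output format are assumptions beyond the benchmark text:

```
# Report symmetric customer managed keys without automatic rotation.
for key in $(aws kms list-keys --query 'Keys[*].KeyId' --output text); do
  spec=$(aws kms describe-key --key-id "$key" --query 'KeyMetadata.KeySpec' --output text)
  manager=$(aws kms describe-key --key-id "$key" --query 'KeyMetadata.KeyManager' --output text)
  if [ "$spec" = "SYMMETRIC_DEFAULT" ] && [ "$manager" = "CUSTOMER" ]; then
    rotation=$(aws kms get-key-rotation-status --key-id "$key" --query 'KeyRotationEnabled' --output text)
    [ "$rotation" = "True" ] || echo "FAIL: rotation disabled for $key"
  fi
done
```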
@@ -815,9 +818,9 @@
"AssessmentStatus": "Automated",
"Description": "VPC Flow Logs is a feature that enables you to capture information about the IP traffic going to and from network interfaces in your VPC. After you've created a flow log, you can view and retrieve its data in Amazon CloudWatch Logs. It is recommended that VPC Flow Logs be enabled for packet \"Rejects\" for VPCs.",
"RationaleStatement": "VPC Flow Logs provide visibility into network traffic that traverses the VPC and can be used to detect anomalous traffic or insight during security workflows.",
"ImpactStatement": "By default, CloudWatch Logs will store Logs indefinitely unless a specific retention period is defined for the log group. When choosing the number of days to retain, keep in mind the average days it takes an organization to realize they have been breached is 210 days (at the time of this writing). Since additional time is required to research a breach, a minimum 365 day retention policy allows time for detection and research. You may also wish to archive the logs to a cheaper storage service rather than simply deleting them. See the following AWS resource to manage CloudWatch Logs retention periods:\n\n1. https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/SettingLogRetention.html",
"RemediationProcedure": "Perform the following to determine if VPC Flow logs is enabled:\n\n**From Console:**\n\n1. Sign into the management console\n2. Select `Services` then `VPC` \n3. In the left navigation pane, select `Your VPCs` \n4. Select a VPC\n5. In the right pane, select the `Flow Logs` tab.\n6. If no Flow Log exists, click `Create Flow Log` \n7. For Filter, select `Reject`\n8. Enter in a `Role` and `Destination Log Group` \n9. Click `Create Log Flow` \n10. Click on `CloudWatch Logs Group` \n\n**Note:** Setting the filter to \"Reject\" will dramatically reduce the logging data accumulation for this recommendation and provide sufficient information for the purposes of breach detection, research and remediation. However, during periods of least privilege security group engineering, setting this the filter to \"All\" can be very helpful in discovering existing traffic flows required for proper operation of an already running environment.\n\n**From Command Line:**\n\n1. Create a policy document and name it as `role_policy_document.json` and paste the following content:\n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"test\",\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"ec2.amazonaws.com\"\n },\n \"Action\": \"sts:AssumeRole\"\n }\n ]\n}\n```\n2. Create another policy document and name it as `iam_policy.json` and paste the following content:\n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\":[\n \"logs:CreateLogGroup\",\n \"logs:CreateLogStream\",\n \"logs:DescribeLogGroups\",\n \"logs:DescribeLogStreams\",\n \"logs:PutLogEvents\",\n \"logs:GetLogEvents\",\n \"logs:FilterLogEvents\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n```\n3. Run the below command to create an IAM role:\n```\naws iam create-role --role-name <aws_support_iam_role> --assume-role-policy-document file://<file-path>role_policy_document.json \n```\n4. Run the below command to create an IAM policy:\n```\naws iam create-policy --policy-name <ami-policy-name> --policy-document file://<file-path>iam-policy.json\n```\n5. Run `attach-group-policy` command using the IAM policy ARN returned at the previous step to attach the policy to the IAM role (if the command succeeds, no output is returned):\n```\naws iam attach-group-policy --policy-arn arn:aws:iam::<aws-account-id>:policy/<iam-policy-name> --group-name <group-name>\n```\n6. Run `describe-vpcs` to get the VpcId available in the selected region:\n```\naws ec2 describe-vpcs --region <region>\n```\n7. The command output should return the VPC Id available in the selected region.\n8. Run `create-flow-logs` to create a flow log for the vpc:\n```\naws ec2 create-flow-logs --resource-type VPC --resource-ids <vpc-id> --traffic-type REJECT --log-group-name <log-group-name> --deliver-logs-permission-arn <iam-role-arn>\n```\n9. Repeat step 8 for other vpcs available in the selected region.\n10. Change the region by updating --region and repeat remediation procedure for other vpcs.",
"AuditProcedure": "Perform the following to determine if VPC Flow logs are enabled:\n\n**From Console:**\n\n1. Sign into the management console\n2. Select `Services` then `VPC` \n3. In the left navigation pane, select `Your VPCs` \n4. Select a VPC\n5. In the right pane, select the `Flow Logs` tab.\n6. Ensure a Log Flow exists that has `Active` in the `Status` column.\n\n**From Command Line:**\n\n1. Run `describe-vpcs` command (OSX/Linux/UNIX) to list the VPC networks available in the current AWS region:\n```\naws ec2 describe-vpcs --region <region> --query Vpcs[].VpcId\n```\n2. The command output returns the `VpcId` available in the selected region.\n3. Run `describe-flow-logs` command (OSX/Linux/UNIX) using the VPC ID to determine if the selected virtual network has the Flow Logs feature enabled:\n```\naws ec2 describe-flow-logs --filter \"Name=resource-id,Values=<vpc-id>\"\n```\n4. If there are no Flow Logs created for the selected VPC, the command output will return an `empty list []`.\n5. Repeat step 3 for other VPCs available in the same region.\n6. Change the region by updating `--region` and repeat steps 1 - 5 for all the VPCs.",
"ImpactStatement": "By default, CloudWatch Logs will store Logs indefinitely unless a specific retention period is defined for the log group. When choosing the number of days to retain, keep in mind the average days it takes an organization to realize they have been breached is 210 days (at the time of this writing). Since additional time is required to research a breach, a minimum 365 day retention policy allows time for detection and research. You may also wish to archive the logs to a cheaper storage service rather than simply deleting them. See the following AWS resource to manage CloudWatch Logs retention periods: 1. https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/SettingLogRetention.html",
"RemediationProcedure": "Perform the following to determine if VPC Flow logs is enabled: **From Console:** 1. Sign into the management console 2. Select `Services` then `VPC` 3. In the left navigation pane, select `Your VPCs` 4. Select a VPC 5. In the right pane, select the `Flow Logs` tab. 6. If no Flow Log exists, click `Create Flow Log` 7. For Filter, select `Reject` 8. Enter in a `Role` and `Destination Log Group` 9. Click `Create Log Flow` 10. Click on `CloudWatch Logs Group` **Note:** Setting the filter to \"Reject\" will dramatically reduce the logging data accumulation for this recommendation and provide sufficient information for the purposes of breach detection, research and remediation. However, during periods of least privilege security group engineering, setting this the filter to \"All\" can be very helpful in discovering existing traffic flows required for proper operation of an already running environment. **From Command Line:** 1. Create a policy document and name it as `role_policy_document.json` and paste the following content: ``` { \"Version\": \"2012-10-17\", \"Statement\": [ { \"Sid\": \"test\", \"Effect\": \"Allow\", \"Principal\": { \"Service\": \"ec2.amazonaws.com\" }, \"Action\": \"sts:AssumeRole\" } ] } ``` 2. Create another policy document and name it as `iam_policy.json` and paste the following content: ``` { \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Action\":[ \"logs:CreateLogGroup\", \"logs:CreateLogStream\", \"logs:DescribeLogGroups\", \"logs:DescribeLogStreams\", \"logs:PutLogEvents\", \"logs:GetLogEvents\", \"logs:FilterLogEvents\" ], \"Resource\": \"*\" } ] } ``` 3. Run the below command to create an IAM role: ``` aws iam create-role --role-name <aws_support_iam_role> --assume-role-policy-document file://<file-path>role_policy_document.json ``` 4. Run the below command to create an IAM policy: ``` aws iam create-policy --policy-name <ami-policy-name> --policy-document file://<file-path>iam-policy.json ``` 5. Run `attach-group-policy` command using the IAM policy ARN returned at the previous step to attach the policy to the IAM role (if the command succeeds, no output is returned): ``` aws iam attach-group-policy --policy-arn arn:aws:iam::<aws-account-id>:policy/<iam-policy-name> --group-name <group-name> ``` 6. Run `describe-vpcs` to get the VpcId available in the selected region: ``` aws ec2 describe-vpcs --region <region> ``` 7. The command output should return the VPC Id available in the selected region. 8. Run `create-flow-logs` to create a flow log for the vpc: ``` aws ec2 create-flow-logs --resource-type VPC --resource-ids <vpc-id> --traffic-type REJECT --log-group-name <log-group-name> --deliver-logs-permission-arn <iam-role-arn> ``` 9. Repeat step 8 for other vpcs available in the selected region. 10. Change the region by updating --region and repeat remediation procedure for other vpcs.",
"AuditProcedure": "Perform the following to determine if VPC Flow logs are enabled: **From Console:** 1. Sign into the management console 2. Select `Services` then `VPC` 3. In the left navigation pane, select `Your VPCs` 4. Select a VPC 5. In the right pane, select the `Flow Logs` tab. 6. Ensure a Log Flow exists that has `Active` in the `Status` column. **From Command Line:** 1. Run `describe-vpcs` command (OSX/Linux/UNIX) to list the VPC networks available in the current AWS region: ``` aws ec2 describe-vpcs --region <region> --query Vpcs[].VpcId ``` 2. The command output returns the `VpcId` available in the selected region. 3. Run `describe-flow-logs` command (OSX/Linux/UNIX) using the VPC ID to determine if the selected virtual network has the Flow Logs feature enabled: ``` aws ec2 describe-flow-logs --filter \"Name=resource-id,Values=<vpc-id>\" ``` 4. If there are no Flow Logs created for the selected VPC, the command output will return an `empty list []`. 5. Repeat step 3 for other VPCs available in the same region. 6. Change the region by updating `--region` and repeat steps 1 - 5 for all the VPCs.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html"
}
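The per-VPC audit loop translates directly to a script. A minimal sketch, assuming the AWS CLI is configured; the region variable and the `length()` query are illustrative, not benchmark text:

```
# List VPCs in one region that have no flow log attached.
region=us-east-1   # illustrative; repeat per region
for vpc in $(aws ec2 describe-vpcs --region "$region" --query 'Vpcs[*].VpcId' --output text); do
  count=$(aws ec2 describe-flow-logs --region "$region" \
    --filter "Name=resource-id,Values=$vpc" --query 'length(FlowLogs)' --output text)
  [ "$count" = "0" ] && echo "FAIL: $vpc has no flow logs"
done
```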
@@ -836,10 +839,10 @@
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for unauthorized API calls.",
"RationaleStatement": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
"ImpactStatement": "This alert may be triggered by normal read-only console activities that attempt to opportunistically gather optional information, but gracefully fail if they don't have permissions.\n\nIf an excessive number of alerts are being generated then an organization may wish to consider adding read access to the limited IAM user permissions simply to quiet the alerts.\n\nIn some cases doing this may allow the users to actually view some areas of the system - any additional access given should be reviewed for alignment with the original limited IAM user intent.",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for unauthorized API calls and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name \"cloudtrail_log_group_name\" --filter-name \"<unauthorized_api_calls_metric>\" --metric-transformations metricName=unauthorized_api_calls_metric,metricNamespace=CISBenchmark,metricValue=1 --filter-pattern \"{ ($.errorCode = \"*UnauthorizedOperation\") || ($.errorCode = \"AccessDenied*\") || ($.sourceIPAddress!=\"delivery.logs.amazonaws.com\") || ($.eventName!=\"HeadBucket\") }\"\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n**Note**: Capture the TopicArn displayed when creating the SNS Topic in Step 2.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn from step 2> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name \"unauthorized_api_calls_alarm\" --metric-name \"unauthorized_api_calls_metric\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with \"Name\":` note `<cloudtrail__name>`\n\n- From value associated with \"CloudWatchLogsLogGroupArn\" note <cloudtrail_log_group_name>\n\nExample: for CloudWatchLogsLogGroupArn that looks like arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*, <cloudtrail_log_group_name> would be NewGroup\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <\"Name\" as shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>` that you captured in step 1:\n\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n\n3. Ensure the output from the above command contains the following:\n\n```\n\"filterPattern\": \"{ ($.errorCode = *UnauthorizedOperation) || ($.errorCode = AccessDenied*) || ($.sourceIPAddress!=delivery.logs.amazonaws.com) || ($.eventName!=HeadBucket) }\",\n```\n\n4. Note the \"filterName\" `<unauthorized_api_calls_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<unauthorized_api_calls_metric>` captured in step 4.\n\n```\naws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName == `unauthorized_api_calls_metric`]\"\n```\n\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"ImpactStatement": "This alert may be triggered by normal read-only console activities that attempt to opportunistically gather optional information, but gracefully fail if they don't have permissions. If an excessive number of alerts are being generated then an organization may wish to consider adding read access to the limited IAM user permissions simply to quiet the alerts. In some cases doing this may allow the users to actually view some areas of the system - any additional access given should be reviewed for alignment with the original limited IAM user intent.",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for unauthorized API calls and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name \"cloudtrail_log_group_name\" --filter-name \"<unauthorized_api_calls_metric>\" --metric-transformations metricName=unauthorized_api_calls_metric,metricNamespace=CISBenchmark,metricValue=1 --filter-pattern \"{ ($.errorCode = \"*UnauthorizedOperation\") || ($.errorCode = \"AccessDenied*\") || ($.sourceIPAddress!=\"delivery.logs.amazonaws.com\") || ($.eventName!=\"HeadBucket\") }\" ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. **Note**: Capture the TopicArn displayed when creating the SNS Topic in Step 2. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn from step 2> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name \"unauthorized_api_calls_alarm\" --metric-name \"unauthorized_api_calls_metric\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with \"Name\":` note `<cloudtrail__name>` - From value associated with \"CloudWatchLogsLogGroupArn\" note <cloudtrail_log_group_name> Example: for CloudWatchLogsLogGroupArn that looks like arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*, <cloudtrail_log_group_name> would be NewGroup - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <\"Name\" as shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>` that you captured in step 1: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.errorCode = *UnauthorizedOperation) || ($.errorCode = AccessDenied*) || ($.sourceIPAddress!=delivery.logs.amazonaws.com) || ($.eventName!=HeadBucket) }\", ``` 4. Note the \"filterName\" `<unauthorized_api_calls_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<unauthorized_api_calls_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName == `unauthorized_api_calls_metric`]\" ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://aws.amazon.com/sns/:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
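Once the log group is known, audit steps 2-7 reduce to two queries. A minimal sketch, assuming the AWS CLI is configured and the literal metric name from the record above; the variable name is illustrative:

```
log_group="<cloudtrail_log_group_name>"   # from audit step 1
# Steps 2-4: confirm the unauthorized-API filter pattern is present.
aws logs describe-metric-filters --log-group-name "$log_group" \
  --query 'metricFilters[*].[filterName,filterPattern]' --output text
# Steps 5-6: confirm an alarm is wired to the noted metric name.
aws cloudwatch describe-alarms \
  --query 'MetricAlarms[?MetricName==`unauthorized_api_calls_metric`].[AlarmName,AlarmActions]'
```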
@@ -858,9 +861,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Security Groups are a stateful packet filter that controls ingress and egress traffic within a VPC. It is recommended that a metric filter and alarm be established for detecting changes to Security Groups.",
"RationaleStatement": "Monitoring changes to security group will help ensure that resources and services are not unintentionally exposed.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for security groups changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name \"<cloudtrail_log_group_name>\" --filter-name \"<security_group_changes_metric>\" --metric-transformations metricName= \"<security_group_changes_metric>\" ,metricNamespace=\"CISBenchmark\",metricValue=1 --filter-pattern \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\"\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name \"<sns_topic_name>\"\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn \"<sns_topic_arn>\" --protocol <protocol_for_sns> --notification-endpoint \"<sns_subscription_endpoints>\"\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name \"<security_group_changes_alarm>\" --metric-name \"<security_group_changes_metric>\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions \"<sns_topic_arn>\"\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\"\n```\n4. Note the `<security_group_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<security_group_changes_metric>` captured in step 4.\n```\naws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName== '<security_group_changes_metric>']\"\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for security groups changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name \"<cloudtrail_log_group_name>\" --filter-name \"<security_group_changes_metric>\" --metric-transformations metricName= \"<security_group_changes_metric>\" ,metricNamespace=\"CISBenchmark\",metricValue=1 --filter-pattern \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\" ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name \"<sns_topic_name>\" ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn \"<sns_topic_arn>\" --protocol <protocol_for_sns> --notification-endpoint \"<sns_subscription_endpoints>\" ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name \"<security_group_changes_alarm>\" --metric-name \"<security_group_changes_metric>\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions \"<sns_topic_arn>\" ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\" ``` 4. Note the `<security_group_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<security_group_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName== '<security_group_changes_metric>']\" ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
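This record and the NACL and network-gateway records that follow share the same alarm check, so it can be parameterized in one loop. A minimal sketch, assuming the AWS CLI is configured and that the filters were created with the literal placeholder names used in these records:

```
# One alarm check covering the security-group, NACL, and network-gateway metrics.
for metric in security_group_changes_metric nacl_changes_metric network_gw_changes_metric; do
  echo "alarms for $metric:"
  aws cloudwatch describe-alarms \
    --query "MetricAlarms[?MetricName=='$metric'].AlarmName" --output text
done
```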
@@ -879,9 +882,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. NACLs are used as a stateless packet filter to control ingress and egress traffic for subnets within a VPC. It is recommended that a metric filter and alarm be established for changes made to NACLs.",
"RationaleStatement": "Monitoring changes to NACLs will help ensure that AWS resources and services are not unintentionally exposed.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for NACL changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<nacl_changes_metric>` --metric-transformations metricName= `<nacl_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<nacl_changes_alarm>` --metric-name `<nacl_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }\"\n```\n4. Note the `<nacl_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<nacl_changes_metric>` captured in step 4.\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<nacl_changes_metric>`]'\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for NACL changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<nacl_changes_metric>` --metric-transformations metricName= `<nacl_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<nacl_changes_alarm>` --metric-name `<nacl_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }\" ``` 4. Note the `<nacl_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<nacl_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<nacl_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
@@ -900,9 +903,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Network gateways are required to send/receive traffic to a destination outside of a VPC. It is recommended that a metric filter and alarm be established for changes to network gateways.",
"RationaleStatement": "Monitoring changes to network gateways will help ensure that all ingress/egress traffic traverses the VPC border via a controlled path.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for network gateways changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<network_gw_changes_metric>` --metric-transformations metricName= `<network_gw_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<network_gw_changes_alarm>` --metric-name `<network_gw_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }\"\n```\n4. Note the `<network_gw_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<network_gw_changes_metric>` captured in step 4.\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<network_gw_changes_metric>`]'\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for network gateways changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<network_gw_changes_metric>` --metric-transformations metricName= `<network_gw_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<network_gw_changes_alarm>` --metric-name `<network_gw_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }\" ``` 4. Note the `<network_gw_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<network_gw_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<network_gw_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
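The four remediation steps above translate almost one-to-one into boto3 calls. Here is a sketch for this network-gateway check; the topic name and email endpoint are invented placeholders, not benchmark requirements, and note that `metricValue` is passed as the string `"1"`.

```
import boto3

LOG_GROUP = "NewGroup"  # from audit step 1; placeholder
METRIC = "network_gw_changes_metric"
PATTERN = (
    "{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || "
    "($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || "
    "($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }"
)

logs = boto3.client("logs")
sns = boto3.client("sns")
cloudwatch = boto3.client("cloudwatch")

# 1. Metric filter on the CloudTrail log group.
logs.put_metric_filter(
    logGroupName=LOG_GROUP,
    filterName=METRIC,
    filterPattern=PATTERN,
    metricTransformations=[{
        "metricName": METRIC,
        "metricNamespace": "CISBenchmark",
        "metricValue": "1",
    }],
)

# 2-3. SNS topic plus one subscription (re-usable across all alarms).
topic_arn = sns.create_topic(Name="cis-monitoring-alarms")["TopicArn"]
sns.subscribe(TopicArn=topic_arn, Protocol="email", Endpoint="secops@example.com")

# 4. Alarm on the new metric, notifying the topic.
cloudwatch.put_metric_alarm(
    AlarmName="network_gw_changes_alarm",
    MetricName=METRIC,
    Namespace="CISBenchmark",
    Statistic="Sum",
    Period=300,
    Threshold=1,
    ComparisonOperator="GreaterThanOrEqualToThreshold",
    EvaluationPeriods=1,
    AlarmActions=[topic_arn],
)
```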
@@ -921,9 +924,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Routing tables are used to route network traffic between subnets and to network gateways. It is recommended that a metric filter and alarm be established for changes to route tables.",
"RationaleStatement": "Monitoring changes to route tables will help ensure that all VPC traffic flows through an expected path.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for route table changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<route_table_changes_metric>` --metric-transformations metricName= `<route_table_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<route_table_changes_alarm>` --metric-name `<route_table_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n\n3. Ensure the output from the above command contains the following:\n\n```\n\"filterPattern\": \"{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }\"\n```\n\n4. Note the `<route_table_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<route_table_changes_metric>` captured in step 4.\n\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<route_table_changes_metric>`]'\n```\n\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for route table changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<route_table_changes_metric>` --metric-transformations metricName= `<route_table_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<route_table_changes_alarm>` --metric-name `<route_table_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }\" ``` 4. Note the `<route_table_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<route_table_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<route_table_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
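Audit step 1, locating an active multi-region trail that logs all management events and extracting its log group name, repeats verbatim across these checks, so it is worth scripting once. A sketch under the same assumptions (configured credentials; Python 3.10+ for the `str | None` annotation); the ARN parsing mirrors the `NewGroup` example in the text.

```
import boto3

cloudtrail = boto3.client("cloudtrail")


def find_cloudtrail_log_group() -> str | None:
    """Audit step 1: return the log group name of a healthy multi-region trail."""
    for trail in cloudtrail.describe_trails()["trailList"]:
        if not trail.get("IsMultiRegionTrail"):
            continue
        arn = trail.get("CloudWatchLogsLogGroupArn")
        if not arn:
            continue
        # The trail must actually be logging.
        if not cloudtrail.get_trail_status(Name=trail["TrailARN"])["IsLogging"]:
            continue
        # At least one selector with IncludeManagementEvents and ReadWriteType=All.
        selectors = cloudtrail.get_event_selectors(TrailName=trail["TrailARN"])
        if any(
            s.get("IncludeManagementEvents") and s.get("ReadWriteType") == "All"
            for s in selectors.get("EventSelectors") or []
        ):
            # arn:aws:logs:<region>:<account>:log-group:NewGroup:* -> NewGroup
            return arn.split(":log-group:")[1].split(":")[0]
    return None


print(find_cloudtrail_log_group())
```

Trails configured with advanced event selectors return no `EventSelectors` list, which the `or []` guard tolerates; handling advanced selectors is left out of this sketch.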
@@ -942,9 +945,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is possible to have more than 1 VPC within an account, in addition it is also possible to create a peer connection between 2 VPCs enabling network traffic to route between VPCs. It is recommended that a metric filter and alarm be established for changes made to VPCs.",
"RationaleStatement": "Monitoring changes to VPC will help ensure VPC traffic flow is not getting impacted.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for VPC changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<vpc_changes_metric>` --metric-transformations metricName= `<vpc_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<vpc_changes_alarm>` --metric-name `<vpc_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n\n3. Ensure the output from the above command contains the following:\n\n```\n\"filterPattern\": \"{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }\"\n```\n\n4. Note the `<vpc_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<vpc_changes_metric>` captured in step 4.\n\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<vpc_changes_metric>`]'\n```\n\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for VPC changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<vpc_changes_metric>` --metric-transformations metricName= `<vpc_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<vpc_changes_alarm>` --metric-name `<vpc_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }\" ``` 4. Note the `<vpc_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<vpc_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<vpc_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
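Audit step 3 asks you to eyeball the deployed `filterPattern` against the benchmark's pattern, and an exact string comparison is brittle: whitespace and clause order can legitimately differ. One illustrative relaxation, not the normative CIS comparison, is to require every `($.eventName = ...)` clause to be present, sketched below with the log group name again a placeholder.

```
import boto3

REQUIRED_EVENTS = {
    "CreateVpc", "DeleteVpc", "ModifyVpcAttribute",
    "AcceptVpcPeeringConnection", "CreateVpcPeeringConnection",
    "DeleteVpcPeeringConnection", "RejectVpcPeeringConnection",
    "AttachClassicLinkVpc", "DetachClassicLinkVpc",
    "DisableVpcClassicLink", "EnableVpcClassicLink",
}

logs = boto3.client("logs")


def vpc_pattern_covered(log_group: str) -> bool:
    """True if some filter on the log group contains every required clause."""
    for mf in logs.describe_metric_filters(logGroupName=log_group)["metricFilters"]:
        pattern = mf.get("filterPattern", "")
        # Match whole clauses, not bare substrings, so e.g. DeleteVpc is not
        # satisfied by DeleteVpcPeeringConnection alone.
        if all(f"($.eventName = {event})" in pattern for event in REQUIRED_EVENTS):
            return True
    return False


print(vpc_pattern_covered("NewGroup"))
```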
@@ -963,8 +966,8 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for AWS Organizations changes made in the master AWS Account.",
"RationaleStatement": "Monitoring AWS Organizations changes can help you prevent any unwanted, accidental or intentional modifications that may lead to unauthorized access or other security breaches. This monitoring technique helps you to ensure that any unexpected changes performed within your AWS Organizations can be investigated and any unwanted changes can be rolled back.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for AWS Organizations changes and the `<cloudtrail_log_group_name>` taken from audit step 1:\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<organizations_changes>` --metric-transformations metricName= `<organizations_changes>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }'\n```\n**Note:** You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify:\n```\naws sns create-topic --name <sns_topic_name>\n```\n**Note:** you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2:\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n**Note:** you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2:\n```\naws cloudwatch put-metric-alarm --alarm-name `<organizations_changes>` --metric-name `<organizations_changes>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "1. Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n- Identify the log group name configured for use with active multi-region CloudTrail:\n- List all CloudTrails: \n```\naws cloudtrail describe-trails\n```\n- Identify Multi region Cloudtrails, Trails with `\"IsMultiRegionTrail\"` set to true\n- From value associated with CloudWatchLogsLogGroupArn note <cloudtrail_log_group_name>\n **Example:** for CloudWatchLogsLogGroupArn that looks like arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*, <cloudtrail_log_group_name> would be NewGroup\n\n- Ensure Identified Multi region CloudTrail is active:\n```\naws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>\n```\nEnsure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events:\n```\naws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>\n```\n- Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to true and `ReadWriteType` set to `All`.\n\n2. Get a list of all associated metric filters for this <cloudtrail_log_group_name>:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }\"\n```\n4. Note the `<organizations_changes>` value associated with the filterPattern found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<organizations_changes>` captured in step 4:\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<organizations_changes>`]'\n```\n6. Note the AlarmActions value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic:\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\nExample of valid \"SubscriptionArn\": \n```\n\"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for AWS Organizations changes and the `<cloudtrail_log_group_name>` taken from audit step 1: ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<organizations_changes>` --metric-transformations metricName= `<organizations_changes>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }' ``` **Note:** You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify: ``` aws sns create-topic --name <sns_topic_name> ``` **Note:** you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2: ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note:** you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2: ``` aws cloudwatch put-metric-alarm --alarm-name `<organizations_changes>` --metric-name `<organizations_changes>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "1. Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: - Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: ``` aws cloudtrail describe-trails ``` - Identify Multi region Cloudtrails, Trails with `\"IsMultiRegionTrail\"` set to true - From value associated with CloudWatchLogsLogGroupArn note <cloudtrail_log_group_name> **Example:** for CloudWatchLogsLogGroupArn that looks like arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*, <cloudtrail_log_group_name> would be NewGroup - Ensure Identified Multi region CloudTrail is active: ``` aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail> ``` Ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events: ``` aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails> ``` - Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to true and `ReadWriteType` set to `All`. 2. Get a list of all associated metric filters for this <cloudtrail_log_group_name>: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }\" ``` 4. Note the `<organizations_changes>` value associated with the filterPattern found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<organizations_changes>` captured in step 4: ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<organizations_changes>`]' ``` 6. Note the AlarmActions value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic: ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. Example of valid \"SubscriptionArn\": ``` \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_security_incident-response.html"
}
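The Organizations pattern is the easiest of these to mistype (quoted event names, doubly nested parentheses). CloudWatch Logs' TestMetricFilter API can validate a pattern against a sample event without touching any log group. A sketch follows; the trimmed CloudTrail record is hypothetical, and the pattern is shortened to two event names for readability.

```
import json
import boto3

logs = boto3.client("logs")

# Shortened version of the benchmark pattern; extend with the full event list.
pattern = (
    '{ ($.eventSource = organizations.amazonaws.com) && '
    '(($.eventName = "AcceptHandshake") || ($.eventName = "CreateAccount")) }'
)

# Hypothetical, trimmed CloudTrail record for an Organizations change.
sample_event = json.dumps({
    "eventSource": "organizations.amazonaws.com",
    "eventName": "CreateAccount",
})

result = logs.test_metric_filter(
    filterPattern=pattern,
    logEventMessages=[sample_event],
)
print(bool(result["matches"]))  # True if the pattern fires on the sample
```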
@@ -984,9 +987,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for console logins that are not protected by multi-factor authentication (MFA).",
"RationaleStatement": "Monitoring for single-factor console logins will increase visibility into accounts that are not protected by MFA.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for AWS Management Console sign-in without MFA and the `<cloudtrail_log_group_name>` taken from audit step 1.\n\nUse Command: \n\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<no_mfa_console_signin_metric>` --metric-transformations metricName= `<no_mfa_console_signin_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }'\n```\n\nOr (To reduce false positives incase Single Sign-On (SSO) is used in organization):\n\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<no_mfa_console_signin_metric>` --metric-transformations metricName= `<no_mfa_console_signin_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<no_mfa_console_signin_alarm>` --metric-name `<no_mfa_console_signin_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all `CloudTrails`:\n\n```\naws cloudtrail describe-trails\n```\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region `CloudTrail` is active\n\n```\naws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>\n```\n\nEnsure in the output that `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region 'Cloudtrail' captures all Management Events\n\n```\naws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>\n```\n\nEnsure in the output there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }\"\n```\n\nOr (To reduce false positives incase Single Sign-On (SSO) is used in organization):\n\n```\n\"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }\"\n```\n\n4. Note the `<no_mfa_console_signin_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<no_mfa_console_signin_metric>` captured in step 4.\n\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<no_mfa_console_signin_metric>`]'\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored\n-Filter pattern set to `{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\"}` reduces false alarms raised when user logs in via SSO account.",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for AWS Management Console sign-in without MFA and the `<cloudtrail_log_group_name>` taken from audit step 1. Use Command: ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<no_mfa_console_signin_metric>` --metric-transformations metricName= `<no_mfa_console_signin_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }' ``` Or (To reduce false positives incase Single Sign-On (SSO) is used in organization): ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<no_mfa_console_signin_metric>` --metric-transformations metricName= `<no_mfa_console_signin_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<no_mfa_console_signin_alarm>` --metric-name `<no_mfa_console_signin_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all `CloudTrails`: ``` aws cloudtrail describe-trails ``` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region `CloudTrail` is active ``` aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail> ``` Ensure in the output that `IsLogging` is set to `TRUE` - Ensure identified Multi-region 'Cloudtrail' captures all Management Events ``` aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails> ``` Ensure in the output there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }\" ``` Or (To reduce false positives incase Single Sign-On (SSO) is used in organization): ``` \"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }\" ``` 4. Note the `<no_mfa_console_signin_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<no_mfa_console_signin_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<no_mfa_console_signin_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored -Filter pattern set to `{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\"}` reduces false alarms raised when user logs in via SSO account.",
"References": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/viewing_metrics_with_cloudwatch.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
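The difference between the two MFA patterns is easiest to see on a concrete event: an SSO (AssumedRole) console login without MFA trips the basic pattern but not the SSO-aware one. A small sketch using TestMetricFilter with a hypothetical, trimmed event:

```
import json
import boto3

logs = boto3.client("logs")

BASIC = '{ ($.eventName = "ConsoleLogin") && ($.additionalEventData.MFAUsed != "Yes") }'
SSO_AWARE = (
    '{ ($.eventName = "ConsoleLogin") && ($.additionalEventData.MFAUsed != "Yes") && '
    '($.userIdentity.type = "IAMUser") && ($.responseElements.ConsoleLogin = "Success") }'
)

# Hypothetical SSO login: no MFA flag, but the principal is an assumed role.
sso_login = json.dumps({
    "eventName": "ConsoleLogin",
    "userIdentity": {"type": "AssumedRole"},
    "additionalEventData": {"MFAUsed": "No"},
    "responseElements": {"ConsoleLogin": "Success"},
})

for name, pattern in (("basic", BASIC), ("sso_aware", SSO_AWARE)):
    hits = logs.test_metric_filter(filterPattern=pattern,
                                   logEventMessages=[sso_login])["matches"]
    print(name, "fires:", bool(hits))  # basic fires; sso_aware stays quiet
```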
@@ -1005,9 +1008,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for 'root' login attempts.",
"RationaleStatement": "Monitoring for 'root' account logins will provide visibility into the use of a fully privileged account and an opportunity to reduce the use of it.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for 'Root' account usage and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name `<cloudtrail_log_group_name>` --filter-name `<root_usage_metric>` --metric-transformations metricName= `<root_usage_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<root_usage_alarm>` --metric-name `<root_usage_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails:\n\n`aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n\n3. Ensure the output from the above command contains the following:\n\n```\n\"filterPattern\": \"{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }\"\n```\n\n4. Note the `<root_usage_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<root_usage_metric>` captured in step 4.\n\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<root_usage_metric>`]'\n```\n\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "**Configuring log metric filter and alarm on Multi-region (global) CloudTrail**\n\n- ensures that activities from all regions (used as well as unused) are monitored\n\n- ensures that activities on all supported global services are monitored\n\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for 'Root' account usage and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name `<cloudtrail_log_group_name>` --filter-name `<root_usage_metric>` --metric-transformations metricName= `<root_usage_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<root_usage_alarm>` --metric-name `<root_usage_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }\" ``` 4. Note the `<root_usage_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<root_usage_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<root_usage_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "**Configuring log metric filter and alarm on Multi-region (global) CloudTrail** - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
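Taken together, the four remediation commands in this entry reduce to one shell sequence. The following is a minimal sketch, not part of the benchmark file itself: the log group `CloudTrail/DefaultLogGroup`, the names `RootUsage`, `RootUsageAlarm`, and `cis-alarms`, and the email endpoint are all illustrative placeholders.

```
# Sketch: wire up the root-usage filter and alarm end to end.
# LOG_GROUP, topic name and EMAIL are placeholders, not values mandated
# by the benchmark.
LOG_GROUP="CloudTrail/DefaultLogGroup"
EMAIL="security@example.com"

# 1. Metric filter for root account usage
aws logs put-metric-filter \
  --log-group-name "$LOG_GROUP" \
  --filter-name RootUsage \
  --metric-transformations metricName=RootUsage,metricNamespace=CISBenchmark,metricValue=1 \
  --filter-pattern '{ $.userIdentity.type = "Root" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != "AwsServiceEvent" }'

# 2./3. SNS topic plus one subscriber (the topic can be shared by all alarms)
TOPIC_ARN=$(aws sns create-topic --name cis-alarms --query TopicArn --output text)
aws sns subscribe --topic-arn "$TOPIC_ARN" --protocol email --notification-endpoint "$EMAIL"

# 4. Alarm on the metric emitted by the filter
aws cloudwatch put-metric-alarm \
  --alarm-name RootUsageAlarm \
  --metric-name RootUsage \
  --namespace CISBenchmark \
  --statistic Sum --period 300 --threshold 1 \
  --comparison-operator GreaterThanOrEqualToThreshold \
  --evaluation-periods 1 \
  --alarm-actions "$TOPIC_ARN"
```

Note that the shorthand `metricName=...,metricNamespace=...,metricValue=1` must contain no spaces; the spacing shown around the backticked placeholders in the benchmark text would not parse as-is.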
@@ -1026,9 +1029,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established changes made to Identity and Access Management (IAM) policies.",
"RationaleStatement": "Monitoring changes to IAM policies will help ensure authentication and authorization controls remain intact.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for IAM policy changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name `<cloudtrail_log_group_name>` --filter-name `<iam_changes_metric>` --metric-transformations metricName= `<iam_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<iam_changes_alarm>` --metric-name `<iam_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails:\n\n`aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n\n3. Ensure the output from the above command contains the following:\n\n```\n\"filterPattern\": \"{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}\"\n```\n\n4. Note the `<iam_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<iam_changes_metric>` captured in step 4.\n\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<iam_changes_metric>`]'\n```\n\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for IAM policy changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name `<cloudtrail_log_group_name>` --filter-name `<iam_changes_metric>` --metric-transformations metricName= `<iam_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<iam_changes_alarm>` --metric-name `<iam_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}\" ``` 4. Note the `<iam_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<iam_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<iam_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
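The IAM-policy-changes entry follows the same pipeline; only the filter pattern differs. A short sketch reusing the hypothetical `$LOG_GROUP` from the previous example, with a verification query corresponding to audit step 2:

```
# Metric filter for IAM policy mutations (filter name is a placeholder)
aws logs put-metric-filter \
  --log-group-name "$LOG_GROUP" \
  --filter-name IamPolicyChanges \
  --metric-transformations metricName=IamPolicyChanges,metricNamespace=CISBenchmark,metricValue=1 \
  --filter-pattern '{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}'

# Audit step 2: confirm the filter pattern landed on the log group
aws logs describe-metric-filters --log-group-name "$LOG_GROUP" \
  --query 'metricFilters[?filterName==`IamPolicyChanges`].filterPattern'
```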
@@ -1047,9 +1050,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.",
"RationaleStatement": "Monitoring changes to CloudTrail's configuration will help ensure sustained visibility to activities performed in the AWS account.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for cloudtrail configuration changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<cloudtrail_cfg_changes_metric>` --metric-transformations metricName= `<cloudtrail_cfg_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<cloudtrail_cfg_changes_alarm>` --metric-name `<cloudtrail_cfg_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n\n3. Ensure the output from the above command contains the following:\n\n```\n\"filterPattern\": \"{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }\"\n```\n\n4. Note the `<cloudtrail_cfg_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<cloudtrail_cfg_changes_metric>` captured in step 4.\n\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<cloudtrail_cfg_changes_metric>`]'\n```\n\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for cloudtrail configuration changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<cloudtrail_cfg_changes_metric>` --metric-transformations metricName= `<cloudtrail_cfg_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<cloudtrail_cfg_changes_alarm>` --metric-name `<cloudtrail_cfg_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }\" ``` 4. Note the `<cloudtrail_cfg_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<cloudtrail_cfg_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<cloudtrail_cfg_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
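For the CloudTrail-configuration entry, the distinguishing piece is again the pattern; the alarm check in audit steps 5 and 6 can be scripted as a JMESPath query. A sketch with hypothetical names:

```
# Metric filter for CloudTrail configuration changes (names are placeholders)
aws logs put-metric-filter \
  --log-group-name "$LOG_GROUP" \
  --filter-name CloudTrailCfgChanges \
  --metric-transformations metricName=CloudTrailCfgChanges,metricNamespace=CISBenchmark,metricValue=1 \
  --filter-pattern '{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }'

# Audit steps 5-6: confirm an alarm exists on the metric and note its SNS action
aws cloudwatch describe-alarms \
  --query 'MetricAlarms[?MetricName==`CloudTrailCfgChanges`].{Alarm:AlarmName,Actions:AlarmActions}'
```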
@@ -1068,9 +1071,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for failed console authentication attempts.",
"RationaleStatement": "Monitoring failed console logins may decrease lead time to detect an attempt to brute force a credential, which may provide an indicator, such as source IP, that can be used in other event correlation.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for AWS management Console Login Failures and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<console_signin_failure_metric>` --metric-transformations metricName= `<console_signin_failure_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }'\n```\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<console_signin_failure_alarm>` --metric-name `<console_signin_failure_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }\"\n```\n\n4. Note the `<console_signin_failure_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<console_signin_failure_metric>` captured in step 4.\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<console_signin_failure_metric>`]'\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for AWS management Console Login Failures and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<console_signin_failure_metric>` --metric-transformations metricName= `<console_signin_failure_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<console_signin_failure_alarm>` --metric-name `<console_signin_failure_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }\" ``` 4. Note the `<console_signin_failure_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<console_signin_failure_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<console_signin_failure_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
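For the failed-console-login entry, the sketch below pairs the filter with the subscriber check from audit step 7. Placeholder names again; `$TOPIC_ARN` is the hypothetical shared topic from the first example.

```
# Metric filter for failed console authentication attempts
aws logs put-metric-filter \
  --log-group-name "$LOG_GROUP" \
  --filter-name ConsoleSigninFailures \
  --metric-transformations metricName=ConsoleSigninFailures,metricNamespace=CISBenchmark,metricValue=1 \
  --filter-pattern '{ ($.eventName = ConsoleLogin) && ($.errorMessage = "Failed authentication") }'

# Audit step 7: the topic backing the alarm should have at least one confirmed
# subscriber; entries still reading "PendingConfirmation" do not count.
aws sns list-subscriptions-by-topic --topic-arn "$TOPIC_ARN" \
  --query 'Subscriptions[].SubscriptionArn'
```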
@@ -1089,9 +1092,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for customer created CMKs which have changed state to disabled or scheduled deletion.",
"RationaleStatement": "Data encrypted with disabled or deleted keys will no longer be accessible.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for disabled or scheduled for deletion CMK's and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<disable_or_delete_cmk_changes_metric>` --metric-transformations metricName= `<disable_or_delete_cmk_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }'\n```\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<disable_or_delete_cmk_changes_alarm>` --metric-name `<disable_or_delete_cmk_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }\"\n```\n4. Note the `<disable_or_delete_cmk_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<disable_or_delete_cmk_changes_metric>` captured in step 4.\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<disable_or_delete_cmk_changes_metric>`]'\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for disabled or scheduled for deletion CMK's and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<disable_or_delete_cmk_changes_metric>` --metric-transformations metricName= `<disable_or_delete_cmk_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<disable_or_delete_cmk_changes_alarm>` --metric-name `<disable_or_delete_cmk_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }\" ``` 4. Note the `<disable_or_delete_cmk_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<disable_or_delete_cmk_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<disable_or_delete_cmk_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
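For the CMK entry, a sketch of the filter plus an optional smoke test. The smoke test assumes at least one matching KMS event has already been logged; all names are illustrative.

```
# Metric filter for CMKs being disabled or scheduled for deletion
aws logs put-metric-filter \
  --log-group-name "$LOG_GROUP" \
  --filter-name DisableOrDeleteCMK \
  --metric-transformations metricName=DisableOrDeleteCMK,metricNamespace=CISBenchmark,metricValue=1 \
  --filter-pattern '{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }'

# Optional smoke test: replay the pattern against the log group directly
aws logs filter-log-events --log-group-name "$LOG_GROUP" \
  --filter-pattern '{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }' \
  --max-items 5
```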
@@ -1110,9 +1113,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for changes to S3 bucket policies.",
"RationaleStatement": "Monitoring changes to S3 bucket policies may reduce time to detect and correct permissive policies on sensitive S3 buckets.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for S3 bucket policy changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<s3_bucket_policy_changes_metric>` --metric-transformations metricName= `<s3_bucket_policy_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<s3_bucket_policy_changes_alarm>` --metric-name `<s3_bucket_policy_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }\"\n```\n4. Note the `<s3_bucket_policy_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<s3_bucket_policy_changes_metric>` captured in step 4.\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<s3_bucket_policy_changes_metric>`]'\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for S3 bucket policy changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<s3_bucket_policy_changes_metric>` --metric-transformations metricName= `<s3_bucket_policy_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<s3_bucket_policy_changes_alarm>` --metric-name `<s3_bucket_policy_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }\" ``` 4. Note the `<s3_bucket_policy_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<s3_bucket_policy_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<s3_bucket_policy_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
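For the S3 bucket-policy entry, a sketch of the filter and its alarm, reusing the hypothetical shared `$TOPIC_ARN`; the filter and alarm names are placeholders.

```
# Metric filter for S3 bucket policy/ACL/CORS/lifecycle/replication changes
aws logs put-metric-filter \
  --log-group-name "$LOG_GROUP" \
  --filter-name S3BucketPolicyChanges \
  --metric-transformations metricName=S3BucketPolicyChanges,metricNamespace=CISBenchmark,metricValue=1 \
  --filter-pattern '{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }'

# Alarm on the resulting metric, notifying the shared topic
aws cloudwatch put-metric-alarm \
  --alarm-name S3BucketPolicyChangesAlarm \
  --metric-name S3BucketPolicyChanges \
  --namespace CISBenchmark \
  --statistic Sum --period 300 --threshold 1 \
  --comparison-operator GreaterThanOrEqualToThreshold \
  --evaluation-periods 1 \
  --alarm-actions "$TOPIC_ARN"
```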
@@ -1131,9 +1134,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.",
"RationaleStatement": "Monitoring changes to AWS Config configuration will help ensure sustained visibility of configuration items within the AWS account.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for AWS Configuration changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<aws_config_changes_metric>` --metric-transformations metricName= `<aws_config_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<aws_config_changes_alarm>` --metric-name `<aws_config_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }\"\n```\n4. Note the `<aws_config_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<aws_config_changes_metric>` captured in step 4.\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<aws_config_changes_metric>`]'\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for AWS Configuration changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<aws_config_changes_metric>` --metric-transformations metricName= `<aws_config_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<aws_config_changes_alarm>` --metric-name `<aws_config_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }\" ``` 4. Note the `<aws_config_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<aws_config_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<aws_config_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
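The four remediation commands can also be strung together. A sketch with illustrative values only: the `CISBenchmark` namespace and the 300-second/1-threshold alarm settings come from the procedure, while the log group, metric name, topic name, and email endpoint below are hypothetical and must be replaced:

```bash
#!/usr/bin/env bash
# Illustrative values - substitute your own log group, names, and endpoint.
LOG_GROUP="NewGroup"
METRIC="aws_config_changes_metric"
PATTERN='{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }'

# Step 1: metric filter on the trail's log group.
aws logs put-metric-filter --log-group-name "$LOG_GROUP" \
  --filter-name "$METRIC" --filter-pattern "$PATTERN" \
  --metric-transformations metricName="$METRIC",metricNamespace=CISBenchmark,metricValue=1

# Steps 2-3: SNS topic plus one subscriber (re-usable across all alarms).
TOPIC_ARN=$(aws sns create-topic --name cis-monitoring-alarms \
  --query TopicArn --output text)
aws sns subscribe --topic-arn "$TOPIC_ARN" --protocol email \
  --notification-endpoint security@example.com

# Step 4: alarm on the new metric, firing into the topic.
aws cloudwatch put-metric-alarm --alarm-name "${METRIC}_alarm" \
  --metric-name "$METRIC" --namespace CISBenchmark --statistic Sum \
  --period 300 --threshold 1 \
  --comparison-operator GreaterThanOrEqualToThreshold \
  --evaluation-periods 1 --alarm-actions "$TOPIC_ARN"
```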
@@ -1154,8 +1157,8 @@
"Description": "The Network Access Control List (NACL) function provide stateless filtering of ingress and egress network traffic to AWS resources. It is recommended that no NACL allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
"RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\nPerform the following:\n1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home\n2. In the left pane, click `Network ACLs`\n3. For each network ACL to remediate, perform the following:\n - Select the network ACL\n - Click the `Inbound Rules` tab\n - Click `Edit inbound rules`\n - Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click `Delete` to remove the offending inbound rule\n - Click `Save`",
"AuditProcedure": "**From Console:**\n\nPerform the following to determine if the account is configured as prescribed:\n1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home\n2. In the left pane, click `Network ACLs`\n3. For each network ACL, perform the following:\n - Select the network ACL\n - Click the `Inbound Rules` tab\n - Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` and shows `ALLOW`\n\n**Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports",
"RemediationProcedure": "**From Console:** Perform the following: 1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home 2. In the left pane, click `Network ACLs` 3. For each network ACL to remediate, perform the following: - Select the network ACL - Click the `Inbound Rules` tab - Click `Edit inbound rules` - Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click `Delete` to remove the offending inbound rule - Click `Save`",
"AuditProcedure": "**From Console:** Perform the following to determine if the account is configured as prescribed: 1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home 2. In the left pane, click `Network ACLs` 3. For each network ACL, perform the following: - Select the network ACL - Click the `Inbound Rules` tab - Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` and shows `ALLOW` **Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Security.html#VPC_Security_Comparison"
}
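Checking every NACL by hand scales poorly across regions. A hedged CLI alternative: JMESPath cannot easily test port-range overlap, so each hit's `PortRange` still needs a manual review against your administration ports.

```bash
# Ingress allow-rules open to the world; inspect PortRange on each hit
# for overlap with 22, 3389, or other administration ports.
aws ec2 describe-network-acls \
  --query 'NetworkAcls[].{Id:NetworkAclId,OpenEntries:Entries[?Egress==`false` && RuleAction==`"allow"` && CidrBlock==`"0.0.0.0/0"`]}'
```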
@@ -1177,8 +1180,8 @@
"Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
"RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.",
"ImpactStatement": "When updating an existing environment, ensure that administrators have access to remote server administration ports through another mechanism before removing access by deleting the 0.0.0.0/0 inbound rule.",
"RemediationProcedure": "Perform the following to implement the prescribed state:\n\n1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home)\n2. In the left pane, click `Security Groups` \n3. For each security group, perform the following:\n1. Select the security group\n2. Click the `Inbound Rules` tab\n3. Click the `Edit inbound rules` button\n4. Identify the rules to be edited or removed\n5. Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click `Delete` to remove the offending inbound rule\n6. Click `Save rules`",
"AuditProcedure": "Perform the following to determine if the account is configured as prescribed:\n\n1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home)\n2. In the left pane, click `Security Groups` \n3. For each security group, perform the following:\n1. Select the security group\n2. Click the `Inbound Rules` tab\n3. Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` \n\n**Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports.",
"RemediationProcedure": "Perform the following to implement the prescribed state: 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups` 3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Click the `Edit inbound rules` button 4. Identify the rules to be edited or removed 5. Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click `Delete` to remove the offending inbound rule 6. Click `Save rules`",
"AuditProcedure": "Perform the following to determine if the account is configured as prescribed: 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups` 3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` **Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#deleting-security-group-rule"
}
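For security groups, the EC2 API exposes server-side filters that make this sweep scriptable; `ip-permission.cidr` and `ip-permission.from-port` are standard `describe-security-groups` filters. One caveat: `from-port` matches the start of a rule's range, so a broad rule such as `0-1024` will not match `22` and must be reviewed separately.

```bash
# Security groups with an ingress rule from 0.0.0.0/0 starting at port 22.
aws ec2 describe-security-groups \
  --filters Name=ip-permission.cidr,Values='0.0.0.0/0' \
            Name=ip-permission.from-port,Values=22 \
  --query 'SecurityGroups[].{Id:GroupId,Name:GroupName}'
# Repeat with Values=3389 (RDP) and any other administration ports in use.
```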
@@ -1195,11 +1198,11 @@
"Section": "5. Networking",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "A VPC comes with a default security group whose initial settings deny all inbound traffic, allow all outbound traffic, and allow all traffic between instances assigned to the security group. If you don't specify a security group when you launch an instance, the instance is automatically assigned to this default security group. Security groups provide stateful filtering of ingress/egress network traffic to AWS resources. It is recommended that the default security group restrict all traffic.\n\nThe default VPC in every region should have its default security group updated to comply. Any newly created VPCs will automatically contain a default security group that will need remediation to comply with this recommendation.\n\n**NOTE:** When implementing this recommendation, VPC flow logging is invaluable in determining the least privilege port access required by systems to work properly because it can log all packet acceptances and rejections occurring under the current security groups. This dramatically reduces the primary barrier to least privilege engineering - discovering the minimum ports required by systems in the environment. Even if the VPC flow logging recommendation in this benchmark is not adopted as a permanent security measure, it should be used during any period of discovery and engineering for least privileged security groups.",
"Description": "A VPC comes with a default security group whose initial settings deny all inbound traffic, allow all outbound traffic, and allow all traffic between instances assigned to the security group. If you don't specify a security group when you launch an instance, the instance is automatically assigned to this default security group. Security groups provide stateful filtering of ingress/egress network traffic to AWS resources. It is recommended that the default security group restrict all traffic. The default VPC in every region should have its default security group updated to comply. Any newly created VPCs will automatically contain a default security group that will need remediation to comply with this recommendation. **NOTE:** When implementing this recommendation, VPC flow logging is invaluable in determining the least privilege port access required by systems to work properly because it can log all packet acceptances and rejections occurring under the current security groups. This dramatically reduces the primary barrier to least privilege engineering - discovering the minimum ports required by systems in the environment. Even if the VPC flow logging recommendation in this benchmark is not adopted as a permanent security measure, it should be used during any period of discovery and engineering for least privileged security groups.",
"RationaleStatement": "Configuring all VPC default security groups to restrict all traffic will encourage least privilege security group development and mindful placement of AWS resources into security groups which will in-turn reduce the exposure of those resources.",
"ImpactStatement": "Implementing this recommendation in an existing VPC containing operating resources requires extremely careful migration planning as the default security groups are likely to be enabling many ports that are unknown. Enabling VPC flow logging (of accepts) in an existing environment that is known to be breach free will reveal the current pattern of ports being used for each instance to communicate successfully.",
"RemediationProcedure": "Security Group Members\n\nPerform the following to implement the prescribed state:\n\n1. Identify AWS resources that exist within the default security group\n2. Create a set of least privilege security groups for those resources\n3. Place the resources in those security groups\n4. Remove the resources noted in #1 from the default security group\n\nSecurity Group State\n\n1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home)\n2. Repeat the next steps for all VPCs - including the default VPC in each AWS region:\n3. In the left pane, click `Security Groups` \n4. For each default security group, perform the following:\n1. Select the `default` security group\n2. Click the `Inbound Rules` tab\n3. Remove any inbound rules\n4. Click the `Outbound Rules` tab\n5. Remove any Outbound rules\n\nRecommended:\n\nIAM groups allow you to edit the \"name\" field. After remediating default groups rules for all VPCs in all regions, edit this field to add text similar to \"DO NOT USE. DO NOT ADD RULES\"",
"AuditProcedure": "Perform the following to determine if the account is configured as prescribed:\n\nSecurity Group State\n\n1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home)\n2. Repeat the next steps for all VPCs - including the default VPC in each AWS region:\n3. In the left pane, click `Security Groups` \n4. For each default security group, perform the following:\n1. Select the `default` security group\n2. Click the `Inbound Rules` tab\n3. Ensure no rule exist\n4. Click the `Outbound Rules` tab\n5. Ensure no rules exist\n\nSecurity Group Members\n\n1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home)\n2. Repeat the next steps for all default groups in all VPCs - including the default VPC in each AWS region:\n3. In the left pane, click `Security Groups` \n4. Copy the id of the default security group.\n5. Change to the EC2 Management Console at https://console.aws.amazon.com/ec2/v2/home\n6. In the filter column type 'Security Group ID : < security group id from #4 >'",
"RemediationProcedure": "Security Group Members Perform the following to implement the prescribed state: 1. Identify AWS resources that exist within the default security group 2. Create a set of least privilege security groups for those resources 3. Place the resources in those security groups 4. Remove the resources noted in #1 from the default security group Security Group State 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups` 4. For each default security group, perform the following: 1. Select the `default` security group 2. Click the `Inbound Rules` tab 3. Remove any inbound rules 4. Click the `Outbound Rules` tab 5. Remove any Outbound rules Recommended: IAM groups allow you to edit the \"name\" field. After remediating default groups rules for all VPCs in all regions, edit this field to add text similar to \"DO NOT USE. DO NOT ADD RULES\"",
"AuditProcedure": "Perform the following to determine if the account is configured as prescribed: Security Group State 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups` 4. For each default security group, perform the following: 1. Select the `default` security group 2. Click the `Inbound Rules` tab 3. Ensure no rule exist 4. Click the `Outbound Rules` tab 5. Ensure no rules exist Security Group Members 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all default groups in all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups` 4. Copy the id of the default security group. 5. Change to the EC2 Management Console at https://console.aws.amazon.com/ec2/v2/home 6. In the filter column type 'Security Group ID : < security group id from #4 >'",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#default-security-group"
}
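Emptying the default group in every VPC is repetitive, so it lends itself to scripting. A sketch for one region, assuming the common pattern of feeding `describe-security-groups` output back into the `revoke` calls (verify against your CLI version before relying on it):

```bash
#!/usr/bin/env bash
# Remove every inbound and outbound rule from each default security group
# in the current region. Default groups cannot be deleted, only emptied.
for sg in $(aws ec2 describe-security-groups \
              --filters Name=group-name,Values=default \
              --query 'SecurityGroups[].GroupId' --output text); do
  ingress=$(aws ec2 describe-security-groups --group-ids "$sg" \
              --query 'SecurityGroups[0].IpPermissions')
  egress=$(aws ec2 describe-security-groups --group-ids "$sg" \
              --query 'SecurityGroups[0].IpPermissionsEgress')
  [ "$ingress" != "[]" ] && aws ec2 revoke-security-group-ingress \
      --group-id "$sg" --ip-permissions "$ingress"
  [ "$egress" != "[]" ] && aws ec2 revoke-security-group-egress \
      --group-id "$sg" --ip-permissions "$egress"
done
```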
@@ -1219,8 +1222,8 @@
"Description": "Once a VPC peering connection is established, routing tables must be updated to establish any connections between the peered VPCs. These routes can be as specific as desired - even peering a VPC to only a single host on the other side of the connection.",
"RationaleStatement": "Being highly selective in peering routing tables is a very effective way of minimizing the impact of breach as resources outside of these routes are inaccessible to the peered VPC.",
"ImpactStatement": "",
"RemediationProcedure": "Remove and add route table entries to ensure that the least number of subnets or hosts as is required to accomplish the purpose for peering are routable.\n\n**From Command Line:**\n\n1. For each _<route\\_table\\_id>_ containing routes non compliant with your routing policy (which grants more than desired \"least access\"), delete the non compliant route:\n```\naws ec2 delete-route --route-table-id <route_table_id> --destination-cidr-block <non_compliant_destination_CIDR>\n```\n 2. Create a new compliant route:\n```\naws ec2 create-route --route-table-id <route_table_id> --destination-cidr-block <compliant_destination_CIDR> --vpc-peering-connection-id <peering_connection_id>\n```",
"AuditProcedure": "Review routing tables of peered VPCs for whether they route all subnets of each VPC and whether that is necessary to accomplish the intended purposes for peering the VPCs.\n\n**From Command Line:**\n\n1. List all the route tables from a VPC and check if \"GatewayId\" is pointing to a _<peering\\_connection\\_id>_ (e.g. pcx-1a2b3c4d) and if \"DestinationCidrBlock\" is as specific as desired.\n```\naws ec2 describe-route-tables --filter \"Name=vpc-id,Values=<vpc_id>\" --query \"RouteTables[*].{RouteTableId:RouteTableId, VpcId:VpcId, Routes:Routes, AssociatedSubnets:Associations[*].SubnetId}\"\n```",
"RemediationProcedure": "Remove and add route table entries to ensure that the least number of subnets or hosts as is required to accomplish the purpose for peering are routable. **From Command Line:** 1. For each _<route\\_table\\_id>_ containing routes non compliant with your routing policy (which grants more than desired \"least access\"), delete the non compliant route: ``` aws ec2 delete-route --route-table-id <route_table_id> --destination-cidr-block <non_compliant_destination_CIDR> ``` 2. Create a new compliant route: ``` aws ec2 create-route --route-table-id <route_table_id> --destination-cidr-block <compliant_destination_CIDR> --vpc-peering-connection-id <peering_connection_id> ```",
"AuditProcedure": "Review routing tables of peered VPCs for whether they route all subnets of each VPC and whether that is necessary to accomplish the intended purposes for peering the VPCs. **From Command Line:** 1. List all the route tables from a VPC and check if \"GatewayId\" is pointing to a _<peering\\_connection\\_id>_ (e.g. pcx-1a2b3c4d) and if \"DestinationCidrBlock\" is as specific as desired. ``` aws ec2 describe-route-tables --filter \"Name=vpc-id,Values=<vpc_id>\" --query \"RouteTables[*].{RouteTableId:RouteTableId, VpcId:VpcId, Routes:Routes, AssociatedSubnets:Associations[*].SubnetId}\" ```",
"AdditionalInformation": "If an organization has AWS transit gateway implemented in their VPC architecture they should look to apply the recommendation above for \"least access\" routing architecture at the AWS transit gateway level in combination with what must be implemented at the standard VPC route table. More specifically, to route traffic between two or more VPCs via a transit gateway VPCs must have an attachment to a transit gateway route table as well as a route, therefore to avoid routing traffic between VPCs an attachment to the transit gateway route table should only be added where there is an intention to route traffic between the VPCs. As transit gateways are able to host multiple route tables it is possible to group VPCs by attaching them to a common route table.",
"References": "https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/peering-configurations-partial-access.html:https://docs.aws.amazon.com/cli/latest/reference/ec2/create-vpc-peering-connection.html"
}
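To surface only the routes this audit cares about, the `describe-route-tables` query can be narrowed to routes whose target is a peering connection (`VpcPeeringConnectionId` is the route attribute set on such routes); each returned CIDR should then be judged against the purpose of the peering:

```bash
# Routes pointing at a VPC peering connection; verify each
# DestinationCidrBlock is as narrow as the peering purpose allows.
aws ec2 describe-route-tables \
  --query 'RouteTables[].{Table:RouteTableId,PeerRoutes:Routes[?VpcPeeringConnectionId!=`null`].[DestinationCidrBlock,VpcPeeringConnectionId]}'
```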


@@ -15,11 +15,11 @@
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization.\n\nAn AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy or indicative of likely security compromise is observed by the AWS Abuse team. Contact details should not be for a single individual, as circumstances may arise where that individual is unavailable. Email contact details should point to a mail alias which forwards email to multiple individuals within the organization; where feasible, phone contact details should point to a PABX hunt group or other call-forwarding system.",
"Description": "Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization. An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy or indicative of likely security compromise is observed by the AWS Abuse team. Contact details should not be for a single individual, as circumstances may arise where that individual is unavailable. Email contact details should point to a mail alias which forwards email to multiple individuals within the organization; where feasible, phone contact details should point to a PABX hunt group or other call-forwarding system.",
"RationaleStatement": "If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior needs urgent mitigation, proactive measures may be taken, including throttling of traffic between the account exhibiting suspicious behavior and the AWS API endpoints and the Internet. This will result in impaired service to and from the account in question, so it is in both the customers' and AWS' best interests that prompt contact can be established. This is best achieved by setting AWS account contact details to point to resources which have multiple individuals as recipients, such as email aliases and PABX hunt groups.",
"ImpactStatement": "",
"RemediationProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:\\*Billing ).\n\n1. Sign in to the AWS Management Console and open the `Billing and Cost Management` console at https://console.aws.amazon.com/billing/home#/.\n2. On the navigation bar, choose your account name, and then choose `My Account`.\n3. On the `Account Settings` page, next to `Account Settings`, choose `Edit`.\n4. Next to the field that you need to update, choose `Edit`.\n5. After you have entered your changes, choose `Save changes`.\n6. After you have made your changes, choose `Done`.\n7. To edit your contact information, under `Contact Information`, choose `Edit`.\n8. For the fields that you want to change, type your updated information, and then choose `Update`.",
"AuditProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:\\*Billing )\n\n1. Sign in to the AWS Management Console and open the `Billing and Cost Management` console at https://console.aws.amazon.com/billing/home#/.\n2. On the navigation bar, choose your account name, and then choose `My Account`.\n3. On the `Account Settings` page, review and verify the current details.\n4. Under `Contact Information`, review and verify the current details.",
"RemediationProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:\\*Billing ). 1. Sign in to the AWS Management Console and open the `Billing and Cost Management` console at https://console.aws.amazon.com/billing/home#/. 2. On the navigation bar, choose your account name, and then choose `My Account`. 3. On the `Account Settings` page, next to `Account Settings`, choose `Edit`. 4. Next to the field that you need to update, choose `Edit`. 5. After you have entered your changes, choose `Save changes`. 6. After you have made your changes, choose `Done`. 7. To edit your contact information, under `Contact Information`, choose `Edit`. 8. For the fields that you want to change, type your updated information, and then choose `Update`.",
"AuditProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:\\*Billing ) 1. Sign in to the AWS Management Console and open the `Billing and Cost Management` console at https://console.aws.amazon.com/billing/home#/. 2. On the navigation bar, choose your account name, and then choose `My Account`. 3. On the `Account Settings` page, review and verify the current details. 4. Under `Contact Information`, review and verify the current details.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/manage-account-payment.html#contact-info"
}
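Although the procedure describes this as console-only, newer AWS CLI releases expose the Account Management API, which can at least make the periodic review scriptable. This assumes the caller has `account:GetContactInformation` / `account:GetAlternateContact` permissions; note that `get-alternate-contact` returns an error when no such contact is configured.

```bash
# Review the primary account contact and the security alternate contact.
aws account get-contact-information
aws account get-alternate-contact --alternate-contact-type SECURITY
```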
@@ -39,9 +39,9 @@
"Description": "Multi-Factor Authentication (MFA) adds an extra layer of authentication assurance beyond traditional credentials. With MFA enabled, when a user signs in to the AWS Console, they will be prompted for their user name and password as well as for an authentication code from their physical or virtual MFA token. It is recommended that MFA be enabled for all accounts that have a console password.",
"RationaleStatement": "Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that displays a time-sensitive key and have knowledge of a credential.",
"ImpactStatement": "AWS will soon end support for SMS multi-factor authentication (MFA). New customers are not allowed to use this feature. We recommend that existing customers switch to one of the following alternative methods of MFA.",
"RemediationProcedure": "Perform the following to enable MFA:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the IAM console at 'https://console.aws.amazon.com/iam/'\n2. In the left pane, select `Users`.\n3. In the `User Name` list, choose the name of the intended MFA user.\n4. Choose the `Security Credentials` tab, and then choose `Manage MFA Device`.\n5. In the `Manage MFA Device wizard`, choose `Virtual MFA` device, and then choose `Continue`.\n\n IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes.\n\n6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see Virtual MFA Applications at https://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications). If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device).\n7. Determine whether the MFA app supports QR codes, and then do one of the following:\n\n - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code.\n - In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application.\n\n When you are finished, the virtual MFA device starts generating one-time passwords.\n\n8. In the `Manage MFA Device wizard`, in the `MFA Code 1 box`, type the `one-time password` that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second `one-time password` into the `MFA Code 2 box`.\n\n9. Click `Assign MFA`.",
"AuditProcedure": "Perform the following to determine if a MFA device is enabled for all IAM users having a console password:\n\n**From Console:**\n\n1. Open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\n2. In the left pane, select `Users` \n3. If the `MFA` or `Password age` columns are not visible in the table, click the gear icon at the upper right corner of the table and ensure a checkmark is next to both, then click `Close`.\n4. Ensure that for each user where the `Password age` column shows a password age, the `MFA` column shows `Virtual`, `U2F Security Key`, or `Hardware`.\n\n**From Command Line:**\n\n1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their password and MFA status:\n```\n aws iam generate-credential-report\n```\n```\n aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,8 \n```\n2. The output of this command will produce a table similar to the following:\n```\n user,password_enabled,mfa_active\n elise,false,false\n brandon,true,true\n rakesh,false,false\n helene,false,false\n paras,true,true\n anitha,false,false \n```\n3. For any column having `password_enabled` set to `true` , ensure `mfa_active` is also set to `true.`",
"AdditionalInformation": "**Forced IAM User Self-Service Remediation**\n\nAmazon has published a pattern that forces users to self-service setup MFA before they have access to their complete permissions set. Until they complete this step, they cannot access their full permissions. This pattern can be used on new AWS accounts. It can also be used on existing accounts - it is recommended users are given instructions and a grace period to accomplish MFA enrollment before active enforcement on existing AWS accounts.",
"RemediationProcedure": "Perform the following to enable MFA: **From Console:** 1. Sign in to the AWS Management Console and open the IAM console at 'https://console.aws.amazon.com/iam/' 2. In the left pane, select `Users`. 3. In the `User Name` list, choose the name of the intended MFA user. 4. Choose the `Security Credentials` tab, and then choose `Manage MFA Device`. 5. In the `Manage MFA Device wizard`, choose `Virtual MFA` device, and then choose `Continue`. IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes. 6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see Virtual MFA Applications at https://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications). If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device). 7. Determine whether the MFA app supports QR codes, and then do one of the following: - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code. - In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application. When you are finished, the virtual MFA device starts generating one-time passwords. 8. In the `Manage MFA Device wizard`, in the `MFA Code 1 box`, type the `one-time password` that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second `one-time password` into the `MFA Code 2 box`. 9. Click `Assign MFA`.",
"AuditProcedure": "Perform the following to determine if a MFA device is enabled for all IAM users having a console password: **From Console:** 1. Open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the left pane, select `Users` 3. If the `MFA` or `Password age` columns are not visible in the table, click the gear icon at the upper right corner of the table and ensure a checkmark is next to both, then click `Close`. 4. Ensure that for each user where the `Password age` column shows a password age, the `MFA` column shows `Virtual`, `U2F Security Key`, or `Hardware`. **From Command Line:** 1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their password and MFA status: ``` aws iam generate-credential-report ``` ``` aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,8 ``` 2. The output of this command will produce a table similar to the following: ``` user,password_enabled,mfa_active elise,false,false brandon,true,true rakesh,false,false helene,false,false paras,true,true anitha,false,false ``` 3. For any column having `password_enabled` set to `true` , ensure `mfa_active` is also set to `true.`",
"AdditionalInformation": "**Forced IAM User Self-Service Remediation** Amazon has published a pattern that forces users to self-service setup MFA before they have access to their complete permissions set. Until they complete this step, they cannot access their full permissions. This pattern can be used on new AWS accounts. It can also be used on existing accounts - it is recommended users are given instructions and a grace period to accomplish MFA enrollment before active enforcement on existing AWS accounts.",
"References": "https://tools.ietf.org/html/rfc6238:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#enable-mfa-for-privileged-users:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html:https://blogs.aws.amazon.com/security/post/Tx2SJJYE082KBUK/How-to-Delegate-Management-of-Multi-Factor-Authentication-to-AWS-IAM-Users"
}
]
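The two credential-report commands in the audit can be collapsed into one pipeline that prints only the offending users; the column positions (1 = user, 4 = password_enabled, 8 = mfa_active) follow the `cut` field list in the procedure above:

```bash
# Users who can sign in to the console but have no MFA device enabled.
# generate-credential-report may need re-running until the report is ready.
aws iam generate-credential-report >/dev/null
aws iam get-credential-report --query 'Content' --output text \
  | base64 -d \
  | awk -F, 'NR > 1 && $4 == "true" && $8 == "false" { print $1 }'
```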
@@ -57,11 +57,11 @@
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "AWS console defaults to no check boxes selected when creating a new IAM user. When cerating the IAM User credentials you have to determine what type of access they require. \n\nProgrammatic access: The IAM user might need to make API calls, use the AWS CLI, or use the Tools for Windows PowerShell. In that case, create an access key (access key ID and a secret access key) for that user. \n\nAWS Management Console access: If the user needs to access the AWS Management Console, create a password for the user.",
"RationaleStatement": "Requiring the additional steps be taken by the user for programmatic access after their profile has been created will give a stronger indication of intent that access keys are [a] necessary for their work and [b] once the access key is established on an account that the keys may be in use somewhere in the organization.\n\n**Note**: Even if it is known the user will need access keys, require them to create the keys themselves or put in a support ticket to have them created as a separate step from user creation.",
"Description": "AWS console defaults to no check boxes selected when creating a new IAM user. When cerating the IAM User credentials you have to determine what type of access they require. Programmatic access: The IAM user might need to make API calls, use the AWS CLI, or use the Tools for Windows PowerShell. In that case, create an access key (access key ID and a secret access key) for that user. AWS Management Console access: If the user needs to access the AWS Management Console, create a password for the user.",
"RationaleStatement": "Requiring the additional steps be taken by the user for programmatic access after their profile has been created will give a stronger indication of intent that access keys are [a] necessary for their work and [b] once the access key is established on an account that the keys may be in use somewhere in the organization. **Note**: Even if it is known the user will need access keys, require them to create the keys themselves or put in a support ticket to have them created as a separate step from user creation.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to delete access keys that do not pass the audit:\n\n**From Console:**\n\n1. Login to the AWS Management Console:\n2. Click `Services` \n3. Click `IAM` \n4. Click on `Users` \n5. Click on `Security Credentials` \n6. As an Administrator \n - Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used.\n7. As an IAM User\n - Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used.\n\n**From Command Line:**\n```\naws iam delete-access-key --access-key-id <access-key-id-listed> --user-name <users-name>\n```",
"AuditProcedure": "Perform the following to determine if access keys were created upon user creation and are being used and rotated as prescribed:\n\n**From Console:**\n\n1. Login to the AWS Management Console\n2. Click `Services` \n3. Click `IAM` \n4. Click on a User where column `Password age` and `Access key age` is not set to `None`\n5. Click on `Security credentials` Tab\n6. Compare the user 'Creation time` to the Access Key `Created` date.\n6. For any that match, the key was created during initial user setup.\n\n- Keys that were created at the same time as the user profile and do not have a last used date should be deleted. Refer to the remediation below.\n\n**From Command Line:**\n\n1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their access keys utilization:\n```\n aws iam generate-credential-report\n```\n```\n aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,9,11,14,16\n```\n2. The output of this command will produce a table similar to the following:\n```\nuser,password_enabled,access_key_1_active,access_key_1_last_used_date,access_key_2_active,access_key_2_last_used_date\n elise,false,true,2015-04-16T15:14:00+00:00,false,N/A\n brandon,true,true,N/A,false,N/A\n rakesh,false,false,N/A,false,N/A\n helene,false,true,2015-11-18T17:47:00+00:00,false,N/A\n paras,true,true,2016-08-28T12:04:00+00:00,true,2016-03-04T10:11:00+00:00\n anitha,true,true,2016-06-08T11:43:00+00:00,true,N/A \n```\n3. For any user having `password_enabled` set to `true` AND `access_key_last_used_date` set to `N/A` refer to the remediation below.",
"RemediationProcedure": "Perform the following to delete access keys that do not pass the audit: **From Console:** 1. Login to the AWS Management Console: 2. Click `Services` 3. Click `IAM` 4. Click on `Users` 5. Click on `Security Credentials` 6. As an Administrator - Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used. 7. As an IAM User - Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used. **From Command Line:** ``` aws iam delete-access-key --access-key-id <access-key-id-listed> --user-name <users-name> ```",
"AuditProcedure": "Perform the following to determine if access keys were created upon user creation and are being used and rotated as prescribed: **From Console:** 1. Login to the AWS Management Console 2. Click `Services` 3. Click `IAM` 4. Click on a User where column `Password age` and `Access key age` is not set to `None` 5. Click on `Security credentials` Tab 6. Compare the user 'Creation time` to the Access Key `Created` date. 6. For any that match, the key was created during initial user setup. - Keys that were created at the same time as the user profile and do not have a last used date should be deleted. Refer to the remediation below. **From Command Line:** 1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their access keys utilization: ``` aws iam generate-credential-report ``` ``` aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,9,11,14,16 ``` 2. The output of this command will produce a table similar to the following: ``` user,password_enabled,access_key_1_active,access_key_1_last_used_date,access_key_2_active,access_key_2_last_used_date elise,false,true,2015-04-16T15:14:00+00:00,false,N/A brandon,true,true,N/A,false,N/A rakesh,false,false,N/A,false,N/A helene,false,true,2015-11-18T17:47:00+00:00,false,N/A paras,true,true,2016-08-28T12:04:00+00:00,true,2016-03-04T10:11:00+00:00 anitha,true,true,2016-06-08T11:43:00+00:00,true,N/A ``` 3. For any user having `password_enabled` set to `true` AND `access_key_last_used_date` set to `N/A` refer to the remediation below.",
"AdditionalInformation": "Credential report does not appear to contain \"Key Creation Date\"",
"References": "https://docs.aws.amazon.com/cli/latest/reference/iam/delete-access-key.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html"
}
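Extending the audit pipeline, a single `awk` filter flags active keys that have never been used, i.e. keys likely created automatically at user-creation time (column positions follow the `cut` field list in the procedure above):

```bash
# Users with an active access key that has never been used.
aws iam get-credential-report --query 'Content' --output text \
  | base64 -d \
  | cut -d, -f1,4,9,11,14,16 \
  | awk -F, 'NR > 1 && (($3 == "true" && $4 == "N/A") ||
                        ($5 == "true" && $6 == "N/A")) { print $1 }'
```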
@@ -71,7 +71,8 @@
"Id": "1.12",
"Description": "Ensure credentials unused for 45 days or greater are disabled",
"Checks": [
"iam_disable_45_days_credentials"
"iam_user_accesskey_unused",
"iam_user_console_access_unused"
],
"Attributes": [
{
@@ -81,8 +82,8 @@
"Description": "AWS IAM users can access AWS resources using different types of credentials, such as passwords or access keys. It is recommended that all credentials that have been unused in 45 or greater days be deactivated or removed.",
"RationaleStatement": "Disabling or removing unnecessary credentials will reduce the window of opportunity for credentials associated with a compromised or abandoned account to be used.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\nPerform the following to manage Unused Password (IAM user console access)\n\n1. Login to the AWS Management Console:\n2. Click `Services` \n3. Click `IAM` \n4. Click on `Users` \n5. Click on `Security Credentials` \n6. Select user whose `Console last sign-in` is greater than 45 days\n7. Click `Security credentials`\n8. In section `Sign-in credentials`, `Console password` click `Manage` \n9. Under Console Access select `Disable`\n10.Click `Apply`\n\nPerform the following to deactivate Access Keys:\n\n1. Login to the AWS Management Console:\n2. Click `Services` \n3. Click `IAM` \n4. Click on `Users` \n5. Click on `Security Credentials` \n6. Select any access keys that are over 45 days old and that have been used and \n - Click on `Make Inactive`\n7. Select any access keys that are over 45 days old and that have not been used and \n - Click the X to `Delete`",
"AuditProcedure": "Perform the following to determine if unused credentials exist:\n\n**From Console:**\n\n1. Login to the AWS Management Console\n2. Click `Services` \n3. Click `IAM`\n4. Click on `Users`\n5. Click the `Settings` (gear) icon.\n6. Select `Console last sign-in`, `Access key last used`, and `Access Key Id`\n7. Click on `Close` \n8. Check and ensure that `Console last sign-in` is less than 45 days ago.\n\n**Note** - `Never` means the user has never logged in.\n\n9. Check and ensure that `Access key age` is less than 45 days and that `Access key last used` does not say `None`\n\nIf the user hasn't signed into the Console in the last 45 days or Access keys are over 45 days old refer to the remediation.\n\n**From Command Line:**\n\n**Download Credential Report:**\n\n1. Run the following commands:\n```\n aws iam generate-credential-report\n\n aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,5,6,9,10,11,14,15,16 | grep -v '^<root_account>'\n```\n\n**Ensure unused credentials do not exist:**\n\n2. For each user having `password_enabled` set to `TRUE` , ensure `password_last_used_date` is less than `45` days ago.\n\n- When `password_enabled` is set to `TRUE` and `password_last_used` is set to `No_Information` , ensure `password_last_changed` is less than 45 days ago.\n\n3. For each user having an `access_key_1_active` or `access_key_2_active` to `TRUE` , ensure the corresponding `access_key_n_last_used_date` is less than `45` days ago.\n\n- When a user having an `access_key_x_active` (where x is 1 or 2) to `TRUE` and corresponding access_key_x_last_used_date is set to `N/A', ensure `access_key_x_last_rotated` is less than 45 days ago.",
"RemediationProcedure": "**From Console:** Perform the following to manage Unused Password (IAM user console access) 1. Login to the AWS Management Console: 2. Click `Services` 3. Click `IAM` 4. Click on `Users` 5. Click on `Security Credentials` 6. Select user whose `Console last sign-in` is greater than 45 days 7. Click `Security credentials` 8. In section `Sign-in credentials`, `Console password` click `Manage` 9. Under Console Access select `Disable` 10.Click `Apply` Perform the following to deactivate Access Keys: 1. Login to the AWS Management Console: 2. Click `Services` 3. Click `IAM` 4. Click on `Users` 5. Click on `Security Credentials` 6. Select any access keys that are over 45 days old and that have been used and - Click on `Make Inactive` 7. Select any access keys that are over 45 days old and that have not been used and - Click the X to `Delete`",
"AuditProcedure": "Perform the following to determine if unused credentials exist: **From Console:** 1. Login to the AWS Management Console 2. Click `Services` 3. Click `IAM` 4. Click on `Users` 5. Click the `Settings` (gear) icon. 6. Select `Console last sign-in`, `Access key last used`, and `Access Key Id` 7. Click on `Close` 8. Check and ensure that `Console last sign-in` is less than 45 days ago. **Note** - `Never` means the user has never logged in. 9. Check and ensure that `Access key age` is less than 45 days and that `Access key last used` does not say `None` If the user hasn't signed into the Console in the last 45 days or Access keys are over 45 days old refer to the remediation. **From Command Line:** **Download Credential Report:** 1. Run the following commands: ``` aws iam generate-credential-report aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,5,6,9,10,11,14,15,16 | grep -v '^<root_account>' ``` **Ensure unused credentials do not exist:** 2. For each user having `password_enabled` set to `TRUE` , ensure `password_last_used_date` is less than `45` days ago. - When `password_enabled` is set to `TRUE` and `password_last_used` is set to `No_Information` , ensure `password_last_changed` is less than 45 days ago. 3. For each user having an `access_key_1_active` or `access_key_2_active` to `TRUE` , ensure the corresponding `access_key_n_last_used_date` is less than `45` days ago. - When a user having an `access_key_x_active` (where x is 1 or 2) to `TRUE` and corresponding access_key_x_last_used_date is set to `N/A', ensure `access_key_x_last_rotated` is less than 45 days ago.",
"AdditionalInformation": "<root_account> is excluded in the audit since the root account should not be used for day to day business and would likely be unused for more than 45 days.",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#remove-credentials:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_admin-change-user.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html"
}
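The 45-day comparisons also script cleanly, because the credential report's ISO-8601 timestamps sort lexicographically. A sketch covering only the password case from step 2, assuming GNU `date`; the access-key columns can be handled the same way:

```bash
#!/usr/bin/env bash
# Flag console users whose password was last used (or, when that is
# unrecorded, last changed) more than 45 days ago. The root row is
# skipped, matching the note above.
cutoff=$(date -u -d '45 days ago' +%Y-%m-%dT%H:%M:%S)
aws iam generate-credential-report >/dev/null
aws iam get-credential-report --query 'Content' --output text \
  | base64 -d \
  | awk -F, -v c="$cutoff" '
      NR > 1 && $1 != "<root_account>" && $4 == "true" {
        last = ($5 == "no_information" || $5 == "N/A") ? $6 : $5
        if (last < c) print $1
      }'
```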
@@ -102,8 +103,8 @@
"Description": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API (directly or using the AWS SDK)",
"RationaleStatement": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API. One of the best ways to protect your account is to not allow users to have multiple access keys.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\n1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`.\n2. In the left navigation panel, choose `Users`.\n3. Click on the IAM user name that you want to examine.\n4. On the IAM user configuration page, select `Security Credentials` tab.\n5. In `Access Keys` section, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working.\n6. In the same `Access Keys` section, identify your non-operational access keys (other than the chosen one) and deactivate it by clicking the `Make Inactive` link.\n7. If you receive the `Change Key Status` confirmation box, click `Deactivate` to switch off the selected key.\n8. Repeat steps no. 3 7 for each IAM user in your AWS account.\n\n**From Command Line:**\n\n1. Using the IAM user and access key information provided in the `Audit CLI`, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working.\n\n2. Run the `update-access-key` command below using the IAM user name and the non-operational access key IDs to deactivate the unnecessary key(s). Refer to the Audit section to identify the unnecessary access key ID for the selected IAM user\n\n**Note** - the command does not return any output:\n```\naws iam update-access-key --access-key-id <access-key-id> --status Inactive --user-name <user-name>\n```\n3. To confirm that the selected access key pair has been successfully `deactivated` run the `list-access-keys` audit command again for that IAM User:\n```\naws iam list-access-keys --user-name <user-name>\n```\n- The command output should expose the metadata for each access key associated with the IAM user. If the non-operational key pair(s) `Status` is set to `Inactive`, the key has been successfully deactivated and the IAM user access configuration adheres now to this recommendation.\n\n4. Repeat steps no. 1 3 for each IAM user in your AWS account.",
"AuditProcedure": "**From Console:**\n\n1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`.\n2. In the left navigation panel, choose `Users`.\n3. Click on the IAM user name that you want to examine.\n4. On the IAM user configuration page, select `Security Credentials` tab.\n5. Under `Access Keys` section, in the Status column, check the current status for each access key associated with the IAM user. If the selected IAM user has more than one access key activated then the users access configuration does not adhere to security best practices and the risk of accidental exposures increases.\n- Repeat steps no. 3 5 for each IAM user in your AWS account.\n\n**From Command Line:**\n\n1. Run `list-users` command to list all IAM users within your account:\n```\naws iam list-users --query \"Users[*].UserName\"\n```\nThe command output should return an array that contains all your IAM user names.\n\n2. Run `list-access-keys` command using the IAM user name list to return the current status of each access key associated with the selected IAM user:\n```\naws iam list-access-keys --user-name <user-name>\n```\nThe command output should expose the metadata `(\"Username\", \"AccessKeyId\", \"Status\", \"CreateDate\")` for each access key on that user account.\n\n3. Check the `Status` property value for each key returned to determine each keys current state. If the `Status` property value for more than one IAM access key is set to `Active`, the user access configuration does not adhere to this recommendation, refer to the remediation below.\n\n- Repeat steps no. 2 and 3 for each IAM user in your AWS account.",
"RemediationProcedure": "**From Console:** 1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`. 2. In the left navigation panel, choose `Users`. 3. Click on the IAM user name that you want to examine. 4. On the IAM user configuration page, select `Security Credentials` tab. 5. In `Access Keys` section, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working. 6. In the same `Access Keys` section, identify your non-operational access keys (other than the chosen one) and deactivate it by clicking the `Make Inactive` link. 7. If you receive the `Change Key Status` confirmation box, click `Deactivate` to switch off the selected key. 8. Repeat steps no. 3 7 for each IAM user in your AWS account. **From Command Line:** 1. Using the IAM user and access key information provided in the `Audit CLI`, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working. 2. Run the `update-access-key` command below using the IAM user name and the non-operational access key IDs to deactivate the unnecessary key(s). Refer to the Audit section to identify the unnecessary access key ID for the selected IAM user **Note** - the command does not return any output: ``` aws iam update-access-key --access-key-id <access-key-id> --status Inactive --user-name <user-name> ``` 3. To confirm that the selected access key pair has been successfully `deactivated` run the `list-access-keys` audit command again for that IAM User: ``` aws iam list-access-keys --user-name <user-name> ``` - The command output should expose the metadata for each access key associated with the IAM user. If the non-operational key pair(s) `Status` is set to `Inactive`, the key has been successfully deactivated and the IAM user access configuration adheres now to this recommendation. 4. Repeat steps no. 1 3 for each IAM user in your AWS account.",
"AuditProcedure": "**From Console:** 1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`. 2. In the left navigation panel, choose `Users`. 3. Click on the IAM user name that you want to examine. 4. On the IAM user configuration page, select `Security Credentials` tab. 5. Under `Access Keys` section, in the Status column, check the current status for each access key associated with the IAM user. If the selected IAM user has more than one access key activated then the users access configuration does not adhere to security best practices and the risk of accidental exposures increases. - Repeat steps no. 3 5 for each IAM user in your AWS account. **From Command Line:** 1. Run `list-users` command to list all IAM users within your account: ``` aws iam list-users --query \"Users[*].UserName\" ``` The command output should return an array that contains all your IAM user names. 2. Run `list-access-keys` command using the IAM user name list to return the current status of each access key associated with the selected IAM user: ``` aws iam list-access-keys --user-name <user-name> ``` The command output should expose the metadata `(\"Username\", \"AccessKeyId\", \"Status\", \"CreateDate\")` for each access key on that user account. 3. Check the `Status` property value for each key returned to determine each keys current state. If the `Status` property value for more than one IAM access key is set to `Active`, the user access configuration does not adhere to this recommendation, refer to the remediation below. - Repeat steps no. 2 and 3 for each IAM user in your AWS account.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html"
}
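As a companion to the audit text above, here is a minimal shell sketch (not part of the benchmark text) that flags IAM users with more than one active access key. It assumes AWS CLI v2 and credentials with `iam:ListUsers` / `iam:ListAccessKeys` permissions:
```
#!/usr/bin/env bash
# Sketch: flag IAM users that have more than one active access key.
# Assumes AWS CLI v2 with iam:ListUsers / iam:ListAccessKeys permissions.
for user in $(aws iam list-users --query 'Users[*].UserName' --output text); do
  active=$(aws iam list-access-keys --user-name "$user" \
    --query 'length(AccessKeyMetadata[?Status==`Active`])' --output text)
  if [ "$active" -gt 1 ]; then
    echo "$user has $active active access keys"
  fi
done
```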
@@ -121,10 +122,10 @@
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Access keys consist of an access key ID and secret access key, which are used to sign programmatic requests that you make to AWS. AWS users need their own access keys to make programmatic calls to AWS from the AWS Command Line Interface (AWS CLI), Tools for Windows PowerShell, the AWS SDKs, or direct HTTP calls using the APIs for individual AWS services. It is recommended that all access keys be regularly rotated.",
"RationaleStatement": "Rotating access keys will reduce the window of opportunity for an access key that is associated with a compromised or terminated account to be used.\n\nAccess keys should be rotated to ensure that data cannot be accessed with an old key which might have been lost, cracked, or stolen.",
"RationaleStatement": "Rotating access keys will reduce the window of opportunity for an access key that is associated with a compromised or terminated account to be used. Access keys should be rotated to ensure that data cannot be accessed with an old key which might have been lost, cracked, or stolen.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to rotate access keys:\n\n**From Console:**\n\n1. Go to Management Console (https://console.aws.amazon.com/iam)\n2. Click on `Users`\n3. Click on `Security Credentials` \n4. As an Administrator \n - Click on `Make Inactive` for keys that have not been rotated in `90` Days\n5. As an IAM User\n - Click on `Make Inactive` or `Delete` for keys which have not been rotated or used in `90` Days\n6. Click on `Create Access Key` \n7. Update programmatic call with new Access Key credentials\n\n**From Command Line:**\n\n1. While the first access key is still active, create a second access key, which is active by default. Run the following command:\n```\naws iam create-access-key\n```\n\nAt this point, the user has two active access keys.\n\n2. Update all applications and tools to use the new access key.\n3. Determine whether the first access key is still in use by using this command:\n```\naws iam get-access-key-last-used\n```\n4. One approach is to wait several days and then check the old access key for any use before proceeding.\n\nEven if step Step 3 indicates no use of the old key, it is recommended that you do not immediately delete the first access key. Instead, change the state of the first access key to Inactive using this command:\n```\naws iam update-access-key\n```\n5. Use only the new access key to confirm that your applications are working. Any applications and tools that still use the original access key will stop working at this point because they no longer have access to AWS resources. If you find such an application or tool, you can switch its state back to Active to reenable the first access key. Then return to step Step 2 and update this application to use the new key.\n\n6. After you wait some period of time to ensure that all applications and tools have been updated, you can delete the first access key with this command:\n```\naws iam delete-access-key\n```",
"AuditProcedure": "Perform the following to determine if access keys are rotated as prescribed:\n\n**From Console:**\n\n1. Go to Management Console (https://console.aws.amazon.com/iam)\n2. Click on `Users`\n3. Click `setting` icon\n4. Select `Console last sign-in`\n5. Click `Close`\n6. Ensure that `Access key age` is less than 90 days ago. note) `None` in the `Access key age` means the user has not used the access key.\n\n**From Command Line:**\n\n```\naws iam generate-credential-report\naws iam get-credential-report --query 'Content' --output text | base64 -d\n```\nThe `access_key_1_last_rotated` field in this file notes The date and time, in ISO 8601 date-time format, when the user's access key was created or last changed. If the user does not have an active access key, the value in this field is N/A (not applicable).",
"RemediationProcedure": "Perform the following to rotate access keys: **From Console:** 1. Go to Management Console (https://console.aws.amazon.com/iam) 2. Click on `Users` 3. Click on `Security Credentials` 4. As an Administrator - Click on `Make Inactive` for keys that have not been rotated in `90` Days 5. As an IAM User - Click on `Make Inactive` or `Delete` for keys which have not been rotated or used in `90` Days 6. Click on `Create Access Key` 7. Update programmatic call with new Access Key credentials **From Command Line:** 1. While the first access key is still active, create a second access key, which is active by default. Run the following command: ``` aws iam create-access-key ``` At this point, the user has two active access keys. 2. Update all applications and tools to use the new access key. 3. Determine whether the first access key is still in use by using this command: ``` aws iam get-access-key-last-used ``` 4. One approach is to wait several days and then check the old access key for any use before proceeding. Even if step Step 3 indicates no use of the old key, it is recommended that you do not immediately delete the first access key. Instead, change the state of the first access key to Inactive using this command: ``` aws iam update-access-key ``` 5. Use only the new access key to confirm that your applications are working. Any applications and tools that still use the original access key will stop working at this point because they no longer have access to AWS resources. If you find such an application or tool, you can switch its state back to Active to reenable the first access key. Then return to step Step 2 and update this application to use the new key. 6. After you wait some period of time to ensure that all applications and tools have been updated, you can delete the first access key with this command: ``` aws iam delete-access-key ```",
"AuditProcedure": "Perform the following to determine if access keys are rotated as prescribed: **From Console:** 1. Go to Management Console (https://console.aws.amazon.com/iam) 2. Click on `Users` 3. Click `setting` icon 4. Select `Console last sign-in` 5. Click `Close` 6. Ensure that `Access key age` is less than 90 days ago. note) `None` in the `Access key age` means the user has not used the access key. **From Command Line:** ``` aws iam generate-credential-report aws iam get-credential-report --query 'Content' --output text | base64 -d ``` The `access_key_1_last_rotated` field in this file notes The date and time, in ISO 8601 date-time format, when the user's access key was created or last changed. If the user does not have an active access key, the value in this field is N/A (not applicable).",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#rotate-credentials:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html:https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html"
}
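A minimal sketch of the age check described above, not part of the benchmark text: it lists active access keys older than 90 days using the same `list-users` / `list-access-keys` calls, and assumes GNU `date` for timestamp parsing:
```
#!/usr/bin/env bash
# Sketch: print active access keys created more than 90 days ago.
# Assumes GNU date; CreateDate is returned in ISO 8601 format.
cutoff=$(date -d '90 days ago' +%s)
for user in $(aws iam list-users --query 'Users[*].UserName' --output text); do
  aws iam list-access-keys --user-name "$user" \
    --query 'AccessKeyMetadata[?Status==`Active`].[AccessKeyId,CreateDate]' \
    --output text |
  while read -r key created; do
    if [ "$(date -d "$created" +%s)" -lt "$cutoff" ]; then
      echo "$user $key created $created"
    fi
  done
done
```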
@@ -141,11 +142,11 @@
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "IAM users are granted access to services, functions, and data through IAM policies. There are three ways to define policies for a user: 1) Edit the user policy directly, aka an inline, or user, policy; 2) attach a policy directly to a user; 3) add the user to an IAM group that has an attached policy. \n\nOnly the third implementation is recommended.",
"Description": "IAM users are granted access to services, functions, and data through IAM policies. There are three ways to define policies for a user: 1) Edit the user policy directly, aka an inline, or user, policy; 2) attach a policy directly to a user; 3) add the user to an IAM group that has an attached policy. Only the third implementation is recommended.",
"RationaleStatement": "Assigning IAM policy only through groups unifies permissions management to a single, flexible layer consistent with organizational functional roles. By unifying permissions management, the likelihood of excessive permissions is reduced.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to create an IAM group and assign a policy to it:\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\n2. In the navigation pane, click `Groups` and then click `Create New Group` .\n3. In the `Group Name` box, type the name of the group and then click `Next Step` .\n4. In the list of policies, select the check box for each policy that you want to apply to all members of the group. Then click `Next Step` .\n5. Click `Create Group` \n\nPerform the following to add a user to a given group:\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\n2. In the navigation pane, click `Groups` \n3. Select the group to add a user to\n4. Click `Add Users To Group` \n5. Select the users to be added to the group\n6. Click `Add Users` \n\nPerform the following to remove a direct association between a user and policy:\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\n2. In the left navigation pane, click on Users\n3. For each user:\n - Select the user\n - Click on the `Permissions` tab\n - Expand `Permissions policies` \n - Click `X` for each policy; then click Detach or Remove (depending on policy type)",
"AuditProcedure": "Perform the following to determine if an inline policy is set or a policy is directly attached to users:\n\n1. Run the following to get a list of IAM users:\n```\n aws iam list-users --query 'Users[*].UserName' --output text \n```\n2. For each user returned, run the following command to determine if any policies are attached to them:\n```\n aws iam list-attached-user-policies --user-name <iam_user>\n aws iam list-user-policies --user-name <iam_user> \n```\n3. If any policies are returned, the user has an inline policy or direct policy attachment.",
"RemediationProcedure": "Perform the following to create an IAM group and assign a policy to it: 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click `Groups` and then click `Create New Group` . 3. In the `Group Name` box, type the name of the group and then click `Next Step` . 4. In the list of policies, select the check box for each policy that you want to apply to all members of the group. Then click `Next Step` . 5. Click `Create Group` Perform the following to add a user to a given group: 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click `Groups` 3. Select the group to add a user to 4. Click `Add Users To Group` 5. Select the users to be added to the group 6. Click `Add Users` Perform the following to remove a direct association between a user and policy: 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the left navigation pane, click on Users 3. For each user: - Select the user - Click on the `Permissions` tab - Expand `Permissions policies` - Click `X` for each policy; then click Detach or Remove (depending on policy type)",
"AuditProcedure": "Perform the following to determine if an inline policy is set or a policy is directly attached to users: 1. Run the following to get a list of IAM users: ``` aws iam list-users --query 'Users[*].UserName' --output text ``` 2. For each user returned, run the following command to determine if any policies are attached to them: ``` aws iam list-attached-user-policies --user-name <iam_user> aws iam list-user-policies --user-name <iam_user> ``` 3. If any policies are returned, the user has an inline policy or direct policy attachment.",
"AdditionalInformation": "",
"References": "http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html"
}
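Complementing the audit commands quoted above, a small sketch (illustration only, not benchmark text) that reports users holding attached or inline policies rather than group-derived permissions:
```
#!/usr/bin/env bash
# Sketch: report IAM users that have directly attached or inline policies,
# i.e. permissions not received through group membership.
for user in $(aws iam list-users --query 'Users[*].UserName' --output text); do
  attached=$(aws iam list-attached-user-policies --user-name "$user" \
    --query 'length(AttachedPolicies)' --output text)
  inline=$(aws iam list-user-policies --user-name "$user" \
    --query 'length(PolicyNames)' --output text)
  if [ "$attached" -gt 0 ] || [ "$inline" -gt 0 ]; then
    echo "$user: $attached attached, $inline inline"
  fi
done
```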
@@ -155,7 +156,8 @@
"Id": "1.16",
"Description": "Ensure IAM policies that allow full \"*:*\" administrative privileges are not attached",
"Checks": [
"iam_policy_no_administrative_privileges"
"iam_aws_attached_policy_no_administrative_privileges",
"iam_customer_attached_policy_no_administrative_privileges"
],
"Attributes": [
{
@@ -163,10 +165,10 @@
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "IAM policies are the means by which privileges are granted to users, groups, or roles. It is recommended and considered a standard security advice to grant _least privilege_ -that is, granting only the permissions required to perform a task. Determine what users need to do and then craft policies for them that let the users perform _only_ those tasks, instead of allowing full administrative privileges.",
"RationaleStatement": "It's more secure to start with a minimum set of permissions and grant additional permissions as necessary, rather than starting with permissions that are too lenient and then trying to tighten them later.\n\nProviding full administrative privileges instead of restricting to the minimum set of permissions that the user is required to do exposes the resources to potentially unwanted actions.\n\nIAM policies that have a statement with \"Effect\": \"Allow\" with \"Action\": \"\\*\" over \"Resource\": \"\\*\" should be removed.",
"RationaleStatement": "It's more secure to start with a minimum set of permissions and grant additional permissions as necessary, rather than starting with permissions that are too lenient and then trying to tighten them later. Providing full administrative privileges instead of restricting to the minimum set of permissions that the user is required to do exposes the resources to potentially unwanted actions. IAM policies that have a statement with \"Effect\": \"Allow\" with \"Action\": \"\\*\" over \"Resource\": \"\\*\" should be removed.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\nPerform the following to detach the policy that has full administrative privileges:\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\n2. In the navigation pane, click Policies and then search for the policy name found in the audit step.\n3. Select the policy that needs to be deleted.\n4. In the policy action menu, select first `Detach` \n5. Select all Users, Groups, Roles that have this policy attached\n6. Click `Detach Policy` \n7. In the policy action menu, select `Detach` \n\n**From Command Line:**\n\nPerform the following to detach the policy that has full administrative privileges as found in the audit step:\n\n1. Lists all IAM users, groups, and roles that the specified managed policy is attached to.\n\n```\n aws iam list-entities-for-policy --policy-arn <policy_arn>\n```\n2. Detach the policy from all IAM Users:\n```\n aws iam detach-user-policy --user-name <iam_user> --policy-arn <policy_arn>\n```\n3. Detach the policy from all IAM Groups:\n```\n aws iam detach-group-policy --group-name <iam_group> --policy-arn <policy_arn>\n```\n4. Detach the policy from all IAM Roles:\n```\n aws iam detach-role-policy --role-name <iam_role> --policy-arn <policy_arn>\n```",
"AuditProcedure": "Perform the following to determine what policies are created:\n\n**From Command Line:**\n\n1. Run the following to get a list of IAM policies:\n```\n aws iam list-policies --only-attached --output text\n```\n2. For each policy returned, run the following command to determine if any policies is allowing full administrative privileges on the account:\n```\n aws iam get-policy-version --policy-arn <policy_arn> --version-id <version>\n```\n3. In output ensure policy should not have any Statement block with `\"Effect\": \"Allow\"` and `Action` set to `\"*\"` and `Resource` set to `\"*\"`",
"RemediationProcedure": "**From Console:** Perform the following to detach the policy that has full administrative privileges: 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click Policies and then search for the policy name found in the audit step. 3. Select the policy that needs to be deleted. 4. In the policy action menu, select first `Detach` 5. Select all Users, Groups, Roles that have this policy attached 6. Click `Detach Policy` 7. In the policy action menu, select `Detach` **From Command Line:** Perform the following to detach the policy that has full administrative privileges as found in the audit step: 1. Lists all IAM users, groups, and roles that the specified managed policy is attached to. ``` aws iam list-entities-for-policy --policy-arn <policy_arn> ``` 2. Detach the policy from all IAM Users: ``` aws iam detach-user-policy --user-name <iam_user> --policy-arn <policy_arn> ``` 3. Detach the policy from all IAM Groups: ``` aws iam detach-group-policy --group-name <iam_group> --policy-arn <policy_arn> ``` 4. Detach the policy from all IAM Roles: ``` aws iam detach-role-policy --role-name <iam_role> --policy-arn <policy_arn> ```",
"AuditProcedure": "Perform the following to determine what policies are created: **From Command Line:** 1. Run the following to get a list of IAM policies: ``` aws iam list-policies --only-attached --output text ``` 2. For each policy returned, run the following command to determine if any policies is allowing full administrative privileges on the account: ``` aws iam get-policy-version --policy-arn <policy_arn> --version-id <version> ``` 3. In output ensure policy should not have any Statement block with `\"Effect\": \"Allow\"` and `Action` set to `\"*\"` and `Resource` set to `\"*\"`",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html:https://docs.aws.amazon.com/cli/latest/reference/iam/index.html#cli-aws-iam"
}
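A minimal sketch of the loop implied by the audit steps above; it is an illustration under stated assumptions (jq installed; customer-managed policies only; statements whose `Action`/`Resource` are lists are not unpacked), not the benchmark's own tooling:
```
#!/usr/bin/env bash
# Sketch: flag attached customer-managed policies whose default version
# contains Effect=Allow, Action="*", Resource="*". Assumes jq is installed.
for arn in $(aws iam list-policies --scope Local --only-attached \
               --query 'Policies[*].Arn' --output text); do
  ver=$(aws iam get-policy --policy-arn "$arn" \
          --query 'Policy.DefaultVersionId' --output text)
  hits=$(aws iam get-policy-version --policy-arn "$arn" --version-id "$ver" \
           --query 'PolicyVersion.Document' --output json |
         jq '[.Statement] | flatten
             | map(select(.Effect=="Allow" and .Action=="*" and .Resource=="*"))
             | length')
  [ "$hits" -gt 0 ] && echo "FULL ADMIN: $arn"
done
```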
@@ -186,8 +188,8 @@
"Description": "AWS provides a support center that can be used for incident notification and response, as well as technical support and customer services. Create an IAM Role to allow authorized users to manage incidents with AWS Support.",
"RationaleStatement": "By implementing least privilege for access control, an IAM Role will require an appropriate IAM Policy to allow Support Center Access in order to manage Incidents with AWS Support.",
"ImpactStatement": "All AWS Support plans include an unlimited number of account and billing support cases, with no long-term contracts. Support billing calculations are performed on a per-account basis for all plans. Enterprise Support plan customers have the option to include multiple enabled accounts in an aggregated monthly billing calculation. Monthly charges for the Business and Enterprise support plans are based on each month's AWS usage charges, subject to a monthly minimum, billed in advance.",
"RemediationProcedure": "**From Command Line:**\n\n1. Create an IAM role for managing incidents with AWS:\n - Create a trust relationship policy document that allows <iam_user> to manage AWS incidents, and save it locally as /tmp/TrustPolicy.json:\n```\n {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"AWS\": \"<iam_user>\"\n },\n \"Action\": \"sts:AssumeRole\"\n }\n ]\n }\n```\n2. Create the IAM role using the above trust policy:\n```\naws iam create-role --role-name <aws_support_iam_role> --assume-role-policy-document file:///tmp/TrustPolicy.json\n```\n3. Attach 'AWSSupportAccess' managed policy to the created IAM role:\n```\naws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess --role-name <aws_support_iam_role>\n```",
"AuditProcedure": "**From Command Line:**\n\n1. List IAM policies, filter for the 'AWSSupportAccess' managed policy, and note the \"Arn\" element value:\n```\naws iam list-policies --query \"Policies[?PolicyName == 'AWSSupportAccess']\"\n```\n2. Check if the 'AWSSupportAccess' policy is attached to any role:\n\n```\naws iam list-entities-for-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess\n```\n\n3. In Output, Ensure `PolicyRoles` does not return empty. 'Example: Example: PolicyRoles: [ ]'\n\nIf it returns empty refer to the remediation below.",
"RemediationProcedure": "**From Command Line:** 1. Create an IAM role for managing incidents with AWS: - Create a trust relationship policy document that allows <iam_user> to manage AWS incidents, and save it locally as /tmp/TrustPolicy.json: ``` { \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Principal\": { \"AWS\": \"<iam_user>\" }, \"Action\": \"sts:AssumeRole\" } ] } ``` 2. Create the IAM role using the above trust policy: ``` aws iam create-role --role-name <aws_support_iam_role> --assume-role-policy-document file:///tmp/TrustPolicy.json ``` 3. Attach 'AWSSupportAccess' managed policy to the created IAM role: ``` aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess --role-name <aws_support_iam_role> ```",
"AuditProcedure": "**From Command Line:** 1. List IAM policies, filter for the 'AWSSupportAccess' managed policy, and note the \"Arn\" element value: ``` aws iam list-policies --query \"Policies[?PolicyName == 'AWSSupportAccess']\" ``` 2. Check if the 'AWSSupportAccess' policy is attached to any role: ``` aws iam list-entities-for-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess ``` 3. In Output, Ensure `PolicyRoles` does not return empty. 'Example: Example: PolicyRoles: [ ]' If it returns empty refer to the remediation below.",
"AdditionalInformation": "AWSSupportAccess policy is a global AWS resource. It has same ARN as `arn:aws:iam::aws:policy/AWSSupportAccess` for every account.",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html:https://aws.amazon.com/premiumsupport/pricing/:https://docs.aws.amazon.com/cli/latest/reference/iam/list-policies.html:https://docs.aws.amazon.com/cli/latest/reference/iam/attach-role-policy.html:https://docs.aws.amazon.com/cli/latest/reference/iam/list-entities-for-policy.html"
}
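The check above reduces to a single call; a minimal sketch (illustration, not benchmark text) that fails loudly when no role carries the policy:
```
#!/usr/bin/env bash
# Sketch: fail when no role has the AWSSupportAccess managed policy attached.
roles=$(aws iam list-entities-for-policy \
          --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess \
          --query 'length(PolicyRoles)' --output text)
if [ "$roles" -eq 0 ]; then
  echo "No role has AWSSupportAccess attached - see the remediation above"
fi
```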
@@ -205,10 +207,10 @@
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "AWS access from within AWS instances can be done by either encoding AWS keys into AWS API calls or by assigning the instance to a role which has an appropriate permissions policy for the required access. \"AWS Access\" means accessing the APIs of AWS in order to access AWS resources or manage AWS account resources.",
"RationaleStatement": "AWS IAM roles reduce the risks associated with sharing and rotating credentials that can be used outside of AWS itself. If credentials are compromised, they can be used from outside of the AWS account they give access to. In contrast, in order to leverage role permissions an attacker would need to gain and maintain access to a specific instance to use the privileges associated with it.\n\nAdditionally, if credentials are encoded into compiled applications or other hard to change mechanisms, then they are even more unlikely to be properly rotated due to service disruption risks. As time goes on, credentials that cannot be rotated are more likely to be known by an increasing number of individuals who no longer work for the organization owning the credentials.",
"RationaleStatement": "AWS IAM roles reduce the risks associated with sharing and rotating credentials that can be used outside of AWS itself. If credentials are compromised, they can be used from outside of the AWS account they give access to. In contrast, in order to leverage role permissions an attacker would need to gain and maintain access to a specific instance to use the privileges associated with it. Additionally, if credentials are encoded into compiled applications or other hard to change mechanisms, then they are even more unlikely to be properly rotated due to service disruption risks. As time goes on, credentials that cannot be rotated are more likely to be known by an increasing number of individuals who no longer work for the organization owning the credentials.",
"ImpactStatement": "",
"RemediationProcedure": "IAM roles can only be associated at the launch of an instance. To remediate an instance to add it to a role you must create a new instance.\n\nIf the instance has no external dependencies on its current private ip or public addresses are elastic IPs:\n\n1. In AWS IAM create a new role. Assign a permissions policy if needed permissions are already known.\n2. In the AWS console launch a new instance with identical settings to the existing instance, and ensure that the newly created role is selected.\n3. Shutdown both the existing instance and the new instance.\n4. Detach disks from both instances.\n5. Attach the existing instance disks to the new instance.\n6. Boot the new instance and you should have the same machine, but with the associated role.\n\n**Note:** if your environment has dependencies on a dynamically assigned PRIVATE IP address you can create an AMI from the existing instance, destroy the old one and then when launching from the AMI, manually assign the previous private IP address.\n\n**Note: **if your environment has dependencies on a dynamically assigned PUBLIC IP address there is not a way ensure the address is retained and assign an instance role. Dependencies on dynamically assigned public IP addresses are a bad practice and, if possible, you may wish to rebuild the instance with a new elastic IP address and make the investment to remediate affected systems while assigning the system to a role.",
"AuditProcedure": "Where an instance is associated with a Role:\n\nFor instances that are known to perform AWS actions, ensure that they belong to an instance role that has the necessary permissions:\n\n1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings)\n2. Open the EC2 Dashboard and choose \"Instances\"\n3. Click the EC2 instance that performs AWS actions, in the lower pane details find \"IAM Role\"\n4. If the Role is blank, the instance is not assigned to one.\n5. If the Role is filled in, it does not mean the instance might not \\*also\\* have credentials encoded on it for some activities.\n\nWhere an Instance Contains Embedded Credentials:\n\n- On the instance that is known to perform AWS actions, audit all scripts and environment variables to ensure that none of them contain AWS credentials.\n\nWhere an Instance Application Contains Embedded Credentials:\n\n- Applications that run on an instance may also have credentials embedded. This is a bad practice, but even worse if the source code is stored in a public code repository such as github. When an application contains credentials can be determined by eliminating all other sources of credentials and if the application can still access AWS resources - it likely contains embedded credentials. Another method is to examine all source code and configuration files of the application.",
"RemediationProcedure": "IAM roles can only be associated at the launch of an instance. To remediate an instance to add it to a role you must create a new instance. If the instance has no external dependencies on its current private ip or public addresses are elastic IPs: 1. In AWS IAM create a new role. Assign a permissions policy if needed permissions are already known. 2. In the AWS console launch a new instance with identical settings to the existing instance, and ensure that the newly created role is selected. 3. Shutdown both the existing instance and the new instance. 4. Detach disks from both instances. 5. Attach the existing instance disks to the new instance. 6. Boot the new instance and you should have the same machine, but with the associated role. **Note:** if your environment has dependencies on a dynamically assigned PRIVATE IP address you can create an AMI from the existing instance, destroy the old one and then when launching from the AMI, manually assign the previous private IP address. **Note: **if your environment has dependencies on a dynamically assigned PUBLIC IP address there is not a way ensure the address is retained and assign an instance role. Dependencies on dynamically assigned public IP addresses are a bad practice and, if possible, you may wish to rebuild the instance with a new elastic IP address and make the investment to remediate affected systems while assigning the system to a role.",
"AuditProcedure": "Where an instance is associated with a Role: For instances that are known to perform AWS actions, ensure that they belong to an instance role that has the necessary permissions: 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Open the EC2 Dashboard and choose \"Instances\" 3. Click the EC2 instance that performs AWS actions, in the lower pane details find \"IAM Role\" 4. If the Role is blank, the instance is not assigned to one. 5. If the Role is filled in, it does not mean the instance might not \\*also\\* have credentials encoded on it for some activities. Where an Instance Contains Embedded Credentials: - On the instance that is known to perform AWS actions, audit all scripts and environment variables to ensure that none of them contain AWS credentials. Where an Instance Application Contains Embedded Credentials: - Applications that run on an instance may also have credentials embedded. This is a bad practice, but even worse if the source code is stored in a public code repository such as github. When an application contains credentials can be determined by eliminating all other sources of credentials and if the application can still access AWS resources - it likely contains embedded credentials. Another method is to examine all source code and configuration files of the application.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html"
}
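A minimal one-region sketch for the first audit question above (which instances lack a role); not part of the benchmark text, and it only surfaces candidates for the manual credential review:
```
#!/usr/bin/env bash
# Sketch: list running EC2 instances with no IAM instance profile attached,
# i.e. candidates for embedded credentials. Run once per region.
aws ec2 describe-instances \
  --filters Name=instance-state-name,Values=running \
  --query 'Reservations[].Instances[?IamInstanceProfile==`null`].[InstanceId]' \
  --output text
```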
@@ -225,11 +227,11 @@
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "To enable HTTPS connections to your website or application in AWS, you need an SSL/TLS server certificate. You can use ACM or IAM to store and deploy server certificates. \nUse IAM as a certificate manager only when you must support HTTPS connections in a region that is not supported by ACM. IAM securely encrypts your private keys and stores the encrypted version in IAM SSL certificate storage. IAM supports deploying server certificates in all regions, but you must obtain your certificate from an external provider for use with AWS. You cannot upload an ACM certificate to IAM. Additionally, you cannot manage your certificates from the IAM Console.",
"Description": "To enable HTTPS connections to your website or application in AWS, you need an SSL/TLS server certificate. You can use ACM or IAM to store and deploy server certificates. Use IAM as a certificate manager only when you must support HTTPS connections in a region that is not supported by ACM. IAM securely encrypts your private keys and stores the encrypted version in IAM SSL certificate storage. IAM supports deploying server certificates in all regions, but you must obtain your certificate from an external provider for use with AWS. You cannot upload an ACM certificate to IAM. Additionally, you cannot manage your certificates from the IAM Console.",
"RationaleStatement": "Removing expired SSL/TLS certificates eliminates the risk that an invalid certificate will be deployed accidentally to a resource such as AWS Elastic Load Balancer (ELB), which can damage the credibility of the application/website behind the ELB. As a best practice, it is recommended to delete expired certificates.",
"ImpactStatement": "Deleting the certificate could have implications for your application if you are using an expired server certificate with Elastic Load Balancing, CloudFront, etc.\nOne has to make configurations at respective services to ensure there is no interruption in application functionality.",
"RemediationProcedure": "**From Console:**\n\nRemoving expired certificates via AWS Management Console is not currently supported. To delete SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI).\n\n**From Command Line:**\n\nTo delete Expired Certificate run following command by replacing <CERTIFICATE_NAME> with the name of the certificate to delete:\n\n```\naws iam delete-server-certificate --server-certificate-name <CERTIFICATE_NAME>\n```\n\nWhen the preceding command is successful, it does not return any output.",
"AuditProcedure": "**From Console:**\n\nGetting the certificates expiration information via AWS Management Console is not currently supported. \nTo request information about the SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI).\n\n**From Command Line:**\n\nRun list-server-certificates command to list all the IAM-stored server certificates:\n\n```\naws iam list-server-certificates\n```\n\nThe command output should return an array that contains all the SSL/TLS certificates currently stored in IAM and their metadata (name, ID, expiration date, etc):\n\n```\n{\n \"ServerCertificateMetadataList\": [\n {\n \"ServerCertificateId\": \"EHDGFRW7EJFYTE88D\",\n \"ServerCertificateName\": \"MyServerCertificate\",\n \"Expiration\": \"2018-07-10T23:59:59Z\",\n \"Path\": \"/\",\n \"Arn\": \"arn:aws:iam::012345678910:server-certificate/MySSLCertificate\",\n \"UploadDate\": \"2018-06-10T11:56:08Z\"\n }\n ]\n}\n```\n\nVerify the `ServerCertificateName` and `Expiration` parameter value (expiration date) for each SSL/TLS certificate returned by the list-server-certificates command and determine if there are any expired server certificates currently stored in AWS IAM. If so, use the AWS API to remove them.\n\nIf this command returns:\n```\n{ { \"ServerCertificateMetadataList\": [] }\n```\nThis means that there are no expired certificates, It DOES NOT mean that no certificates exist.",
"ImpactStatement": "Deleting the certificate could have implications for your application if you are using an expired server certificate with Elastic Load Balancing, CloudFront, etc. One has to make configurations at respective services to ensure there is no interruption in application functionality.",
"RemediationProcedure": "**From Console:** Removing expired certificates via AWS Management Console is not currently supported. To delete SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI). **From Command Line:** To delete Expired Certificate run following command by replacing <CERTIFICATE_NAME> with the name of the certificate to delete: ``` aws iam delete-server-certificate --server-certificate-name <CERTIFICATE_NAME> ``` When the preceding command is successful, it does not return any output.",
"AuditProcedure": "**From Console:** Getting the certificates expiration information via AWS Management Console is not currently supported. To request information about the SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI). **From Command Line:** Run list-server-certificates command to list all the IAM-stored server certificates: ``` aws iam list-server-certificates ``` The command output should return an array that contains all the SSL/TLS certificates currently stored in IAM and their metadata (name, ID, expiration date, etc): ``` { \"ServerCertificateMetadataList\": [ { \"ServerCertificateId\": \"EHDGFRW7EJFYTE88D\", \"ServerCertificateName\": \"MyServerCertificate\", \"Expiration\": \"2018-07-10T23:59:59Z\", \"Path\": \"/\", \"Arn\": \"arn:aws:iam::012345678910:server-certificate/MySSLCertificate\", \"UploadDate\": \"2018-06-10T11:56:08Z\" } ] } ``` Verify the `ServerCertificateName` and `Expiration` parameter value (expiration date) for each SSL/TLS certificate returned by the list-server-certificates command and determine if there are any expired server certificates currently stored in AWS IAM. If so, use the AWS API to remove them. If this command returns: ``` { { \"ServerCertificateMetadataList\": [] } ``` This means that there are no expired certificates, It DOES NOT mean that no certificates exist.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html:https://docs.aws.amazon.com/cli/latest/reference/iam/delete-server-certificate.html"
}
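A short sketch of the expiration check above (illustration only; assumes jq and relies on lexicographic comparison of ISO 8601 timestamps, which is sound at date granularity):
```
#!/usr/bin/env bash
# Sketch: print IAM server certificates that are already expired.
# Assumes jq; compares ISO 8601 timestamps lexicographically.
aws iam list-server-certificates --output json |
jq -r --arg now "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
  '.ServerCertificateMetadataList[]
   | select(.Expiration < $now)
   | "\(.ServerCertificateName) expired \(.Expiration)"'
```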
@@ -249,8 +251,8 @@
"Description": "AWS provides customers with the option of specifying the contact information for account's security team. It is recommended that this information be provided.",
"RationaleStatement": "Specifying security-specific contact information will help ensure that security advisories sent by AWS reach the team in your organization that is best equipped to respond to them.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to establish security contact information:\n\n**From Console:**\n\n1. Click on your account name at the top right corner of the console.\n2. From the drop-down menu Click `My Account` \n3. Scroll down to the `Alternate Contacts` section\n4. Enter contact information in the `Security` section\n\n**Note:** Consider specifying an internal email distribution list to ensure emails are regularly monitored by more than one individual.",
"AuditProcedure": "Perform the following to determine if security contact information is present:\n\n**From Console:**\n\n1. Click on your account name at the top right corner of the console\n2. From the drop-down menu Click `My Account` \n3. Scroll down to the `Alternate Contacts` section\n4. Ensure contact information is specified in the `Security` section",
"RemediationProcedure": "Perform the following to establish security contact information: **From Console:** 1. Click on your account name at the top right corner of the console. 2. From the drop-down menu Click `My Account` 3. Scroll down to the `Alternate Contacts` section 4. Enter contact information in the `Security` section **Note:** Consider specifying an internal email distribution list to ensure emails are regularly monitored by more than one individual.",
"AuditProcedure": "Perform the following to determine if security contact information is present: **From Console:** 1. Click on your account name at the top right corner of the console 2. From the drop-down menu Click `My Account` 3. Scroll down to the `Alternate Contacts` section 4. Ensure contact information is specified in the `Security` section",
"AdditionalInformation": "",
"References": ""
}
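Although the benchmark text above is console-only, recent AWS CLI versions expose the alternate contacts via the Account API; a hedged sketch (assumes the `account:GetAlternateContact` permission and a CLI new enough to ship the `account` commands):
```
#!/usr/bin/env bash
# Sketch: check whether a SECURITY alternate contact is registered.
# The command errors when no contact is set, which we turn into a message.
aws account get-alternate-contact --alternate-contact-type SECURITY \
  2>/dev/null || echo "No security contact registered - see remediation above"
```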
@@ -267,11 +269,11 @@
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable IAM Access analyzer for IAM policies about all resources in each region.\n\nIAM Access Analyzer is a technology introduced at AWS reinvent 2019. After the Analyzer is enabled in IAM, scan results are displayed on the console showing the accessible resources. Scans show resources that other accounts and federated users can access, such as KMS keys and IAM roles. So the results allow you to determine if an unintended user is allowed, making it easier for administrators to monitor least privileges access.\nAccess Analyzer analyzes only policies that are applied to resources in the same AWS Region.",
"Description": "Enable IAM Access analyzer for IAM policies about all resources in each region. IAM Access Analyzer is a technology introduced at AWS reinvent 2019. After the Analyzer is enabled in IAM, scan results are displayed on the console showing the accessible resources. Scans show resources that other accounts and federated users can access, such as KMS keys and IAM roles. So the results allow you to determine if an unintended user is allowed, making it easier for administrators to monitor least privileges access. Access Analyzer analyzes only policies that are applied to resources in the same AWS Region.",
"RationaleStatement": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data. Access Analyzer identifies resources that are shared with external principals by using logic-based reasoning to analyze the resource-based policies in your AWS environment. IAM Access Analyzer continuously monitors all policies for S3 bucket, IAM roles, KMS(Key Management Service) keys, AWS Lambda functions, and Amazon SQS(Simple Queue Service) queues.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\nPerform the following to enable IAM Access analyzer for IAM policies:\n\n1. Open the IAM console at `https://console.aws.amazon.com/iam/.`\n2. Choose `Access analyzer`.\n3. Choose `Create analyzer`.\n4. On the `Create analyzer` page, confirm that the `Region` displayed is the Region where you want to enable Access Analyzer.\n5. Enter a name for the analyzer. `Optional as it will generate a name for you automatically`.\n6. Add any tags that you want to apply to the analyzer. `Optional`. \n7. Choose `Create Analyzer`.\n8. Repeat these step for each active region\n\n**From Command Line:**\n\nRun the following command:\n```\naws accessanalyzer create-analyzer --analyzer-name <NAME> --type <ACCOUNT|ORGANIZATION>\n```\nRepeat this command above for each active region.\n\n**Note:** The IAM Access Analyzer is successfully configured only when the account you use has the necessary permissions.",
"AuditProcedure": "**From Console:**\n\n1. Open the IAM console at `https://console.aws.amazon.com/iam/`\n2. Choose `Access analyzer`\n3. Click 'Analyzers'\n4. Ensure that at least one analyzer is present\n5. Ensure that the `STATUS` is set to `Active`\n6. Repeat these step for each active region\n\n**From Command Line:**\n\n1. Run the following command:\n```\naws accessanalyzer list-analyzers | grep status\n```\n2. Ensure that at least one Analyzer the `status` is set to `ACTIVE`\n\n3. Repeat the steps above for each active region.\n\nIf an Access analyzer is not listed for each region or the status is not set to active refer to the remediation procedure below.",
"RemediationProcedure": "**From Console:** Perform the following to enable IAM Access analyzer for IAM policies: 1. Open the IAM console at `https://console.aws.amazon.com/iam/.` 2. Choose `Access analyzer`. 3. Choose `Create analyzer`. 4. On the `Create analyzer` page, confirm that the `Region` displayed is the Region where you want to enable Access Analyzer. 5. Enter a name for the analyzer. `Optional as it will generate a name for you automatically`. 6. Add any tags that you want to apply to the analyzer. `Optional`. 7. Choose `Create Analyzer`. 8. Repeat these step for each active region **From Command Line:** Run the following command: ``` aws accessanalyzer create-analyzer --analyzer-name <NAME> --type <ACCOUNT|ORGANIZATION> ``` Repeat this command above for each active region. **Note:** The IAM Access Analyzer is successfully configured only when the account you use has the necessary permissions.",
"AuditProcedure": "**From Console:** 1. Open the IAM console at `https://console.aws.amazon.com/iam/` 2. Choose `Access analyzer` 3. Click 'Analyzers' 4. Ensure that at least one analyzer is present 5. Ensure that the `STATUS` is set to `Active` 6. Repeat these step for each active region **From Command Line:** 1. Run the following command: ``` aws accessanalyzer list-analyzers | grep status ``` 2. Ensure that at least one Analyzer the `status` is set to `ACTIVE` 3. Repeat the steps above for each active region. If an Access analyzer is not listed for each region or the status is not set to active refer to the remediation procedure below.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-getting-started.html:https://docs.aws.amazon.com/cli/latest/reference/accessanalyzer/get-analyzer.html:https://docs.aws.amazon.com/cli/latest/reference/accessanalyzer/create-analyzer.html"
}
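The per-region repetition above is easy to script; a minimal sketch (not benchmark text) that reports enabled regions lacking an ACTIVE analyzer:
```
#!/usr/bin/env bash
# Sketch: report enabled regions without an ACTIVE IAM Access Analyzer.
for region in $(aws ec2 describe-regions --query 'Regions[*].RegionName' --output text); do
  n=$(aws accessanalyzer list-analyzers --region "$region" \
        --query 'length(analyzers[?status==`ACTIVE`])' --output text 2>/dev/null || echo 0)
  [ "$n" -eq 0 ] && echo "$region: no active analyzer"
done
```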
@@ -292,7 +294,7 @@
"RationaleStatement": "Centralizing IAM user management to a single identity store reduces complexity and thus the likelihood of access management errors.",
"ImpactStatement": "",
"RemediationProcedure": "The remediation procedure will vary based on the individual organization's implementation of identity federation and/or AWS Organizations with the acceptance criteria that no non-service IAM users, and non-root accounts, are present outside the account providing centralized IAM user management.",
"AuditProcedure": "For multi-account AWS environments with an external identity provider... \n\n1. Determine the master account for identity federation or IAM user management\n2. Login to that account through the AWS Management Console\n3. Click `Services` \n4. Click `IAM` \n5. Click `Identity providers`\n6. Verify the configuration\n\nThen..., determine all accounts that should not have local users present. For each account...\n\n1. Determine all accounts that should not have local users present\n2. Log into the AWS Management Console\n3. Switch role into each identified account\n4. Click `Services` \n5. Click `IAM` \n6. Click `Users`\n7. Confirm that no IAM users representing individuals are present\n\nFor multi-account AWS environments implementing AWS Organizations without an external identity provider... \n\n1. Determine all accounts that should not have local users present\n2. Log into the AWS Management Console\n3. Switch role into each identified account\n4. Click `Services` \n5. Click `IAM` \n6. Click `Users`\n7. Confirm that no IAM users representing individuals are present",
"AuditProcedure": "For multi-account AWS environments with an external identity provider... 1. Determine the master account for identity federation or IAM user management 2. Login to that account through the AWS Management Console 3. Click `Services` 4. Click `IAM` 5. Click `Identity providers` 6. Verify the configuration Then..., determine all accounts that should not have local users present. For each account... 1. Determine all accounts that should not have local users present 2. Log into the AWS Management Console 3. Switch role into each identified account 4. Click `Services` 5. Click `IAM` 6. Click `Users` 7. Confirm that no IAM users representing individuals are present For multi-account AWS environments implementing AWS Organizations without an external identity provider... 1. Determine all accounts that should not have local users present 2. Log into the AWS Management Console 3. Switch role into each identified account 4. Click `Services` 5. Click `IAM` 6. Click `Users` 7. Confirm that no IAM users representing individuals are present",
"AdditionalInformation": "",
"References": ""
}
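The per-account user review above can be partially scripted; a minimal sketch (illustration only) to run in each member account, e.g. via assumed roles:
```
#!/usr/bin/env bash
# Sketch: dump IAM users in the current account so a reviewer can confirm
# that none of them represent individuals.
aws iam list-users --query 'Users[*].[UserName,CreateDate]' --output text
```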
@@ -312,8 +314,8 @@
"Description": "The AWS support portal allows account owners to establish security questions that can be used to authenticate individuals calling AWS customer service for support. It is recommended that security questions be established.",
"RationaleStatement": "When creating a new AWS account, a default super user is automatically created. This account is referred to as the 'root user' or 'root' account. It is recommended that the use of this account be limited and highly controlled. During events in which the 'root' password is no longer accessible or the MFA token associated with 'root' is lost/destroyed it is possible, through authentication using secret questions and associated answers, to recover 'root' user login access.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\n1. Login to the AWS Account as the 'root' user\n2. Click on the _<Root\\_Account\\_Name>_ from the top right of the console\n3. From the drop-down menu Click _My Account_\n4. Scroll down to the `Configure Security Questions` section\n5. Click on `Edit` \n6. Click on each `Question` \n - From the drop-down select an appropriate question\n - Click on the `Answer` section\n - Enter an appropriate answer \n - Follow process for all 3 questions\n7. Click `Update` when complete\n8. Save Questions and Answers and place in a secure physical location",
"AuditProcedure": "**From Console:**\n\n1. Login to the AWS account as the 'root' user\n2. On the top right you will see the _<Root\\_Account\\_Name>_\n3. Click on the _<Root\\_Account\\_Name>_\n4. From the drop-down menu Click `My Account` \n5. In the `Configure Security Challenge Questions` section on the `Personal Information` page, configure three security challenge questions.\n6. Click `Save questions` .",
"RemediationProcedure": "**From Console:** 1. Login to the AWS Account as the 'root' user 2. Click on the _<Root\\_Account\\_Name>_ from the top right of the console 3. From the drop-down menu Click _My Account_ 4. Scroll down to the `Configure Security Questions` section 5. Click on `Edit` 6. Click on each `Question` - From the drop-down select an appropriate question - Click on the `Answer` section - Enter an appropriate answer - Follow process for all 3 questions 7. Click `Update` when complete 8. Save Questions and Answers and place in a secure physical location",
"AuditProcedure": "**From Console:** 1. Login to the AWS account as the 'root' user 2. On the top right you will see the _<Root\\_Account\\_Name>_ 3. Click on the _<Root\\_Account\\_Name>_ 4. From the drop-down menu Click `My Account` 5. In the `Configure Security Challenge Questions` section on the `Personal Information` page, configure three security challenge questions. 6. Click `Save questions` .",
"AdditionalInformation": "",
"References": ""
}
@@ -333,8 +335,8 @@
"Description": "The 'root' user account is the most privileged user in an AWS account. AWS Access Keys provide programmatic access to a given AWS account. It is recommended that all access keys associated with the 'root' user account be removed.",
"RationaleStatement": "Removing access keys associated with the 'root' user account limits vectors by which the account can be compromised. Additionally, removing the 'root' access keys encourages the creation and use of role based accounts that are least privileged.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to delete or disable active 'root' user access keys\n\n**From Console:**\n\n1. Sign in to the AWS Management Console as 'root' and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\n2. Click on _<Root\\_Account\\_Name>_ at the top right and select `My Security Credentials` from the drop down list\n3. On the pop out screen Click on `Continue to Security Credentials` \n4. Click on `Access Keys` _(Access Key ID and Secret Access Key)_\n5. Under the `Status` column if there are any Keys which are Active\n - Click on `Make Inactive` - (Temporarily disable Key - may be needed again)\n - Click `Delete` - (Deleted keys cannot be recovered)",
"AuditProcedure": "Perform the following to determine if the 'root' user account has access keys:\n\n**From Console:**\n\n1. Login to the AWS Management Console\n2. Click `Services` \n3. Click `IAM` \n4. Click on `Credential Report` \n5. This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file\n6. For the `<root_account>` user, ensure the `access_key_1_active` and `access_key_2_active` fields are set to `FALSE` .\n\n**From Command Line:**\n\nRun the following command:\n```\n aws iam get-account-summary | grep \"AccountAccessKeysPresent\" \n```\nIf no 'root' access keys exist the output will show \"AccountAccessKeysPresent\": 0,. \n\nIf the output shows a \"1\" than 'root' keys exist, refer to the remediation procedure below.",
"RemediationProcedure": "Perform the following to delete or disable active 'root' user access keys **From Console:** 1. Sign in to the AWS Management Console as 'root' and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. Click on _<Root\\_Account\\_Name>_ at the top right and select `My Security Credentials` from the drop down list 3. On the pop out screen Click on `Continue to Security Credentials` 4. Click on `Access Keys` _(Access Key ID and Secret Access Key)_ 5. Under the `Status` column if there are any Keys which are Active - Click on `Make Inactive` - (Temporarily disable Key - may be needed again) - Click `Delete` - (Deleted keys cannot be recovered)",
"AuditProcedure": "Perform the following to determine if the 'root' user account has access keys: **From Console:** 1. Login to the AWS Management Console 2. Click `Services` 3. Click `IAM` 4. Click on `Credential Report` 5. This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file 6. For the `<root_account>` user, ensure the `access_key_1_active` and `access_key_2_active` fields are set to `FALSE` . **From Command Line:** Run the following command: ``` aws iam get-account-summary | grep \"AccountAccessKeysPresent\" ``` If no 'root' access keys exist the output will show \"AccountAccessKeysPresent\": 0,. If the output shows a \"1\" than 'root' keys exist, refer to the remediation procedure below.",
"AdditionalInformation": "IAM User account \"root\" for us-gov cloud regions is not enabled by default. However, on request to AWS support enables 'root' access only through access-keys (CLI, API methods) for us-gov cloud region.",
"References": "http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html:http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html:http://docs.aws.amazon.com/IAM/latest/APIReference/API_GetAccountSummary.html:https://aws.amazon.com/blogs/security/an-easier-way-to-determine-the-presence-of-aws-account-access-keys/"
}
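The command-line audit above can be scripted. A minimal sketch, not part of the benchmark text, assuming credentials that allow `iam:GetAccountSummary`:

```
#!/usr/bin/env bash
# PASS when the account summary reports zero root access keys.
present=$(aws iam get-account-summary \
  --query 'SummaryMap.AccountAccessKeysPresent' --output text)
if [ "$present" = "0" ]; then
  echo "PASS: no root access keys"
else
  echo "FAIL: root access keys exist - see the remediation procedure"
fi
```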
@@ -351,11 +353,11 @@
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The 'root' user account is the most privileged user in an AWS account. Multi-factor Authentication (MFA) adds an extra layer of protection on top of a username and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their username and password as well as for an authentication code from their AWS MFA device.\n\n**Note:** When virtual MFA is used for 'root' accounts, it is recommended that the device used is NOT a personal device, but rather a dedicated mobile device (tablet or phone) that is managed to be kept charged and secured independent of any individual personal devices. (\"non-personal virtual MFA\") This lessens the risks of losing access to the MFA due to device loss, device trade-in or if the individual owning the device is no longer employed at the company.",
"Description": "The 'root' user account is the most privileged user in an AWS account. Multi-factor Authentication (MFA) adds an extra layer of protection on top of a username and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their username and password as well as for an authentication code from their AWS MFA device. **Note:** When virtual MFA is used for 'root' accounts, it is recommended that the device used is NOT a personal device, but rather a dedicated mobile device (tablet or phone) that is managed to be kept charged and secured independent of any individual personal devices. (\"non-personal virtual MFA\") This lessens the risks of losing access to the MFA due to device loss, device trade-in or if the individual owning the device is no longer employed at the company.",
"RationaleStatement": "Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that emits a time-sensitive key and have knowledge of a credential.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to establish MFA for the 'root' user account:\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\n\n Note: to manage MFA devices for the 'root' AWS account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials.\n\n2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account.\n3. Choose `Activate MFA` \n4. In the wizard, choose `A virtual MFA` device and then choose `Next Step` .\n5. IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes.\n6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see [Virtual MFA Applications](http://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications).) If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device).\n7. Determine whether the MFA app supports QR codes, and then do one of the following:\n\n - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code.\n - In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application.\n\nWhen you are finished, the virtual MFA device starts generating one-time passwords.\n\nIn the Manage MFA Device wizard, in the Authentication Code 1 box, type the one-time password that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second one-time password into the Authentication Code 2 box. Choose Assign Virtual MFA.",
"AuditProcedure": "Perform the following to determine if the 'root' user account has MFA setup:\n\n**From Console:**\n\n1. Login to the AWS Management Console\n2. Click `Services` \n3. Click `IAM` \n4. Click on `Credential Report` \n5. This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file\n6. For the `<root_account>` user, ensure the `mfa_active` field is set to `TRUE` .\n\n**From Command Line:**\n\n1. Run the following command:\n```\n aws iam get-account-summary | grep \"AccountMFAEnabled\"\n```\n2. Ensure the AccountMFAEnabled property is set to 1",
"RemediationProcedure": "Perform the following to establish MFA for the 'root' user account: 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). Note: to manage MFA devices for the 'root' AWS account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials. 2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account. 3. Choose `Activate MFA` 4. In the wizard, choose `A virtual MFA` device and then choose `Next Step` . 5. IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes. 6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see [Virtual MFA Applications](http://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications).) If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device). 7. Determine whether the MFA app supports QR codes, and then do one of the following: - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code. - In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application. When you are finished, the virtual MFA device starts generating one-time passwords. In the Manage MFA Device wizard, in the Authentication Code 1 box, type the one-time password that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second one-time password into the Authentication Code 2 box. Choose Assign Virtual MFA.",
"AuditProcedure": "Perform the following to determine if the 'root' user account has MFA setup: **From Console:** 1. Login to the AWS Management Console 2. Click `Services` 3. Click `IAM` 4. Click on `Credential Report` 5. This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file 6. For the `<root_account>` user, ensure the `mfa_active` field is set to `TRUE` . **From Command Line:** 1. Run the following command: ``` aws iam get-account-summary | grep \"AccountMFAEnabled\" ``` 2. Ensure the AccountMFAEnabled property is set to 1",
"AdditionalInformation": "IAM User account \"root\" for us-gov cloud regions does not have console access. This recommendation is not applicable for us-gov cloud regions.",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html#id_root-user_manage_mfa:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html#enable-virt-mfa-for-root"
}
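The command-line audit above reduces to a one-value check. A minimal sketch, again assuming `iam:GetAccountSummary` permission:

```
#!/usr/bin/env bash
# PASS when AccountMFAEnabled is 1 in the account summary.
enabled=$(aws iam get-account-summary \
  --query 'SummaryMap.AccountMFAEnabled' --output text)
if [ "$enabled" = "1" ]; then
  echo "PASS: root MFA enabled"
else
  echo "FAIL: root MFA not enabled"
fi
```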
@@ -373,10 +375,10 @@
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The 'root' user account is the most privileged user in an AWS account. MFA adds an extra layer of protection on top of a user name and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their user name and password as well as for an authentication code from their AWS MFA device. For Level 2, it is recommended that the 'root' user account be protected with a hardware MFA.",
"RationaleStatement": "A hardware MFA has a smaller attack surface than a virtual MFA. For example, a hardware MFA does not suffer the attack surface introduced by the mobile smartphone on which a virtual MFA resides.\n\n**Note**: Using hardware MFA for many, many AWS accounts may create a logistical device management issue. If this is the case, consider implementing this Level 2 recommendation selectively to the highest security AWS accounts and the Level 1 recommendation applied to the remaining accounts.",
"RationaleStatement": "A hardware MFA has a smaller attack surface than a virtual MFA. For example, a hardware MFA does not suffer the attack surface introduced by the mobile smartphone on which a virtual MFA resides. **Note**: Using hardware MFA for many, many AWS accounts may create a logistical device management issue. If this is the case, consider implementing this Level 2 recommendation selectively to the highest security AWS accounts and the Level 1 recommendation applied to the remaining accounts.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to establish a hardware MFA for the 'root' user account:\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/).\nNote: to manage MFA devices for the AWS 'root' user account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials.\n2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account.\n3. Choose `Activate MFA` \n4. In the wizard, choose `A hardware MFA` device and then choose `Next Step` .\n5. In the `Serial Number` box, enter the serial number that is found on the back of the MFA device.\n6. In the `Authentication Code 1` box, enter the six-digit number displayed by the MFA device. You might need to press the button on the front of the device to display the number.\n7. Wait 30 seconds while the device refreshes the code, and then enter the next six-digit number into the `Authentication Code 2` box. You might need to press the button on the front of the device again to display the second number.\n8. Choose `Next Step` . The MFA device is now associated with the AWS account. The next time you use your AWS account credentials to sign in, you must type a code from the hardware MFA device.\n\nRemediation for this recommendation is not available through AWS CLI.",
"AuditProcedure": "Perform the following to determine if the 'root' user account has a hardware MFA setup:\n\n1. Run the following command to determine if the 'root' account has MFA setup:\n```\n aws iam get-account-summary | grep \"AccountMFAEnabled\"\n```\n\nThe `AccountMFAEnabled` property is set to `1` will ensure that the 'root' user account has MFA (Virtual or Hardware) Enabled.\nIf `AccountMFAEnabled` property is set to `0` the account is not compliant with this recommendation.\n\n2. If `AccountMFAEnabled` property is set to `1`, determine 'root' account has Hardware MFA enabled.\nRun the following command to list all virtual MFA devices:\n```\n aws iam list-virtual-mfa-devices \n```\nIf the output contains one MFA with the following Serial Number, it means the MFA is virtual, not hardware and the account is not compliant with this recommendation:\n\n `\"SerialNumber\": \"arn:aws:iam::_<aws_account_number>_:mfa/root-account-mfa-device\"`",
"RemediationProcedure": "Perform the following to establish a hardware MFA for the 'root' user account: 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). Note: to manage MFA devices for the AWS 'root' user account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials. 2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account. 3. Choose `Activate MFA` 4. In the wizard, choose `A hardware MFA` device and then choose `Next Step` . 5. In the `Serial Number` box, enter the serial number that is found on the back of the MFA device. 6. In the `Authentication Code 1` box, enter the six-digit number displayed by the MFA device. You might need to press the button on the front of the device to display the number. 7. Wait 30 seconds while the device refreshes the code, and then enter the next six-digit number into the `Authentication Code 2` box. You might need to press the button on the front of the device again to display the second number. 8. Choose `Next Step` . The MFA device is now associated with the AWS account. The next time you use your AWS account credentials to sign in, you must type a code from the hardware MFA device. Remediation for this recommendation is not available through AWS CLI.",
"AuditProcedure": "Perform the following to determine if the 'root' user account has a hardware MFA setup: 1. Run the following command to determine if the 'root' account has MFA setup: ``` aws iam get-account-summary | grep \"AccountMFAEnabled\" ``` The `AccountMFAEnabled` property is set to `1` will ensure that the 'root' user account has MFA (Virtual or Hardware) Enabled. If `AccountMFAEnabled` property is set to `0` the account is not compliant with this recommendation. 2. If `AccountMFAEnabled` property is set to `1`, determine 'root' account has Hardware MFA enabled. Run the following command to list all virtual MFA devices: ``` aws iam list-virtual-mfa-devices ``` If the output contains one MFA with the following Serial Number, it means the MFA is virtual, not hardware and the account is not compliant with this recommendation: `\"SerialNumber\": \"arn:aws:iam::_<aws_account_number>_:mfa/root-account-mfa-device\"`",
"AdditionalInformation": "IAM User account 'root' for us-gov cloud regions does not have console access. This control is not applicable for us-gov cloud regions.",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_physical.html#enable-hw-mfa-for-root"
}
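Both audit steps above can be chained. A hedged sketch, assuming `iam:GetAccountSummary` and `iam:ListVirtualMFADevices` permissions; the JMESPath filter is just one way to spot the virtual root device:

```
#!/usr/bin/env bash
# Step 1: root MFA must be enabled at all.
enabled=$(aws iam get-account-summary \
  --query 'SummaryMap.AccountMFAEnabled' --output text)
if [ "$enabled" != "1" ]; then
  echo "FAIL: root MFA not enabled"
  exit 1
fi
# Step 2: a serial ending in :mfa/root-account-mfa-device means the
# root MFA is virtual, so the Level 2 hardware requirement is not met.
virtual=$(aws iam list-virtual-mfa-devices --assignment-status Assigned \
  --query "VirtualMFADevices[?ends_with(SerialNumber, ':mfa/root-account-mfa-device')].SerialNumber" \
  --output text)
if [ -n "$virtual" ]; then
  echo "FAIL: root MFA is virtual ($virtual)"
else
  echo "PASS: root MFA appears to be hardware"
fi
```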
@@ -396,9 +398,9 @@
"Description": "With the creation of an AWS account, a 'root user' is created that cannot be disabled or deleted. That user has unrestricted access to and control over all resources in the AWS account. It is highly recommended that the use of this account be avoided for everyday tasks.",
"RationaleStatement": "The 'root user' has unrestricted access to and control over all account resources. Use of it is inconsistent with the principles of least privilege and separation of duties, and can lead to unnecessary harm due to error or account compromise.",
"ImpactStatement": "",
"RemediationProcedure": "If you find that the 'root' user account is being used for daily activity to include administrative tasks that do not require the 'root' user:\n\n1. Change the 'root' user password.\n2. Deactivate or delete any access keys associate with the 'root' user.\n\n**Remember, anyone who has 'root' user credentials for your AWS account has unrestricted access to and control of all the resources in your account, including billing information.",
"AuditProcedure": "**From Console:**\n\n1. Login to the AWS Management Console at `https://console.aws.amazon.com/iam/`\n2. In the left pane, click `Credential Report`\n3. Click on `Download Report`\n4. Open of Save the file locally\n5. Locate the `<root account>` under the user column\n6. Review `password_last_used, access_key_1_last_used_date, access_key_2_last_used_date` to determine when the 'root user' was last used.\n\n**From Command Line:**\n\nRun the following CLI commands to provide a credential report for determining the last time the 'root user' was used:\n```\naws iam generate-credential-report\n```\n```\naws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,5,11,16 | grep -B1 '<root_account>'\n```\n\nReview `password_last_used`, `access_key_1_last_used_date`, `access_key_2_last_used_date` to determine when the _root user_ was last used.\n\n**Note:** There are a few conditions under which the use of the 'root' user account is required. Please see the reference links for all of the tasks that require use of the 'root' user.",
"AdditionalInformation": "The 'root' user for us-gov cloud regions is not enabled by default. However, on request to AWS support, they can enable the 'root' user and grant access only through access-keys (CLI, API methods) for us-gov cloud region. If the 'root' user for us-gov cloud regions is enabled, this recommendation is applicable.\n\nMonitoring usage of the 'root' user can be accomplished by implementing recommendation 3.3 Ensure a log metric filter and alarm exist for usage of the 'root' user.",
"RemediationProcedure": "If you find that the 'root' user account is being used for daily activity to include administrative tasks that do not require the 'root' user: 1. Change the 'root' user password. 2. Deactivate or delete any access keys associate with the 'root' user. **Remember, anyone who has 'root' user credentials for your AWS account has unrestricted access to and control of all the resources in your account, including billing information.",
"AuditProcedure": "**From Console:** 1. Login to the AWS Management Console at `https://console.aws.amazon.com/iam/` 2. In the left pane, click `Credential Report` 3. Click on `Download Report` 4. Open of Save the file locally 5. Locate the `<root account>` under the user column 6. Review `password_last_used, access_key_1_last_used_date, access_key_2_last_used_date` to determine when the 'root user' was last used. **From Command Line:** Run the following CLI commands to provide a credential report for determining the last time the 'root user' was used: ``` aws iam generate-credential-report ``` ``` aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,5,11,16 | grep -B1 '<root_account>' ``` Review `password_last_used`, `access_key_1_last_used_date`, `access_key_2_last_used_date` to determine when the _root user_ was last used. **Note:** There are a few conditions under which the use of the 'root' user account is required. Please see the reference links for all of the tasks that require use of the 'root' user.",
"AdditionalInformation": "The 'root' user for us-gov cloud regions is not enabled by default. However, on request to AWS support, they can enable the 'root' user and grant access only through access-keys (CLI, API methods) for us-gov cloud region. If the 'root' user for us-gov cloud regions is enabled, this recommendation is applicable. Monitoring usage of the 'root' user can be accomplished by implementing recommendation 3.3 Ensure a log metric filter and alarm exist for usage of the 'root' user.",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html:https://docs.aws.amazon.com/general/latest/gr/aws_tasks-that-require-root.html"
}
]
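The credential-report commands above can be wrapped into one pass. A minimal sketch, not part of the benchmark text; note that `generate-credential-report` may need to be re-run until the report state is COMPLETE:

```
#!/usr/bin/env bash
# Show when the root user last used its password or access keys.
aws iam generate-credential-report >/dev/null
aws iam get-credential-report --query 'Content' --output text \
  | base64 -d \
  | awk -F, 'NR==1 || $1=="<root_account>" {print $1, $5, $11, $16}'
```

Columns 1, 5, 11 and 16 are `user`, `password_last_used`, `access_key_1_last_used_date` and `access_key_2_last_used_date`, matching the `cut` invocation in the audit procedure.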
@@ -417,8 +419,8 @@
"Description": "Password policies are, in part, used to enforce password complexity requirements. IAM password policies can be used to ensure password are at least a given length. It is recommended that the password policy require a minimum password length 14.",
"RationaleStatement": "Setting a password complexity policy increases account resiliency against brute force login attempts.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to set the password policy as prescribed:\n\n**From Console:**\n\n1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings)\n2. Go to IAM Service on the AWS Console\n3. Click on Account Settings on the Left Pane\n4. Set \"Minimum password length\" to `14` or greater.\n5. Click \"Apply password policy\"\n\n**From Command Line:**\n```\n aws iam update-account-password-policy --minimum-password-length 14\n```\nNote: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.",
"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed:\n\n**From Console:**\n\n1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings)\n2. Go to IAM Service on the AWS Console\n3. Click on Account Settings on the Left Pane\n4. Ensure \"Minimum password length\" is set to 14 or greater.\n\n**From Command Line:**\n```\naws iam get-account-password-policy\n```\nEnsure the output of the above command includes \"MinimumPasswordLength\": 14 (or higher)",
"RemediationProcedure": "Perform the following to set the password policy as prescribed: **From Console:** 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Set \"Minimum password length\" to `14` or greater. 5. Click \"Apply password policy\" **From Command Line:** ``` aws iam update-account-password-policy --minimum-password-length 14 ``` Note: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.",
"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed: **From Console:** 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Ensure \"Minimum password length\" is set to 14 or greater. **From Command Line:** ``` aws iam get-account-password-policy ``` Ensure the output of the above command includes \"MinimumPasswordLength\": 14 (or higher)",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy"
}
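The command-line audit can be made non-interactive. A sketch, assuming `iam:GetAccountPasswordPolicy`; the call fails when no policy exists, which is also a FAIL:

```
#!/usr/bin/env bash
# PASS when a password policy exists and its minimum length is >= 14.
len=$(aws iam get-account-password-policy \
  --query 'PasswordPolicy.MinimumPasswordLength' --output text 2>/dev/null)
if [ -n "$len" ] && [ "$len" -ge 14 ]; then
  echo "PASS: minimum password length is $len"
else
  echo "FAIL: no policy, or minimum length below 14"
fi
```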
@@ -438,8 +440,8 @@
"Description": "IAM password policies can prevent the reuse of a given password by the same user. It is recommended that the password policy prevent the reuse of passwords.",
"RationaleStatement": "Preventing password reuse increases account resiliency against brute force login attempts.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to set the password policy as prescribed:\n\n**From Console:**\n\n1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings)\n2. Go to IAM Service on the AWS Console\n3. Click on Account Settings on the Left Pane\n4. Check \"Prevent password reuse\"\n5. Set \"Number of passwords to remember\" is set to `24` \n\n**From Command Line:**\n```\n aws iam update-account-password-policy --password-reuse-prevention 24\n```\nNote: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.",
"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed:\n\n**From Console:**\n\n1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings)\n2. Go to IAM Service on the AWS Console\n3. Click on Account Settings on the Left Pane\n4. Ensure \"Prevent password reuse\" is checked\n5. Ensure \"Number of passwords to remember\" is set to 24\n\n**From Command Line:**\n```\naws iam get-account-password-policy \n```\nEnsure the output of the above command includes \"PasswordReusePrevention\": 24",
"RemediationProcedure": "Perform the following to set the password policy as prescribed: **From Console:** 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Check \"Prevent password reuse\" 5. Set \"Number of passwords to remember\" is set to `24` **From Command Line:** ``` aws iam update-account-password-policy --password-reuse-prevention 24 ``` Note: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.",
"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed: **From Console:** 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Ensure \"Prevent password reuse\" is checked 5. Ensure \"Number of passwords to remember\" is set to 24 **From Command Line:** ``` aws iam get-account-password-policy ``` Ensure the output of the above command includes \"PasswordReusePrevention\": 24",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy"
}
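As the notes in this and the previous recommendation point out, the `update-account-password-policy` options can be combined. A hedged example applying this recommendation together with the 14-character minimum from the previous one:

```
aws iam update-account-password-policy \
  --minimum-password-length 14 \
  --password-reuse-prevention 24
```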
@@ -459,8 +461,8 @@
"Description": "Amazon S3 provides a variety of no, or low, cost encryption options to protect data at rest.",
"RationaleStatement": "Encrypting data at rest reduces the likelihood that it is unintentionally exposed and can nullify the impact of disclosure if the encryption remains unbroken.",
"ImpactStatement": "Amazon S3 buckets with default bucket encryption using SSE-KMS cannot be used as destination buckets for Amazon S3 server access logging. Only SSE-S3 default encryption is supported for server access log destination buckets.",
"RemediationProcedure": "**From Console:**\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ \n2. Select a Bucket.\n3. Click on 'Properties'.\n4. Click edit on `Default Encryption`.\n5. Select either `AES-256`, `AWS-KMS`, `SSE-KMS` or `SSE-S3`.\n6. Click `Save`\n7. Repeat for all the buckets in your AWS account lacking encryption.\n\n**From Command Line:**\n\nRun either \n```\naws s3api put-bucket-encryption --bucket <bucket name> --server-side-encryption-configuration '{\"Rules\": [{\"ApplyServerSideEncryptionByDefault\": {\"SSEAlgorithm\": \"AES256\"}}]}'\n```\n or \n```\naws s3api put-bucket-encryption --bucket <bucket name> --server-side-encryption-configuration '{\"Rules\": [{\"ApplyServerSideEncryptionByDefault\": {\"SSEAlgorithm\": \"aws:kms\",\"KMSMasterKeyID\": \"aws/s3\"}}]}'\n```\n\n**Note:** the KMSMasterKeyID can be set to the master key of your choosing; aws/s3 is an AWS preconfigured default.",
"AuditProcedure": "**From Console:**\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ \n2. Select a Bucket.\n3. Click on 'Properties'.\n4. Verify that `Default Encryption` is enabled, and displays either `AES-256`, `AWS-KMS`, `SSE-KMS` or `SSE-S3`.\n5. Repeat for all the buckets in your AWS account.\n\n**From Command Line:**\n\n1. Run command to list buckets\n```\naws s3 ls\n```\n2. For each bucket, run \n```\naws s3api get-bucket-encryption --bucket <bucket name>\n```\n3. Verify that either \n```\n\"SSEAlgorithm\": \"AES256\"\n```\n or \n```\n\"SSEAlgorithm\": \"aws:kms\"```\n is displayed.",
"RemediationProcedure": "**From Console:** 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select a Bucket. 3. Click on 'Properties'. 4. Click edit on `Default Encryption`. 5. Select either `AES-256`, `AWS-KMS`, `SSE-KMS` or `SSE-S3`. 6. Click `Save` 7. Repeat for all the buckets in your AWS account lacking encryption. **From Command Line:** Run either ``` aws s3api put-bucket-encryption --bucket <bucket name> --server-side-encryption-configuration '{\"Rules\": [{\"ApplyServerSideEncryptionByDefault\": {\"SSEAlgorithm\": \"AES256\"}}]}' ``` or ``` aws s3api put-bucket-encryption --bucket <bucket name> --server-side-encryption-configuration '{\"Rules\": [{\"ApplyServerSideEncryptionByDefault\": {\"SSEAlgorithm\": \"aws:kms\",\"KMSMasterKeyID\": \"aws/s3\"}}]}' ``` **Note:** the KMSMasterKeyID can be set to the master key of your choosing; aws/s3 is an AWS preconfigured default.",
"AuditProcedure": "**From Console:** 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select a Bucket. 3. Click on 'Properties'. 4. Verify that `Default Encryption` is enabled, and displays either `AES-256`, `AWS-KMS`, `SSE-KMS` or `SSE-S3`. 5. Repeat for all the buckets in your AWS account. **From Command Line:** 1. Run command to list buckets ``` aws s3 ls ``` 2. For each bucket, run ``` aws s3api get-bucket-encryption --bucket <bucket name> ``` 3. Verify that either ``` \"SSEAlgorithm\": \"AES256\" ``` or ``` \"SSEAlgorithm\": \"aws:kms\"``` is displayed.",
"AdditionalInformation": "S3 bucket encryption only applies to objects as they are placed in the bucket. Enabling S3 bucket encryption does **not** encrypt objects previously stored within the bucket.",
"References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/default-bucket-encryption.html:https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-related-resources"
}
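The per-bucket loop implied by the audit can be scripted. A minimal sketch, assuming `s3:ListAllMyBuckets` and `s3:GetEncryptionConfiguration`; buckets without a default-encryption configuration make the call fail, which is reported as unencrypted:

```
#!/usr/bin/env bash
# Print each bucket's default server-side encryption algorithm.
for b in $(aws s3api list-buckets --query 'Buckets[].Name' --output text); do
  alg=$(aws s3api get-bucket-encryption --bucket "$b" \
    --query 'ServerSideEncryptionConfiguration.Rules[0].ApplyServerSideEncryptionByDefault.SSEAlgorithm' \
    --output text 2>/dev/null)
  echo "$b: ${alg:-NO DEFAULT ENCRYPTION}"
done
```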
@@ -480,8 +482,8 @@
"Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
"RationaleStatement": "By default, Amazon S3 allows both HTTP and HTTPS requests. To achieve only allowing access to Amazon S3 objects through HTTPS you also have to explicitly deny access to HTTP requests. Bucket policies that allow HTTPS requests without explicitly denying HTTP requests will not comply with this recommendation.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/\n2. Select the Check box next to the Bucket.\n3. Click on 'Permissions'.\n4. Click 'Bucket Policy'\n5. Add this to the existing policy filling in the required information\n```\n{\n \"Sid\": <optional>\",\n \"Effect\": \"Deny\",\n \"Principal\": \"*\",\n \"Action\": \"s3:*\",\n \"Resource\": \"arn:aws:s3:::<bucket_name>/*\",\n \"Condition\": {\n \"Bool\": {\n \"aws:SecureTransport\": \"false\"\n }\n }\n }\n```\n6. Save\n7. Repeat for all the buckets in your AWS account that contain sensitive data.\n\n**From Console** \n\nusing AWS Policy Generator:\n\n1. Repeat steps 1-4 above.\n2. Click on `Policy Generator` at the bottom of the Bucket Policy Editor\n3. Select Policy Type\n`S3 Bucket Policy`\n4. Add Statements\n- `Effect` = Deny\n- `Principal` = *\n- `AWS Service` = Amazon S3\n- `Actions` = *\n- `Amazon Resource Name` = <ARN of the S3 Bucket>\n5. Generate Policy\n6. Copy the text and add it to the Bucket Policy.\n\n**From Command Line:**\n\n1. Export the bucket policy to a json file.\n```\naws s3api get-bucket-policy --bucket <bucket_name> --query Policy --output text > policy.json\n```\n\n2. Modify the policy.json file by adding in this statement:\n```\n{\n \"Sid\": <optional>\",\n \"Effect\": \"Deny\",\n \"Principal\": \"*\",\n \"Action\": \"s3:*\",\n \"Resource\": \"arn:aws:s3:::<bucket_name>/*\",\n \"Condition\": {\n \"Bool\": {\n \"aws:SecureTransport\": \"false\"\n }\n }\n }\n```\n3. Apply this modified policy back to the S3 bucket:\n```\naws s3api put-bucket-policy --bucket <bucket_name> --policy file://policy.json\n```",
"AuditProcedure": "To allow access to HTTPS you can use a condition that checks for the key `\"aws:SecureTransport: true\"`. This means that the request is sent through HTTPS but that HTTP can still be used. So to make sure you do not allow HTTP access confirm that there is a bucket policy that explicitly denies access for HTTP requests and that it contains the key \"aws:SecureTransport\": \"false\".\n\n**From Console:**\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/\n2. Select the Check box next to the Bucket.\n3. Click on 'Permissions', then Click on `Bucket Policy`.\n4. Ensure that a policy is listed that matches:\n```\n'{\n \"Sid\": <optional>,\n \"Effect\": \"Deny\",\n \"Principal\": \"*\",\n \"Action\": \"s3:*\",\n \"Resource\": \"arn:aws:s3:::<bucket_name>/*\",\n \"Condition\": {\n \"Bool\": {\n \"aws:SecureTransport\": \"false\"\n }'\n```\n`<optional>` and `<bucket_name>` will be specific to your account\n\n5. Repeat for all the buckets in your AWS account.\n\n**From Command Line:**\n\n1. List all of the S3 Buckets \n```\naws s3 ls\n```\n2. Using the list of buckets run this command on each of them:\n```\naws s3api get-bucket-policy --bucket <bucket_name> | grep aws:SecureTransport\n```\n3. Confirm that `aws:SecureTransport` is set to false `aws:SecureTransport:false`\n4. Confirm that the policy line has Effect set to Deny 'Effect:Deny'",
"RemediationProcedure": "**From Console:** 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Permissions'. 4. Click 'Bucket Policy' 5. Add this to the existing policy filling in the required information ``` { \"Sid\": <optional>\", \"Effect\": \"Deny\", \"Principal\": \"*\", \"Action\": \"s3:*\", \"Resource\": \"arn:aws:s3:::<bucket_name>/*\", \"Condition\": { \"Bool\": { \"aws:SecureTransport\": \"false\" } } } ``` 6. Save 7. Repeat for all the buckets in your AWS account that contain sensitive data. **From Console** using AWS Policy Generator: 1. Repeat steps 1-4 above. 2. Click on `Policy Generator` at the bottom of the Bucket Policy Editor 3. Select Policy Type `S3 Bucket Policy` 4. Add Statements - `Effect` = Deny - `Principal` = * - `AWS Service` = Amazon S3 - `Actions` = * - `Amazon Resource Name` = <ARN of the S3 Bucket> 5. Generate Policy 6. Copy the text and add it to the Bucket Policy. **From Command Line:** 1. Export the bucket policy to a json file. ``` aws s3api get-bucket-policy --bucket <bucket_name> --query Policy --output text > policy.json ``` 2. Modify the policy.json file by adding in this statement: ``` { \"Sid\": <optional>\", \"Effect\": \"Deny\", \"Principal\": \"*\", \"Action\": \"s3:*\", \"Resource\": \"arn:aws:s3:::<bucket_name>/*\", \"Condition\": { \"Bool\": { \"aws:SecureTransport\": \"false\" } } } ``` 3. Apply this modified policy back to the S3 bucket: ``` aws s3api put-bucket-policy --bucket <bucket_name> --policy file://policy.json ```",
"AuditProcedure": "To allow access to HTTPS you can use a condition that checks for the key `\"aws:SecureTransport: true\"`. This means that the request is sent through HTTPS but that HTTP can still be used. So to make sure you do not allow HTTP access confirm that there is a bucket policy that explicitly denies access for HTTP requests and that it contains the key \"aws:SecureTransport\": \"false\". **From Console:** 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Permissions', then Click on `Bucket Policy`. 4. Ensure that a policy is listed that matches: ``` '{ \"Sid\": <optional>, \"Effect\": \"Deny\", \"Principal\": \"*\", \"Action\": \"s3:*\", \"Resource\": \"arn:aws:s3:::<bucket_name>/*\", \"Condition\": { \"Bool\": { \"aws:SecureTransport\": \"false\" }' ``` `<optional>` and `<bucket_name>` will be specific to your account 5. Repeat for all the buckets in your AWS account. **From Command Line:** 1. List all of the S3 Buckets ``` aws s3 ls ``` 2. Using the list of buckets run this command on each of them: ``` aws s3api get-bucket-policy --bucket <bucket_name> | grep aws:SecureTransport ``` 3. Confirm that `aws:SecureTransport` is set to false `aws:SecureTransport:false` 4. Confirm that the policy line has Effect set to Deny 'Effect:Deny'",
"AdditionalInformation": "",
"References": "https://aws.amazon.com/premiumsupport/knowledge-center/s3-bucket-policy-for-config-rule/:https://aws.amazon.com/blogs/security/how-to-use-bucket-policies-and-apply-defense-in-depth-to-help-secure-your-amazon-s3-data/:https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/get-bucket-policy.html"
}
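The grep-based audit above extends naturally to all buckets. A sketch, assuming `s3:GetBucketPolicy` on each bucket; it only detects the presence of the condition key, so the surrounding `Deny` statement still needs manual review:

```
#!/usr/bin/env bash
# Flag buckets whose policy never references aws:SecureTransport=false.
for b in $(aws s3api list-buckets --query 'Buckets[].Name' --output text); do
  policy=$(aws s3api get-bucket-policy --bucket "$b" \
    --query Policy --output text 2>/dev/null)
  if printf '%s' "$policy" | grep -q '"aws:SecureTransport": *"false"'; then
    echo "$b: SecureTransport condition present - verify it sits in a Deny"
  else
    echo "$b: no explicit HTTP deny found"
  fi
done
```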
@@ -501,8 +503,8 @@
"Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
"RationaleStatement": "Adding MFA delete to an S3 bucket, requires additional authentication when you change the version state of your bucket or you delete and object version adding another layer of security in the event your security credentials are compromised or unauthorized access is granted.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the steps below to enable MFA delete on an S3 bucket.\n\nNote:\n-You cannot enable MFA Delete using the AWS Management Console. You must use the AWS CLI or API.\n-You must use your 'root' account to enable MFA Delete on S3 buckets.\n\n**From Command line:**\n\n1. Run the s3api put-bucket-versioning command\n\n```\naws s3api put-bucket-versioning --profile my-root-profile --bucket Bucket_Name --versioning-configuration Status=Enabled,MFADelete=Enabled --mfa “arn:aws:iam::aws_account_id:mfa/root-account-mfa-device passcode”\n```",
"AuditProcedure": "Perform the steps below to confirm MFA delete is configured on an S3 Bucket\n\n**From Console:**\n\n1. Login to the S3 console at `https://console.aws.amazon.com/s3/`\n\n2. Click the `Check` box next to the Bucket name you want to confirm\n\n3. In the window under `Properties`\n\n4. Confirm that Versioning is `Enabled`\n\n5. Confirm that MFA Delete is `Enabled`\n\n**From Command Line:**\n\n1. Run the `get-bucket-versioning`\n```\naws s3api get-bucket-versioning --bucket my-bucket\n```\n\nOutput example:\n```\n<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"> \n <Status>Enabled</Status>\n <MfaDelete>Enabled</MfaDelete> \n</VersioningConfiguration>\n```\n\nIf the Console or the CLI output does not show Versioning and MFA Delete `enabled` refer to the remediation below.",
"RemediationProcedure": "Perform the steps below to enable MFA delete on an S3 bucket. Note: -You cannot enable MFA Delete using the AWS Management Console. You must use the AWS CLI or API. -You must use your 'root' account to enable MFA Delete on S3 buckets. **From Command line:** 1. Run the s3api put-bucket-versioning command ``` aws s3api put-bucket-versioning --profile my-root-profile --bucket Bucket_Name --versioning-configuration Status=Enabled,MFADelete=Enabled --mfa “arn:aws:iam::aws_account_id:mfa/root-account-mfa-device passcode” ```",
"AuditProcedure": "Perform the steps below to confirm MFA delete is configured on an S3 Bucket **From Console:** 1. Login to the S3 console at `https://console.aws.amazon.com/s3/` 2. Click the `Check` box next to the Bucket name you want to confirm 3. In the window under `Properties` 4. Confirm that Versioning is `Enabled` 5. Confirm that MFA Delete is `Enabled` **From Command Line:** 1. Run the `get-bucket-versioning` ``` aws s3api get-bucket-versioning --bucket my-bucket ``` Output example: ``` <VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"> <Status>Enabled</Status> <MfaDelete>Enabled</MfaDelete> </VersioningConfiguration> ``` If the Console or the CLI output does not show Versioning and MFA Delete `enabled` refer to the remediation below.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete:https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html:https://aws.amazon.com/blogs/security/securing-access-to-aws-using-mfa-part-3/:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_lost-or-broken.html"
}
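The `get-bucket-versioning` check can be run across every bucket. A minimal sketch; both `Status` and `MfaDelete` are simply absent from the output when versioning was never configured:

```
#!/usr/bin/env bash
# Report versioning and MFA Delete status for every bucket.
for b in $(aws s3api list-buckets --query 'Buckets[].Name' --output text); do
  echo "== $b"
  aws s3api get-bucket-versioning --bucket "$b" --output json
done
```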
@@ -520,10 +522,10 @@
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
"RationaleStatement": "Using a Cloud service or 3rd Party software to continuously monitor and automate the process of data discovery and classification for S3 buckets using machine learning and pattern matching is a strong defense in protecting that information.\n\nAmazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to discover and protect your sensitive data in AWS.",
"RationaleStatement": "Using a Cloud service or 3rd Party software to continuously monitor and automate the process of data discovery and classification for S3 buckets using machine learning and pattern matching is a strong defense in protecting that information. Amazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to discover and protect your sensitive data in AWS.",
"ImpactStatement": "There is a cost associated with using Amazon Macie. There is also typically a cost associated with 3rd Party tools that perform similar processes and protection.",
"RemediationProcedure": "Perform the steps below to enable and configure Amazon Macie\n\n**From Console:**\n\n1. Log on to the Macie console at `https://console.aws.amazon.com/macie/`\n\n2. Click `Get started`.\n\n3. Click `Enable Macie`.\n\nSetup a repository for sensitive data discovery results\n\n1. In the Left pane, under Settings, click `Discovery results`.\n\n2. Make sure `Create bucket` is selected.\n\n3. Create a bucket, enter a name for the bucket. The name must be unique across all S3 buckets. In addition, the name must start with a lowercase letter or a number.\n\n4. Click on `Advanced`.\n\n5. Block all public access, make sure `Yes` is selected.\n\n6. KMS encryption, specify the AWS KMS key that you want to use to encrypt the results. The key must be a symmetric, customer master key (CMK) that's in the same Region as the S3 bucket.\n\n7. Click on `Save`\n\nCreate a job to discover sensitive data\n\n1. In the left pane, click `S3 buckets`. Macie displays a list of all the S3 buckets for your account.\n\n2. Select the `check box` for each bucket that you want Macie to analyze as part of the job\n\n3. Click `Create job`.\n\n3. Click `Quick create`.\n\n4. For the Name and description step, enter a name and, optionally, a description of the job.\n\n5. Then click `Next`.\n\n6. For the Review and create step, click `Submit`.\n\nReview your findings\n\n1. In the left pane, click `Findings`.\n\n2. To view the details of a specific finding, choose any field other than the check box for the finding.\n\nIf you are using a 3rd Party tool to manage and protect your s3 data, follow the Vendor documentation for implementing and configuring that tool.",
"AuditProcedure": "Perform the following steps to determine if Macie is running:\n\n**From Console:**\n\n 1. Login to the Macie console at https://console.aws.amazon.com/macie/\n\n 2. In the left hand pane click on By job under findings.\n\n 3. Confirm that you have a Job setup for your S3 Buckets\n\nWhen you log into the Macie console if you aren't taken to the summary page and you don't have a job setup and running then refer to the remediation procedure below.\n\nIf you are using a 3rd Party tool to manage and protect your s3 data you meet this recommendation.",
"RemediationProcedure": "Perform the steps below to enable and configure Amazon Macie **From Console:** 1. Log on to the Macie console at `https://console.aws.amazon.com/macie/` 2. Click `Get started`. 3. Click `Enable Macie`. Setup a repository for sensitive data discovery results 1. In the Left pane, under Settings, click `Discovery results`. 2. Make sure `Create bucket` is selected. 3. Create a bucket, enter a name for the bucket. The name must be unique across all S3 buckets. In addition, the name must start with a lowercase letter or a number. 4. Click on `Advanced`. 5. Block all public access, make sure `Yes` is selected. 6. KMS encryption, specify the AWS KMS key that you want to use to encrypt the results. The key must be a symmetric, customer master key (CMK) that's in the same Region as the S3 bucket. 7. Click on `Save` Create a job to discover sensitive data 1. In the left pane, click `S3 buckets`. Macie displays a list of all the S3 buckets for your account. 2. Select the `check box` for each bucket that you want Macie to analyze as part of the job 3. Click `Create job`. 3. Click `Quick create`. 4. For the Name and description step, enter a name and, optionally, a description of the job. 5. Then click `Next`. 6. For the Review and create step, click `Submit`. Review your findings 1. In the left pane, click `Findings`. 2. To view the details of a specific finding, choose any field other than the check box for the finding. If you are using a 3rd Party tool to manage and protect your s3 data, follow the Vendor documentation for implementing and configuring that tool.",
"AuditProcedure": "Perform the following steps to determine if Macie is running: **From Console:** 1. Login to the Macie console at https://console.aws.amazon.com/macie/ 2. In the left hand pane click on By job under findings. 3. Confirm that you have a Job setup for your S3 Buckets When you log into the Macie console if you aren't taken to the summary page and you don't have a job setup and running then refer to the remediation procedure below. If you are using a 3rd Party tool to manage and protect your s3 data you meet this recommendation.",
"AdditionalInformation": "",
"References": "https://aws.amazon.com/macie/getting-started/:https://docs.aws.amazon.com/workspaces/latest/adminguide/data-protection.html:https://docs.aws.amazon.com/macie/latest/user/data-classification.html"
}
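Although this recommendation is assessed manually, the Macie session state is queryable per region. A hedged sketch using the `macie2` API; `get-macie-session` fails when Macie has never been enabled:

```
#!/usr/bin/env bash
# Check Macie status and list classification jobs in the current region.
if aws macie2 get-macie-session >/dev/null 2>&1; then
  echo "Macie is enabled; classification jobs:"
  aws macie2 list-classification-jobs \
    --query 'items[].{id:jobId,status:jobStatus}' --output table
else
  echo "Macie is not enabled in this region - see the remediation procedure"
fi
```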
@@ -533,6 +535,7 @@
"Id": "2.1.5",
"Description": "Ensure that S3 Buckets are configured with 'Block public access (bucket settings)'",
"Checks": [
"s3_bucket_level_public_access_block",
"s3_account_level_public_access_blocks"
],
"Attributes": [
@@ -541,10 +544,10 @@
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
"RationaleStatement": "Amazon S3 `Block public access (bucket settings)` prevents the accidental or malicious public exposure of data contained within the respective bucket(s). \n\nAmazon S3 `Block public access (account settings)` prevents the accidental or malicious public exposure of data contained within all buckets of the respective AWS account.\n\nWhether blocking public access to all or some buckets is an organizational decision that should be based on data sensitivity, least privilege, and use case.",
"RationaleStatement": "Amazon S3 `Block public access (bucket settings)` prevents the accidental or malicious public exposure of data contained within the respective bucket(s). Amazon S3 `Block public access (account settings)` prevents the accidental or malicious public exposure of data contained within all buckets of the respective AWS account. Whether blocking public access to all or some buckets is an organizational decision that should be based on data sensitivity, least privilege, and use case.",
"ImpactStatement": "When you apply Block Public Access settings to an account, the settings apply to all AWS Regions globally. The settings might not take effect in all Regions immediately or simultaneously, but they eventually propagate to all Regions.",
"RemediationProcedure": "**If utilizing Block Public Access (bucket settings)**\n\n**From Console:**\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ \n2. Select the Check box next to the Bucket.\n3. Click on 'Edit public access settings'.\n4. Click 'Block all public access'\n5. Repeat for all the buckets in your AWS account that contain sensitive data.\n\n**From Command Line:**\n\n1. List all of the S3 Buckets\n```\naws s3 ls\n```\n2. Set the Block Public Access to true on that bucket\n```\naws s3api put-public-access-block --bucket <name-of-bucket> --public-access-block-configuration \"BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true\"\n```\n\n**If utilizing Block Public Access (account settings)**\n\n**From Console:**\n\nIf the output reads `true` for the separate configuration settings then it is set on the account.\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ \n2. Choose `Block Public Access (account settings)`\n3. Choose `Edit` to change the block public access settings for all the buckets in your AWS account\n4. Choose the settings you want to change, and then choose `Save`. For details about each setting, pause on the `i` icons.\n5. When you're asked for confirmation, enter `confirm`. Then Click `Confirm` to save your changes.\n\n**From Command Line:**\n\nTo set Block Public access settings for this account, run the following command:\n```\naws s3control put-public-access-block\n--public-access-block-configuration BlockPublicAcls=true, IgnorePublicAcls=true, BlockPublicPolicy=true, RestrictPublicBuckets=true\n--account-id <value>\n```",
"AuditProcedure": "**If utilizing Block Public Access (bucket settings)**\n\n**From Console:**\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ \n2. Select the Check box next to the Bucket.\n3. Click on 'Edit public access settings'.\n4. Ensure that block public access settings are set appropriately for this bucket\n5. Repeat for all the buckets in your AWS account.\n\n**From Command Line:**\n\n1. List all of the S3 Buckets\n```\naws s3 ls\n```\n2. Find the public access setting on that bucket\n```\naws s3api get-public-access-block --bucket <name-of-the-bucket>\n```\nOutput if Block Public access is enabled:\n\n```\n{\n \"PublicAccessBlockConfiguration\": {\n \"BlockPublicAcls\": true,\n \"IgnorePublicAcls\": true,\n \"BlockPublicPolicy\": true,\n \"RestrictPublicBuckets\": true\n }\n}\n```\n\nIf the output reads `false` for the separate configuration settings then proceed to the remediation.\n\n**If utilizing Block Public Access (account settings)**\n\n**From Console:**\n\n1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ \n2. Choose `Block public access (account settings)`\n3. Ensure that block public access settings are set appropriately for your AWS account.\n\n**From Command Line:**\n\nTo check Public access settings for this account status, run the following command,\n`aws s3control get-public-access-block --account-id <ACCT_ID> --region <REGION_NAME>`\n\nOutput if Block Public access is enabled:\n\n```\n{\n \"PublicAccessBlockConfiguration\": {\n \"IgnorePublicAcls\": true, \n \"BlockPublicPolicy\": true, \n \"BlockPublicAcls\": true, \n \"RestrictPublicBuckets\": true\n }\n}\n```\n\nIf the output reads `false` for the separate configuration settings then proceed to the remediation.",
"RemediationProcedure": "**If utilizing Block Public Access (bucket settings)** **From Console:** 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Edit public access settings'. 4. Click 'Block all public access' 5. Repeat for all the buckets in your AWS account that contain sensitive data. **From Command Line:** 1. List all of the S3 Buckets ``` aws s3 ls ``` 2. Set the Block Public Access to true on that bucket ``` aws s3api put-public-access-block --bucket <name-of-bucket> --public-access-block-configuration \"BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true\" ``` **If utilizing Block Public Access (account settings)** **From Console:** If the output reads `true` for the separate configuration settings then it is set on the account. 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Choose `Block Public Access (account settings)` 3. Choose `Edit` to change the block public access settings for all the buckets in your AWS account 4. Choose the settings you want to change, and then choose `Save`. For details about each setting, pause on the `i` icons. 5. When you're asked for confirmation, enter `confirm`. Then Click `Confirm` to save your changes. **From Command Line:** To set Block Public access settings for this account, run the following command: ``` aws s3control put-public-access-block --public-access-block-configuration BlockPublicAcls=true, IgnorePublicAcls=true, BlockPublicPolicy=true, RestrictPublicBuckets=true --account-id <value> ```",
"AuditProcedure": "**If utilizing Block Public Access (bucket settings)** **From Console:** 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Edit public access settings'. 4. Ensure that block public access settings are set appropriately for this bucket 5. Repeat for all the buckets in your AWS account. **From Command Line:** 1. List all of the S3 Buckets ``` aws s3 ls ``` 2. Find the public access setting on that bucket ``` aws s3api get-public-access-block --bucket <name-of-the-bucket> ``` Output if Block Public access is enabled: ``` { \"PublicAccessBlockConfiguration\": { \"BlockPublicAcls\": true, \"IgnorePublicAcls\": true, \"BlockPublicPolicy\": true, \"RestrictPublicBuckets\": true } } ``` If the output reads `false` for the separate configuration settings then proceed to the remediation. **If utilizing Block Public Access (account settings)** **From Console:** 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Choose `Block public access (account settings)` 3. Ensure that block public access settings are set appropriately for your AWS account. **From Command Line:** To check Public access settings for this account status, run the following command, `aws s3control get-public-access-block --account-id <ACCT_ID> --region <REGION_NAME>` Output if Block Public access is enabled: ``` { \"PublicAccessBlockConfiguration\": { \"IgnorePublicAcls\": true, \"BlockPublicPolicy\": true, \"BlockPublicAcls\": true, \"RestrictPublicBuckets\": true } } ``` If the output reads `false` for the separate configuration settings then proceed to the remediation.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/block-public-access-account.html"
}
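Taken together, the per-bucket and account-level checks above condense into a single audit loop. A minimal bash sketch, not part of the CIS text: the `jq` dependency and the choice to treat buckets with no PublicAccessBlock configuration at all as findings are our assumptions.

```
#!/usr/bin/env bash
# Sketch: flag buckets whose Block Public Access settings are not all true.
# A bucket with no PublicAccessBlock configuration makes the call fail,
# which we also treat as a finding. Requires the AWS CLI and jq.
for bucket in $(aws s3api list-buckets --query 'Buckets[].Name' --output text); do
  config=$(aws s3api get-public-access-block --bucket "$bucket" \
    --query 'PublicAccessBlockConfiguration' --output json 2>/dev/null)
  if [ -z "$config" ] || echo "$config" | jq -e 'any(.[]; . == false)' >/dev/null; then
    echo "FAIL: $bucket"
  fi
done
# Account-level settings:
aws s3control get-public-access-block \
  --account-id "$(aws sts get-caller-identity --query Account --output text)"
```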
@@ -564,8 +567,8 @@
"Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
"RationaleStatement": "Encrypting data at rest reduces the likelihood that it is unintentionally exposed and can nullify the impact of disclosure if the encryption remains unbroken.",
"ImpactStatement": "Losing access or removing the KMS key in use by the EBS volumes will result in no longer being able to access the volumes.",
"RemediationProcedure": "**From Console:**\n\n1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/ \n2. Under `Account attributes`, click `EBS encryption`.\n3. Click `Manage`.\n4. Click the `Enable` checkbox.\n5. Click `Update EBS encryption`\n6. Repeat for every region requiring the change.\n\n**Note:** EBS volume encryption is configured per region.\n\n**From Command Line:**\n\n1. Run \n```\naws --region <region> ec2 enable-ebs-encryption-by-default\n```\n2. Verify that `\"EbsEncryptionByDefault\": true` is displayed.\n3. Repeat every region requiring the change.\n\n**Note:** EBS volume encryption is configured per region.",
"AuditProcedure": "**From Console:**\n\n1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/ \n2. Under `Account attributes`, click `EBS encryption`.\n3. Verify `Always encrypt new EBS volumes` displays `Enabled`.\n4. Review every region in-use.\n\n**Note:** EBS volume encryption is configured per region.\n\n**From Command Line:**\n\n1. Run \n```\naws --region <region> ec2 get-ebs-encryption-by-default\n```\n2. Verify that `\"EbsEncryptionByDefault\": true` is displayed.\n3. Review every region in-use.\n\n**Note:** EBS volume encryption is configured per region.",
"RemediationProcedure": "**From Console:** 1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/ 2. Under `Account attributes`, click `EBS encryption`. 3. Click `Manage`. 4. Click the `Enable` checkbox. 5. Click `Update EBS encryption` 6. Repeat for every region requiring the change. **Note:** EBS volume encryption is configured per region. **From Command Line:** 1. Run ``` aws --region <region> ec2 enable-ebs-encryption-by-default ``` 2. Verify that `\"EbsEncryptionByDefault\": true` is displayed. 3. Repeat every region requiring the change. **Note:** EBS volume encryption is configured per region.",
"AuditProcedure": "**From Console:** 1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/ 2. Under `Account attributes`, click `EBS encryption`. 3. Verify `Always encrypt new EBS volumes` displays `Enabled`. 4. Review every region in-use. **Note:** EBS volume encryption is configured per region. **From Command Line:** 1. Run ``` aws --region <region> ec2 get-ebs-encryption-by-default ``` 2. Verify that `\"EbsEncryptionByDefault\": true` is displayed. 3. Review every region in-use. **Note:** EBS volume encryption is configured per region.",
"AdditionalInformation": "Default EBS volume encryption only applies to newly created EBS volumes. Existing EBS volumes are **not** converted automatically.",
"References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html:https://aws.amazon.com/blogs/aws/new-opt-in-to-default-encryption-for-new-ebs-volumes/"
}
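Because the setting is per region, the audit and remediation above reduce to one call per region. A minimal bash sketch; the loop over `describe-regions` is our addition, not CIS text, and it assumes a configured AWS CLI.

```
#!/usr/bin/env bash
# Sketch: report EBS encryption-by-default for every enabled region.
for region in $(aws ec2 describe-regions --query 'Regions[].RegionName' --output text); do
  status=$(aws ec2 get-ebs-encryption-by-default --region "$region" \
    --query 'EbsEncryptionByDefault' --output text)
  echo "$region: EbsEncryptionByDefault=$status"
  # Uncomment to remediate non-compliant regions:
  # [ "$status" = "False" ] && aws ec2 enable-ebs-encryption-by-default --region "$region"
done
```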
@@ -585,8 +588,8 @@
"Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
"RationaleStatement": "Databases are likely to hold sensitive and critical data, it is highly recommended to implement encryption in order to protect your data from unauthorized access or disclosure. With RDS encryption enabled, the data stored on the instance's underlying storage, the automated backups, read replicas, and snapshots, are all encrypted.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\n1. Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/.\n2. In the left navigation panel, click on `Databases`\n3. Select the Database instance that needs to be encrypted.\n4. Click on `Actions` button placed at the top right and select `Take Snapshot`.\n5. On the Take Snapshot page, enter a database name of which you want to take a snapshot in the `Snapshot Name` field and click on `Take Snapshot`.\n6. Select the newly created snapshot and click on the `Action` button placed at the top right and select `Copy snapshot` from the Action menu.\n7. On the Make Copy of DB Snapshot page, perform the following:\n\n- In the New DB Snapshot Identifier field, Enter a name for the `new snapshot`.\n- Check `Copy Tags`, New snapshot must have the same tags as the source snapshot.\n- Select `Yes` from the `Enable Encryption` dropdown list to enable encryption, You can choose to use the AWS default encryption key or custom key from Master Key dropdown list.\n\n8. Click `Copy Snapshot` to create an encrypted copy of the selected instance snapshot.\n9. Select the new Snapshot Encrypted Copy and click on the `Action` button placed at the top right and select `Restore Snapshot` button from the Action menu, This will restore the encrypted snapshot to a new database instance.\n10. On the Restore DB Instance page, enter a unique name for the new database instance in the DB Instance Identifier field.\n11. Review the instance configuration details and click `Restore DB Instance`.\n12. As the new instance provisioning process is completed can update application configuration to refer to the endpoint of the new Encrypted database instance Once the database endpoint is changed at the application level, can remove the unencrypted instance.\n\n**From Command Line:**\n\n1. Run `describe-db-instances` command to list all RDS database names available in the selected AWS region, The command output should return the database instance identifier.\n```\naws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier'\n```\n2. Run `create-db-snapshot` command to create a snapshot for the selected database instance, The command output will return the `new snapshot` with name DB Snapshot Name.\n```\naws rds create-db-snapshot --region <region-name> --db-snapshot-identifier <DB-Snapshot-Name> --db-instance-identifier <DB-Name>\n```\n3. Now run `list-aliases` command to list the KMS keys aliases available in a specified region, The command output should return each `key alias currently available`. For our RDS encryption activation process, locate the ID of the AWS default KMS key.\n```\naws kms list-aliases --region <region-name>\n```\n4. Run `copy-db-snapshot` command using the default KMS key ID for RDS instances returned earlier to create an encrypted copy of the database instance snapshot, The command output will return the `encrypted instance snapshot configuration`.\n```\naws rds copy-db-snapshot --region <region-name> --source-db-snapshot-identifier <DB-Snapshot-Name> --target-db-snapshot-identifier <DB-Snapshot-Name-Encrypted> --copy-tags --kms-key-id <KMS-ID-For-RDS>\n```\n5. 
Run `restore-db-instance-from-db-snapshot` command to restore the encrypted snapshot created at the previous step to a new database instance, If successful, the command output should return the new encrypted database instance configuration.\n```\naws rds restore-db-instance-from-db-snapshot --region <region-name> --db-instance-identifier <DB-Name-Encrypted> --db-snapshot-identifier <DB-Snapshot-Name-Encrypted>\n```\n6. Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region, Output will return database instance identifier name Select encrypted database name that we just created DB-Name-Encrypted.\n```\naws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier'\n```\n7. Run again `describe-db-instances` command using the RDS instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True`.\n```\naws rds describe-db-instances --region <region-name> --db-instance-identifier <DB-Name-Encrypted> --query 'DBInstances[*].StorageEncrypted'\n```",
"AuditProcedure": "**From Console:**\n\n1. Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/\n2. In the navigation pane, under RDS dashboard, click `Databases`.\n3. Select the RDS Instance that you want to examine\n4. Click `Instance Name` to see details, then click on `Configuration` tab.\n5. Under Configuration Details section, In Storage pane search for the `Encryption Enabled` Status.\n6. If the current status is set to `Disabled`, Encryption is not enabled for the selected RDS Instance database instance.\n7. Repeat steps 3 to 7 to verify encryption status of other RDS Instance in same region.\n8. Change region from the top of the navigation bar and repeat audit for other regions.\n\n**From Command Line:**\n\n1. Run `describe-db-instances` command to list all RDS Instance database names, available in the selected AWS region, Output will return each Instance database identifier-name.\n ```\naws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier'\n```\n2. Run again `describe-db-instances` command using the RDS Instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True` Or `False`.\n```\naws rds describe-db-instances --region <region-name> --db-instance-identifier <DB-Name> --query 'DBInstances[*].StorageEncrypted'\n```\n3. If the StorageEncrypted parameter value is `False`, Encryption is not enabled for the selected RDS database instance.\n4. Repeat steps 1 to 3 for auditing each RDS Instance and change Region to verify for other regions",
"RemediationProcedure": "**From Console:** 1. Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/. 2. In the left navigation panel, click on `Databases` 3. Select the Database instance that needs to be encrypted. 4. Click on `Actions` button placed at the top right and select `Take Snapshot`. 5. On the Take Snapshot page, enter a database name of which you want to take a snapshot in the `Snapshot Name` field and click on `Take Snapshot`. 6. Select the newly created snapshot and click on the `Action` button placed at the top right and select `Copy snapshot` from the Action menu. 7. On the Make Copy of DB Snapshot page, perform the following: - In the New DB Snapshot Identifier field, Enter a name for the `new snapshot`. - Check `Copy Tags`, New snapshot must have the same tags as the source snapshot. - Select `Yes` from the `Enable Encryption` dropdown list to enable encryption, You can choose to use the AWS default encryption key or custom key from Master Key dropdown list. 8. Click `Copy Snapshot` to create an encrypted copy of the selected instance snapshot. 9. Select the new Snapshot Encrypted Copy and click on the `Action` button placed at the top right and select `Restore Snapshot` button from the Action menu, This will restore the encrypted snapshot to a new database instance. 10. On the Restore DB Instance page, enter a unique name for the new database instance in the DB Instance Identifier field. 11. Review the instance configuration details and click `Restore DB Instance`. 12. As the new instance provisioning process is completed can update application configuration to refer to the endpoint of the new Encrypted database instance Once the database endpoint is changed at the application level, can remove the unencrypted instance. **From Command Line:** 1. Run `describe-db-instances` command to list all RDS database names available in the selected AWS region, The command output should return the database instance identifier. ``` aws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. Run `create-db-snapshot` command to create a snapshot for the selected database instance, The command output will return the `new snapshot` with name DB Snapshot Name. ``` aws rds create-db-snapshot --region <region-name> --db-snapshot-identifier <DB-Snapshot-Name> --db-instance-identifier <DB-Name> ``` 3. Now run `list-aliases` command to list the KMS keys aliases available in a specified region, The command output should return each `key alias currently available`. For our RDS encryption activation process, locate the ID of the AWS default KMS key. ``` aws kms list-aliases --region <region-name> ``` 4. Run `copy-db-snapshot` command using the default KMS key ID for RDS instances returned earlier to create an encrypted copy of the database instance snapshot, The command output will return the `encrypted instance snapshot configuration`. ``` aws rds copy-db-snapshot --region <region-name> --source-db-snapshot-identifier <DB-Snapshot-Name> --target-db-snapshot-identifier <DB-Snapshot-Name-Encrypted> --copy-tags --kms-key-id <KMS-ID-For-RDS> ``` 5. Run `restore-db-instance-from-db-snapshot` command to restore the encrypted snapshot created at the previous step to a new database instance, If successful, the command output should return the new encrypted database instance configuration. 
``` aws rds restore-db-instance-from-db-snapshot --region <region-name> --db-instance-identifier <DB-Name-Encrypted> --db-snapshot-identifier <DB-Snapshot-Name-Encrypted> ``` 6. Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region, Output will return database instance identifier name Select encrypted database name that we just created DB-Name-Encrypted. ``` aws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier' ``` 7. Run again `describe-db-instances` command using the RDS instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True`. ``` aws rds describe-db-instances --region <region-name> --db-instance-identifier <DB-Name-Encrypted> --query 'DBInstances[*].StorageEncrypted' ```",
"AuditProcedure": "**From Console:** 1. Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/ 2. In the navigation pane, under RDS dashboard, click `Databases`. 3. Select the RDS Instance that you want to examine 4. Click `Instance Name` to see details, then click on `Configuration` tab. 5. Under Configuration Details section, In Storage pane search for the `Encryption Enabled` Status. 6. If the current status is set to `Disabled`, Encryption is not enabled for the selected RDS Instance database instance. 7. Repeat steps 3 to 7 to verify encryption status of other RDS Instance in same region. 8. Change region from the top of the navigation bar and repeat audit for other regions. **From Command Line:** 1. Run `describe-db-instances` command to list all RDS Instance database names, available in the selected AWS region, Output will return each Instance database identifier-name. ``` aws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. Run again `describe-db-instances` command using the RDS Instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True` Or `False`. ``` aws rds describe-db-instances --region <region-name> --db-instance-identifier <DB-Name> --query 'DBInstances[*].StorageEncrypted' ``` 3. If the StorageEncrypted parameter value is `False`, Encryption is not enabled for the selected RDS database instance. 4. Repeat steps 1 to 3 for auditing each RDS Instance and change Region to verify for other regions",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encryption.html:https://aws.amazon.com/blogs/database/selecting-the-right-encryption-options-for-amazon-rds-and-amazon-aurora-database-engines/#:~:text=With%20RDS%2Dencrypted%20resources%2C%20data,transparent%20to%20your%20database%20engine.:https://aws.amazon.com/rds/features/security/"
}
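For auditing at a glance, the two CLI audit steps above can be combined into a single query. A minimal sketch, assuming a configured AWS CLI; the `<region-name>` placeholder matches the procedures above.

```
# Sketch: list every RDS instance in a region with its StorageEncrypted flag.
aws rds describe-db-instances --region <region-name> \
  --query 'DBInstances[*].[DBInstanceIdentifier,StorageEncrypted]' --output table
```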
@@ -606,8 +609,8 @@
"Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
"RationaleStatement": "AWS RDS will occasionally deprecate minor engine versions and provide new ones for an upgrade. When the last version number within the release is replaced, the version changed is considered minor. With Auto Minor Version Upgrade feature enabled, the version upgrades will occur automatically during the specified maintenance window so your RDS instances can get the new features, bug fixes, and security patches for their database engines.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\n1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/.\n2. In the left navigation panel, click on `Databases`.\n3. Select the RDS instance that wants to update.\n4. Click on the `Modify` button placed on the top right side.\n5. On the `Modify DB Instance: <instance identifier>` page, In the `Maintenance` section, select `Auto minor version upgrade` click on the `Yes` radio button.\n6. At the bottom of the page click on `Continue`, check to Apply Immediately to apply the changes immediately, or select `Apply during the next scheduled maintenance window` to avoid any downtime.\n7. Review the changes and click on `Modify DB Instance`. The instance status should change from available to modifying and back to available. Once the feature is enabled, the `Auto Minor Version Upgrade` status should change to `Yes`.\n\n**From Command Line:**\n\n1. Run `describe-db-instances` command to list all RDS database instance names, available in the selected AWS region:\n```\naws rds describe-db-instances --region <regionName> --query 'DBInstances[*].DBInstanceIdentifier'\n```\n2. The command output should return each database instance identifier.\n3. Run the `modify-db-instance` command to modify the selected RDS instance configuration this command will apply the changes immediately, Remove `--apply-immediately` to apply changes during the next scheduled maintenance window and avoid any downtime:\n```\naws rds modify-db-instance --region <regionName> --db-instance-identifier <dbInstanceIdentifier> --auto-minor-version-upgrade --apply-immediately\n```\n4. The command output should reveal the new configuration metadata for the RDS instance and check `AutoMinorVersionUpgrade` parameter value.\n5. Run `describe-db-instances` command to check if the Auto Minor Version Upgrade feature has been successfully enable:\n```\naws rds describe-db-instances --region <regionName> --db-instance-identifier <dbInstanceIdentifier> --query 'DBInstances[*].AutoMinorVersionUpgrade'\n```\n6. The command output should return the feature current status set to `true`, the feature is `enabled` and the minor engine upgrades will be applied to the selected RDS instance.",
"AuditProcedure": "**From Console:**\n\n1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/.\n2. In the left navigation panel, click on `Databases`.\n3. Select the RDS instance that wants to examine.\n4. Click on the `Maintenance and backups` panel.\n5. Under the `Maintenance` section, search for the Auto Minor Version Upgrade status.\n- If the current status is set to `Disabled`, means the feature is not set and the minor engine upgrades released will not be applied to the selected RDS instance\n\n**From Command Line:**\n\n1. Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region:\n```\naws rds describe-db-instances --region <regionName> --query 'DBInstances[*].DBInstanceIdentifier'\n```\n2. The command output should return each database instance identifier.\n3. Run again `describe-db-instances` command using the RDS instance identifier returned earlier to determine the Auto Minor Version Upgrade status for the selected instance:\n```\naws rds describe-db-instances --region <regionName> --db-instance-identifier <dbInstanceIdentifier> --query 'DBInstances[*].AutoMinorVersionUpgrade'\n```\n4. The command output should return the feature current status. If the current status is set to `true`, the feature is enabled and the minor engine upgrades will be applied to the selected RDS instance.",
"RemediationProcedure": "**From Console:** 1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. In the left navigation panel, click on `Databases`. 3. Select the RDS instance that wants to update. 4. Click on the `Modify` button placed on the top right side. 5. On the `Modify DB Instance: <instance identifier>` page, In the `Maintenance` section, select `Auto minor version upgrade` click on the `Yes` radio button. 6. At the bottom of the page click on `Continue`, check to Apply Immediately to apply the changes immediately, or select `Apply during the next scheduled maintenance window` to avoid any downtime. 7. Review the changes and click on `Modify DB Instance`. The instance status should change from available to modifying and back to available. Once the feature is enabled, the `Auto Minor Version Upgrade` status should change to `Yes`. **From Command Line:** 1. Run `describe-db-instances` command to list all RDS database instance names, available in the selected AWS region: ``` aws rds describe-db-instances --region <regionName> --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. The command output should return each database instance identifier. 3. Run the `modify-db-instance` command to modify the selected RDS instance configuration this command will apply the changes immediately, Remove `--apply-immediately` to apply changes during the next scheduled maintenance window and avoid any downtime: ``` aws rds modify-db-instance --region <regionName> --db-instance-identifier <dbInstanceIdentifier> --auto-minor-version-upgrade --apply-immediately ``` 4. The command output should reveal the new configuration metadata for the RDS instance and check `AutoMinorVersionUpgrade` parameter value. 5. Run `describe-db-instances` command to check if the Auto Minor Version Upgrade feature has been successfully enable: ``` aws rds describe-db-instances --region <regionName> --db-instance-identifier <dbInstanceIdentifier> --query 'DBInstances[*].AutoMinorVersionUpgrade' ``` 6. The command output should return the feature current status set to `true`, the feature is `enabled` and the minor engine upgrades will be applied to the selected RDS instance.",
"AuditProcedure": "**From Console:** 1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. In the left navigation panel, click on `Databases`. 3. Select the RDS instance that wants to examine. 4. Click on the `Maintenance and backups` panel. 5. Under the `Maintenance` section, search for the Auto Minor Version Upgrade status. - If the current status is set to `Disabled`, means the feature is not set and the minor engine upgrades released will not be applied to the selected RDS instance **From Command Line:** 1. Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region: ``` aws rds describe-db-instances --region <regionName> --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. The command output should return each database instance identifier. 3. Run again `describe-db-instances` command using the RDS instance identifier returned earlier to determine the Auto Minor Version Upgrade status for the selected instance: ``` aws rds describe-db-instances --region <regionName> --db-instance-identifier <dbInstanceIdentifier> --query 'DBInstances[*].AutoMinorVersionUpgrade' ``` 4. The command output should return the feature current status. If the current status is set to `true`, the feature is enabled and the minor engine upgrades will be applied to the selected RDS instance.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_RDS_Managing.html:https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Upgrading.html:https://aws.amazon.com/rds/faqs/"
}
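The audit and remediation steps above can be folded into one pass that only touches non-compliant instances. A minimal bash sketch, not CIS text; the example region is hypothetical, and whether to keep `--apply-immediately` is the same trade-off described in step 3 above.

```
#!/usr/bin/env bash
# Sketch: enable Auto Minor Version Upgrade on instances that have it disabled.
region="us-east-1"  # hypothetical example region
for db in $(aws rds describe-db-instances --region "$region" \
    --query 'DBInstances[?AutoMinorVersionUpgrade==`false`].DBInstanceIdentifier' \
    --output text); do
  echo "Enabling Auto Minor Version Upgrade on $db"
  aws rds modify-db-instance --region "$region" --db-instance-identifier "$db" \
    --auto-minor-version-upgrade --apply-immediately
done
```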
@@ -627,8 +630,8 @@
"Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to any publicly accessible RDS database instance, you must disable the database Publicly Accessible flag and update the VPC security group associated with the instance.",
"RationaleStatement": "Ensure that no public-facing RDS database instances are provisioned in your AWS account and restrict unauthorized access in order to minimize security risks. When the RDS instance allows unrestricted access (0.0.0.0/0), everyone and everything on the Internet can establish a connection to your database and this can increase the opportunity for malicious activities such as brute force attacks, PostgreSQL injections, or DoS/DDoS attacks.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\n1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/.\n2. Under the navigation panel, On RDS Dashboard, click `Databases`.\n3. Select the RDS instance that you want to update.\n4. Click `Modify` from the dashboard top menu.\n5. On the Modify DB Instance panel, under the `Connectivity` section, click on `Additional connectivity configuration` and update the value for `Publicly Accessible` to Not publicly accessible to restrict public access. Follow the below steps to update subnet configurations:\n- Select the `Connectivity and security` tab, and click on the VPC attribute value inside the `Networking` section.\n- Select the `Details` tab from the VPC dashboard bottom panel and click on Route table configuration attribute value.\n- On the Route table details page, select the Routes tab from the dashboard bottom panel and click on `Edit routes`.\n- On the Edit routes page, update the Destination of Target which is set to `igw-xxxxx` and click on `Save` routes.\n6. On the Modify DB Instance panel Click on `Continue` and In the Scheduling of modifications section, perform one of the following actions based on your requirements:\n- Select Apply during the next scheduled maintenance window to apply the changes automatically during the next scheduled maintenance window.\n- Select Apply immediately to apply the changes right away. With this option, any pending modifications will be asynchronously applied as soon as possible, regardless of the maintenance window setting for this RDS database instance. Note that any changes available in the pending modifications queue are also applied. If any of the pending modifications require downtime, choosing this option can cause unexpected downtime for the application.\n7. Repeat steps 3 to 6 for each RDS instance available in the current region.\n8. Change the AWS region from the navigation bar to repeat the process for other regions.\n\n**From Command Line:**\n\n1. Run `describe-db-instances` command to list all RDS database names identifiers, available in the selected AWS region:\n```\naws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier'\n```\n2. The command output should return each database instance identifier.\n3. Run `modify-db-instance` command to modify the selected RDS instance configuration. Then use the following command to disable the `Publicly Accessible` flag for the selected RDS instances. This command use the apply-immediately flag. If you want `to avoid any downtime --no-apply-immediately flag can be used`:\n```\naws rds modify-db-instance --region <region-name> --db-instance-identifier <db-name> --no-publicly-accessible --apply-immediately\n```\n4. The command output should reveal the `PubliclyAccessible` configuration under pending values and should get applied at the specified time.\n5. Updating the Internet Gateway Destination via AWS CLI is not currently supported To update information about Internet Gateway use the AWS Console Procedure.\n6. Repeat steps 1 to 5 for each RDS instance provisioned in the current region.\n7. Change the AWS region by using the --region filter to repeat the process for other regions.",
"AuditProcedure": "**From Console:**\n\n1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/.\n2. Under the navigation panel, On RDS Dashboard, click `Databases`.\n3. Select the RDS instance that you want to examine.\n4. Click `Instance Name` from the dashboard, Under `Connectivity and Security.\n5. On the `Security`, check if the Publicly Accessible flag status is set to `Yes`, follow the below-mentioned steps to check database subnet access.\n- In the `networking` section, click the subnet link available under `Subnets`\n- The link will redirect you to the VPC Subnets page.\n- Select the subnet listed on the page and click the `Route Table` tab from the dashboard bottom panel. If the route table contains any entries with the destination `CIDR block set to 0.0.0.0/0` and with an `Internet Gateway` attached.\n- The selected RDS database instance was provisioned inside a public subnet, therefore is not running within a logically isolated environment and can be accessible from the Internet.\n6. Repeat steps no. 4 and 5 to determine the type (public or private) and subnet for other RDS database instances provisioned in the current region.\n8. Change the AWS region from the navigation bar and repeat the audit process for other regions.\n\n**From Command Line:**\n\n1. Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region:\n```\naws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier'\n```\n2. The command output should return each database instance `identifier`.\n3. Run again `describe-db-instances` command using the `PubliclyAccessible` parameter as query filter to reveal the database instance Publicly Accessible flag status:\n```\naws rds describe-db-instances --region <region-name> --db-instance-identifier <db-instance-name> --query 'DBInstances[*].PubliclyAccessible'\n```\n4. Check for the Publicly Accessible parameter status, If the Publicly Accessible flag is set to `Yes`. Then selected RDS database instance is publicly accessible and insecure, follow the below-mentioned steps to check database subnet access\n5. Run again `describe-db-instances` command using the RDS database instance identifier that you want to check and appropriate filtering to describe the VPC subnet(s) associated with the selected instance:\n```\naws rds describe-db-instances --region <region-name> --db-instance-identifier <db-name> --query 'DBInstances[*].DBSubnetGroup.Subnets[]'\n```\n- The command output should list the subnets available in the selected database subnet group.\n6. Run `describe-route-tables` command using the ID of the subnet returned at the previous step to describe the routes of the VPC route table associated with the selected subnet:\n```\naws ec2 describe-route-tables --region <region-name> --filters \"Name=association.subnet-id,Values=<SubnetID>\" --query 'RouteTables[*].Routes[]'\n```\n- If the command returns the route table associated with database instance subnet ID. Check the `GatewayId` and `DestinationCidrBlock` attributes values returned in the output. If the route table contains any entries with the `GatewayId` value set to `igw-xxxxxxxx` and the `DestinationCidrBlock` value set to `0.0.0.0/0`, the selected RDS database instance was provisioned inside a public subnet.\n- Or\n- If the command returns empty results, the route table is implicitly associated with subnet, therefore the audit process continues with the next step\n7. 
Run again `describe-db-instances` command using the RDS database instance identifier that you want to check and appropriate filtering to describe the VPC ID associated with the selected instance:\n```\naws rds describe-db-instances --region <region-name> --db-instance-identifier <db-name> --query 'DBInstances[*].DBSubnetGroup.VpcId'\n```\n- The command output should show the VPC ID in the selected database subnet group\n8. Now run `describe-route-tables` command using the ID of the VPC returned at the previous step to describe the routes of the VPC main route table implicitly associated with the selected subnet:\n```\naws ec2 describe-route-tables --region <region-name> --filters \"Name=vpc-id,Values=<VPC-ID>\" \"Name=association.main,Values=true\" --query 'RouteTables[*].Routes[]'\n```\n- The command output returns the VPC main route table implicitly associated with database instance subnet ID. Check the `GatewayId` and `DestinationCidrBlock` attributes values returned in the output. If the route table contains any entries with the `GatewayId` value set to `igw-xxxxxxxx` and the `DestinationCidrBlock` value set to `0.0.0.0/0`, the selected RDS database instance was provisioned inside a public subnet, therefore is not running within a logically isolated environment and does not adhere to AWS security best practices.",
"RemediationProcedure": "**From Console:** 1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. Under the navigation panel, On RDS Dashboard, click `Databases`. 3. Select the RDS instance that you want to update. 4. Click `Modify` from the dashboard top menu. 5. On the Modify DB Instance panel, under the `Connectivity` section, click on `Additional connectivity configuration` and update the value for `Publicly Accessible` to Not publicly accessible to restrict public access. Follow the below steps to update subnet configurations: - Select the `Connectivity and security` tab, and click on the VPC attribute value inside the `Networking` section. - Select the `Details` tab from the VPC dashboard bottom panel and click on Route table configuration attribute value. - On the Route table details page, select the Routes tab from the dashboard bottom panel and click on `Edit routes`. - On the Edit routes page, update the Destination of Target which is set to `igw-xxxxx` and click on `Save` routes. 6. On the Modify DB Instance panel Click on `Continue` and In the Scheduling of modifications section, perform one of the following actions based on your requirements: - Select Apply during the next scheduled maintenance window to apply the changes automatically during the next scheduled maintenance window. - Select Apply immediately to apply the changes right away. With this option, any pending modifications will be asynchronously applied as soon as possible, regardless of the maintenance window setting for this RDS database instance. Note that any changes available in the pending modifications queue are also applied. If any of the pending modifications require downtime, choosing this option can cause unexpected downtime for the application. 7. Repeat steps 3 to 6 for each RDS instance available in the current region. 8. Change the AWS region from the navigation bar to repeat the process for other regions. **From Command Line:** 1. Run `describe-db-instances` command to list all RDS database names identifiers, available in the selected AWS region: ``` aws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. The command output should return each database instance identifier. 3. Run `modify-db-instance` command to modify the selected RDS instance configuration. Then use the following command to disable the `Publicly Accessible` flag for the selected RDS instances. This command use the apply-immediately flag. If you want `to avoid any downtime --no-apply-immediately flag can be used`: ``` aws rds modify-db-instance --region <region-name> --db-instance-identifier <db-name> --no-publicly-accessible --apply-immediately ``` 4. The command output should reveal the `PubliclyAccessible` configuration under pending values and should get applied at the specified time. 5. Updating the Internet Gateway Destination via AWS CLI is not currently supported To update information about Internet Gateway use the AWS Console Procedure. 6. Repeat steps 1 to 5 for each RDS instance provisioned in the current region. 7. Change the AWS region by using the --region filter to repeat the process for other regions.",
"AuditProcedure": "**From Console:** 1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. Under the navigation panel, On RDS Dashboard, click `Databases`. 3. Select the RDS instance that you want to examine. 4. Click `Instance Name` from the dashboard, Under `Connectivity and Security. 5. On the `Security`, check if the Publicly Accessible flag status is set to `Yes`, follow the below-mentioned steps to check database subnet access. - In the `networking` section, click the subnet link available under `Subnets` - The link will redirect you to the VPC Subnets page. - Select the subnet listed on the page and click the `Route Table` tab from the dashboard bottom panel. If the route table contains any entries with the destination `CIDR block set to 0.0.0.0/0` and with an `Internet Gateway` attached. - The selected RDS database instance was provisioned inside a public subnet, therefore is not running within a logically isolated environment and can be accessible from the Internet. 6. Repeat steps no. 4 and 5 to determine the type (public or private) and subnet for other RDS database instances provisioned in the current region. 8. Change the AWS region from the navigation bar and repeat the audit process for other regions. **From Command Line:** 1. Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region: ``` aws rds describe-db-instances --region <region-name> --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. The command output should return each database instance `identifier`. 3. Run again `describe-db-instances` command using the `PubliclyAccessible` parameter as query filter to reveal the database instance Publicly Accessible flag status: ``` aws rds describe-db-instances --region <region-name> --db-instance-identifier <db-instance-name> --query 'DBInstances[*].PubliclyAccessible' ``` 4. Check for the Publicly Accessible parameter status, If the Publicly Accessible flag is set to `Yes`. Then selected RDS database instance is publicly accessible and insecure, follow the below-mentioned steps to check database subnet access 5. Run again `describe-db-instances` command using the RDS database instance identifier that you want to check and appropriate filtering to describe the VPC subnet(s) associated with the selected instance: ``` aws rds describe-db-instances --region <region-name> --db-instance-identifier <db-name> --query 'DBInstances[*].DBSubnetGroup.Subnets[]' ``` - The command output should list the subnets available in the selected database subnet group. 6. Run `describe-route-tables` command using the ID of the subnet returned at the previous step to describe the routes of the VPC route table associated with the selected subnet: ``` aws ec2 describe-route-tables --region <region-name> --filters \"Name=association.subnet-id,Values=<SubnetID>\" --query 'RouteTables[*].Routes[]' ``` - If the command returns the route table associated with database instance subnet ID. Check the `GatewayId` and `DestinationCidrBlock` attributes values returned in the output. If the route table contains any entries with the `GatewayId` value set to `igw-xxxxxxxx` and the `DestinationCidrBlock` value set to `0.0.0.0/0`, the selected RDS database instance was provisioned inside a public subnet. - Or - If the command returns empty results, the route table is implicitly associated with subnet, therefore the audit process continues with the next step 7. 
Run again `describe-db-instances` command using the RDS database instance identifier that you want to check and appropriate filtering to describe the VPC ID associated with the selected instance: ``` aws rds describe-db-instances --region <region-name> --db-instance-identifier <db-name> --query 'DBInstances[*].DBSubnetGroup.VpcId' ``` - The command output should show the VPC ID in the selected database subnet group 8. Now run `describe-route-tables` command using the ID of the VPC returned at the previous step to describe the routes of the VPC main route table implicitly associated with the selected subnet: ``` aws ec2 describe-route-tables --region <region-name> --filters \"Name=vpc-id,Values=<VPC-ID>\" \"Name=association.main,Values=true\" --query 'RouteTables[*].Routes[]' ``` - The command output returns the VPC main route table implicitly associated with database instance subnet ID. Check the `GatewayId` and `DestinationCidrBlock` attributes values returned in the output. If the route table contains any entries with the `GatewayId` value set to `igw-xxxxxxxx` and the `DestinationCidrBlock` value set to `0.0.0.0/0`, the selected RDS database instance was provisioned inside a public subnet, therefore is not running within a logically isolated environment and does not adhere to AWS security best practices.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.html:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario2.html:https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html:https://aws.amazon.com/rds/faqs/"
}
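As a first pass before the subnet and route-table analysis above, the `PubliclyAccessible` flag can be queried for all instances at once. A minimal sketch, assuming a configured AWS CLI; the example region is hypothetical.

```
# Sketch: list RDS instances whose PubliclyAccessible flag is true.
aws rds describe-db-instances --region us-east-1 \
  --query 'DBInstances[?PubliclyAccessible==`true`].DBInstanceIdentifier' --output text
# Remediation for one instance (see the procedure above for downtime caveats):
# aws rds modify-db-instance --region us-east-1 \
#   --db-instance-identifier <db-name> --no-publicly-accessible --apply-immediately
```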
@@ -648,8 +651,8 @@
"Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).",
"RationaleStatement": "Data should be encrypted at rest to reduce the risk of a data breach via direct access to the storage device.",
"ImpactStatement": "",
"RemediationProcedure": "**It is important to note that EFS file system data at rest encryption must be turned on when creating the file system.**\n\nIf an EFS file system has been created without data at rest encryption enabled then you must create another EFS file system with the correct configuration and transfer the data.\n\n**Steps to create an EFS file system with data encrypted at rest:**\n\n**From Console:**\n1. Login to the AWS Management Console and Navigate to `Elastic File System (EFS)` dashboard.\n2. Select `File Systems` from the left navigation panel.\n3. Click `Create File System` button from the dashboard top menu to start the file system setup process.\n4. On the `Configure file system access` configuration page, perform the following actions.\n- Choose the right VPC from the VPC dropdown list.\n- Within Create mount targets section, select the checkboxes for all of the Availability Zones (AZs) within the selected VPC. These will be your mount targets.\n- Click `Next step` to continue.\n\n5. Perform the following on the `Configure optional settings` page.\n- Create `tags` to describe your new file system.\n- Choose `performance mode` based on your requirements.\n- Check `Enable encryption` checkbox and choose `aws/elasticfilesystem` from Select KMS master key dropdown list to enable encryption for the new file system using the default master key provided and managed by AWS KMS.\n- Click `Next step` to continue.\n\n6. Review the file system configuration details on the `review and create` page and then click `Create File System` to create your new AWS EFS file system.\n7. Copy the data from the old unencrypted EFS file system onto the newly create encrypted file system.\n8. Remove the unencrypted file system as soon as your data migration to the newly create encrypted file system is completed.\n9. Change the AWS region from the navigation bar and repeat the entire process for other aws regions.\n\n**From CLI:**\n1. Run describe-file-systems command to describe the configuration information available for the selected (unencrypted) file system (see Audit section to identify the right resource):\n```\naws efs describe-file-systems --region <region> --file-system-id <file-system-id from audit section step 2 output>\n```\n2. The command output should return the requested configuration information.\n3. To provision a new AWS EFS file system, you need to generate a universally unique identifier (UUID) in order to create the token required by the create-file-system command. To create the required token, you can use a randomly generated UUID from \"https://www.uuidgenerator.net\".\n4. Run create-file-system command using the unique token created at the previous step.\n```\naws efs create-file-system --region <region> --creation-token <Token (randomly generated UUID from step 3)> --performance-mode generalPurpose --encrypted\n```\n5. The command output should return the new file system configuration metadata.\n6. Run create-mount-target command using the newly created EFS file system ID returned at the previous step as identifier and the ID of the Availability Zone (AZ) that will represent the mount target:\n```\naws efs create-mount-target --region <region> --file-system-id <file-system-id> --subnet-id <subnet-id>\n```\n7. The command output should return the new mount target metadata.\n8. Now you can mount your file system from an EC2 instance.\n9. Copy the data from the old unencrypted EFS file system onto the newly create encrypted file system.\n10. 
Remove the unencrypted file system as soon as your data migration to the newly create encrypted file system is completed.\n```\naws efs delete-file-system --region <region> --file-system-id <unencrypted-file-system-id>\n```\n11. Change the AWS region by updating the --region and repeat the entire process for other aws regions.",
"AuditProcedure": "**From Console:**\n1. Login to the AWS Management Console and Navigate to `Elastic File System (EFS) dashboard.\n2. Select `File Systems` from the left navigation panel.\n3. Each item on the list has a visible Encrypted field that displays data at rest encryption status.\n4. Validate that this field reads `Encrypted` for all EFS file systems in all AWS regions.\n\n**From CLI:**\n1. Run describe-file-systems command using custom query filters to list the identifiers of all AWS EFS file systems currently available within the selected region:\n```\naws efs describe-file-systems --region <region> --output table --query 'FileSystems[*].FileSystemId'\n```\n2. The command output should return a table with the requested file system IDs.\n3. Run describe-file-systems command using the ID of the file system that you want to examine as identifier and the necessary query filters:\n```\naws efs describe-file-systems --region <region> --file-system-id <file-system-id from step 2 output> --query 'FileSystems[*].Encrypted'\n```\n4. The command output should return the file system encryption status true or false. If the returned value is `false`, the selected AWS EFS file system is not encrypted and if the returned value is `true`, the selected AWS EFS file system is encrypted.",
"RemediationProcedure": "**It is important to note that EFS file system data at rest encryption must be turned on when creating the file system.** If an EFS file system has been created without data at rest encryption enabled then you must create another EFS file system with the correct configuration and transfer the data. **Steps to create an EFS file system with data encrypted at rest:** **From Console:** 1. Login to the AWS Management Console and Navigate to `Elastic File System (EFS)` dashboard. 2. Select `File Systems` from the left navigation panel. 3. Click `Create File System` button from the dashboard top menu to start the file system setup process. 4. On the `Configure file system access` configuration page, perform the following actions. - Choose the right VPC from the VPC dropdown list. - Within Create mount targets section, select the checkboxes for all of the Availability Zones (AZs) within the selected VPC. These will be your mount targets. - Click `Next step` to continue. 5. Perform the following on the `Configure optional settings` page. - Create `tags` to describe your new file system. - Choose `performance mode` based on your requirements. - Check `Enable encryption` checkbox and choose `aws/elasticfilesystem` from Select KMS master key dropdown list to enable encryption for the new file system using the default master key provided and managed by AWS KMS. - Click `Next step` to continue. 6. Review the file system configuration details on the `review and create` page and then click `Create File System` to create your new AWS EFS file system. 7. Copy the data from the old unencrypted EFS file system onto the newly create encrypted file system. 8. Remove the unencrypted file system as soon as your data migration to the newly create encrypted file system is completed. 9. Change the AWS region from the navigation bar and repeat the entire process for other aws regions. **From CLI:** 1. Run describe-file-systems command to describe the configuration information available for the selected (unencrypted) file system (see Audit section to identify the right resource): ``` aws efs describe-file-systems --region <region> --file-system-id <file-system-id from audit section step 2 output> ``` 2. The command output should return the requested configuration information. 3. To provision a new AWS EFS file system, you need to generate a universally unique identifier (UUID) in order to create the token required by the create-file-system command. To create the required token, you can use a randomly generated UUID from \"https://www.uuidgenerator.net\". 4. Run create-file-system command using the unique token created at the previous step. ``` aws efs create-file-system --region <region> --creation-token <Token (randomly generated UUID from step 3)> --performance-mode generalPurpose --encrypted ``` 5. The command output should return the new file system configuration metadata. 6. Run create-mount-target command using the newly created EFS file system ID returned at the previous step as identifier and the ID of the Availability Zone (AZ) that will represent the mount target: ``` aws efs create-mount-target --region <region> --file-system-id <file-system-id> --subnet-id <subnet-id> ``` 7. The command output should return the new mount target metadata. 8. Now you can mount your file system from an EC2 instance. 9. Copy the data from the old unencrypted EFS file system onto the newly create encrypted file system. 10. 
Remove the unencrypted file system as soon as your data migration to the newly create encrypted file system is completed. ``` aws efs delete-file-system --region <region> --file-system-id <unencrypted-file-system-id> ``` 11. Change the AWS region by updating the --region and repeat the entire process for other aws regions.",
"AuditProcedure": "**From Console:** 1. Login to the AWS Management Console and Navigate to `Elastic File System (EFS) dashboard. 2. Select `File Systems` from the left navigation panel. 3. Each item on the list has a visible Encrypted field that displays data at rest encryption status. 4. Validate that this field reads `Encrypted` for all EFS file systems in all AWS regions. **From CLI:** 1. Run describe-file-systems command using custom query filters to list the identifiers of all AWS EFS file systems currently available within the selected region: ``` aws efs describe-file-systems --region <region> --output table --query 'FileSystems[*].FileSystemId' ``` 2. The command output should return a table with the requested file system IDs. 3. Run describe-file-systems command using the ID of the file system that you want to examine as identifier and the necessary query filters: ``` aws efs describe-file-systems --region <region> --file-system-id <file-system-id from step 2 output> --query 'FileSystems[*].Encrypted' ``` 4. The command output should return the file system encryption status true or false. If the returned value is `false`, the selected AWS EFS file system is not encrypted and if the returned value is `true`, the selected AWS EFS file system is encrypted.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/efs/latest/ug/encryption-at-rest.html:https://awscli.amazonaws.com/v2/documentation/api/latest/reference/efs/index.html#efs"
}
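Since encryption at rest is a per-file-system boolean, the CLI audit above collapses to one filtered query. A minimal sketch, assuming a configured AWS CLI; the `<region>` placeholder matches the procedures above.

```
# Sketch: list EFS file systems that are not encrypted at rest in a region.
aws efs describe-file-systems --region <region> \
  --query 'FileSystems[?Encrypted==`false`].FileSystemId' --output text
```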
@@ -667,10 +670,10 @@
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "AWS CloudTrail is a web service that records AWS API calls for your account and delivers log files to you. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail provides a history of AWS API calls for an account, including API calls made via the Management Console, SDKs, command line tools, and higher-level AWS services (such as CloudFormation).",
"RationaleStatement": "The AWS API call history produced by CloudTrail enables security analysis, resource change tracking, and compliance auditing. Additionally, \n\n- ensuring that a multi-regions trail exists will ensure that unexpected activity occurring in otherwise unused regions is detected\n\n- ensuring that a multi-regions trail exists will ensure that `Global Service Logging` is enabled for a trail by default to capture recording of events generated on \nAWS global services\n\n- for a multi-regions trail, ensuring that management events configured for all type of Read/Writes ensures recording of management operations that are performed on all resources in an AWS account",
"ImpactStatement": "S3 lifecycle features can be used to manage the accumulation and management of logs over time. See the following AWS resource for more information on these features:\n\n1. https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html",
"RemediationProcedure": "Perform the following to enable global (Multi-region) CloudTrail logging:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail)\n2. Click on _Trails_ on the left navigation pane\n3. Click `Get Started Now` , if presented\n - Click `Add new trail` \n - Enter a trail name in the `Trail name` box\n - Set the `Apply trail to all regions` option to `Yes` \n - Specify an S3 bucket name in the `S3 bucket` box\n - Click `Create` \n4. If 1 or more trails already exist, select the target trail to enable for global logging\n5. Click the edit icon (pencil) next to `Apply trail to all regions` , Click `Yes` and Click `Save`.\n6. Click the edit icon (pencil) next to `Management Events` click `All` for setting `Read/Write Events` and Click `Save`.\n\n**From Command Line:**\n```\naws cloudtrail create-trail --name <trail_name> --bucket-name <s3_bucket_for_cloudtrail> --is-multi-region-trail \naws cloudtrail update-trail --name <trail_name> --is-multi-region-trail\n```\n\nNote: Creating CloudTrail via CLI without providing any overriding options configures `Management Events` to set `All` type of `Read/Writes` by default.",
"AuditProcedure": "Perform the following to determine if CloudTrail is enabled for all regions:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail)\n2. Click on `Trails` on the left navigation pane\n - You will be presented with a list of trails across all regions\n3. Ensure at least one Trail has `All` specified in the `Region` column\n4. Click on a trail via the link in the _Name_ column\n5. Ensure `Logging` is set to `ON` \n6. Ensure `Apply trail to all regions` is set to `Yes`\n7. In section `Management Events` ensure `Read/Write Events` set to `ALL`\n\n**From Command Line:**\n```\n aws cloudtrail describe-trails\n```\nEnsure `IsMultiRegionTrail` is set to `true` \n```\naws cloudtrail get-trail-status --name <trailname shown in describe-trails>\n```\nEnsure `IsLogging` is set to `true`\n```\naws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>\n```\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`",
"RationaleStatement": "The AWS API call history produced by CloudTrail enables security analysis, resource change tracking, and compliance auditing. Additionally, - ensuring that a multi-regions trail exists will ensure that unexpected activity occurring in otherwise unused regions is detected - ensuring that a multi-regions trail exists will ensure that `Global Service Logging` is enabled for a trail by default to capture recording of events generated on AWS global services - for a multi-regions trail, ensuring that management events configured for all type of Read/Writes ensures recording of management operations that are performed on all resources in an AWS account",
"ImpactStatement": "S3 lifecycle features can be used to manage the accumulation and management of logs over time. See the following AWS resource for more information on these features: 1. https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html",
"RemediationProcedure": "Perform the following to enable global (Multi-region) CloudTrail logging: **From Console:** 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on _Trails_ on the left navigation pane 3. Click `Get Started Now` , if presented - Click `Add new trail` - Enter a trail name in the `Trail name` box - Set the `Apply trail to all regions` option to `Yes` - Specify an S3 bucket name in the `S3 bucket` box - Click `Create` 4. If 1 or more trails already exist, select the target trail to enable for global logging 5. Click the edit icon (pencil) next to `Apply trail to all regions` , Click `Yes` and Click `Save`. 6. Click the edit icon (pencil) next to `Management Events` click `All` for setting `Read/Write Events` and Click `Save`. **From Command Line:** ``` aws cloudtrail create-trail --name <trail_name> --bucket-name <s3_bucket_for_cloudtrail> --is-multi-region-trail aws cloudtrail update-trail --name <trail_name> --is-multi-region-trail ``` Note: Creating CloudTrail via CLI without providing any overriding options configures `Management Events` to set `All` type of `Read/Writes` by default.",
"AuditProcedure": "Perform the following to determine if CloudTrail is enabled for all regions: **From Console:** 1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane - You will be presented with a list of trails across all regions 3. Ensure at least one Trail has `All` specified in the `Region` column 4. Click on a trail via the link in the _Name_ column 5. Ensure `Logging` is set to `ON` 6. Ensure `Apply trail to all regions` is set to `Yes` 7. In section `Management Events` ensure `Read/Write Events` set to `ALL` **From Command Line:** ``` aws cloudtrail describe-trails ``` Ensure `IsMultiRegionTrail` is set to `true` ``` aws cloudtrail get-trail-status --name <trailname shown in describe-trails> ``` Ensure `IsLogging` is set to `true` ``` aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails> ``` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-management-events:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html?icmpid=docs_cloudtrail_console#logging-management-events:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-services.html#cloud-trail-supported-services-data-events"
}
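The audit above can also be scripted. Below is a minimal boto3 sketch (not part of the benchmark text) that mirrors the CLI steps: it looks for at least one multi-region trail that is actively logging and records all management events. It assumes AWS credentials and a default region are already configured.

```
import boto3

cloudtrail = boto3.client("cloudtrail")

compliant = False
for trail in cloudtrail.describe_trails()["trailList"]:
    if not trail.get("IsMultiRegionTrail"):
        continue
    # Same check as `aws cloudtrail get-trail-status` above.
    status = cloudtrail.get_trail_status(Name=trail["TrailARN"])
    if not status.get("IsLogging"):
        continue
    # Same check as `aws cloudtrail get-event-selectors` above.
    selectors = cloudtrail.get_event_selectors(TrailName=trail["TrailARN"])
    for selector in selectors.get("EventSelectors", []):
        if selector.get("IncludeManagementEvents") and selector.get("ReadWriteType") == "All":
            compliant = True

print("Multi-region management-event logging:", "PASS" if compliant else "FAIL")
```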
@@ -690,8 +693,8 @@
"Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.",
"RationaleStatement": "Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity within your S3 Buckets using Amazon CloudWatch Events.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\n1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/`\n2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine.\n3. Click `Properties` tab to see in detail bucket configuration.\n4. Click on the `Object-level` logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link `https://console.aws.amazon.com/cloudtrail/`\n5. Once the Cloudtrail is selected, check the `Write` event checkbox, so that `object-level` logging for Write events is enabled.\n6. Repeat steps 2 to 5 to enable object-level logging of write events for other S3 buckets.\n\n**From Command Line:**\n\n1. To enable `object-level` data events logging for S3 buckets within your AWS account, run `put-event-selectors` command using the name of the trail that you want to reconfigure as identifier:\n```\naws cloudtrail put-event-selectors --region <region-name> --trail-name <trail-name> --event-selectors '[{ \"ReadWriteType\": \"WriteOnly\", \"IncludeManagementEvents\":true, \"DataResources\": [{ \"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::<s3-bucket-name>/\"] }] }]'\n```\n2. The command output will be `object-level` event trail configuration.\n3. If you want to enable it for all buckets at once then change Values parameter to `[\"arn:aws:s3\"]` in command given above.\n4. Repeat step 1 for each s3 bucket to update `object-level` logging of write events.\n5. Change the AWS region by updating the `--region` command parameter and perform the process for other regions.",
"AuditProcedure": "**From Console:**\n\n1. Login to the AWS Management Console and navigate to CloudTrail dashboard at `https://console.aws.amazon.com/cloudtrail/`\n2. In the left panel, click `Trails` and then click on the CloudTrail Name that you want to examine.\n3. Review `General details`\n4. Confirm that `Multi-region trail` is set to `Yes`\n5. Scroll down to `Data events`\n6. Confirm that it reads:\nData events: S3\nBucket Name: All current and future S3 buckets\nRead: Enabled\nWrite: Enabled\n7. Repeat steps 2 to 6 to verify that Multi-region trail and Data events logging of S3 buckets in CloudTrail.\nIf the CloudTrails do not have multi-region and data events configured for S3 refer to the remediation below.\n\n**From Command Line:**\n\n1. Run `list-trails` command to list the names of all Amazon CloudTrail trails currently available in all AWS regions:\n```\naws cloudtrail list-trails\n```\n2. The command output will be a list of all the trail names to include.\n\"TrailARN\": \"arn:aws:cloudtrail:<region>:<account#>:trail/<trailname>\",\n\"Name\": \"<trailname>\",\n\"HomeRegion\": \"<region>\"\n3. Next run 'get-trail- command to determine Multi-region.\n```\naws cloudtrail get-trail --name <trailname> --region <region_name>\n```\n4. The command output should include:\n\"IsMultiRegionTrail\": true,\n5. Next run `get-event-selectors` command using the `Name` of the trail and the `region` returned in step 2 to determine if Data events logging feature is enabled within the selected CloudTrail trail for all S3 buckets:\n```\naws cloudtrail get-event-selectors --region <HomeRegion> --trail-name <trailname> --query EventSelectors[*].DataResources[]\n```\n6. The command output should be an array that contains the configuration of the AWS resource(S3 bucket) defined for the Data events selector.\n\"Type\": \"AWS::S3::Object\",\n \"Values\": [\n \"arn:aws:s3\"\n7. If the `get-event-selectors` command returns an empty array '[]', the Data events are not included in the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded.\n8. Repeat steps 1 to 5 for auditing each CloudTrail to determine if Data events for S3 are covered.\nIf Multi-region is not set to true and the Data events does not show S3 defined as shown refer to the remediation procedure below.",
"RemediationProcedure": "**From Console:** 1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click `Properties` tab to see in detail bucket configuration. 4. Click on the `Object-level` logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link `https://console.aws.amazon.com/cloudtrail/` 5. Once the Cloudtrail is selected, check the `Write` event checkbox, so that `object-level` logging for Write events is enabled. 6. Repeat steps 2 to 5 to enable object-level logging of write events for other S3 buckets. **From Command Line:** 1. To enable `object-level` data events logging for S3 buckets within your AWS account, run `put-event-selectors` command using the name of the trail that you want to reconfigure as identifier: ``` aws cloudtrail put-event-selectors --region <region-name> --trail-name <trail-name> --event-selectors '[{ \"ReadWriteType\": \"WriteOnly\", \"IncludeManagementEvents\":true, \"DataResources\": [{ \"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::<s3-bucket-name>/\"] }] }]' ``` 2. The command output will be `object-level` event trail configuration. 3. If you want to enable it for all buckets at once then change Values parameter to `[\"arn:aws:s3\"]` in command given above. 4. Repeat step 1 for each s3 bucket to update `object-level` logging of write events. 5. Change the AWS region by updating the `--region` command parameter and perform the process for other regions.",
"AuditProcedure": "**From Console:** 1. Login to the AWS Management Console and navigate to CloudTrail dashboard at `https://console.aws.amazon.com/cloudtrail/` 2. In the left panel, click `Trails` and then click on the CloudTrail Name that you want to examine. 3. Review `General details` 4. Confirm that `Multi-region trail` is set to `Yes` 5. Scroll down to `Data events` 6. Confirm that it reads: Data events: S3 Bucket Name: All current and future S3 buckets Read: Enabled Write: Enabled 7. Repeat steps 2 to 6 to verify that Multi-region trail and Data events logging of S3 buckets in CloudTrail. If the CloudTrails do not have multi-region and data events configured for S3 refer to the remediation below. **From Command Line:** 1. Run `list-trails` command to list the names of all Amazon CloudTrail trails currently available in all AWS regions: ``` aws cloudtrail list-trails ``` 2. The command output will be a list of all the trail names to include. \"TrailARN\": \"arn:aws:cloudtrail:<region>:<account#>:trail/<trailname>\", \"Name\": \"<trailname>\", \"HomeRegion\": \"<region>\" 3. Next run 'get-trail- command to determine Multi-region. ``` aws cloudtrail get-trail --name <trailname> --region <region_name> ``` 4. The command output should include: \"IsMultiRegionTrail\": true, 5. Next run `get-event-selectors` command using the `Name` of the trail and the `region` returned in step 2 to determine if Data events logging feature is enabled within the selected CloudTrail trail for all S3 buckets: ``` aws cloudtrail get-event-selectors --region <HomeRegion> --trail-name <trailname> --query EventSelectors[*].DataResources[] ``` 6. The command output should be an array that contains the configuration of the AWS resource(S3 bucket) defined for the Data events selector. \"Type\": \"AWS::S3::Object\", \"Values\": [ \"arn:aws:s3\" 7. If the `get-event-selectors` command returns an empty array '[]', the Data events are not included in the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded. 8. Repeat steps 1 to 5 for auditing each CloudTrail to determine if Data events for S3 are covered. If Multi-region is not set to true and the Data events does not show S3 defined as shown refer to the remediation procedure below.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-cloudtrail-events.html"
}
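For automation, the following boto3 sketch (not part of the benchmark text, assuming configured credentials) reports, per trail in the current region, whether any basic event selector covers S3 object-level write data events:

```
import boto3

cloudtrail = boto3.client("cloudtrail")

for trail in cloudtrail.describe_trails()["trailList"]:
    selectors = cloudtrail.get_event_selectors(TrailName=trail["TrailARN"])
    # WriteOnly or All ReadWriteType on an AWS::S3::Object resource covers writes.
    covers_writes = any(
        resource.get("Type") == "AWS::S3::Object"
        and selector.get("ReadWriteType") in ("WriteOnly", "All")
        for selector in selectors.get("EventSelectors", [])
        for resource in selector.get("DataResources", [])
    )
    print(trail["Name"], "S3 write data events:", "enabled" if covers_writes else "disabled")
```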
@@ -711,8 +714,8 @@
"Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.",
"RationaleStatement": "Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity using Amazon CloudWatch Events.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\n1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/`\n2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine.\n3. Click `Properties` tab to see in detail bucket configuration.\n4. Click on the `Object-level` logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link `https://console.aws.amazon.com/cloudtrail/`\n5. Once the Cloudtrail is selected, check the Read event checkbox, so that `object-level` logging for `Read` events is enabled.\n6. Repeat steps 2 to 5 to enable `object-level` logging of read events for other S3 buckets.\n\n**From Command Line:**\n1. To enable `object-level` data events logging for S3 buckets within your AWS account, run `put-event-selectors` command using the name of the trail that you want to reconfigure as identifier:\n```\naws cloudtrail put-event-selectors --region <region-name> --trail-name <trail-name> --event-selectors '[{ \"ReadWriteType\": \"ReadOnly\", \"IncludeManagementEvents\":true, \"DataResources\": [{ \"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::<s3-bucket-name>/\"] }] }]'\n```\n2. The command output will be `object-level` event trail configuration.\n3. If you want to enable it for all buckets at ones then change Values parameter to `[\"arn:aws:s3\"]` in command given above.\n4. Repeat step 1 for each s3 bucket to update `object-level` logging of read events.\n5. Change the AWS region by updating the `--region` command parameter and perform the process for other regions.",
"AuditProcedure": "**From Console:**\n\n1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/`\n2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine.\n3. Click `Properties` tab to see in detail bucket configuration.\n4. If the current status for `Object-level` logging is set to `Disabled`, then object-level logging of read events for the selected s3 bucket is not set.\n5. If the current status for `Object-level` logging is set to `Enabled`, but the Read event check-box is unchecked, then object-level logging of read events for the selected s3 bucket is not set.\n6. Repeat steps 2 to 5 to verify `object-level` logging for `read` events of your other S3 buckets.\n\n**From Command Line:**\n1. Run `describe-trails` command to list the names of all Amazon CloudTrail trails currently available in the selected AWS region:\n```\naws cloudtrail describe-trails --region <region-name> --output table --query trailList[*].Name\n```\n2. The command output will be table of the requested trail names.\n3. Run `get-event-selectors` command using the name of the trail returned at the previous step and custom query filters to determine if Data events logging feature is enabled within the selected CloudTrail trail configuration for s3 bucket resources:\n```\naws cloudtrail get-event-selectors --region <region-name> --trail-name <trail-name> --query EventSelectors[*].DataResources[]\n```\n4. The command output should be an array that contains the configuration of the AWS resource(S3 bucket) defined for the Data events selector.\n5. If the `get-event-selectors` command returns an empty array, the Data events are not included into the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded.\n6. Repeat steps 1 to 5 for auditing each s3 bucket to identify other trails that are missing the capability to log Data events.\n7. Change the AWS region by updating the `--region` command parameter and perform the audit process for other regions.",
"RemediationProcedure": "**From Console:** 1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click `Properties` tab to see in detail bucket configuration. 4. Click on the `Object-level` logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link `https://console.aws.amazon.com/cloudtrail/` 5. Once the Cloudtrail is selected, check the Read event checkbox, so that `object-level` logging for `Read` events is enabled. 6. Repeat steps 2 to 5 to enable `object-level` logging of read events for other S3 buckets. **From Command Line:** 1. To enable `object-level` data events logging for S3 buckets within your AWS account, run `put-event-selectors` command using the name of the trail that you want to reconfigure as identifier: ``` aws cloudtrail put-event-selectors --region <region-name> --trail-name <trail-name> --event-selectors '[{ \"ReadWriteType\": \"ReadOnly\", \"IncludeManagementEvents\":true, \"DataResources\": [{ \"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::<s3-bucket-name>/\"] }] }]' ``` 2. The command output will be `object-level` event trail configuration. 3. If you want to enable it for all buckets at ones then change Values parameter to `[\"arn:aws:s3\"]` in command given above. 4. Repeat step 1 for each s3 bucket to update `object-level` logging of read events. 5. Change the AWS region by updating the `--region` command parameter and perform the process for other regions.",
"AuditProcedure": "**From Console:** 1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click `Properties` tab to see in detail bucket configuration. 4. If the current status for `Object-level` logging is set to `Disabled`, then object-level logging of read events for the selected s3 bucket is not set. 5. If the current status for `Object-level` logging is set to `Enabled`, but the Read event check-box is unchecked, then object-level logging of read events for the selected s3 bucket is not set. 6. Repeat steps 2 to 5 to verify `object-level` logging for `read` events of your other S3 buckets. **From Command Line:** 1. Run `describe-trails` command to list the names of all Amazon CloudTrail trails currently available in the selected AWS region: ``` aws cloudtrail describe-trails --region <region-name> --output table --query trailList[*].Name ``` 2. The command output will be table of the requested trail names. 3. Run `get-event-selectors` command using the name of the trail returned at the previous step and custom query filters to determine if Data events logging feature is enabled within the selected CloudTrail trail configuration for s3 bucket resources: ``` aws cloudtrail get-event-selectors --region <region-name> --trail-name <trail-name> --query EventSelectors[*].DataResources[] ``` 4. The command output should be an array that contains the configuration of the AWS resource(S3 bucket) defined for the Data events selector. 5. If the `get-event-selectors` command returns an empty array, the Data events are not included into the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded. 6. Repeat steps 1 to 5 for auditing each s3 bucket to identify other trails that are missing the capability to log Data events. 7. Change the AWS region by updating the `--region` command parameter and perform the audit process for other regions.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-cloudtrail-events.html"
}
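The CLI remediation above can also be expressed in boto3. This is a sketch under stated assumptions: `TRAIL_NAME` is a placeholder, and note that `put_event_selectors` replaces the trail's existing basic event selectors, so in real use you would merge with the current configuration first.

```
import boto3

cloudtrail = boto3.client("cloudtrail")

TRAIL_NAME = "<trail-name>"  # placeholder: the trail to reconfigure

# "arn:aws:s3" covers all current and future buckets, as in step 3 above.
# Caution: this call overwrites any existing event selectors on the trail.
cloudtrail.put_event_selectors(
    TrailName=TRAIL_NAME,
    EventSelectors=[
        {
            "ReadWriteType": "ReadOnly",
            "IncludeManagementEvents": True,
            "DataResources": [{"Type": "AWS::S3::Object", "Values": ["arn:aws:s3"]}],
        }
    ],
)
```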
@@ -732,8 +735,8 @@
"Description": "CloudTrail log file validation creates a digitally signed digest file containing a hash of each log that CloudTrail writes to S3. These digest files can be used to determine whether a log file was changed, deleted, or unchanged after CloudTrail delivered the log. It is recommended that file validation be enabled on all CloudTrails.",
"RationaleStatement": "Enabling log file validation will provide additional integrity checking of CloudTrail logs.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to enable log file validation on a given trail:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail)\n2. Click on `Trails` on the left navigation pane\n3. Click on target trail\n4. Within the `General details` section click `edit`\n5. Under the `Advanced settings` section\n6. Check the enable box under `Log file validation` \n7. Click `Save changes` \n\n**From Command Line:**\n```\naws cloudtrail update-trail --name <trail_name> --enable-log-file-validation\n```\nNote that periodic validation of logs using these digests can be performed by running the following command:\n```\naws cloudtrail validate-logs --trail-arn <trail_arn> --start-time <start_time> --end-time <end_time>\n```",
"AuditProcedure": "Perform the following on each trail to determine if log file validation is enabled:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail)\n2. Click on `Trails` on the left navigation pane\n3. For Every Trail:\n- Click on a trail via the link in the _Name_ column\n- Under the `General details` section, ensure `Log file validation` is set to `Enabled` \n\n**From Command Line:**\n```\naws cloudtrail describe-trails\n```\nEnsure `LogFileValidationEnabled` is set to `true` for each trail",
"RemediationProcedure": "Perform the following to enable log file validation on a given trail: **From Console:** 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane 3. Click on target trail 4. Within the `General details` section click `edit` 5. Under the `Advanced settings` section 6. Check the enable box under `Log file validation` 7. Click `Save changes` **From Command Line:** ``` aws cloudtrail update-trail --name <trail_name> --enable-log-file-validation ``` Note that periodic validation of logs using these digests can be performed by running the following command: ``` aws cloudtrail validate-logs --trail-arn <trail_arn> --start-time <start_time> --end-time <end_time> ```",
"AuditProcedure": "Perform the following on each trail to determine if log file validation is enabled: **From Console:** 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane 3. For Every Trail: - Click on a trail via the link in the _Name_ column - Under the `General details` section, ensure `Log file validation` is set to `Enabled` **From Command Line:** ``` aws cloudtrail describe-trails ``` Ensure `LogFileValidationEnabled` is set to `true` for each trail",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-enabling.html"
}
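As a scripted version of the CLI audit above, a minimal boto3 sketch (not part of the benchmark text, assuming configured credentials) that flags any trail without log file validation:

```
import boto3

cloudtrail = boto3.client("cloudtrail")

for trail in cloudtrail.describe_trails()["trailList"]:
    # Same field the describe-trails CLI output exposes.
    enabled = trail.get("LogFileValidationEnabled", False)
    print(trail["Name"], "log file validation:", "enabled" if enabled else "disabled")
```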
@@ -753,8 +756,8 @@
"Description": "CloudTrail logs a record of every API call made in your AWS account. These logs file are stored in an S3 bucket. It is recommended that the bucket policy or access control list (ACL) applied to the S3 bucket that CloudTrail logs to prevent public access to the CloudTrail logs.",
"RationaleStatement": "Allowing public access to CloudTrail log content may aid an adversary in identifying weaknesses in the affected account's use or configuration.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to remove any public access that has been granted to the bucket via an ACL or S3 bucket policy:\n\n1. Go to Amazon S3 console at [https://console.aws.amazon.com/s3/home](https://console.aws.amazon.com/s3/home)\n2. Right-click on the bucket and click Properties\n3. In the `Properties` pane, click the `Permissions` tab.\n4. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted.\n5. Select the row that grants permission to `Everyone` or `Any Authenticated User` \n6. Uncheck all the permissions granted to `Everyone` or `Any Authenticated User` (click `x` to delete the row).\n7. Click `Save` to save the ACL.\n8. If the `Edit bucket policy` button is present, click it.\n9. Remove any `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}.",
"AuditProcedure": "Perform the following to determine if any public access is granted to an S3 bucket via an ACL or S3 bucket policy:\n\n**From Console:**\n\n1. Go to the Amazon CloudTrail console at [https://console.aws.amazon.com/cloudtrail/home](https://console.aws.amazon.com/cloudtrail/home)\n2. In the `API activity history` pane on the left, click `Trails` \n3. In the `Trails` pane, note the bucket names in the `S3 bucket` column\n4. Go to Amazon S3 console at [https://console.aws.amazon.com/s3/home](https://console.aws.amazon.com/s3/home)\n5. For each bucket noted in step 3, right-click on the bucket and click `Properties` \n6. In the `Properties` pane, click the `Permissions` tab.\n7. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted.\n8. Ensure no rows exists that have the `Grantee` set to `Everyone` or the `Grantee` set to `Any Authenticated User.` \n9. If the `Edit bucket policy` button is present, click it to review the bucket policy.\n10. Ensure the policy does not contain a `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}\n\n**From Command Line:**\n\n1. Get the name of the S3 bucket that CloudTrail is logging to:\n```\n aws cloudtrail describe-trails --query 'trailList[*].S3BucketName'\n```\n2. Ensure the `AllUsers` principal is not granted privileges to that `<bucket>` :\n```\n aws s3api get-bucket-acl --bucket <s3_bucket_for_cloudtrail> --query 'Grants[?Grantee.URI== `https://acs.amazonaws.com/groups/global/AllUsers` ]'\n```\n3. Ensure the `AuthenticatedUsers` principal is not granted privileges to that `<bucket>`:\n```\n aws s3api get-bucket-acl --bucket <s3_bucket_for_cloudtrail> --query 'Grants[?Grantee.URI== `https://acs.amazonaws.com/groups/global/Authenticated Users` ]'\n```\n4. Get the S3 Bucket Policy\n```\n aws s3api get-bucket-policy --bucket <s3_bucket_for_cloudtrail> \n```\n5. Ensure the policy does not contain a `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}\n\n**Note:** Principal set to \"\\*\" or {\"AWS\" : \"\\*\"} allows anonymous access.",
"RemediationProcedure": "Perform the following to remove any public access that has been granted to the bucket via an ACL or S3 bucket policy: 1. Go to Amazon S3 console at [https://console.aws.amazon.com/s3/home](https://console.aws.amazon.com/s3/home) 2. Right-click on the bucket and click Properties 3. In the `Properties` pane, click the `Permissions` tab. 4. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted. 5. Select the row that grants permission to `Everyone` or `Any Authenticated User` 6. Uncheck all the permissions granted to `Everyone` or `Any Authenticated User` (click `x` to delete the row). 7. Click `Save` to save the ACL. 8. If the `Edit bucket policy` button is present, click it. 9. Remove any `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}.",
"AuditProcedure": "Perform the following to determine if any public access is granted to an S3 bucket via an ACL or S3 bucket policy: **From Console:** 1. Go to the Amazon CloudTrail console at [https://console.aws.amazon.com/cloudtrail/home](https://console.aws.amazon.com/cloudtrail/home) 2. In the `API activity history` pane on the left, click `Trails` 3. In the `Trails` pane, note the bucket names in the `S3 bucket` column 4. Go to Amazon S3 console at [https://console.aws.amazon.com/s3/home](https://console.aws.amazon.com/s3/home) 5. For each bucket noted in step 3, right-click on the bucket and click `Properties` 6. In the `Properties` pane, click the `Permissions` tab. 7. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted. 8. Ensure no rows exists that have the `Grantee` set to `Everyone` or the `Grantee` set to `Any Authenticated User.` 9. If the `Edit bucket policy` button is present, click it to review the bucket policy. 10. Ensure the policy does not contain a `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"} **From Command Line:** 1. Get the name of the S3 bucket that CloudTrail is logging to: ``` aws cloudtrail describe-trails --query 'trailList[*].S3BucketName' ``` 2. Ensure the `AllUsers` principal is not granted privileges to that `<bucket>` : ``` aws s3api get-bucket-acl --bucket <s3_bucket_for_cloudtrail> --query 'Grants[?Grantee.URI== `https://acs.amazonaws.com/groups/global/AllUsers` ]' ``` 3. Ensure the `AuthenticatedUsers` principal is not granted privileges to that `<bucket>`: ``` aws s3api get-bucket-acl --bucket <s3_bucket_for_cloudtrail> --query 'Grants[?Grantee.URI== `https://acs.amazonaws.com/groups/global/Authenticated Users` ]' ``` 4. Get the S3 Bucket Policy ``` aws s3api get-bucket-policy --bucket <s3_bucket_for_cloudtrail> ``` 5. Ensure the policy does not contain a `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"} **Note:** Principal set to \"\\*\" or {\"AWS\" : \"\\*\"} allows anonymous access.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html"
}
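The two CLI checks above (public ACL grants and a wildcard-principal bucket policy) can be combined in one boto3 sketch; this is illustrative only and assumes configured credentials:

```
import json
import boto3
from botocore.exceptions import ClientError

s3 = boto3.client("s3")

PUBLIC_GROUP_URIS = (
    "http://acs.amazonaws.com/groups/global/AllUsers",
    "http://acs.amazonaws.com/groups/global/AuthenticatedUsers",
)

def bucket_is_public(bucket: str) -> bool:
    # ACL check: any grant to the AllUsers/AuthenticatedUsers groups.
    acl = s3.get_bucket_acl(Bucket=bucket)
    if any(g.get("Grantee", {}).get("URI") in PUBLIC_GROUP_URIS for g in acl["Grants"]):
        return True
    # Policy check: Allow statements with a wildcard principal.
    try:
        policy = json.loads(s3.get_bucket_policy(Bucket=bucket)["Policy"])
    except ClientError:
        return False  # no bucket policy attached
    return any(
        stmt.get("Effect") == "Allow" and stmt.get("Principal") in ("*", {"AWS": "*"})
        for stmt in policy.get("Statement", [])
    )

trails = boto3.client("cloudtrail").describe_trails()["trailList"]
for bucket in {t["S3BucketName"] for t in trails if t.get("S3BucketName")}:
    print(bucket, "PUBLIC" if bucket_is_public(bucket) else "not public")
```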
@@ -771,11 +774,11 @@
"Section": "3. Logging",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "AWS CloudTrail is a web service that records AWS API calls made in a given AWS account. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail uses Amazon S3 for log file storage and delivery, so log files are stored durably. In addition to capturing CloudTrail logs within a specified S3 bucket for long term analysis, realtime analysis can be performed by configuring CloudTrail to send logs to CloudWatch Logs. For a trail that is enabled in all regions in an account, CloudTrail sends log files from all those regions to a CloudWatch Logs log group. It is recommended that CloudTrail logs be sent to CloudWatch Logs.\n\nNote: The intent of this recommendation is to ensure AWS account activity is being captured, monitored, and appropriately alarmed on. CloudWatch Logs is a native way to accomplish this using AWS services but does not preclude the use of an alternate solution.",
"Description": "AWS CloudTrail is a web service that records AWS API calls made in a given AWS account. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail uses Amazon S3 for log file storage and delivery, so log files are stored durably. In addition to capturing CloudTrail logs within a specified S3 bucket for long term analysis, realtime analysis can be performed by configuring CloudTrail to send logs to CloudWatch Logs. For a trail that is enabled in all regions in an account, CloudTrail sends log files from all those regions to a CloudWatch Logs log group. It is recommended that CloudTrail logs be sent to CloudWatch Logs. Note: The intent of this recommendation is to ensure AWS account activity is being captured, monitored, and appropriately alarmed on. CloudWatch Logs is a native way to accomplish this using AWS services but does not preclude the use of an alternate solution.",
"RationaleStatement": "Sending CloudTrail logs to CloudWatch Logs will facilitate real-time and historic activity logging based on user, API, resource, and IP address, and provides opportunity to establish alarms and notifications for anomalous or sensitivity account activity.",
"ImpactStatement": "Note: By default, CloudWatch Logs will store Logs indefinitely unless a specific retention period is defined for the log group. When choosing the number of days to retain, keep in mind the average days it takes an organization to realize they have been breached is 210 days (at the time of this writing). Since additional time is required to research a breach, a minimum 365 day retention policy allows time for detection and research. You may also wish to archive the logs to a cheaper storage service rather than simply deleting them. See the following AWS resource to manage CloudWatch Logs retention periods:\n\n1. https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/SettingLogRetention.html",
"RemediationProcedure": "Perform the following to establish the prescribed state:\n\n**From Console:**\n\n1. Login to the CloudTrail console at `https://console.aws.amazon.com/cloudtrail/`\n2. Select the `Trail` the needs to be updated.\n3. Scroll down to `CloudWatch Logs`\n4. Click `Edit`\n5. Under `CloudWatch Logs` click the box `Enabled`\n6. Under `Log Group` pick new or select an existing log group\n7. Edit the `Log group name` to match the CloudTrail or pick the existing CloudWatch Group.\n8. Under `IAM Role` pick new or select an existing.\n9. Edit the `Role name` to match the CloudTrail or pick the existing IAM Role.\n10. Click `Save changes.\n\n**From Command Line:**\n```\naws cloudtrail update-trail --name <trail_name> --cloudwatch-logs-log-group-arn <cloudtrail_log_group_arn> --cloudwatch-logs-role-arn <cloudtrail_cloudwatchLogs_role_arn>\n```",
"AuditProcedure": "Perform the following to ensure CloudTrail is configured as prescribed:\n\n**From Console:**\n\n1. Login to the CloudTrail console at `https://console.aws.amazon.com/cloudtrail/`\n2. Under `Trails` , click on the CloudTrail you wish to evaluate\n3. Under the `CloudWatch Logs` section.\n4. Ensure a `CloudWatch Logs` log group is configured and listed.\n5. Under `General details` confirm `Last log file delivered` has a recent (~one day old) timestamp.\n\n**From Command Line:**\n\n1. Run the following command to get a listing of existing trails:\n```\n aws cloudtrail describe-trails\n```\n2. Ensure `CloudWatchLogsLogGroupArn` is not empty and note the value of the `Name` property.\n3. Using the noted value of the `Name` property, run the following command:\n```\n aws cloudtrail get-trail-status --name <trail_name>\n```\n4. Ensure the `LatestcloudwatchLogdDeliveryTime` property is set to a recent (~one day old) timestamp.\n\nIf the `CloudWatch Logs` log group is not setup and the delivery time is not recent refer to the remediation below.",
"ImpactStatement": "Note: By default, CloudWatch Logs will store Logs indefinitely unless a specific retention period is defined for the log group. When choosing the number of days to retain, keep in mind the average days it takes an organization to realize they have been breached is 210 days (at the time of this writing). Since additional time is required to research a breach, a minimum 365 day retention policy allows time for detection and research. You may also wish to archive the logs to a cheaper storage service rather than simply deleting them. See the following AWS resource to manage CloudWatch Logs retention periods: 1. https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/SettingLogRetention.html",
"RemediationProcedure": "Perform the following to establish the prescribed state: **From Console:** 1. Login to the CloudTrail console at `https://console.aws.amazon.com/cloudtrail/` 2. Select the `Trail` the needs to be updated. 3. Scroll down to `CloudWatch Logs` 4. Click `Edit` 5. Under `CloudWatch Logs` click the box `Enabled` 6. Under `Log Group` pick new or select an existing log group 7. Edit the `Log group name` to match the CloudTrail or pick the existing CloudWatch Group. 8. Under `IAM Role` pick new or select an existing. 9. Edit the `Role name` to match the CloudTrail or pick the existing IAM Role. 10. Click `Save changes. **From Command Line:** ``` aws cloudtrail update-trail --name <trail_name> --cloudwatch-logs-log-group-arn <cloudtrail_log_group_arn> --cloudwatch-logs-role-arn <cloudtrail_cloudwatchLogs_role_arn> ```",
"AuditProcedure": "Perform the following to ensure CloudTrail is configured as prescribed: **From Console:** 1. Login to the CloudTrail console at `https://console.aws.amazon.com/cloudtrail/` 2. Under `Trails` , click on the CloudTrail you wish to evaluate 3. Under the `CloudWatch Logs` section. 4. Ensure a `CloudWatch Logs` log group is configured and listed. 5. Under `General details` confirm `Last log file delivered` has a recent (~one day old) timestamp. **From Command Line:** 1. Run the following command to get a listing of existing trails: ``` aws cloudtrail describe-trails ``` 2. Ensure `CloudWatchLogsLogGroupArn` is not empty and note the value of the `Name` property. 3. Using the noted value of the `Name` property, run the following command: ``` aws cloudtrail get-trail-status --name <trail_name> ``` 4. Ensure the `LatestcloudwatchLogdDeliveryTime` property is set to a recent (~one day old) timestamp. If the `CloudWatch Logs` log group is not setup and the delivery time is not recent refer to the remediation below.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-user-guide.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/how-cloudtrail-works.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-aws-service-specific-topics.html"
}
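A boto3 sketch of the CLI audit above (not part of the benchmark text, assuming configured credentials); "recent" is interpreted as delivery within the last day, matching the ~one-day guidance:

```
import boto3
from datetime import datetime, timedelta, timezone

cloudtrail = boto3.client("cloudtrail")
cutoff = datetime.now(timezone.utc) - timedelta(days=1)

for trail in cloudtrail.describe_trails()["trailList"]:
    if not trail.get("CloudWatchLogsLogGroupArn"):
        print(trail["Name"], "FAIL: no CloudWatch Logs log group configured")
        continue
    status = cloudtrail.get_trail_status(Name=trail["TrailARN"])
    delivered = status.get("LatestCloudWatchLogsDeliveryTime")
    recent = delivered is not None and delivered >= cutoff
    print(trail["Name"], "PASS" if recent else "FAIL: no recent log delivery")
```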
@@ -795,8 +798,8 @@
"Description": "AWS Config is a web service that performs configuration management of supported AWS resources within your account and delivers log files to you. The recorded information includes the configuration item (AWS resource), relationships between configuration items (AWS resources), any configuration changes between resources. It is recommended AWS Config be enabled in all regions.",
"RationaleStatement": "The AWS configuration item history captured by AWS Config enables security analysis, resource change tracking, and compliance auditing.",
"ImpactStatement": "It is recommended AWS Config be enabled in all regions.",
"RemediationProcedure": "To implement AWS Config configuration:\n\n**From Console:**\n\n1. Select the region you want to focus on in the top right of the console\n2. Click `Services` \n3. Click `Config` \n4. Define which resources you want to record in the selected region\n5. Choose to include global resources (IAM resources)\n6. Specify an S3 bucket in the same account or in another managed AWS account\n7. Create an SNS Topic from the same AWS account or another managed AWS account\n\n**From Command Line:**\n\n1. Ensure there is an appropriate S3 bucket, SNS topic, and IAM role per the [AWS Config Service prerequisites](http://docs.aws.amazon.com/config/latest/developerguide/gs-cli-prereq.html).\n2. Run this command to set up the configuration recorder\n```\naws configservice subscribe --s3-bucket my-config-bucket --sns-topic arn:aws:sns:us-east-1:012345678912:my-config-notice --iam-role arn:aws:iam::012345678912:role/myConfigRole\n```\n3. Run this command to start the configuration recorder:\n```\nstart-configuration-recorder --configuration-recorder-name <value>\n```",
"AuditProcedure": "Process to evaluate AWS Config configuration per region\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the AWS Config console at [https://console.aws.amazon.com/config/](https://console.aws.amazon.com/config/).\n2. On the top right of the console select target Region.\n3. If presented with Setup AWS Config - follow remediation procedure:\n4. On the Resource inventory page, Click on edit (the gear icon). The Set Up AWS Config page appears.\n5. Ensure 1 or both check-boxes under \"All Resources\" is checked.\n - Include global resources related to IAM resources - which needs to be enabled in 1 region only\n6. Ensure the correct S3 bucket has been defined.\n7. Ensure the correct SNS topic has been defined.\n8. Repeat steps 2 to 7 for each region.\n\n**From Command Line:**\n\n1. Run this command to show all AWS Config recorders and their properties:\n```\naws configservice describe-configuration-recorders\n```\n2. Evaluate the output to ensure that there's at least one recorder for which `recordingGroup` object includes `\"allSupported\": true` AND `\"includeGlobalResourceTypes\": true`\n\nNote: There is one more parameter \"ResourceTypes\" in recordingGroup object. We don't need to check the same as whenever we set \"allSupported\": true, AWS enforces resource types to be empty (\"ResourceTypes\":[])\n\nSample Output:\n\n```\n{\n \"ConfigurationRecorders\": [\n {\n \"recordingGroup\": {\n \"allSupported\": true,\n \"resourceTypes\": [],\n \"includeGlobalResourceTypes\": true\n },\n \"roleARN\": \"arn:aws:iam::<AWS_Account_ID>:role/service-role/<config-role-name>\",\n \"name\": \"default\"\n }\n ]\n}\n```\n\n3. Run this command to show the status for all AWS Config recorders:\n```\naws configservice describe-configuration-recorder-status\n```\n4. In the output, find recorders with `name` key matching the recorders that met criteria in step 2. Ensure that at least one of them includes `\"recording\": true` and `\"lastStatus\": \"SUCCESS\"`",
"RemediationProcedure": "To implement AWS Config configuration: **From Console:** 1. Select the region you want to focus on in the top right of the console 2. Click `Services` 3. Click `Config` 4. Define which resources you want to record in the selected region 5. Choose to include global resources (IAM resources) 6. Specify an S3 bucket in the same account or in another managed AWS account 7. Create an SNS Topic from the same AWS account or another managed AWS account **From Command Line:** 1. Ensure there is an appropriate S3 bucket, SNS topic, and IAM role per the [AWS Config Service prerequisites](http://docs.aws.amazon.com/config/latest/developerguide/gs-cli-prereq.html). 2. Run this command to set up the configuration recorder ``` aws configservice subscribe --s3-bucket my-config-bucket --sns-topic arn:aws:sns:us-east-1:012345678912:my-config-notice --iam-role arn:aws:iam::012345678912:role/myConfigRole ``` 3. Run this command to start the configuration recorder: ``` start-configuration-recorder --configuration-recorder-name <value> ```",
"AuditProcedure": "Process to evaluate AWS Config configuration per region **From Console:** 1. Sign in to the AWS Management Console and open the AWS Config console at [https://console.aws.amazon.com/config/](https://console.aws.amazon.com/config/). 2. On the top right of the console select target Region. 3. If presented with Setup AWS Config - follow remediation procedure: 4. On the Resource inventory page, Click on edit (the gear icon). The Set Up AWS Config page appears. 5. Ensure 1 or both check-boxes under \"All Resources\" is checked. - Include global resources related to IAM resources - which needs to be enabled in 1 region only 6. Ensure the correct S3 bucket has been defined. 7. Ensure the correct SNS topic has been defined. 8. Repeat steps 2 to 7 for each region. **From Command Line:** 1. Run this command to show all AWS Config recorders and their properties: ``` aws configservice describe-configuration-recorders ``` 2. Evaluate the output to ensure that there's at least one recorder for which `recordingGroup` object includes `\"allSupported\": true` AND `\"includeGlobalResourceTypes\": true` Note: There is one more parameter \"ResourceTypes\" in recordingGroup object. We don't need to check the same as whenever we set \"allSupported\": true, AWS enforces resource types to be empty (\"ResourceTypes\":[]) Sample Output: ``` { \"ConfigurationRecorders\": [ { \"recordingGroup\": { \"allSupported\": true, \"resourceTypes\": [], \"includeGlobalResourceTypes\": true }, \"roleARN\": \"arn:aws:iam::<AWS_Account_ID>:role/service-role/<config-role-name>\", \"name\": \"default\" } ] } ``` 3. Run this command to show the status for all AWS Config recorders: ``` aws configservice describe-configuration-recorder-status ``` 4. In the output, find recorders with `name` key matching the recorders that met criteria in step 2. Ensure that at least one of them includes `\"recording\": true` and `\"lastStatus\": \"SUCCESS\"`",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/cli/latest/reference/configservice/describe-configuration-recorder-status.html"
}
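Steps 1-4 of the CLI audit translate to a short boto3 sketch (illustrative, assuming configured credentials); it evaluates only the client's region, so iterating over regions is left to the caller:

```
import boto3

config = boto3.client("config")
recorders = config.describe_configuration_recorders()["ConfigurationRecorders"]
status_by_name = {
    s["name"]: s
    for s in config.describe_configuration_recorder_status()["ConfigurationRecordersStatus"]
}

for recorder in recorders:
    group = recorder.get("recordingGroup", {})
    status = status_by_name.get(recorder["name"], {})
    ok = (
        group.get("allSupported")
        and group.get("includeGlobalResourceTypes")
        and status.get("recording")
        and status.get("lastStatus") == "SUCCESS"
    )
    print(recorder["name"], "PASS" if ok else "FAIL")
```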
@@ -816,8 +819,8 @@
"Description": "S3 Bucket Access Logging generates a log that contains access records for each request made to your S3 bucket. An access log record contains details about the request, such as the request type, the resources specified in the request worked, and the time and date the request was processed. It is recommended that bucket access logging be enabled on the CloudTrail S3 bucket.",
"RationaleStatement": "By enabling S3 bucket logging on target S3 buckets, it is possible to capture all events which may affect objects within any target buckets. Configuring logs to be placed in a separate bucket allows access to log information which can be useful in security and incident response workflows.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to enable S3 bucket logging:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3).\n2. Under `All Buckets` click on the target S3 bucket\n3. Click on `Properties` in the top right of the console\n4. Under `Bucket:` <s3\\_bucket\\_for\\_cloudtrail> click on `Logging` \n5. Configure bucket logging\n - Click on the `Enabled` checkbox\n - Select Target Bucket from list\n - Enter a Target Prefix\n6. Click `Save`.\n\n**From Command Line:**\n\n1. Get the name of the S3 bucket that CloudTrail is logging to:\n```\naws cloudtrail describe-trails --region <region-name> --query trailList[*].S3BucketName\n```\n2. Copy and add target bucket name at `<Logging_BucketName>`, Prefix for logfile at `<LogFilePrefix>` and optionally add an email address in the following template and save it as `<FileName.Json>`:\n```\n{\n \"LoggingEnabled\": {\n \"TargetBucket\": \"<Logging_BucketName>\",\n \"TargetPrefix\": \"<LogFilePrefix>\",\n \"TargetGrants\": [\n {\n \"Grantee\": {\n \"Type\": \"AmazonCustomerByEmail\",\n \"EmailAddress\": \"<EmailID>\"\n },\n \"Permission\": \"FULL_CONTROL\"\n }\n ]\n } \n}\n```\n3. Run the `put-bucket-logging` command with bucket name and `<FileName.Json>` as input, for more information refer at [put-bucket-logging](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-bucket-logging.html):\n```\naws s3api put-bucket-logging --bucket <BucketName> --bucket-logging-status file://<FileName.Json>\n```",
"AuditProcedure": "Perform the following ensure the CloudTrail S3 bucket has access logging is enabled:\n\n**From Console:**\n\n1. Go to the Amazon CloudTrail console at [https://console.aws.amazon.com/cloudtrail/home](https://console.aws.amazon.com/cloudtrail/home)\n2. In the API activity history pane on the left, click Trails\n3. In the Trails pane, note the bucket names in the S3 bucket column\n4. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3).\n5. Under `All Buckets` click on a target S3 bucket\n6. Click on `Properties` in the top right of the console\n7. Under `Bucket:` _ `<bucket_name>` _ click on `Logging` \n8. Ensure `Enabled` is checked.\n\n**From Command Line:**\n\n1. Get the name of the S3 bucket that CloudTrail is logging to:\n``` \naws cloudtrail describe-trails --query 'trailList[*].S3BucketName' \n```\n2. Ensure Bucket Logging is enabled:\n```\naws s3api get-bucket-logging --bucket <s3_bucket_for_cloudtrail>\n```\nEnsure command does not returns empty output.\n\nSample Output for a bucket with logging enabled:\n\n```\n{\n \"LoggingEnabled\": {\n \"TargetPrefix\": \"<Prefix_Test>\",\n \"TargetBucket\": \"<Bucket_name_for_Storing_Logs>\"\n }\n}\n```",
"RemediationProcedure": "Perform the following to enable S3 bucket logging: **From Console:** 1. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3). 2. Under `All Buckets` click on the target S3 bucket 3. Click on `Properties` in the top right of the console 4. Under `Bucket:` <s3\\_bucket\\_for\\_cloudtrail> click on `Logging` 5. Configure bucket logging - Click on the `Enabled` checkbox - Select Target Bucket from list - Enter a Target Prefix 6. Click `Save`. **From Command Line:** 1. Get the name of the S3 bucket that CloudTrail is logging to: ``` aws cloudtrail describe-trails --region <region-name> --query trailList[*].S3BucketName ``` 2. Copy and add target bucket name at `<Logging_BucketName>`, Prefix for logfile at `<LogFilePrefix>` and optionally add an email address in the following template and save it as `<FileName.Json>`: ``` { \"LoggingEnabled\": { \"TargetBucket\": \"<Logging_BucketName>\", \"TargetPrefix\": \"<LogFilePrefix>\", \"TargetGrants\": [ { \"Grantee\": { \"Type\": \"AmazonCustomerByEmail\", \"EmailAddress\": \"<EmailID>\" }, \"Permission\": \"FULL_CONTROL\" } ] } } ``` 3. Run the `put-bucket-logging` command with bucket name and `<FileName.Json>` as input, for more information refer at [put-bucket-logging](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-bucket-logging.html): ``` aws s3api put-bucket-logging --bucket <BucketName> --bucket-logging-status file://<FileName.Json> ```",
"AuditProcedure": "Perform the following ensure the CloudTrail S3 bucket has access logging is enabled: **From Console:** 1. Go to the Amazon CloudTrail console at [https://console.aws.amazon.com/cloudtrail/home](https://console.aws.amazon.com/cloudtrail/home) 2. In the API activity history pane on the left, click Trails 3. In the Trails pane, note the bucket names in the S3 bucket column 4. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3). 5. Under `All Buckets` click on a target S3 bucket 6. Click on `Properties` in the top right of the console 7. Under `Bucket:` _ `<bucket_name>` _ click on `Logging` 8. Ensure `Enabled` is checked. **From Command Line:** 1. Get the name of the S3 bucket that CloudTrail is logging to: ``` aws cloudtrail describe-trails --query 'trailList[*].S3BucketName' ``` 2. Ensure Bucket Logging is enabled: ``` aws s3api get-bucket-logging --bucket <s3_bucket_for_cloudtrail> ``` Ensure command does not returns empty output. Sample Output for a bucket with logging enabled: ``` { \"LoggingEnabled\": { \"TargetPrefix\": \"<Prefix_Test>\", \"TargetBucket\": \"<Bucket_name_for_Storing_Logs>\" } } ```",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html"
}
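The same two CLI steps in boto3 form, as a minimal sketch (not part of the benchmark text, assuming configured credentials):

```
import boto3

cloudtrail = boto3.client("cloudtrail")
s3 = boto3.client("s3")

buckets = {
    trail["S3BucketName"]
    for trail in cloudtrail.describe_trails()["trailList"]
    if trail.get("S3BucketName")
}
for bucket in buckets:
    # get-bucket-logging returns no LoggingEnabled key when logging is off.
    logging_enabled = "LoggingEnabled" in s3.get_bucket_logging(Bucket=bucket)
    print(bucket, "access logging:", "enabled" if logging_enabled else "disabled")
```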
@@ -837,9 +840,9 @@
"Description": "AWS CloudTrail is a web service that records AWS API calls for an account and makes those logs available to users and resources in accordance with IAM policies. AWS Key Management Service (KMS) is a managed service that helps create and control the encryption keys used to encrypt account data, and uses Hardware Security Modules (HSMs) to protect the security of encryption keys. CloudTrail logs can be configured to leverage server side encryption (SSE) and KMS customer created master keys (CMK) to further protect CloudTrail logs. It is recommended that CloudTrail be configured to use SSE-KMS.",
"RationaleStatement": "Configuring CloudTrail to use SSE-KMS provides additional confidentiality controls on log data as a given user must have S3 read permission on the corresponding log bucket and must be granted decrypt permission by the CMK policy.",
"ImpactStatement": "Customer created keys incur an additional cost. See https://aws.amazon.com/kms/pricing/ for more information.",
"RemediationProcedure": "Perform the following to configure CloudTrail to use SSE-KMS:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail)\n2. In the left navigation pane, choose `Trails` .\n3. Click on a Trail\n4. Under the `S3` section click on the edit button (pencil icon)\n5. Click `Advanced` \n6. Select an existing CMK from the `KMS key Id` drop-down menu\n - Note: Ensure the CMK is located in the same region as the S3 bucket\n - Note: You will need to apply a KMS Key policy on the selected CMK in order for CloudTrail as a service to encrypt and decrypt log files using the CMK provided. Steps are provided [here](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-kms-key-policy-for-cloudtrail.html) for editing the selected CMK Key policy\n7. Click `Save` \n8. You will see a notification message stating that you need to have decrypt permissions on the specified KMS key to decrypt log files.\n9. Click `Yes` \n\n**From Command Line:**\n```\naws cloudtrail update-trail --name <trail_name> --kms-id <cloudtrail_kms_key>\naws kms put-key-policy --key-id <cloudtrail_kms_key> --policy <cloudtrail_kms_key_policy>\n```",
"AuditProcedure": "Perform the following to determine if CloudTrail is configured to use SSE-KMS:\n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail)\n2. In the left navigation pane, choose `Trails` .\n3. Select a Trail\n4. Under the `S3` section, ensure `Encrypt log files` is set to `Yes` and a KMS key ID is specified in the `KSM Key Id` field.\n\n**From Command Line:**\n\n1. Run the following command:\n```\n aws cloudtrail describe-trails \n```\n2. For each trail listed, SSE-KMS is enabled if the trail has a `KmsKeyId` property defined.",
"AdditionalInformation": "3 statements which need to be added to the CMK policy:\n\n1\\. Enable Cloudtrail to describe CMK properties\n```\n<pre class=\"programlisting\" style=\"font-style: normal;\">{\n \"Sid\": \"Allow CloudTrail access\",\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"cloudtrail.amazonaws.com\"\n },\n \"Action\": \"kms:DescribeKey\",\n \"Resource\": \"*\"\n}\n```\n2\\. Granting encrypt permissions\n```\n<pre class=\"programlisting\" style=\"font-style: normal;\">{\n \"Sid\": \"Allow CloudTrail to encrypt logs\",\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"cloudtrail.amazonaws.com\"\n },\n \"Action\": \"kms:GenerateDataKey*\",\n \"Resource\": \"*\",\n \"Condition\": {\n \"StringLike\": {\n \"kms:EncryptionContext:aws:cloudtrail:arn\": [\n \"arn:aws:cloudtrail:*:aws-account-id:trail/*\"\n ]\n }\n }\n}\n```\n3\\. Granting decrypt permissions\n```\n<pre class=\"programlisting\" style=\"font-style: normal;\">{\n \"Sid\": \"Enable CloudTrail log decrypt permissions\",\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"AWS\": \"arn:aws:iam::aws-account-id:user/username\"\n },\n \"Action\": \"kms:Decrypt\",\n \"Resource\": \"*\",\n \"Condition\": {\n \"Null\": {\n \"kms:EncryptionContext:aws:cloudtrail:arn\": \"false\"\n }\n }\n}\n```",
"RemediationProcedure": "Perform the following to configure CloudTrail to use SSE-KMS: **From Console:** 1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. In the left navigation pane, choose `Trails` . 3. Click on a Trail 4. Under the `S3` section click on the edit button (pencil icon) 5. Click `Advanced` 6. Select an existing CMK from the `KMS key Id` drop-down menu - Note: Ensure the CMK is located in the same region as the S3 bucket - Note: You will need to apply a KMS Key policy on the selected CMK in order for CloudTrail as a service to encrypt and decrypt log files using the CMK provided. Steps are provided [here](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-kms-key-policy-for-cloudtrail.html) for editing the selected CMK Key policy 7. Click `Save` 8. You will see a notification message stating that you need to have decrypt permissions on the specified KMS key to decrypt log files. 9. Click `Yes` **From Command Line:** ``` aws cloudtrail update-trail --name <trail_name> --kms-id <cloudtrail_kms_key> aws kms put-key-policy --key-id <cloudtrail_kms_key> --policy <cloudtrail_kms_key_policy> ```",
"AuditProcedure": "Perform the following to determine if CloudTrail is configured to use SSE-KMS: **From Console:** 1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. In the left navigation pane, choose `Trails` . 3. Select a Trail 4. Under the `S3` section, ensure `Encrypt log files` is set to `Yes` and a KMS key ID is specified in the `KSM Key Id` field. **From Command Line:** 1. Run the following command: ``` aws cloudtrail describe-trails ``` 2. For each trail listed, SSE-KMS is enabled if the trail has a `KmsKeyId` property defined.",
"AdditionalInformation": "3 statements which need to be added to the CMK policy: 1\\. Enable Cloudtrail to describe CMK properties ``` <pre class=\"programlisting\" style=\"font-style: normal;\">{ \"Sid\": \"Allow CloudTrail access\", \"Effect\": \"Allow\", \"Principal\": { \"Service\": \"cloudtrail.amazonaws.com\" }, \"Action\": \"kms:DescribeKey\", \"Resource\": \"*\" } ``` 2\\. Granting encrypt permissions ``` <pre class=\"programlisting\" style=\"font-style: normal;\">{ \"Sid\": \"Allow CloudTrail to encrypt logs\", \"Effect\": \"Allow\", \"Principal\": { \"Service\": \"cloudtrail.amazonaws.com\" }, \"Action\": \"kms:GenerateDataKey*\", \"Resource\": \"*\", \"Condition\": { \"StringLike\": { \"kms:EncryptionContext:aws:cloudtrail:arn\": [ \"arn:aws:cloudtrail:*:aws-account-id:trail/*\" ] } } } ``` 3\\. Granting decrypt permissions ``` <pre class=\"programlisting\" style=\"font-style: normal;\">{ \"Sid\": \"Enable CloudTrail log decrypt permissions\", \"Effect\": \"Allow\", \"Principal\": { \"AWS\": \"arn:aws:iam::aws-account-id:user/username\" }, \"Action\": \"kms:Decrypt\", \"Resource\": \"*\", \"Condition\": { \"Null\": { \"kms:EncryptionContext:aws:cloudtrail:arn\": \"false\" } } } ```",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html:https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html"
}
]
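For teams scripting this audit rather than clicking through the console, the `describe-trails` step maps directly onto boto3. A minimal sketch, assuming boto3 is installed and AWS credentials are configured; it mirrors the audit text above, not Prowler's own check implementation:

```python
import boto3

# Audit step 2: SSE-KMS is enabled on a trail iff it has a KmsKeyId defined.
cloudtrail = boto3.client("cloudtrail")
for trail in cloudtrail.describe_trails()["trailList"]:
    kms_key = trail.get("KmsKeyId")
    print(f"{'PASS' if kms_key else 'FAIL'} {trail['Name']} KmsKeyId={kms_key}")
```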
@@ -856,10 +859,10 @@
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "AWS Key Management Service (KMS) allows customers to rotate the backing key which is key material stored within the KMS which is tied to the key ID of the Customer Created customer master key (CMK). It is the backing key that is used to perform cryptographic operations such as encryption and decryption. Automated key rotation currently retains all prior backing keys so that decryption of encrypted data can take place transparently. It is recommended that CMK key rotation be enabled for symmetric keys. Key rotation can not be enabled for any asymmetric CMK.",
"RationaleStatement": "Rotating encryption keys helps reduce the potential impact of a compromised key as data encrypted with a new key cannot be accessed with a previous key that may have been exposed.\nKeys should be rotated every year, or upon event that would result in the compromise of that key.",
"RationaleStatement": "Rotating encryption keys helps reduce the potential impact of a compromised key as data encrypted with a new key cannot be accessed with a previous key that may have been exposed. Keys should be rotated every year, or upon event that would result in the compromise of that key.",
"ImpactStatement": "Creation, management, and storage of CMKs may require additional time from and administrator.",
"RemediationProcedure": "**From Console:**\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam](https://console.aws.amazon.com/iam).\n2. In the left navigation pane, choose `Customer managed keys` .\n3. Select a customer managed CMK where `Key spec = SYMMETRIC_DEFAULT`\n4. Underneath the \"General configuration\" panel open the tab \"Key rotation\"\n5. Check the \"Automatically rotate this KMS key every year.\" checkbox\n\n**From Command Line:**\n\n1. Run the following command to enable key rotation:\n```\n aws kms enable-key-rotation --key-id <kms_key_id>\n```",
"AuditProcedure": "**From Console:**\n\n1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam](https://console.aws.amazon.com/iam).\n2. In the left navigation pane, choose `Customer managed keys`\n3. Select a customer managed CMK where `Key spec = SYMMETRIC_DEFAULT`\n4. Underneath the `General configuration` panel open the tab `Key rotation`\n5. Ensure that the checkbox `Automatically rotate this KMS key every year.` is activated\n6. Repeat steps 3 - 5 for all customer managed CMKs where \"Key spec = SYMMETRIC_DEFAULT\"\n\n**From Command Line:**\n\n1. Run the following command to get a list of all keys and their associated `KeyIds` \n```\n aws kms list-keys\n```\n2. For each key, note the KeyId and run the following command\n```\ndescribe-key --key-id <kms_key_id>\n```\n3. If the response contains \"KeySpec = SYMMETRIC_DEFAULT\" run the following command\n```\n aws kms get-key-rotation-status --key-id <kms_key_id>\n```\n4. Ensure `KeyRotationEnabled` is set to `true`\n5. Repeat steps 2 - 4 for all remaining CMKs",
"RemediationProcedure": "**From Console:** 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam](https://console.aws.amazon.com/iam). 2. In the left navigation pane, choose `Customer managed keys` . 3. Select a customer managed CMK where `Key spec = SYMMETRIC_DEFAULT` 4. Underneath the \"General configuration\" panel open the tab \"Key rotation\" 5. Check the \"Automatically rotate this KMS key every year.\" checkbox **From Command Line:** 1. Run the following command to enable key rotation: ``` aws kms enable-key-rotation --key-id <kms_key_id> ```",
"AuditProcedure": "**From Console:** 1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam](https://console.aws.amazon.com/iam). 2. In the left navigation pane, choose `Customer managed keys` 3. Select a customer managed CMK where `Key spec = SYMMETRIC_DEFAULT` 4. Underneath the `General configuration` panel open the tab `Key rotation` 5. Ensure that the checkbox `Automatically rotate this KMS key every year.` is activated 6. Repeat steps 3 - 5 for all customer managed CMKs where \"Key spec = SYMMETRIC_DEFAULT\" **From Command Line:** 1. Run the following command to get a list of all keys and their associated `KeyIds` ``` aws kms list-keys ``` 2. For each key, note the KeyId and run the following command ``` describe-key --key-id <kms_key_id> ``` 3. If the response contains \"KeySpec = SYMMETRIC_DEFAULT\" run the following command ``` aws kms get-key-rotation-status --key-id <kms_key_id> ``` 4. Ensure `KeyRotationEnabled` is set to `true` 5. Repeat steps 2 - 4 for all remaining CMKs",
"AdditionalInformation": "",
"References": "https://aws.amazon.com/kms/pricing/:https://csrc.nist.gov/publications/detail/sp/800-57-part-1/rev-5/final"
}
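The CLI audit loop above (list-keys, describe-key, get-key-rotation-status) is tedious to run by hand across many keys. A boto3 sketch of the same loop, assuming boto3 with a recent botocore (which reports `KeySpec` in `KeyMetadata`) and credentials allowed to call kms:ListKeys, kms:DescribeKey, and kms:GetKeyRotationStatus:

```python
import boto3

kms = boto3.client("kms")
for page in kms.get_paginator("list_keys").paginate():
    for key in page["Keys"]:
        meta = kms.describe_key(KeyId=key["KeyId"])["KeyMetadata"]
        # Rotation applies only to symmetric, customer-managed CMKs (audit step 3).
        if meta["KeyManager"] != "CUSTOMER" or meta["KeySpec"] != "SYMMETRIC_DEFAULT":
            continue
        enabled = kms.get_key_rotation_status(KeyId=key["KeyId"])["KeyRotationEnabled"]
        print(f"{'PASS' if enabled else 'FAIL'} {key['KeyId']}")
```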
@@ -878,9 +881,9 @@
"AssessmentStatus": "Automated",
"Description": "VPC Flow Logs is a feature that enables you to capture information about the IP traffic going to and from network interfaces in your VPC. After you've created a flow log, you can view and retrieve its data in Amazon CloudWatch Logs. It is recommended that VPC Flow Logs be enabled for packet \"Rejects\" for VPCs.",
"RationaleStatement": "VPC Flow Logs provide visibility into network traffic that traverses the VPC and can be used to detect anomalous traffic or insight during security workflows.",
"ImpactStatement": "By default, CloudWatch Logs will store Logs indefinitely unless a specific retention period is defined for the log group. When choosing the number of days to retain, keep in mind the average days it takes an organization to realize they have been breached is 210 days (at the time of this writing). Since additional time is required to research a breach, a minimum 365 day retention policy allows time for detection and research. You may also wish to archive the logs to a cheaper storage service rather than simply deleting them. See the following AWS resource to manage CloudWatch Logs retention periods:\n\n1. https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/SettingLogRetention.html",
"RemediationProcedure": "Perform the following to determine if VPC Flow logs is enabled:\n\n**From Console:**\n\n1. Sign into the management console\n2. Select `Services` then `VPC` \n3. In the left navigation pane, select `Your VPCs` \n4. Select a VPC\n5. In the right pane, select the `Flow Logs` tab.\n6. If no Flow Log exists, click `Create Flow Log` \n7. For Filter, select `Reject`\n8. Enter in a `Role` and `Destination Log Group` \n9. Click `Create Log Flow` \n10. Click on `CloudWatch Logs Group` \n\n**Note:** Setting the filter to \"Reject\" will dramatically reduce the logging data accumulation for this recommendation and provide sufficient information for the purposes of breach detection, research and remediation. However, during periods of least privilege security group engineering, setting this the filter to \"All\" can be very helpful in discovering existing traffic flows required for proper operation of an already running environment.\n\n**From Command Line:**\n\n1. Create a policy document and name it as `role_policy_document.json` and paste the following content:\n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"test\",\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"ec2.amazonaws.com\"\n },\n \"Action\": \"sts:AssumeRole\"\n }\n ]\n}\n```\n2. Create another policy document and name it as `iam_policy.json` and paste the following content:\n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\":[\n \"logs:CreateLogGroup\",\n \"logs:CreateLogStream\",\n \"logs:DescribeLogGroups\",\n \"logs:DescribeLogStreams\",\n \"logs:PutLogEvents\",\n \"logs:GetLogEvents\",\n \"logs:FilterLogEvents\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n```\n3. Run the below command to create an IAM role:\n```\naws iam create-role --role-name <aws_support_iam_role> --assume-role-policy-document file://<file-path>role_policy_document.json \n```\n4. Run the below command to create an IAM policy:\n```\naws iam create-policy --policy-name <ami-policy-name> --policy-document file://<file-path>iam-policy.json\n```\n5. Run `attach-group-policy` command using the IAM policy ARN returned at the previous step to attach the policy to the IAM role (if the command succeeds, no output is returned):\n```\naws iam attach-group-policy --policy-arn arn:aws:iam::<aws-account-id>:policy/<iam-policy-name> --group-name <group-name>\n```\n6. Run `describe-vpcs` to get the VpcId available in the selected region:\n```\naws ec2 describe-vpcs --region <region>\n```\n7. The command output should return the VPC Id available in the selected region.\n8. Run `create-flow-logs` to create a flow log for the vpc:\n```\naws ec2 create-flow-logs --resource-type VPC --resource-ids <vpc-id> --traffic-type REJECT --log-group-name <log-group-name> --deliver-logs-permission-arn <iam-role-arn>\n```\n9. Repeat step 8 for other vpcs available in the selected region.\n10. Change the region by updating --region and repeat remediation procedure for other vpcs.",
"AuditProcedure": "Perform the following to determine if VPC Flow logs are enabled:\n\n**From Console:**\n\n1. Sign into the management console\n2. Select `Services` then `VPC` \n3. In the left navigation pane, select `Your VPCs` \n4. Select a VPC\n5. In the right pane, select the `Flow Logs` tab.\n6. Ensure a Log Flow exists that has `Active` in the `Status` column.\n\n**From Command Line:**\n\n1. Run `describe-vpcs` command (OSX/Linux/UNIX) to list the VPC networks available in the current AWS region:\n```\naws ec2 describe-vpcs --region <region> --query Vpcs[].VpcId\n```\n2. The command output returns the `VpcId` available in the selected region.\n3. Run `describe-flow-logs` command (OSX/Linux/UNIX) using the VPC ID to determine if the selected virtual network has the Flow Logs feature enabled:\n```\naws ec2 describe-flow-logs --filter \"Name=resource-id,Values=<vpc-id>\"\n```\n4. If there are no Flow Logs created for the selected VPC, the command output will return an `empty list []`.\n5. Repeat step 3 for other VPCs available in the same region.\n6. Change the region by updating `--region` and repeat steps 1 - 5 for all the VPCs.",
"ImpactStatement": "By default, CloudWatch Logs will store Logs indefinitely unless a specific retention period is defined for the log group. When choosing the number of days to retain, keep in mind the average days it takes an organization to realize they have been breached is 210 days (at the time of this writing). Since additional time is required to research a breach, a minimum 365 day retention policy allows time for detection and research. You may also wish to archive the logs to a cheaper storage service rather than simply deleting them. See the following AWS resource to manage CloudWatch Logs retention periods: 1. https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/SettingLogRetention.html",
"RemediationProcedure": "Perform the following to determine if VPC Flow logs is enabled: **From Console:** 1. Sign into the management console 2. Select `Services` then `VPC` 3. In the left navigation pane, select `Your VPCs` 4. Select a VPC 5. In the right pane, select the `Flow Logs` tab. 6. If no Flow Log exists, click `Create Flow Log` 7. For Filter, select `Reject` 8. Enter in a `Role` and `Destination Log Group` 9. Click `Create Log Flow` 10. Click on `CloudWatch Logs Group` **Note:** Setting the filter to \"Reject\" will dramatically reduce the logging data accumulation for this recommendation and provide sufficient information for the purposes of breach detection, research and remediation. However, during periods of least privilege security group engineering, setting this the filter to \"All\" can be very helpful in discovering existing traffic flows required for proper operation of an already running environment. **From Command Line:** 1. Create a policy document and name it as `role_policy_document.json` and paste the following content: ``` { \"Version\": \"2012-10-17\", \"Statement\": [ { \"Sid\": \"test\", \"Effect\": \"Allow\", \"Principal\": { \"Service\": \"ec2.amazonaws.com\" }, \"Action\": \"sts:AssumeRole\" } ] } ``` 2. Create another policy document and name it as `iam_policy.json` and paste the following content: ``` { \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Action\":[ \"logs:CreateLogGroup\", \"logs:CreateLogStream\", \"logs:DescribeLogGroups\", \"logs:DescribeLogStreams\", \"logs:PutLogEvents\", \"logs:GetLogEvents\", \"logs:FilterLogEvents\" ], \"Resource\": \"*\" } ] } ``` 3. Run the below command to create an IAM role: ``` aws iam create-role --role-name <aws_support_iam_role> --assume-role-policy-document file://<file-path>role_policy_document.json ``` 4. Run the below command to create an IAM policy: ``` aws iam create-policy --policy-name <ami-policy-name> --policy-document file://<file-path>iam-policy.json ``` 5. Run `attach-group-policy` command using the IAM policy ARN returned at the previous step to attach the policy to the IAM role (if the command succeeds, no output is returned): ``` aws iam attach-group-policy --policy-arn arn:aws:iam::<aws-account-id>:policy/<iam-policy-name> --group-name <group-name> ``` 6. Run `describe-vpcs` to get the VpcId available in the selected region: ``` aws ec2 describe-vpcs --region <region> ``` 7. The command output should return the VPC Id available in the selected region. 8. Run `create-flow-logs` to create a flow log for the vpc: ``` aws ec2 create-flow-logs --resource-type VPC --resource-ids <vpc-id> --traffic-type REJECT --log-group-name <log-group-name> --deliver-logs-permission-arn <iam-role-arn> ``` 9. Repeat step 8 for other vpcs available in the selected region. 10. Change the region by updating --region and repeat remediation procedure for other vpcs.",
"AuditProcedure": "Perform the following to determine if VPC Flow logs are enabled: **From Console:** 1. Sign into the management console 2. Select `Services` then `VPC` 3. In the left navigation pane, select `Your VPCs` 4. Select a VPC 5. In the right pane, select the `Flow Logs` tab. 6. Ensure a Log Flow exists that has `Active` in the `Status` column. **From Command Line:** 1. Run `describe-vpcs` command (OSX/Linux/UNIX) to list the VPC networks available in the current AWS region: ``` aws ec2 describe-vpcs --region <region> --query Vpcs[].VpcId ``` 2. The command output returns the `VpcId` available in the selected region. 3. Run `describe-flow-logs` command (OSX/Linux/UNIX) using the VPC ID to determine if the selected virtual network has the Flow Logs feature enabled: ``` aws ec2 describe-flow-logs --filter \"Name=resource-id,Values=<vpc-id>\" ``` 4. If there are no Flow Logs created for the selected VPC, the command output will return an `empty list []`. 5. Repeat step 3 for other VPCs available in the same region. 6. Change the region by updating `--region` and repeat steps 1 - 5 for all the VPCs.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html"
}
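The per-region, per-VPC audit loop translates to a short boto3 sketch. Assumptions: boto3 and credentials are configured, and the caller iterates `region` over the regions in scope, just as the CLI steps do with `--region`:

```python
import boto3

def audit_flow_logs(region: str) -> None:
    # Audit steps 1-5: every VPC in the region needs at least one ACTIVE flow log.
    ec2 = boto3.client("ec2", region_name=region)
    for vpc in ec2.describe_vpcs()["Vpcs"]:
        flow_logs = ec2.describe_flow_logs(
            Filter=[{"Name": "resource-id", "Values": [vpc["VpcId"]]}]
        )["FlowLogs"]
        active = any(fl.get("FlowLogStatus") == "ACTIVE" for fl in flow_logs)
        print(f"{'PASS' if active else 'FAIL'} {region} {vpc['VpcId']}")

audit_flow_logs("us-east-1")  # repeat for each region in scope
```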
@@ -899,10 +902,10 @@
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for unauthorized API calls.",
"RationaleStatement": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
"ImpactStatement": "This alert may be triggered by normal read-only console activities that attempt to opportunistically gather optional information, but gracefully fail if they don't have permissions.\n\nIf an excessive number of alerts are being generated then an organization may wish to consider adding read access to the limited IAM user permissions simply to quiet the alerts.\n\nIn some cases doing this may allow the users to actually view some areas of the system - any additional access given should be reviewed for alignment with the original limited IAM user intent.",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for unauthorized API calls and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name \"cloudtrail_log_group_name\" --filter-name \"<unauthorized_api_calls_metric>\" --metric-transformations metricName=unauthorized_api_calls_metric,metricNamespace=CISBenchmark,metricValue=1 --filter-pattern \"{ ($.errorCode = \"*UnauthorizedOperation\") || ($.errorCode = \"AccessDenied*\") || ($.sourceIPAddress!=\"delivery.logs.amazonaws.com\") || ($.eventName!=\"HeadBucket\") }\"\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n**Note**: Capture the TopicArn displayed when creating the SNS Topic in Step 2.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn from step 2> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name \"unauthorized_api_calls_alarm\" --metric-name \"unauthorized_api_calls_metric\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with \"Name\":` note `<cloudtrail__name>`\n\n- From value associated with \"CloudWatchLogsLogGroupArn\" note <cloudtrail_log_group_name>\n\nExample: for CloudWatchLogsLogGroupArn that looks like arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*, <cloudtrail_log_group_name> would be NewGroup\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <\"Name\" as shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>` that you captured in step 1:\n\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n\n3. Ensure the output from the above command contains the following:\n\n```\n\"filterPattern\": \"{ ($.errorCode = *UnauthorizedOperation) || ($.errorCode = AccessDenied*) || ($.sourceIPAddress!=delivery.logs.amazonaws.com) || ($.eventName!=HeadBucket) }\",\n```\n\n4. Note the \"filterName\" `<unauthorized_api_calls_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<unauthorized_api_calls_metric>` captured in step 4.\n\n```\naws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName == `unauthorized_api_calls_metric`]\"\n```\n\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"ImpactStatement": "This alert may be triggered by normal read-only console activities that attempt to opportunistically gather optional information, but gracefully fail if they don't have permissions. If an excessive number of alerts are being generated then an organization may wish to consider adding read access to the limited IAM user permissions simply to quiet the alerts. In some cases doing this may allow the users to actually view some areas of the system - any additional access given should be reviewed for alignment with the original limited IAM user intent.",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for unauthorized API calls and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name \"cloudtrail_log_group_name\" --filter-name \"<unauthorized_api_calls_metric>\" --metric-transformations metricName=unauthorized_api_calls_metric,metricNamespace=CISBenchmark,metricValue=1 --filter-pattern \"{ ($.errorCode = \"*UnauthorizedOperation\") || ($.errorCode = \"AccessDenied*\") || ($.sourceIPAddress!=\"delivery.logs.amazonaws.com\") || ($.eventName!=\"HeadBucket\") }\" ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. **Note**: Capture the TopicArn displayed when creating the SNS Topic in Step 2. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn from step 2> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name \"unauthorized_api_calls_alarm\" --metric-name \"unauthorized_api_calls_metric\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with \"Name\":` note `<cloudtrail__name>` - From value associated with \"CloudWatchLogsLogGroupArn\" note <cloudtrail_log_group_name> Example: for CloudWatchLogsLogGroupArn that looks like arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*, <cloudtrail_log_group_name> would be NewGroup - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <\"Name\" as shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>` that you captured in step 1: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.errorCode = *UnauthorizedOperation) || ($.errorCode = AccessDenied*) || ($.sourceIPAddress!=delivery.logs.amazonaws.com) || ($.eventName!=HeadBucket) }\", ``` 4. Note the \"filterName\" `<unauthorized_api_calls_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<unauthorized_api_calls_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName == `unauthorized_api_calls_metric`]\" ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://aws.amazon.com/sns/:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
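Steps 2-7 of this audit chain three services together (metric filter, alarm, live SNS subscription), and the same skeleton applies to every monitoring check that follows. A boto3 sketch, assuming boto3 and credentials are configured; `LOG_GROUP` is a placeholder for the log group found in step 1, and the substring match on the filter pattern is deliberately coarse:

```python
import boto3

LOG_GROUP = "<cloudtrail_log_group_name>"  # placeholder from audit step 1
PATTERN_HINT = "UnauthorizedOperation"     # coarse marker of the prescribed filterPattern

logs, cloudwatch, sns = (boto3.client(s) for s in ("logs", "cloudwatch", "sns"))

# Steps 2-4: locate the metric filter and its metric name.
filters = logs.describe_metric_filters(logGroupName=LOG_GROUP)["metricFilters"]
matches = [f for f in filters if PATTERN_HINT in (f.get("filterPattern") or "")]
assert matches, "FAIL: no matching metric filter on the CloudTrail log group"
metric_name = matches[0]["metricTransformations"][0]["metricName"]

# Steps 5-6: find an alarm on that metric and note its SNS actions.
alarms = [a for a in cloudwatch.describe_alarms()["MetricAlarms"]
          if a["MetricName"] == metric_name]
assert alarms, f"FAIL: no CloudWatch alarm on metric {metric_name}"

# Step 7: each topic needs at least one confirmed (non-pending) subscription.
for topic_arn in alarms[0]["AlarmActions"]:
    subs = sns.list_subscriptions_by_topic(TopicArn=topic_arn)["Subscriptions"]
    live = [s for s in subs if s["SubscriptionArn"].startswith("arn:aws:sns:")]
    print(f"{'PASS' if live else 'FAIL'} {topic_arn}")
```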
@@ -921,9 +924,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Security Groups are a stateful packet filter that controls ingress and egress traffic within a VPC. It is recommended that a metric filter and alarm be established for detecting changes to Security Groups.",
"RationaleStatement": "Monitoring changes to security group will help ensure that resources and services are not unintentionally exposed.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for security groups changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name \"<cloudtrail_log_group_name>\" --filter-name \"<security_group_changes_metric>\" --metric-transformations metricName= \"<security_group_changes_metric>\" ,metricNamespace=\"CISBenchmark\",metricValue=1 --filter-pattern \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\"\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name \"<sns_topic_name>\"\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn \"<sns_topic_arn>\" --protocol <protocol_for_sns> --notification-endpoint \"<sns_subscription_endpoints>\"\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name \"<security_group_changes_alarm>\" --metric-name \"<security_group_changes_metric>\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions \"<sns_topic_arn>\"\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\"\n```\n4. Note the `<security_group_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<security_group_changes_metric>` captured in step 4.\n```\naws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName== '<security_group_changes_metric>']\"\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for security groups changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name \"<cloudtrail_log_group_name>\" --filter-name \"<security_group_changes_metric>\" --metric-transformations metricName= \"<security_group_changes_metric>\" ,metricNamespace=\"CISBenchmark\",metricValue=1 --filter-pattern \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\" ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name \"<sns_topic_name>\" ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn \"<sns_topic_arn>\" --protocol <protocol_for_sns> --notification-endpoint \"<sns_subscription_endpoints>\" ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name \"<security_group_changes_alarm>\" --metric-name \"<security_group_changes_metric>\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions \"<sns_topic_arn>\" ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\" ``` 4. Note the `<security_group_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<security_group_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName== '<security_group_changes_metric>']\" ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
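On the remediation side, the four CLI steps above are sensitive to shell quoting (the filter pattern contains `$`, `|`, and quotes); a boto3 sketch of the same wiring avoids that entirely. Assumptions: boto3 and credentials are configured, and the `<...>` placeholders must be replaced with real values before running:

```python
import boto3

LOG_GROUP = "<cloudtrail_log_group_name>"   # placeholders, as in the CLI steps
METRIC = "security_group_changes_metric"
PATTERN = (
    "{ ($.eventName = AuthorizeSecurityGroupIngress) || "
    "($.eventName = AuthorizeSecurityGroupEgress) || "
    "($.eventName = RevokeSecurityGroupIngress) || "
    "($.eventName = RevokeSecurityGroupEgress) || "
    "($.eventName = CreateSecurityGroup) || "
    "($.eventName = DeleteSecurityGroup) }"
)

logs = boto3.client("logs")
sns = boto3.client("sns")
cloudwatch = boto3.client("cloudwatch")

# Step 1: metric filter on the CloudTrail log group.
logs.put_metric_filter(
    logGroupName=LOG_GROUP,
    filterName=METRIC,
    filterPattern=PATTERN,
    metricTransformations=[{
        "metricName": METRIC,
        "metricNamespace": "CISBenchmark",
        "metricValue": "1",
    }],
)

# Steps 2-3: SNS topic and subscription (reusable across all monitoring alarms).
topic_arn = sns.create_topic(Name="<sns_topic_name>")["TopicArn"]
sns.subscribe(TopicArn=topic_arn, Protocol="email", Endpoint="<sns_subscription_endpoint>")

# Step 4: alarm bound to the metric filter and the topic.
cloudwatch.put_metric_alarm(
    AlarmName="security_group_changes_alarm",
    MetricName=METRIC,
    Namespace="CISBenchmark",
    Statistic="Sum",
    Period=300,
    Threshold=1,
    ComparisonOperator="GreaterThanOrEqualToThreshold",
    EvaluationPeriods=1,
    AlarmActions=[topic_arn],
)
```

As with the CLI, the topic and subscription can be created once and reused for every monitoring alarm in this group of checks.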
@@ -942,9 +945,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. NACLs are used as a stateless packet filter to control ingress and egress traffic for subnets within a VPC. It is recommended that a metric filter and alarm be established for changes made to NACLs.",
"RationaleStatement": "Monitoring changes to NACLs will help ensure that AWS resources and services are not unintentionally exposed.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for NACL changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<nacl_changes_metric>` --metric-transformations metricName= `<nacl_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<nacl_changes_alarm>` --metric-name `<nacl_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }\"\n```\n4. Note the `<nacl_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<nacl_changes_metric>` captured in step 4.\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<nacl_changes_metric>`]'\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for NACL changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<nacl_changes_metric>` --metric-transformations metricName= `<nacl_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<nacl_changes_alarm>` --metric-name `<nacl_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }\" ``` 4. Note the `<nacl_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<nacl_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<nacl_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
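One practical wrinkle in audit step 3 of these monitoring checks: CloudWatch Logs stores the `filterPattern` with its own spacing, so a byte-for-byte comparison against the prescribed pattern can fail spuriously. A small helper (plain Python, no AWS calls; the NACL pattern below is copied from this check) that compares whitespace-normalized patterns instead:

```python
import re

def normalize(pattern: str) -> str:
    # Collapse runs of whitespace so logically identical patterns compare equal.
    return re.sub(r"\s+", " ", pattern).strip()

EXPECTED = normalize(
    "{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || "
    "($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || "
    "($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }"
)

def filter_matches(stored_pattern: str) -> bool:
    return normalize(stored_pattern) == EXPECTED
```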
@@ -963,9 +966,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Network gateways are required to send/receive traffic to a destination outside of a VPC. It is recommended that a metric filter and alarm be established for changes to network gateways.",
"RationaleStatement": "Monitoring changes to network gateways will help ensure that all ingress/egress traffic traverses the VPC border via a controlled path.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for network gateways changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<network_gw_changes_metric>` --metric-transformations metricName= `<network_gw_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<network_gw_changes_alarm>` --metric-name `<network_gw_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }\"\n```\n4. Note the `<network_gw_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<network_gw_changes_metric>` captured in step 4.\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<network_gw_changes_metric>`]'\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for network gateways changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<network_gw_changes_metric>` --metric-transformations metricName= `<network_gw_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<network_gw_changes_alarm>` --metric-name `<network_gw_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }\" ``` 4. Note the `<network_gw_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<network_gw_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<network_gw_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
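Editor's note: audit step 1 above is identical for every metric-filter check in this section. As a companion, here is a minimal boto3 sketch of that step — not part of the benchmark text; it assumes configured AWS credentials, the standard log-group ARN layout, and classic event selectors (trails configured with advanced event selectors would need extra handling). It lists multi-region trails, keeps those that are logging and capture all management events, and extracts `<cloudtrail_log_group_name>`:

```python
# Audit step 1 sketch: locate <cloudtrail_log_group_name> for an active
# multi-region CloudTrail that captures all management events.
import boto3

cloudtrail = boto3.client("cloudtrail")

for trail in cloudtrail.describe_trails()["trailList"]:
    if not trail.get("IsMultiRegionTrail"):
        continue
    if not cloudtrail.get_trail_status(Name=trail["TrailARN"])["IsLogging"]:
        continue
    selectors = cloudtrail.get_event_selectors(TrailName=trail["TrailARN"])
    if not any(
        s.get("IncludeManagementEvents") and s.get("ReadWriteType") == "All"
        for s in selectors.get("EventSelectors", [])
    ):
        continue
    arn = trail.get("CloudWatchLogsLogGroupArn", "")
    if ":log-group:" in arn:
        # arn:aws:logs:<region>:<account>:log-group:<name>:* -> <name>
        print(trail["Name"], "->", arn.split(":log-group:")[1].split(":")[0])
```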
@@ -984,9 +987,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Routing tables are used to route network traffic between subnets and to network gateways. It is recommended that a metric filter and alarm be established for changes to route tables.",
"RationaleStatement": "Monitoring changes to route tables will help ensure that all VPC traffic flows through an expected path.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for route table changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<route_table_changes_metric>` --metric-transformations metricName= `<route_table_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<route_table_changes_alarm>` --metric-name `<route_table_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n\n3. Ensure the output from the above command contains the following:\n\n```\n\"filterPattern\": \"{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }\"\n```\n\n4. Note the `<route_table_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<route_table_changes_metric>` captured in step 4.\n\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<route_table_changes_metric>`]'\n```\n\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for route table changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<route_table_changes_metric>` --metric-transformations metricName= `<route_table_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<route_table_changes_alarm>` --metric-name `<route_table_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }\" ``` 4. Note the `<route_table_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<route_table_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<route_table_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
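Editor's note: audit steps 2-4 above ask you to eyeball the `filterPattern`; a boto3 sketch of the same lookup follows. Matching on event names rather than the verbatim pattern string is a deliberate simplification (the benchmark expects the exact pattern), and the log group placeholder comes from step 1:

```python
# Audit steps 2-4 sketch: find a metric filter on the trail's log group
# that covers every route-table event, and note its metric name.
import boto3

logs = boto3.client("logs")
log_group_name = "<cloudtrail_log_group_name>"  # from audit step 1

required_events = {
    "CreateRoute", "CreateRouteTable", "ReplaceRoute",
    "ReplaceRouteTableAssociation", "DeleteRouteTable",
    "DeleteRoute", "DisassociateRouteTable",
}

filters = logs.describe_metric_filters(logGroupName=log_group_name)["metricFilters"]
for f in filters:
    # Simplification: substring checks instead of exact-pattern comparison.
    if all(event in f.get("filterPattern", "") for event in required_events):
        for t in f["metricTransformations"]:
            print("route_table_changes_metric:", t["metricName"])
```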
@@ -1005,9 +1008,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is possible to have more than 1 VPC within an account, in addition it is also possible to create a peer connection between 2 VPCs enabling network traffic to route between VPCs. It is recommended that a metric filter and alarm be established for changes made to VPCs.",
"RationaleStatement": "Monitoring changes to VPC will help ensure VPC traffic flow is not getting impacted.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for VPC changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<vpc_changes_metric>` --metric-transformations metricName= `<vpc_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<vpc_changes_alarm>` --metric-name `<vpc_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n\n3. Ensure the output from the above command contains the following:\n\n```\n\"filterPattern\": \"{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }\"\n```\n\n4. Note the `<vpc_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<vpc_changes_metric>` captured in step 4.\n\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<vpc_changes_metric>`]'\n```\n\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for VPC changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<vpc_changes_metric>` --metric-transformations metricName= `<vpc_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<vpc_changes_alarm>` --metric-name `<vpc_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }\" ``` 4. Note the `<vpc_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<vpc_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<vpc_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
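Editor's note: for audit steps 5-6 above, the CLI `--query` expression works but is easy to mistype; a boto3 equivalent is sketched below. It assumes the `CISBenchmark` namespace suggested in the remediation text — if the filter was created under a different namespace, pass that instead:

```python
# Audit steps 5-6 sketch: find alarms on the noted metric and pull the SNS
# topic ARNs out of their alarm actions.
import boto3

cloudwatch = boto3.client("cloudwatch")

alarms = cloudwatch.describe_alarms_for_metric(
    MetricName="<vpc_changes_metric>",  # noted in audit step 4
    Namespace="CISBenchmark",           # namespace used when the filter was created
)["MetricAlarms"]

for alarm in alarms:
    topics = [a for a in alarm["AlarmActions"] if a.startswith("arn:aws:sns:")]
    print(alarm["AlarmName"], "->", topics or "no SNS actions")
```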
@@ -1026,8 +1029,8 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for AWS Organizations changes made in the master AWS Account.",
"RationaleStatement": "Monitoring AWS Organizations changes can help you prevent any unwanted, accidental or intentional modifications that may lead to unauthorized access or other security breaches. This monitoring technique helps you to ensure that any unexpected changes performed within your AWS Organizations can be investigated and any unwanted changes can be rolled back.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for AWS Organizations changes and the `<cloudtrail_log_group_name>` taken from audit step 1:\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<organizations_changes>` --metric-transformations metricName= `<organizations_changes>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }'\n```\n**Note:** You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify:\n```\naws sns create-topic --name <sns_topic_name>\n```\n**Note:** you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2:\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n**Note:** you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2:\n```\naws cloudwatch put-metric-alarm --alarm-name `<organizations_changes>` --metric-name `<organizations_changes>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "1. Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n- Identify the log group name configured for use with active multi-region CloudTrail:\n- List all CloudTrails: \n```\naws cloudtrail describe-trails\n```\n- Identify Multi region Cloudtrails, Trails with `\"IsMultiRegionTrail\"` set to true\n- From value associated with CloudWatchLogsLogGroupArn note <cloudtrail_log_group_name>\n **Example:** for CloudWatchLogsLogGroupArn that looks like arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*, <cloudtrail_log_group_name> would be NewGroup\n\n- Ensure Identified Multi region CloudTrail is active:\n```\naws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>\n```\nEnsure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events:\n```\naws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>\n```\n- Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to true and `ReadWriteType` set to `All`.\n\n2. Get a list of all associated metric filters for this <cloudtrail_log_group_name>:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }\"\n```\n4. Note the `<organizations_changes>` value associated with the filterPattern found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<organizations_changes>` captured in step 4:\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<organizations_changes>`]'\n```\n6. Note the AlarmActions value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic:\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\nExample of valid \"SubscriptionArn\": \n```\n\"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for AWS Organizations changes and the `<cloudtrail_log_group_name>` taken from audit step 1: ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<organizations_changes>` --metric-transformations metricName= `<organizations_changes>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }' ``` **Note:** You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify: ``` aws sns create-topic --name <sns_topic_name> ``` **Note:** you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2: ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note:** you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2: ``` aws cloudwatch put-metric-alarm --alarm-name `<organizations_changes>` --metric-name `<organizations_changes>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "1. Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: - Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: ``` aws cloudtrail describe-trails ``` - Identify Multi region Cloudtrails, Trails with `\"IsMultiRegionTrail\"` set to true - From value associated with CloudWatchLogsLogGroupArn note <cloudtrail_log_group_name> **Example:** for CloudWatchLogsLogGroupArn that looks like arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*, <cloudtrail_log_group_name> would be NewGroup - Ensure Identified Multi region CloudTrail is active: ``` aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail> ``` Ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events: ``` aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails> ``` - Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to true and `ReadWriteType` set to `All`. 2. Get a list of all associated metric filters for this <cloudtrail_log_group_name>: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }\" ``` 4. Note the `<organizations_changes>` value associated with the filterPattern found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<organizations_changes>` captured in step 4: ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<organizations_changes>`]' ``` 6. Note the AlarmActions value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic: ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. Example of valid \"SubscriptionArn\": ``` \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_security_incident-response.html"
}
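Editor's note: audit step 7 above only informally distinguishes confirmed from pending subscriptions. A small boto3 sketch (topic ARN placeholder taken from the alarm's `AlarmActions`) makes the test explicit, since pending subscriptions report the literal string `PendingConfirmation` instead of an ARN:

```python
# Audit step 7 sketch: count confirmed subscribers on the alarm's SNS topic.
import boto3

sns = boto3.client("sns")
topic_arn = "<sns_topic_arn>"  # from the AlarmActions value in step 6

subs = sns.list_subscriptions_by_topic(TopicArn=topic_arn)["Subscriptions"]
confirmed = [s for s in subs if s["SubscriptionArn"].startswith("arn:aws:sns:")]
print(f"{len(confirmed)} active subscriber(s) on {topic_arn}")
```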
@@ -1047,8 +1050,8 @@
"Description": "Security Hub collects security data from across AWS accounts, services, and supported third-party partner products and helps you analyze your security trends and identify the highest priority security issues. When you enable Security Hub, it begins to consume, aggregate, organize, and prioritize findings from AWS services that you have enabled, such as Amazon GuardDuty, Amazon Inspector, and Amazon Macie. You can also enable integrations with AWS partner security products.",
"RationaleStatement": "AWS Security Hub provides you with a comprehensive view of your security state in AWS and helps you check your environment against security industry standards and best practices - enabling you to quickly assess the security posture across your AWS accounts.",
"ImpactStatement": "It is recommended AWS Security Hub be enabled in all regions. AWS Security Hub requires AWS Config to be enabled.",
"RemediationProcedure": "To grant the permissions required to enable Security Hub, attach the Security Hub managed policy AWSSecurityHubFullAccess to an IAM user, group, or role.\n\nEnabling Security Hub\n\n**From Console:**\n\n1. Use the credentials of the IAM identity to sign in to the Security Hub console.\n2. When you open the Security Hub console for the first time, choose Enable AWS Security Hub.\n3. On the welcome page, Security standards list the security standards that Security Hub supports.\n4. Choose Enable Security Hub.\n\n**From Command Line:**\n\n1. Run the enable-security-hub command. To enable the default standards, include `--enable-default-standards`.\n```\naws securityhub enable-security-hub --enable-default-standards\n```\n\n2. To enable the security hub without the default standards, include `--no-enable-default-standards`.\n```\naws securityhub enable-security-hub --no-enable-default-standards\n```",
"AuditProcedure": "The process to evaluate AWS Security Hub configuration per region \n\n**From Console:**\n\n1. Sign in to the AWS Management Console and open the AWS Security Hub console at https://console.aws.amazon.com/securityhub/.\n2. On the top right of the console, select the target Region.\n3. If presented with the Security Hub > Summary page then Security Hub is set-up for the selected region.\n4. If presented with Setup Security Hub or Get Started With Security Hub - follow the online instructions.\n5. Repeat steps 2 to 4 for each region.",
"RemediationProcedure": "To grant the permissions required to enable Security Hub, attach the Security Hub managed policy AWSSecurityHubFullAccess to an IAM user, group, or role. Enabling Security Hub **From Console:** 1. Use the credentials of the IAM identity to sign in to the Security Hub console. 2. When you open the Security Hub console for the first time, choose Enable AWS Security Hub. 3. On the welcome page, Security standards list the security standards that Security Hub supports. 4. Choose Enable Security Hub. **From Command Line:** 1. Run the enable-security-hub command. To enable the default standards, include `--enable-default-standards`. ``` aws securityhub enable-security-hub --enable-default-standards ``` 2. To enable the security hub without the default standards, include `--no-enable-default-standards`. ``` aws securityhub enable-security-hub --no-enable-default-standards ```",
"AuditProcedure": "The process to evaluate AWS Security Hub configuration per region **From Console:** 1. Sign in to the AWS Management Console and open the AWS Security Hub console at https://console.aws.amazon.com/securityhub/. 2. On the top right of the console, select the target Region. 3. If presented with the Security Hub > Summary page then Security Hub is set-up for the selected region. 4. If presented with Setup Security Hub or Get Started With Security Hub - follow the online instructions. 5. Repeat steps 2 to 4 for each region.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-get-started.html:https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-enable.html#securityhub-enable-api:https://awscli.amazonaws.com/v2/documentation/api/latest/reference/securityhub/enable-security-hub.html"
}
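Editor's note: the console walk-through above gets tedious across many regions. A boto3 sketch of the same per-region test follows — an assumption-laden convenience, not the benchmark's prescribed method: it iterates boto3's known Security Hub regions and treats `describe_hub()` failing with `InvalidAccessException` as "not enabled" (opt-in regions or missing permissions can surface other errors):

```python
# Per-region Security Hub audit sketch: describe_hub() succeeds where the
# service is enabled and raises InvalidAccessException where it is not.
import boto3

session = boto3.session.Session()

for region in session.get_available_regions("securityhub"):
    client = session.client("securityhub", region_name=region)
    try:
        client.describe_hub()
        print(f"{region}: Security Hub enabled")
    except client.exceptions.InvalidAccessException:
        print(f"{region}: Security Hub NOT enabled")
```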
@@ -1068,9 +1071,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for console logins that are not protected by multi-factor authentication (MFA).",
"RationaleStatement": "Monitoring for single-factor console logins will increase visibility into accounts that are not protected by MFA.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for AWS Management Console sign-in without MFA and the `<cloudtrail_log_group_name>` taken from audit step 1.\n\nUse Command: \n\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<no_mfa_console_signin_metric>` --metric-transformations metricName= `<no_mfa_console_signin_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }'\n```\n\nOr (To reduce false positives incase Single Sign-On (SSO) is used in organization):\n\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<no_mfa_console_signin_metric>` --metric-transformations metricName= `<no_mfa_console_signin_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<no_mfa_console_signin_alarm>` --metric-name `<no_mfa_console_signin_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all `CloudTrails`:\n\n```\naws cloudtrail describe-trails\n```\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region `CloudTrail` is active\n\n```\naws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>\n```\n\nEnsure in the output that `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region 'Cloudtrail' captures all Management Events\n\n```\naws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>\n```\n\nEnsure in the output there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }\"\n```\n\nOr (To reduce false positives incase Single Sign-On (SSO) is used in organization):\n\n```\n\"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }\"\n```\n\n4. Note the `<no_mfa_console_signin_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<no_mfa_console_signin_metric>` captured in step 4.\n\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<no_mfa_console_signin_metric>`]'\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored\n-Filter pattern set to `{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\"}` reduces false alarms raised when user logs in via SSO account.",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for AWS Management Console sign-in without MFA and the `<cloudtrail_log_group_name>` taken from audit step 1. Use Command: ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<no_mfa_console_signin_metric>` --metric-transformations metricName= `<no_mfa_console_signin_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }' ``` Or (To reduce false positives incase Single Sign-On (SSO) is used in organization): ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<no_mfa_console_signin_metric>` --metric-transformations metricName= `<no_mfa_console_signin_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<no_mfa_console_signin_alarm>` --metric-name `<no_mfa_console_signin_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all `CloudTrails`: ``` aws cloudtrail describe-trails ``` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region `CloudTrail` is active ``` aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail> ``` Ensure in the output that `IsLogging` is set to `TRUE` - Ensure identified Multi-region 'Cloudtrail' captures all Management Events ``` aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails> ``` Ensure in the output there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }\" ``` Or (To reduce false positives incase Single Sign-On (SSO) is used in organization): ``` \"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }\" ``` 4. Note the `<no_mfa_console_signin_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<no_mfa_console_signin_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<no_mfa_console_signin_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored -Filter pattern set to `{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\"}` reduces false alarms raised when user logs in via SSO account.",
"References": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/viewing_metrics_with_cloudwatch.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
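Editor's note: remediation step 1 above is the one command in this section whose filter pattern is easy to mangle when quoting in a shell. A boto3 sketch of the same call follows — the log group placeholder comes from audit step 1, the filter/metric names are illustrative only, and the pattern is copied from the SSO-aware variant in the remediation text:

```python
# Remediation step 1 sketch: create the SSO-aware sign-in-without-MFA
# metric filter via the CloudWatch Logs API instead of the CLI.
import boto3

logs = boto3.client("logs")

pattern = (
    '{ ($.eventName = "ConsoleLogin") && ($.additionalEventData.MFAUsed != "Yes") '
    '&& ($.userIdentity.type = "IAMUser") '
    '&& ($.responseElements.ConsoleLogin = "Success") }'
)

logs.put_metric_filter(
    logGroupName="<cloudtrail_log_group_name>",
    filterName="no_mfa_console_signin_metric",
    filterPattern=pattern,
    metricTransformations=[{
        "metricName": "no_mfa_console_signin_metric",
        "metricNamespace": "CISBenchmark",
        "metricValue": "1",  # the API expects a string here
    }],
)
```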
@@ -1089,9 +1092,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for 'root' login attempts.",
"RationaleStatement": "Monitoring for 'root' account logins will provide visibility into the use of a fully privileged account and an opportunity to reduce the use of it.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for 'Root' account usage and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name `<cloudtrail_log_group_name>` --filter-name `<root_usage_metric>` --metric-transformations metricName= `<root_usage_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<root_usage_alarm>` --metric-name `<root_usage_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails:\n\n`aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n\n3. Ensure the output from the above command contains the following:\n\n```\n\"filterPattern\": \"{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }\"\n```\n\n4. Note the `<root_usage_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<root_usage_metric>` captured in step 4.\n\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<root_usage_metric>`]'\n```\n\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "**Configuring log metric filter and alarm on Multi-region (global) CloudTrail**\n\n- ensures that activities from all regions (used as well as unused) are monitored\n\n- ensures that activities on all supported global services are monitored\n\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for 'Root' account usage and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name `<cloudtrail_log_group_name>` --filter-name `<root_usage_metric>` --metric-transformations metricName= `<root_usage_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<root_usage_alarm>` --metric-name `<root_usage_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }\" ``` 4. Note the `<root_usage_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<root_usage_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<root_usage_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "**Configuring log metric filter and alarm on Multi-region (global) CloudTrail** - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
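The remediation procedure above chains four AWS CLI calls: metric filter, SNS topic, subscription, alarm. For reference, here is a minimal Python (boto3) sketch of the same chain for the root-usage control; the log group, metric, alarm, topic, and email values are placeholder assumptions, not values taken from the benchmark.

```
# Hedged boto3 sketch of the remediation above (root account usage).
# All concrete names below are assumptions; substitute your own.
import boto3

LOG_GROUP = "CloudTrail/DefaultLogGroup"  # assumed <cloudtrail_log_group_name>
METRIC = "RootUsageMetric"                # assumed <root_usage_metric>
PATTERN = ('{ $.userIdentity.type = "Root" && $.userIdentity.invokedBy NOT EXISTS '
           '&& $.eventType != "AwsServiceEvent" }')

logs = boto3.client("logs")
sns = boto3.client("sns")
cloudwatch = boto3.client("cloudwatch")

# 1. Metric filter on the CloudTrail log group.
logs.put_metric_filter(
    logGroupName=LOG_GROUP,
    filterName=METRIC,
    filterPattern=PATTERN,
    metricTransformations=[{
        "metricName": METRIC,
        "metricNamespace": "CISBenchmark",
        "metricValue": "1",
    }],
)

# 2./3. SNS topic plus one subscription (endpoint is an assumption).
topic_arn = sns.create_topic(Name="cis-monitoring-alarms")["TopicArn"]
sns.subscribe(TopicArn=topic_arn, Protocol="email", Endpoint="secops@example.com")

# 4. Alarm wired to the metric filter's metric and the topic.
cloudwatch.put_metric_alarm(
    AlarmName="root_usage_alarm",  # assumed <root_usage_alarm>
    MetricName=METRIC,
    Namespace="CISBenchmark",
    Statistic="Sum",
    Period=300,
    Threshold=1,
    ComparisonOperator="GreaterThanOrEqualToThreshold",
    EvaluationPeriods=1,
    AlarmActions=[topic_arn],
)
```

As in the CLI procedure, the topic and subscription can be created once and reused by every alarm in this family.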
@@ -1110,9 +1113,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established changes made to Identity and Access Management (IAM) policies.",
"RationaleStatement": "Monitoring changes to IAM policies will help ensure authentication and authorization controls remain intact.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for IAM policy changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name `<cloudtrail_log_group_name>` --filter-name `<iam_changes_metric>` --metric-transformations metricName= `<iam_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<iam_changes_alarm>` --metric-name `<iam_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails:\n\n`aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n\n3. Ensure the output from the above command contains the following:\n\n```\n\"filterPattern\": \"{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}\"\n```\n\n4. Note the `<iam_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<iam_changes_metric>` captured in step 4.\n\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<iam_changes_metric>`]'\n```\n\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for IAM policy changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name `<cloudtrail_log_group_name>` --filter-name `<iam_changes_metric>` --metric-transformations metricName= `<iam_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<iam_changes_alarm>` --metric-name `<iam_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}\" ``` 4. Note the `<iam_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<iam_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<iam_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
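Audit steps 2-4 above reduce to a single CloudWatch Logs call. A hedged boto3 sketch follows, assuming a placeholder log group name; matching on one event name from the IAM-changes pattern is an illustrative shortcut, not the benchmark's exact string comparison.

```
# Hedged boto3 sketch of audit steps 2-4: find the IAM-changes filter on the
# trail's log group and report its metric name(s). Names are assumptions.
import boto3

LOG_GROUP = "CloudTrail/DefaultLogGroup"  # assumed <cloudtrail_log_group_name>

logs = boto3.client("logs")
# First page only; a production check would paginate.
filters = logs.describe_metric_filters(logGroupName=LOG_GROUP)["metricFilters"]

for f in filters:
    if "PutRolePolicy" in (f.get("filterPattern") or ""):
        # Each transformation's metricName is the <iam_changes_metric> of step 4.
        for t in f["metricTransformations"]:
            print(f["filterName"], "->", t["metricName"])
```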
@@ -1131,9 +1134,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.",
"RationaleStatement": "Monitoring changes to CloudTrail's configuration will help ensure sustained visibility to activities performed in the AWS account.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for cloudtrail configuration changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<cloudtrail_cfg_changes_metric>` --metric-transformations metricName= `<cloudtrail_cfg_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<cloudtrail_cfg_changes_alarm>` --metric-name `<cloudtrail_cfg_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n\n3. Ensure the output from the above command contains the following:\n\n```\n\"filterPattern\": \"{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }\"\n```\n\n4. Note the `<cloudtrail_cfg_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<cloudtrail_cfg_changes_metric>` captured in step 4.\n\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<cloudtrail_cfg_changes_metric>`]'\n```\n\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for cloudtrail configuration changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<cloudtrail_cfg_changes_metric>` --metric-transformations metricName= `<cloudtrail_cfg_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<cloudtrail_cfg_changes_alarm>` --metric-name `<cloudtrail_cfg_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }\" ``` 4. Note the `<cloudtrail_cfg_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<cloudtrail_cfg_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<cloudtrail_cfg_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
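Audit step 1 above is identical for every control in this family. A boto3 sketch of that step, with no placeholder inputs; note that trails configured with advanced event selectors return no `EventSelectors` and would need a separate check.

```
# Hedged boto3 sketch of audit step 1: find an active multi-region trail that
# captures all management events, and print its CloudWatch Logs group ARN.
import boto3

ct = boto3.client("cloudtrail")
for trail in ct.describe_trails()["trailList"]:
    if not trail.get("IsMultiRegionTrail"):
        continue
    status = ct.get_trail_status(Name=trail["TrailARN"])
    if not status.get("IsLogging"):
        continue
    selectors = ct.get_event_selectors(TrailName=trail["TrailARN"])
    captures_all = any(
        s.get("IncludeManagementEvents") and s.get("ReadWriteType") == "All"
        for s in selectors.get("EventSelectors", [])
    )
    if captures_all:
        # <cloudtrail_log_group_name> is the log-group segment of this ARN.
        print(trail.get("CloudWatchLogsLogGroupArn"))
```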
@@ -1152,9 +1155,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for failed console authentication attempts.",
"RationaleStatement": "Monitoring failed console logins may decrease lead time to detect an attempt to brute force a credential, which may provide an indicator, such as source IP, that can be used in other event correlation.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for AWS management Console Login Failures and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<console_signin_failure_metric>` --metric-transformations metricName= `<console_signin_failure_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }'\n```\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<console_signin_failure_alarm>` --metric-name `<console_signin_failure_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }\"\n```\n\n4. Note the `<console_signin_failure_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<console_signin_failure_metric>` captured in step 4.\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<console_signin_failure_metric>`]'\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for AWS management Console Login Failures and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<console_signin_failure_metric>` --metric-transformations metricName= `<console_signin_failure_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<console_signin_failure_alarm>` --metric-name `<console_signin_failure_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }\" ``` 4. Note the `<console_signin_failure_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<console_signin_failure_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<console_signin_failure_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
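Audit steps 5-6 above filter the `describe-alarms` output by metric name. A boto3 equivalent, assuming a placeholder metric name:

```
# Hedged boto3 sketch of audit steps 5-6: locate the alarm bound to the
# sign-in-failure metric and read its SNS topic. Metric name is an assumption.
import boto3

METRIC = "ConsoleSigninFailureMetric"  # assumed <console_signin_failure_metric>

cloudwatch = boto3.client("cloudwatch")
for page in cloudwatch.get_paginator("describe_alarms").paginate():
    for alarm in page["MetricAlarms"]:
        if alarm.get("MetricName") == METRIC:
            # AlarmActions carries the SNS topic ARN needed for step 7.
            print(alarm["AlarmName"], alarm.get("AlarmActions", []))
```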
@@ -1173,9 +1176,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for customer created CMKs which have changed state to disabled or scheduled deletion.",
"RationaleStatement": "Data encrypted with disabled or deleted keys will no longer be accessible.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for disabled or scheduled for deletion CMK's and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<disable_or_delete_cmk_changes_metric>` --metric-transformations metricName= `<disable_or_delete_cmk_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }'\n```\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<disable_or_delete_cmk_changes_alarm>` --metric-name `<disable_or_delete_cmk_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }\"\n```\n4. Note the `<disable_or_delete_cmk_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<disable_or_delete_cmk_changes_metric>` captured in step 4.\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<disable_or_delete_cmk_changes_metric>`]'\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for disabled or scheduled for deletion CMK's and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<disable_or_delete_cmk_changes_metric>` --metric-transformations metricName= `<disable_or_delete_cmk_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<disable_or_delete_cmk_changes_alarm>` --metric-name `<disable_or_delete_cmk_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }\" ``` 4. Note the `<disable_or_delete_cmk_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<disable_or_delete_cmk_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<disable_or_delete_cmk_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
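Audit step 7 above accepts only subscriptions whose `SubscriptionArn` is a real ARN; unconfirmed subscriptions report `PendingConfirmation` instead. A boto3 sketch with an assumed topic ARN:

```
# Hedged boto3 sketch of audit step 7: require at least one confirmed
# subscriber on the alarm topic. The topic ARN below is a placeholder.
import boto3

TOPIC_ARN = "arn:aws:sns:us-east-1:111122223333:cis-monitoring-alarms"  # assumed

sns = boto3.client("sns")
subs = sns.list_subscriptions_by_topic(TopicArn=TOPIC_ARN)["Subscriptions"]
confirmed = [
    s for s in subs
    if s["SubscriptionArn"].startswith("arn:aws:sns:")  # not "PendingConfirmation"
]
print("active subscribers:", len(confirmed))
```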
@@ -1194,9 +1197,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for changes to S3 bucket policies.",
"RationaleStatement": "Monitoring changes to S3 bucket policies may reduce time to detect and correct permissive policies on sensitive S3 buckets.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for S3 bucket policy changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<s3_bucket_policy_changes_metric>` --metric-transformations metricName= `<s3_bucket_policy_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to the topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<s3_bucket_policy_changes_alarm>` --metric-name `<s3_bucket_policy_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }\"\n```\n4. Note the `<s3_bucket_policy_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<s3_bucket_policy_changes_metric>` captured in step 4.\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<s3_bucket_policy_changes_metric>`]'\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for S3 bucket policy changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<s3_bucket_policy_changes_metric>` --metric-transformations metricName= `<s3_bucket_policy_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<s3_bucket_policy_changes_alarm>` --metric-name `<s3_bucket_policy_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }\" ``` 4. Note the `<s3_bucket_policy_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<s3_bucket_policy_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<s3_bucket_policy_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
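Since the controls in this family differ only in their filter pattern, the per-control audit can be factored into one helper. A hypothetical sketch; the fragment-matching approach and all names are assumptions, not part of the benchmark.

```
# Hedged helper: the same audit flow applies to every control here; only the
# filter pattern varies. Fragment matching is an illustrative simplification.
import boto3

def metric_names_for_pattern(log_group: str, fragment: str) -> list[str]:
    """Return metric names whose filter pattern contains `fragment`."""
    logs = boto3.client("logs")
    names = []
    for page in logs.get_paginator("describe_metric_filters").paginate(
        logGroupName=log_group
    ):
        for f in page["metricFilters"]:
            if fragment in (f.get("filterPattern") or ""):
                names += [t["metricName"] for t in f["metricTransformations"]]
    return names

# e.g. the S3 bucket-policy control above (log group name is an assumption):
print(metric_names_for_pattern("CloudTrail/DefaultLogGroup", "PutBucketPolicy"))
```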
@@ -1215,9 +1218,9 @@
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.",
"RationaleStatement": "Monitoring changes to AWS Config configuration will help ensure sustained visibility of configuration items within the AWS account.",
"ImpactStatement": "",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:\n\n1. Create a metric filter based on filter pattern provided which checks for AWS Configuration changes and the `<cloudtrail_log_group_name>` taken from audit step 1.\n```\naws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<aws_config_changes_metric>` --metric-transformations metricName= `<aws_config_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }'\n```\n\n**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.\n\n2. Create an SNS topic that the alarm will notify\n```\naws sns create-topic --name <sns_topic_name>\n```\n\n**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.\n\n3. Create an SNS subscription to topic created in step 2\n```\naws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints>\n```\n\n**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.\n\n4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2\n```\naws cloudwatch put-metric-alarm --alarm-name `<aws_config_changes_alarm>` --metric-name `<aws_config_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>\n```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:\n\n1. Identify the log group name configured for use with active multi-region CloudTrail:\n\n- List all CloudTrails: `aws cloudtrail describe-trails`\n\n- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`\n\n- From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`\n\nExample: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup`\n\n- Ensure Identified Multi region CloudTrail is active\n\n`aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>`\n\nensure `IsLogging` is set to `TRUE`\n\n- Ensure identified Multi-region Cloudtrail captures all Management Events\n\n`aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>`\n\nEnsure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`\n\n2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`:\n```\naws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\"\n```\n3. Ensure the output from the above command contains the following:\n```\n\"filterPattern\": \"{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }\"\n```\n4. Note the `<aws_config_changes_metric>` value associated with the `filterPattern` found in step 3.\n\n5. Get a list of CloudWatch alarms and filter on the `<aws_config_changes_metric>` captured in step 4.\n```\naws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<aws_config_changes_metric>`]'\n```\n6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.\n\n7. Ensure there is at least one active subscriber to the SNS topic\n```\naws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> \n```\nat least one subscription should have \"SubscriptionArn\" with valid aws ARN.\n```\nExample of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\"\n```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail\n- ensures that activities from all regions (used as well as unused) are monitored\n- ensures that activities on all supported global services are monitored\n- ensures that all management events across all regions are monitored",
"RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for AWS Configuration changes and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<aws_config_changes_metric>` --metric-transformations metricName= `<aws_config_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<aws_config_changes_alarm>` --metric-name `<aws_config_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```",
"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: 1. Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails: `aws cloudtrail describe-trails` - Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true` - From value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>` Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:<region>:<aws_account_number>:log-group:NewGroup:*`, `<cloudtrail_log_group_name>` would be `NewGroup` - Ensure Identified Multi region CloudTrail is active `aws cloudtrail get-trail-status --name <Name of a Multi-region CloudTrail>` ensure `IsLogging` is set to `TRUE` - Ensure identified Multi-region Cloudtrail captures all Management Events `aws cloudtrail get-event-selectors --trail-name <trailname shown in describe-trails>` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All` 2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }\" ``` 4. Note the `<aws_config_changes_metric>` value associated with the `filterPattern` found in step 3. 5. Get a list of CloudWatch alarms and filter on the `<aws_config_changes_metric>` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<aws_config_changes_metric>`]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```",
"AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored",
"References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html"
}
]
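
Editor's note: the audit flow above (filter pattern → metric → alarm → topic → subscriber) can be spot-checked in one shell pass. This is a minimal sketch, not part of the benchmark text; the log group name is a hypothetical placeholder for the `<cloudtrail_log_group_name>` found in audit step 1, and it assumes the AWS CLI's built-in JMESPath `--query` support.

```
# Sketch: chain audit steps 2-7 for the AWS Config changes filter.
LOG_GROUP="CloudTrail/DefaultLogGroup"   # hypothetical placeholder

# Steps 2-4: find the metric name behind the AWS Config filter pattern
METRIC=$(aws logs describe-metric-filters --log-group-name "$LOG_GROUP" \
  --query "metricFilters[?contains(filterPattern, 'config.amazonaws.com')].metricTransformations[0].metricName" \
  --output text)

# Steps 5-6: find the alarm on that metric and the SNS topic it notifies
TOPIC_ARN=$(aws cloudwatch describe-alarms \
  --query "MetricAlarms[?MetricName=='$METRIC'].AlarmActions[0]" --output text)

# Step 7: at least one confirmed (non-pending) subscription should be listed
aws sns list-subscriptions-by-topic --topic-arn "$TOPIC_ARN" \
  --query "Subscriptions[?SubscriptionArn!='PendingConfirmation'].SubscriptionArn"
```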
@@ -1238,8 +1241,8 @@
"Description": "The Network Access Control List (NACL) function provide stateless filtering of ingress and egress network traffic to AWS resources. It is recommended that no NACL allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
"RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.",
"ImpactStatement": "",
"RemediationProcedure": "**From Console:**\n\nPerform the following:\n1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home\n2. In the left pane, click `Network ACLs`\n3. For each network ACL to remediate, perform the following:\n - Select the network ACL\n - Click the `Inbound Rules` tab\n - Click `Edit inbound rules`\n - Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click `Delete` to remove the offending inbound rule\n - Click `Save`",
"AuditProcedure": "**From Console:**\n\nPerform the following to determine if the account is configured as prescribed:\n1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home\n2. In the left pane, click `Network ACLs`\n3. For each network ACL, perform the following:\n - Select the network ACL\n - Click the `Inbound Rules` tab\n - Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` and shows `ALLOW`\n\n**Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports",
"RemediationProcedure": "**From Console:** Perform the following: 1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home 2. In the left pane, click `Network ACLs` 3. For each network ACL to remediate, perform the following: - Select the network ACL - Click the `Inbound Rules` tab - Click `Edit inbound rules` - Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click `Delete` to remove the offending inbound rule - Click `Save`",
"AuditProcedure": "**From Console:** Perform the following to determine if the account is configured as prescribed: 1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home 2. In the left pane, click `Network ACLs` 3. For each network ACL, perform the following: - Select the network ACL - Click the `Inbound Rules` tab - Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` and shows `ALLOW` **Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Security.html#VPC_Security_Comparison"
}
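
Editor's note: the benchmark text gives only a console walk-through here. As a hedged CLI convenience (not part of the benchmark), the inbound NACL entries that ALLOW traffic from anywhere can be pulled per ACL; checking each returned entry's `PortRange` against your list of administration ports remains a manual step.

```
# List, per network ACL, the inbound entries that allow traffic from 0.0.0.0/0.
aws ec2 describe-network-acls \
  --query "NetworkAcls[].{Id:NetworkAclId,OpenIngress:Entries[?Egress==\`false\` && RuleAction=='allow' && CidrBlock=='0.0.0.0/0']}" \
  --output json
```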
@@ -1261,8 +1264,8 @@
"Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
"RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.",
"ImpactStatement": "When updating an existing environment, ensure that administrators have access to remote server administration ports through another mechanism before removing access by deleting the 0.0.0.0/0 inbound rule.",
"RemediationProcedure": "Perform the following to implement the prescribed state:\n\n1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home)\n2. In the left pane, click `Security Groups` \n3. For each security group, perform the following:\n1. Select the security group\n2. Click the `Inbound Rules` tab\n3. Click the `Edit inbound rules` button\n4. Identify the rules to be edited or removed\n5. Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click `Delete` to remove the offending inbound rule\n6. Click `Save rules`",
"AuditProcedure": "Perform the following to determine if the account is configured as prescribed:\n\n1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home)\n2. In the left pane, click `Security Groups` \n3. For each security group, perform the following:\n1. Select the security group\n2. Click the `Inbound Rules` tab\n3. Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` \n\n**Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports.",
"RemediationProcedure": "Perform the following to implement the prescribed state: 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups` 3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Click the `Edit inbound rules` button 4. Identify the rules to be edited or removed 5. Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click `Delete` to remove the offending inbound rule 6. Click `Save rules`",
"AuditProcedure": "Perform the following to determine if the account is configured as prescribed: 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups` 3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` **Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#deleting-security-group-rule"
}
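
Editor's note: option B of the remediation above can also be performed from the command line. A minimal sketch; the security group ID, protocol, and port are illustrative placeholders, not values from the benchmark.

```
# Revoke a single offending IPv4 rule: SSH (tcp/22) open to the world.
# sg-0123456789abcdef0 is a placeholder for a group found during the audit.
aws ec2 revoke-security-group-ingress \
  --group-id sg-0123456789abcdef0 \
  --protocol tcp --port 22 --cidr 0.0.0.0/0
```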
@@ -1284,8 +1287,8 @@
"Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
"RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.",
"ImpactStatement": "When updating an existing environment, ensure that administrators have access to remote server administration ports through another mechanism before removing access by deleting the ::/0 inbound rule.",
"RemediationProcedure": "Perform the following to implement the prescribed state:\n\n1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home)\n2. In the left pane, click `Security Groups` \n3. For each security group, perform the following:\n1. Select the security group\n2. Click the `Inbound Rules` tab\n3. Click the `Edit inbound rules` button\n4. Identify the rules to be edited or removed\n5. Either A) update the Source field to a range other than ::/0, or, B) Click `Delete` to remove the offending inbound rule\n6. Click `Save rules`",
"AuditProcedure": "Perform the following to determine if the account is configured as prescribed:\n\n1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home)\n2. In the left pane, click `Security Groups` \n3. For each security group, perform the following:\n1. Select the security group\n2. Click the `Inbound Rules` tab\n3. Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `::/0` \n\n**Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports.",
"RemediationProcedure": "Perform the following to implement the prescribed state: 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups` 3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Click the `Edit inbound rules` button 4. Identify the rules to be edited or removed 5. Either A) update the Source field to a range other than ::/0, or, B) Click `Delete` to remove the offending inbound rule 6. Click `Save rules`",
"AuditProcedure": "Perform the following to determine if the account is configured as prescribed: 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups` 3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `::/0` **Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports.",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#deleting-security-group-rule"
}
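
Editor's note: for the IPv6 variant, the AWS CLI's shorthand `--cidr` flag accepts only IPv4 ranges, so revoking an `::/0` rule goes through `--ip-permissions` instead. A hedged sketch with placeholder values:

```
# Revoke an offending IPv6 rule: RDP (tcp/3389) open to ::/0.
# The group ID is a placeholder; adjust protocol/ports to your environment.
aws ec2 revoke-security-group-ingress \
  --group-id sg-0123456789abcdef0 \
  --ip-permissions 'IpProtocol=tcp,FromPort=3389,ToPort=3389,Ipv6Ranges=[{CidrIpv6=::/0}]'
```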
@@ -1302,11 +1305,11 @@
"Section": "5. Networking",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "A VPC comes with a default security group whose initial settings deny all inbound traffic, allow all outbound traffic, and allow all traffic between instances assigned to the security group. If you don't specify a security group when you launch an instance, the instance is automatically assigned to this default security group. Security groups provide stateful filtering of ingress/egress network traffic to AWS resources. It is recommended that the default security group restrict all traffic.\n\nThe default VPC in every region should have its default security group updated to comply. Any newly created VPCs will automatically contain a default security group that will need remediation to comply with this recommendation.\n\n**NOTE:** When implementing this recommendation, VPC flow logging is invaluable in determining the least privilege port access required by systems to work properly because it can log all packet acceptances and rejections occurring under the current security groups. This dramatically reduces the primary barrier to least privilege engineering - discovering the minimum ports required by systems in the environment. Even if the VPC flow logging recommendation in this benchmark is not adopted as a permanent security measure, it should be used during any period of discovery and engineering for least privileged security groups.",
"Description": "A VPC comes with a default security group whose initial settings deny all inbound traffic, allow all outbound traffic, and allow all traffic between instances assigned to the security group. If you don't specify a security group when you launch an instance, the instance is automatically assigned to this default security group. Security groups provide stateful filtering of ingress/egress network traffic to AWS resources. It is recommended that the default security group restrict all traffic. The default VPC in every region should have its default security group updated to comply. Any newly created VPCs will automatically contain a default security group that will need remediation to comply with this recommendation. **NOTE:** When implementing this recommendation, VPC flow logging is invaluable in determining the least privilege port access required by systems to work properly because it can log all packet acceptances and rejections occurring under the current security groups. This dramatically reduces the primary barrier to least privilege engineering - discovering the minimum ports required by systems in the environment. Even if the VPC flow logging recommendation in this benchmark is not adopted as a permanent security measure, it should be used during any period of discovery and engineering for least privileged security groups.",
"RationaleStatement": "Configuring all VPC default security groups to restrict all traffic will encourage least privilege security group development and mindful placement of AWS resources into security groups which will in-turn reduce the exposure of those resources.",
"ImpactStatement": "Implementing this recommendation in an existing VPC containing operating resources requires extremely careful migration planning as the default security groups are likely to be enabling many ports that are unknown. Enabling VPC flow logging (of accepts) in an existing environment that is known to be breach free will reveal the current pattern of ports being used for each instance to communicate successfully.",
"RemediationProcedure": "Security Group Members\n\nPerform the following to implement the prescribed state:\n\n1. Identify AWS resources that exist within the default security group\n2. Create a set of least privilege security groups for those resources\n3. Place the resources in those security groups\n4. Remove the resources noted in #1 from the default security group\n\nSecurity Group State\n\n1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home)\n2. Repeat the next steps for all VPCs - including the default VPC in each AWS region:\n3. In the left pane, click `Security Groups` \n4. For each default security group, perform the following:\n1. Select the `default` security group\n2. Click the `Inbound Rules` tab\n3. Remove any inbound rules\n4. Click the `Outbound Rules` tab\n5. Remove any Outbound rules\n\nRecommended:\n\nIAM groups allow you to edit the \"name\" field. After remediating default groups rules for all VPCs in all regions, edit this field to add text similar to \"DO NOT USE. DO NOT ADD RULES\"",
"AuditProcedure": "Perform the following to determine if the account is configured as prescribed:\n\nSecurity Group State\n\n1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home)\n2. Repeat the next steps for all VPCs - including the default VPC in each AWS region:\n3. In the left pane, click `Security Groups` \n4. For each default security group, perform the following:\n1. Select the `default` security group\n2. Click the `Inbound Rules` tab\n3. Ensure no rule exist\n4. Click the `Outbound Rules` tab\n5. Ensure no rules exist\n\nSecurity Group Members\n\n1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home)\n2. Repeat the next steps for all default groups in all VPCs - including the default VPC in each AWS region:\n3. In the left pane, click `Security Groups` \n4. Copy the id of the default security group.\n5. Change to the EC2 Management Console at https://console.aws.amazon.com/ec2/v2/home\n6. In the filter column type 'Security Group ID : < security group id from #4 >'",
"RemediationProcedure": "Security Group Members Perform the following to implement the prescribed state: 1. Identify AWS resources that exist within the default security group 2. Create a set of least privilege security groups for those resources 3. Place the resources in those security groups 4. Remove the resources noted in #1 from the default security group Security Group State 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups` 4. For each default security group, perform the following: 1. Select the `default` security group 2. Click the `Inbound Rules` tab 3. Remove any inbound rules 4. Click the `Outbound Rules` tab 5. Remove any Outbound rules Recommended: IAM groups allow you to edit the \"name\" field. After remediating default groups rules for all VPCs in all regions, edit this field to add text similar to \"DO NOT USE. DO NOT ADD RULES\"",
"AuditProcedure": "Perform the following to determine if the account is configured as prescribed: Security Group State 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups` 4. For each default security group, perform the following: 1. Select the `default` security group 2. Click the `Inbound Rules` tab 3. Ensure no rule exist 4. Click the `Outbound Rules` tab 5. Ensure no rules exist Security Group Members 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all default groups in all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups` 4. Copy the id of the default security group. 5. Change to the EC2 Management Console at https://console.aws.amazon.com/ec2/v2/home 6. In the filter column type 'Security Group ID : < security group id from #4 >'",
"AdditionalInformation": "",
"References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#default-security-group"
}
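
Editor's note: as a quick per-region spot check complementing the console audit above (again not part of the benchmark text), the default groups and their rule counts can be tabulated; any non-zero count then needs the manual review the audit describes.

```
# Default security groups in the current region with ingress/egress rule
# counts; compliant groups show In=0 and Out=0. Repeat per region.
aws ec2 describe-security-groups \
  --filters Name=group-name,Values=default \
  --query "SecurityGroups[].{Id:GroupId,Vpc:VpcId,In:length(IpPermissions),Out:length(IpPermissionsEgress)}" \
  --output table
```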
@@ -1326,8 +1329,8 @@
"Description": "Once a VPC peering connection is established, routing tables must be updated to establish any connections between the peered VPCs. These routes can be as specific as desired - even peering a VPC to only a single host on the other side of the connection.",
"RationaleStatement": "Being highly selective in peering routing tables is a very effective way of minimizing the impact of breach as resources outside of these routes are inaccessible to the peered VPC.",
"ImpactStatement": "",
"RemediationProcedure": "Remove and add route table entries to ensure that the least number of subnets or hosts as is required to accomplish the purpose for peering are routable.\n\n**From Command Line:**\n\n1. For each _<route\\_table\\_id>_ containing routes non compliant with your routing policy (which grants more than desired \"least access\"), delete the non compliant route:\n```\naws ec2 delete-route --route-table-id <route_table_id> --destination-cidr-block <non_compliant_destination_CIDR>\n```\n 2. Create a new compliant route:\n```\naws ec2 create-route --route-table-id <route_table_id> --destination-cidr-block <compliant_destination_CIDR> --vpc-peering-connection-id <peering_connection_id>\n```",
"AuditProcedure": "Review routing tables of peered VPCs for whether they route all subnets of each VPC and whether that is necessary to accomplish the intended purposes for peering the VPCs.\n\n**From Command Line:**\n\n1. List all the route tables from a VPC and check if \"GatewayId\" is pointing to a _<peering\\_connection\\_id>_ (e.g. pcx-1a2b3c4d) and if \"DestinationCidrBlock\" is as specific as desired.\n```\naws ec2 describe-route-tables --filter \"Name=vpc-id,Values=<vpc_id>\" --query \"RouteTables[*].{RouteTableId:RouteTableId, VpcId:VpcId, Routes:Routes, AssociatedSubnets:Associations[*].SubnetId}\"\n```",
"RemediationProcedure": "Remove and add route table entries to ensure that the least number of subnets or hosts as is required to accomplish the purpose for peering are routable. **From Command Line:** 1. For each _<route\\_table\\_id>_ containing routes non compliant with your routing policy (which grants more than desired \"least access\"), delete the non compliant route: ``` aws ec2 delete-route --route-table-id <route_table_id> --destination-cidr-block <non_compliant_destination_CIDR> ``` 2. Create a new compliant route: ``` aws ec2 create-route --route-table-id <route_table_id> --destination-cidr-block <compliant_destination_CIDR> --vpc-peering-connection-id <peering_connection_id> ```",
"AuditProcedure": "Review routing tables of peered VPCs for whether they route all subnets of each VPC and whether that is necessary to accomplish the intended purposes for peering the VPCs. **From Command Line:** 1. List all the route tables from a VPC and check if \"GatewayId\" is pointing to a _<peering\\_connection\\_id>_ (e.g. pcx-1a2b3c4d) and if \"DestinationCidrBlock\" is as specific as desired. ``` aws ec2 describe-route-tables --filter \"Name=vpc-id,Values=<vpc_id>\" --query \"RouteTables[*].{RouteTableId:RouteTableId, VpcId:VpcId, Routes:Routes, AssociatedSubnets:Associations[*].SubnetId}\" ```",
"AdditionalInformation": "If an organization has AWS transit gateway implemented in their VPC architecture they should look to apply the recommendation above for \"least access\" routing architecture at the AWS transit gateway level in combination with what must be implemented at the standard VPC route table. More specifically, to route traffic between two or more VPCs via a transit gateway VPCs must have an attachment to a transit gateway route table as well as a route, therefore to avoid routing traffic between VPCs an attachment to the transit gateway route table should only be added where there is an intention to route traffic between the VPCs. As transit gateways are able to host multiple route tables it is possible to group VPCs by attaching them to a common route table.",
"References": "https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/peering-configurations-partial-access.html:https://docs.aws.amazon.com/cli/latest/reference/ec2/create-vpc-peering-connection.html"
}
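
Editor's note: the `describe-route-tables` query in the audit above returns every route. A hedged refinement (placeholder VPC ID) that surfaces only the peering routes, so overly broad `DestinationCidrBlock` values stand out:

```
# Show only routes targeting a VPC peering connection for each route table
# in the given VPC. vpc-0123456789abcdef0 is a placeholder.
aws ec2 describe-route-tables \
  --filters Name=vpc-id,Values=vpc-0123456789abcdef0 \
  --query "RouteTables[].{Id:RouteTableId,PeeringRoutes:Routes[?VpcPeeringConnectionId!=\`null\`]}"
```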

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -19,7 +19,7 @@
"ec2_instance_managed_by_ssm",
"ec2_instance_older_than_specific_days",
"ssm_managed_compliant_patching",
"ec2_elastic_ip_unassgined"
"ec2_elastic_ip_unassigned"
]
},
{
@@ -51,9 +51,9 @@
}
],
"Checks": [
"apigateway_client_certificate_enabled",
"apigateway_logging_enabled",
"apigateway_waf_acl_attached",
"apigateway_restapi_client_certificate_enabled",
"apigateway_restapi_logging_enabled",
"apigateway_restapi_waf_acl_attached",
"cloudtrail_multi_region_enabled",
"cloudtrail_s3_dataevents_read_enabled",
"cloudtrail_s3_dataevents_write_enabled",
@@ -88,14 +88,17 @@
"iam_password_policy_symbol",
"iam_password_policy_uppercase",
"iam_no_custom_policy_permissive_role_assumption",
"iam_policy_no_administrative_privileges",
"iam_aws_attached_policy_no_administrative_privileges",
"iam_customer_attached_policy_no_administrative_privileges",
"iam_inline_policy_no_administrative_privileges",
"iam_root_hardware_mfa_enabled",
"iam_root_mfa_enabled",
"iam_no_root_access_key",
"iam_rotate_access_key_90_days",
"iam_user_mfa_enabled_console_access",
"iam_user_mfa_enabled_console_access",
"iam_disable_90_days_credentials",
"iam_user_accesskey_unused",
"iam_user_console_access_unused",
"kms_cmk_rotation_enabled",
"awslambda_function_not_publicly_accessible",
"awslambda_function_not_publicly_accessible",
@@ -146,7 +149,7 @@
}
],
"Checks": [
"ec2_elastic_ip_unassgined",
"ec2_elastic_ip_unassigned",
"vpc_flow_logs_enabled"
]
},
@@ -181,7 +184,9 @@
"Checks": [
"elbv2_ssl_listeners",
"iam_no_custom_policy_permissive_role_assumption",
"iam_policy_no_administrative_privileges",
"iam_aws_attached_policy_no_administrative_privileges",
"iam_customer_attached_policy_no_administrative_privileges",
"iam_inline_policy_no_administrative_privileges",
"iam_no_root_access_key"
]
},
@@ -245,8 +250,8 @@
],
"Checks": [
"acm_certificates_expiration_check",
"apigateway_client_certificate_enabled",
"apigateway_logging_enabled",
"apigateway_restapi_client_certificate_enabled",
"apigateway_restapi_logging_enabled",
"efs_have_backup_enabled",
"cloudtrail_multi_region_enabled",
"cloudtrail_s3_dataevents_read_enabled",

Some files were not shown because too many files have changed in this diff