diff --git a/api/CHANGELOG.md b/api/CHANGELOG.md
index 03b4bcfc7a..42fd1831f6 100644
--- a/api/CHANGELOG.md
+++ b/api/CHANGELOG.md
@@ -7,6 +7,7 @@ All notable changes to the **Prowler API** are documented in this file.
### 🚀 Added
- Attack Paths: Bedrock Code Interpreter and AttachRolePolicy privilege escalation queries [(#9885)](https://github.com/prowler-cloud/prowler/pull/9885)
+- Memory optimizations for large compliance report generation [(#9444)](https://github.com/prowler-cloud/prowler/pull/9444)
### 🔄 Changed
diff --git a/api/src/backend/config/django/base.py b/api/src/backend/config/django/base.py
index 80b96952d7..c9e1b4750f 100644
--- a/api/src/backend/config/django/base.py
+++ b/api/src/backend/config/django/base.py
@@ -276,7 +276,7 @@ FINDINGS_MAX_DAYS_IN_RANGE = env.int("DJANGO_FINDINGS_MAX_DAYS_IN_RANGE", 7)
DJANGO_TMP_OUTPUT_DIRECTORY = env.str(
"DJANGO_TMP_OUTPUT_DIRECTORY", "/tmp/prowler_api_output"
)
-DJANGO_FINDINGS_BATCH_SIZE = env.str("DJANGO_FINDINGS_BATCH_SIZE", 1000)
+DJANGO_FINDINGS_BATCH_SIZE = env.int("DJANGO_FINDINGS_BATCH_SIZE", 1000)
DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET = env.str("DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET", "")
DJANGO_OUTPUT_S3_AWS_ACCESS_KEY_ID = env.str("DJANGO_OUTPUT_S3_AWS_ACCESS_KEY_ID", "")
diff --git a/api/src/backend/tasks/jobs/report.py b/api/src/backend/tasks/jobs/report.py
index 1fbf9161d6..de022e9afd 100644
--- a/api/src/backend/tasks/jobs/report.py
+++ b/api/src/backend/tasks/jobs/report.py
@@ -1,1023 +1,25 @@
-import io
-import os
-from collections import defaultdict
-from functools import partial
from pathlib import Path
from shutil import rmtree
-import matplotlib.pyplot as plt
from celery.utils.log import get_task_logger
from config.django.base import DJANGO_TMP_OUTPUT_DIRECTORY
-from reportlab.lib import colors
-from reportlab.lib.enums import TA_CENTER
-from reportlab.lib.pagesizes import letter
-from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet
-from reportlab.lib.units import inch
-from reportlab.pdfbase import pdfmetrics
-from reportlab.pdfbase.ttfonts import TTFont
-from reportlab.pdfgen import canvas
-from reportlab.platypus import (
- Image,
- PageBreak,
- Paragraph,
- SimpleDocTemplate,
- Spacer,
- Table,
- TableStyle,
-)
from tasks.jobs.export import _generate_compliance_output_directory, _upload_to_s3
-from tasks.jobs.threatscore import compute_threatscore_metrics
-from tasks.jobs.threatscore_utils import (
- _aggregate_requirement_statistics_from_database,
- _calculate_requirements_data_from_statistics,
- _load_findings_for_requirement_checks,
+from tasks.jobs.reports import (
+ FRAMEWORK_REGISTRY,
+ ENSReportGenerator,
+ NIS2ReportGenerator,
+ ThreatScoreReportGenerator,
)
+from tasks.jobs.threatscore import compute_threatscore_metrics
+from tasks.jobs.threatscore_utils import _aggregate_requirement_statistics_from_database
from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
-from api.models import Provider, ScanSummary, StatusChoices, ThreatScoreSnapshot
-from api.utils import initialize_prowler_provider
-from prowler.lib.check.compliance_models import Compliance
+from api.models import Provider, ScanSummary, ThreatScoreSnapshot
from prowler.lib.outputs.finding import Finding as FindingOutput
-pdfmetrics.registerFont(
- TTFont(
- "PlusJakartaSans",
- os.path.join(
- os.path.dirname(__file__), "../assets/fonts/PlusJakartaSans-Regular.ttf"
- ),
- )
-)
-
-pdfmetrics.registerFont(
- TTFont(
- "FiraCode",
- os.path.join(os.path.dirname(__file__), "../assets/fonts/FiraCode-Regular.ttf"),
- )
-)
-
logger = get_task_logger(__name__)
-# Color constants
-COLOR_PROWLER_DARK_GREEN = colors.Color(0.1, 0.5, 0.2)
-COLOR_BLUE = colors.Color(0.2, 0.4, 0.6)
-COLOR_LIGHT_BLUE = colors.Color(0.3, 0.5, 0.7)
-COLOR_LIGHTER_BLUE = colors.Color(0.4, 0.6, 0.8)
-COLOR_BG_BLUE = colors.Color(0.95, 0.97, 1.0)
-COLOR_BG_LIGHT_BLUE = colors.Color(0.98, 0.99, 1.0)
-COLOR_GRAY = colors.Color(0.2, 0.2, 0.2)
-COLOR_LIGHT_GRAY = colors.Color(0.9, 0.9, 0.9)
-COLOR_BORDER_GRAY = colors.Color(0.7, 0.8, 0.9)
-COLOR_GRID_GRAY = colors.Color(0.7, 0.7, 0.7)
-COLOR_DARK_GRAY = colors.Color(0.4, 0.4, 0.4)
-COLOR_HEADER_DARK = colors.Color(0.1, 0.3, 0.5)
-COLOR_HEADER_MEDIUM = colors.Color(0.15, 0.35, 0.55)
-COLOR_WHITE = colors.white
-
-# Risk and status colors
-COLOR_HIGH_RISK = colors.Color(0.8, 0.2, 0.2)
-COLOR_MEDIUM_RISK = colors.Color(0.9, 0.6, 0.2)
-COLOR_LOW_RISK = colors.Color(0.9, 0.9, 0.2)
-COLOR_SAFE = colors.Color(0.2, 0.8, 0.2)
-
-# ENS specific colors
-COLOR_ENS_ALTO = colors.Color(0.8, 0.2, 0.2)
-COLOR_ENS_MEDIO = colors.Color(0.98, 0.75, 0.13)
-COLOR_ENS_BAJO = colors.Color(0.06, 0.72, 0.51)
-COLOR_ENS_OPCIONAL = colors.Color(0.42, 0.45, 0.50)
-COLOR_ENS_TIPO = colors.Color(0.2, 0.4, 0.6)
-COLOR_ENS_AUTO = colors.Color(0.30, 0.69, 0.31)
-COLOR_ENS_MANUAL = colors.Color(0.96, 0.60, 0.0)
-
-# NIS2 specific colors
-COLOR_NIS2_PRIMARY = colors.Color(0.12, 0.23, 0.54) # EU Blue #1E3A8A
-COLOR_NIS2_SECONDARY = colors.Color(0.23, 0.51, 0.96) # Light Blue #3B82F6
-COLOR_NIS2_BG_BLUE = colors.Color(0.96, 0.97, 0.99) # Very light blue background
-
-# Chart colors
-CHART_COLOR_GREEN_1 = "#4CAF50"
-CHART_COLOR_GREEN_2 = "#8BC34A"
-CHART_COLOR_YELLOW = "#FFEB3B"
-CHART_COLOR_ORANGE = "#FF9800"
-CHART_COLOR_RED = "#F44336"
-CHART_COLOR_BLUE = "#2196F3"
-
-# ENS dimension mappings
-DIMENSION_MAPPING = {
- "trazabilidad": ("T", colors.Color(0.26, 0.52, 0.96)),
- "autenticidad": ("A", colors.Color(0.30, 0.69, 0.31)),
- "integridad": ("I", colors.Color(0.61, 0.15, 0.69)),
- "confidencialidad": ("C", colors.Color(0.96, 0.26, 0.21)),
- "disponibilidad": ("D", colors.Color(1.0, 0.60, 0.0)),
-}
-
-# ENS tipo icons
-TIPO_ICONS = {
- "requisito": "⚠️",
- "refuerzo": "🛡️",
- "recomendacion": "💡",
- "medida": "📋",
-}
-
-# Dimension names for charts
-DIMENSION_NAMES = [
- "Trazabilidad",
- "Autenticidad",
- "Integridad",
- "Confidencialidad",
- "Disponibilidad",
-]
-
-DIMENSION_KEYS = [
- "trazabilidad",
- "autenticidad",
- "integridad",
- "confidencialidad",
- "disponibilidad",
-]
-
-# ENS nivel order
-ENS_NIVEL_ORDER = ["alto", "medio", "bajo", "opcional"]
-
-# ENS tipo order
-ENS_TIPO_ORDER = ["requisito", "refuerzo", "recomendacion", "medida"]
-
-# ThreatScore expected sections
-THREATSCORE_SECTIONS = [
- "1. IAM",
- "2. Attack Surface",
- "3. Logging and Monitoring",
- "4. Encryption",
-]
-
-# NIS2 main sections (simplified for chart display)
-NIS2_SECTIONS = [
- "1", # Policy on Security
- "2", # Risk Management
- "3", # Incident Handling
- "4", # Business Continuity
- "5", # Supply Chain Security
- "6", # Acquisition & Development
- "7", # Effectiveness Assessment
- "9", # Cryptography
- "11", # Access Control
- "12", # Asset Management
-]
-
-# Table column widths (in inches)
-COL_WIDTH_SMALL = 0.4 * inch
-COL_WIDTH_MEDIUM = 0.9 * inch
-COL_WIDTH_LARGE = 1.5 * inch
-COL_WIDTH_XLARGE = 2 * inch
-COL_WIDTH_XXLARGE = 3 * inch
-
-# Common padding values
-PADDING_SMALL = 4
-PADDING_MEDIUM = 6
-PADDING_LARGE = 8
-PADDING_XLARGE = 10
-
-
-# Cache for PDF styles to avoid recreating them on every call
-_PDF_STYLES_CACHE: dict[str, ParagraphStyle] | None = None
-
-
-# Helper functions for performance optimization
-def _get_color_for_risk_level(risk_level: int) -> colors.Color:
- """Get color based on risk level using optimized lookup."""
- if risk_level >= 4:
- return COLOR_HIGH_RISK
- elif risk_level >= 3:
- return COLOR_MEDIUM_RISK
- elif risk_level >= 2:
- return COLOR_LOW_RISK
- return COLOR_SAFE
-
-
-def _get_color_for_weight(weight: int) -> colors.Color:
- """Get color based on weight using optimized lookup."""
- if weight > 100:
- return COLOR_HIGH_RISK
- elif weight > 50:
- return COLOR_LOW_RISK
- return COLOR_SAFE
-
-
-def _get_color_for_compliance(percentage: float) -> colors.Color:
- """Get color based on compliance percentage."""
- if percentage >= 80:
- return COLOR_SAFE
- elif percentage >= 60:
- return COLOR_LOW_RISK
- return COLOR_HIGH_RISK
-
-
-def _get_chart_color_for_percentage(percentage: float) -> str:
- """Get chart color string based on percentage."""
- if percentage >= 80:
- return CHART_COLOR_GREEN_1
- elif percentage >= 60:
- return CHART_COLOR_GREEN_2
- elif percentage >= 40:
- return CHART_COLOR_YELLOW
- elif percentage >= 20:
- return CHART_COLOR_ORANGE
- return CHART_COLOR_RED
-
-
-def _get_ens_nivel_color(nivel: str) -> colors.Color:
- """Get ENS nivel color using optimized lookup."""
- nivel_lower = nivel.lower()
- if nivel_lower == "alto":
- return COLOR_ENS_ALTO
- elif nivel_lower == "medio":
- return COLOR_ENS_MEDIO
- elif nivel_lower == "bajo":
- return COLOR_ENS_BAJO
- return COLOR_ENS_OPCIONAL
-
-
-def _safe_getattr(obj, attr: str, default: str = "N/A") -> str:
- """Optimized getattr with default value."""
- return getattr(obj, attr, default)
-
-
-def _create_info_table_style() -> TableStyle:
- """Create a reusable table style for information/metadata tables.
-
- ReportLab TableStyle coordinate system:
- - Format: (COMMAND, (start_col, start_row), (end_col, end_row), value)
- - Coordinates use (column, row) format, starting at (0, 0) for top-left cell
- - Negative indices work like Python slicing: -1 means "last row/column"
- - (0, 0) to (0, -1) = entire first column (all rows)
- - (0, 0) to (-1, 0) = entire first row (all columns)
- - (0, 0) to (-1, -1) = entire table
- - Styles are applied in order; later rules override earlier ones
- """
- return TableStyle(
- [
- # Column 0 (labels): blue background with white text
- ("BACKGROUND", (0, 0), (0, -1), COLOR_BLUE),
- ("TEXTCOLOR", (0, 0), (0, -1), COLOR_WHITE),
- ("FONTNAME", (0, 0), (0, -1), "FiraCode"),
- # Column 1 (values): light blue background with gray text
- ("BACKGROUND", (1, 0), (1, -1), COLOR_BG_BLUE),
- ("TEXTCOLOR", (1, 0), (1, -1), COLOR_GRAY),
- ("FONTNAME", (1, 0), (1, -1), "PlusJakartaSans"),
- # Apply to entire table
- ("ALIGN", (0, 0), (-1, -1), "LEFT"),
- ("VALIGN", (0, 0), (-1, -1), "TOP"),
- ("FONTSIZE", (0, 0), (-1, -1), 11),
- ("GRID", (0, 0), (-1, -1), 1, COLOR_BORDER_GRAY),
- ("LEFTPADDING", (0, 0), (-1, -1), PADDING_XLARGE),
- ("RIGHTPADDING", (0, 0), (-1, -1), PADDING_XLARGE),
- ("TOPPADDING", (0, 0), (-1, -1), PADDING_LARGE),
- ("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_LARGE),
- ]
- )
-
-
-def _create_header_table_style(header_color: colors.Color = None) -> TableStyle:
- """Create a reusable table style for tables with headers.
-
- ReportLab TableStyle coordinate system:
- - Format: (COMMAND, (start_col, start_row), (end_col, end_row), value)
- - (0, 0) to (-1, 0) = entire first row (header row)
- - (1, 1) to (-1, -1) = all data cells (excludes header row and first column)
- - See _create_info_table_style() for full coordinate system documentation
- """
- if header_color is None:
- header_color = COLOR_BLUE
-
- return TableStyle(
- [
- # Header row (row 0): colored background with white text
- ("BACKGROUND", (0, 0), (-1, 0), header_color),
- ("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
- ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
- ("FONTSIZE", (0, 0), (-1, 0), 10),
- # Apply to entire table
- ("ALIGN", (0, 0), (-1, -1), "CENTER"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- # Data cells (excluding header): smaller font
- ("FONTSIZE", (1, 1), (-1, -1), 9),
- # Apply to entire table
- ("GRID", (0, 0), (-1, -1), 1, COLOR_GRID_GRAY),
- ("LEFTPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
- ("RIGHTPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
- ("TOPPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
- ("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
- ]
- )
-
-
-def _create_findings_table_style() -> TableStyle:
- """Create a reusable table style for findings tables.
-
- ReportLab TableStyle coordinate system:
- - Format: (COMMAND, (start_col, start_row), (end_col, end_row), value)
- - (0, 0) to (-1, 0) = entire first row (header row)
- - (0, 0) to (0, 0) = only the top-left cell
- - See _create_info_table_style() for full coordinate system documentation
- """
- return TableStyle(
- [
- # Header row (row 0): colored background with white text
- ("BACKGROUND", (0, 0), (-1, 0), COLOR_BLUE),
- ("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
- ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
- # Only top-left cell centered (for index/number column)
- ("ALIGN", (0, 0), (0, 0), "CENTER"),
- # Apply to entire table
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("FONTSIZE", (0, 0), (-1, -1), 9),
- ("GRID", (0, 0), (-1, -1), 0.1, COLOR_BORDER_GRAY),
- # Remove padding only from top-left cell
- ("LEFTPADDING", (0, 0), (0, 0), 0),
- ("RIGHTPADDING", (0, 0), (0, 0), 0),
- # Apply to entire table
- ("TOPPADDING", (0, 0), (-1, -1), PADDING_SMALL),
- ("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_SMALL),
- ]
- )
-
-
-def _create_pdf_styles() -> dict[str, ParagraphStyle]:
- """
- Create and return PDF paragraph styles used throughout the report.
-
- Styles are cached on first call to improve performance.
-
- Returns:
- dict[str, ParagraphStyle]: A dictionary containing the following styles:
- - 'title': Title style with prowler green color
- - 'h1': Heading 1 style with blue color and background
- - 'h2': Heading 2 style with light blue color
- - 'h3': Heading 3 style for sub-headings
- - 'normal': Normal text style with left indent
- - 'normal_center': Normal text style without indent
- """
- global _PDF_STYLES_CACHE
-
- if _PDF_STYLES_CACHE is not None:
- return _PDF_STYLES_CACHE
-
- styles = getSampleStyleSheet()
-
- title_style = ParagraphStyle(
- "CustomTitle",
- parent=styles["Title"],
- fontSize=24,
- textColor=COLOR_PROWLER_DARK_GREEN,
- spaceAfter=20,
- fontName="PlusJakartaSans",
- alignment=TA_CENTER,
- )
-
- h1 = ParagraphStyle(
- "CustomH1",
- parent=styles["Heading1"],
- fontSize=18,
- textColor=COLOR_BLUE,
- spaceBefore=20,
- spaceAfter=12,
- fontName="PlusJakartaSans",
- leftIndent=0,
- borderWidth=2,
- borderColor=COLOR_BLUE,
- borderPadding=PADDING_LARGE,
- backColor=COLOR_BG_BLUE,
- )
-
- h2 = ParagraphStyle(
- "CustomH2",
- parent=styles["Heading2"],
- fontSize=14,
- textColor=COLOR_LIGHT_BLUE,
- spaceBefore=15,
- spaceAfter=8,
- fontName="PlusJakartaSans",
- leftIndent=10,
- borderWidth=1,
- borderColor=COLOR_BORDER_GRAY,
- borderPadding=5,
- backColor=COLOR_BG_LIGHT_BLUE,
- )
-
- h3 = ParagraphStyle(
- "CustomH3",
- parent=styles["Heading3"],
- fontSize=12,
- textColor=COLOR_LIGHTER_BLUE,
- spaceBefore=10,
- spaceAfter=6,
- fontName="PlusJakartaSans",
- leftIndent=20,
- )
-
- normal = ParagraphStyle(
- "CustomNormal",
- parent=styles["Normal"],
- fontSize=10,
- textColor=COLOR_GRAY,
- spaceBefore=PADDING_SMALL,
- spaceAfter=PADDING_SMALL,
- leftIndent=30,
- fontName="PlusJakartaSans",
- )
-
- normal_center = ParagraphStyle(
- "CustomNormalCenter",
- parent=styles["Normal"],
- fontSize=10,
- textColor=COLOR_GRAY,
- fontName="PlusJakartaSans",
- )
-
- _PDF_STYLES_CACHE = {
- "title": title_style,
- "h1": h1,
- "h2": h2,
- "h3": h3,
- "normal": normal,
- "normal_center": normal_center,
- }
-
- return _PDF_STYLES_CACHE
-
-
-def _create_risk_component(risk_level: int, weight: int, score: int = 0) -> Table:
- """
- Create a visual risk component table for the PDF report.
-
- Args:
- risk_level (int): The risk level (0-5), where higher values indicate higher risk.
- weight (int): The weight of the risk component.
- score (int): The calculated score. Defaults to 0.
-
- Returns:
- Table: A ReportLab Table object with colored cells representing risk, weight, and score.
- """
- risk_color = _get_color_for_risk_level(risk_level)
- weight_color = _get_color_for_weight(weight)
-
- data = [
- [
- "Risk Level:",
- str(risk_level),
- "Weight:",
- str(weight),
- "Score:",
- str(score),
- ]
- ]
-
- table = Table(
- data,
- colWidths=[
- 0.8 * inch,
- COL_WIDTH_SMALL,
- 0.6 * inch,
- COL_WIDTH_SMALL,
- 0.5 * inch,
- COL_WIDTH_SMALL,
- ],
- )
-
- table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (0, 0), COLOR_LIGHT_GRAY),
- ("BACKGROUND", (1, 0), (1, 0), risk_color),
- ("TEXTCOLOR", (1, 0), (1, 0), COLOR_WHITE),
- ("FONTNAME", (1, 0), (1, 0), "FiraCode"),
- ("BACKGROUND", (2, 0), (2, 0), COLOR_LIGHT_GRAY),
- ("BACKGROUND", (3, 0), (3, 0), weight_color),
- ("TEXTCOLOR", (3, 0), (3, 0), COLOR_WHITE),
- ("FONTNAME", (3, 0), (3, 0), "FiraCode"),
- ("BACKGROUND", (4, 0), (4, 0), COLOR_LIGHT_GRAY),
- ("BACKGROUND", (5, 0), (5, 0), COLOR_DARK_GRAY),
- ("TEXTCOLOR", (5, 0), (5, 0), COLOR_WHITE),
- ("FONTNAME", (5, 0), (5, 0), "FiraCode"),
- ("ALIGN", (0, 0), (-1, -1), "CENTER"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("FONTSIZE", (0, 0), (-1, -1), 10),
- ("GRID", (0, 0), (-1, -1), 0.5, colors.black),
- ("LEFTPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
- ("RIGHTPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
- ("TOPPADDING", (0, 0), (-1, -1), PADDING_LARGE),
- ("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_LARGE),
- ]
- )
- )
-
- return table
-
-
-def _create_status_component(status: str) -> Table:
- """
- Create a visual status component with colored background.
-
- Args:
- status (str): The status value (e.g., "PASS", "FAIL", "MANUAL").
-
- Returns:
- Table: A ReportLab Table object displaying the status with appropriate color coding.
- """
- status_upper = status.upper()
- if status_upper == "PASS":
- status_color = COLOR_SAFE
- elif status_upper == "FAIL":
- status_color = COLOR_HIGH_RISK
- else:
- status_color = COLOR_DARK_GRAY
-
- data = [["State:", status_upper]]
-
- table = Table(data, colWidths=[0.6 * inch, 0.8 * inch])
-
- table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (0, 0), COLOR_LIGHT_GRAY),
- ("FONTNAME", (0, 0), (0, 0), "PlusJakartaSans"),
- ("BACKGROUND", (1, 0), (1, 0), status_color),
- ("TEXTCOLOR", (1, 0), (1, 0), COLOR_WHITE),
- ("FONTNAME", (1, 0), (1, 0), "FiraCode"),
- ("ALIGN", (0, 0), (-1, -1), "CENTER"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("FONTSIZE", (0, 0), (-1, -1), 12),
- ("GRID", (0, 0), (-1, -1), 0.5, colors.black),
- ("LEFTPADDING", (0, 0), (-1, -1), PADDING_LARGE),
- ("RIGHTPADDING", (0, 0), (-1, -1), PADDING_LARGE),
- ("TOPPADDING", (0, 0), (-1, -1), PADDING_XLARGE),
- ("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_XLARGE),
- ]
- )
- )
-
- return table
-
-
-def _create_ens_nivel_badge(nivel: str) -> Table:
- """
- Create a visual badge for ENS requirement level (Nivel).
-
- Args:
- nivel (str): The level value (e.g., "alto", "medio", "bajo", "opcional").
-
- Returns:
- Table: A ReportLab Table object displaying the level with appropriate color coding.
- """
- nivel_color = _get_ens_nivel_color(nivel)
- data = [[f"Nivel: {nivel.upper()}"]]
-
- table = Table(data, colWidths=[1.4 * inch])
-
- table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (0, 0), nivel_color),
- ("TEXTCOLOR", (0, 0), (0, 0), COLOR_WHITE),
- ("FONTNAME", (0, 0), (0, 0), "FiraCode"),
- ("ALIGN", (0, 0), (-1, -1), "CENTER"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("FONTSIZE", (0, 0), (-1, -1), 11),
- ("GRID", (0, 0), (-1, -1), 0.5, colors.black),
- ("LEFTPADDING", (0, 0), (-1, -1), PADDING_LARGE),
- ("RIGHTPADDING", (0, 0), (-1, -1), PADDING_LARGE),
- ("TOPPADDING", (0, 0), (-1, -1), PADDING_LARGE),
- ("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_LARGE),
- ]
- )
- )
-
- return table
-
-
-def _create_ens_tipo_badge(tipo: str) -> Table:
- """
- Create a visual badge for ENS requirement type (Tipo).
-
- Args:
- tipo (str): The type value (e.g., "requisito", "refuerzo", "recomendacion", "medida").
-
- Returns:
- Table: A ReportLab Table object displaying the type with appropriate styling.
- """
- tipo_lower = tipo.lower()
- icon = TIPO_ICONS.get(tipo_lower, "")
-
- data = [[f"{icon} {tipo.capitalize()}"]]
-
- table = Table(data, colWidths=[1.8 * inch])
-
- table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (0, 0), COLOR_ENS_TIPO),
- ("TEXTCOLOR", (0, 0), (0, 0), COLOR_WHITE),
- ("FONTNAME", (0, 0), (0, 0), "PlusJakartaSans"),
- ("ALIGN", (0, 0), (-1, -1), "CENTER"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("FONTSIZE", (0, 0), (-1, -1), 11),
- ("GRID", (0, 0), (-1, -1), 0.5, colors.black),
- ("LEFTPADDING", (0, 0), (-1, -1), PADDING_LARGE),
- ("RIGHTPADDING", (0, 0), (-1, -1), PADDING_LARGE),
- ("TOPPADDING", (0, 0), (-1, -1), PADDING_LARGE),
- ("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_LARGE),
- ]
- )
- )
-
- return table
-
-
-def _create_ens_dimension_badges(dimensiones: list[str]) -> Table:
- """
- Create visual badges for ENS security dimensions.
-
- Args:
- dimensiones (list[str]): List of dimension names (e.g., ["trazabilidad", "autenticidad"]).
-
- Returns:
- Table: A ReportLab Table object with color-coded badges for each dimension.
- """
- badges = [
- DIMENSION_MAPPING[dimension.lower()]
- for dimension in dimensiones
- if dimension.lower() in DIMENSION_MAPPING
- ]
-
- if not badges:
- data = [["N/A"]]
- table = Table(data, colWidths=[1 * inch])
- table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (0, 0), COLOR_LIGHT_GRAY),
- ("ALIGN", (0, 0), (-1, -1), "CENTER"),
- ("FONTSIZE", (0, 0), (-1, -1), 10),
- ]
- )
- )
- return table
-
- data = [[badge[0] for badge in badges]]
- col_widths = [COL_WIDTH_SMALL] * len(badges)
-
- table = Table(data, colWidths=col_widths)
-
- styles = [
- ("ALIGN", (0, 0), (-1, -1), "CENTER"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("FONTNAME", (0, 0), (-1, -1), "FiraCode"),
- ("FONTSIZE", (0, 0), (-1, -1), 10),
- ("TEXTCOLOR", (0, 0), (-1, -1), COLOR_WHITE),
- ("GRID", (0, 0), (-1, -1), 0.5, colors.black),
- ("LEFTPADDING", (0, 0), (-1, -1), PADDING_SMALL),
- ("RIGHTPADDING", (0, 0), (-1, -1), PADDING_SMALL),
- ("TOPPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
- ("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
- ]
-
- for idx, (_, badge_color) in enumerate(badges):
- styles.append(("BACKGROUND", (idx, 0), (idx, 0), badge_color))
-
- table.setStyle(TableStyle(styles))
-
- return table
-
-
-def _create_section_score_chart(
- requirements_list: list[dict], attributes_by_requirement_id: dict
-) -> io.BytesIO:
- """
- Create a bar chart showing compliance score by section using ThreatScore formula.
-
- Args:
- requirements_list (list[dict]): List of requirement dictionaries with status and findings data.
- attributes_by_requirement_id (dict): Mapping of requirement IDs to their attributes including risk level and weight.
-
- Returns:
- io.BytesIO: A BytesIO buffer containing the chart image in PNG format.
- """
- # Initialize all expected sections with default values
- sections_data = {
- section: {
- "numerator": 0,
- "denominator": 0,
- "has_findings": False,
- }
- for section in THREATSCORE_SECTIONS
- }
-
- # Collect data from requirements
- for requirement in requirements_list:
- requirement_id = requirement["id"]
- requirement_attributes = attributes_by_requirement_id.get(requirement_id, {})
-
- metadata = requirement_attributes.get("attributes", {}).get(
- "req_attributes", []
- )
- if not metadata:
- continue
-
- m = metadata[0]
- section = _safe_getattr(m, "Section", "Unknown")
-
- # Add section if not in expected list (for flexibility)
- if section not in sections_data:
- sections_data[section] = {
- "numerator": 0,
- "denominator": 0,
- "has_findings": False,
- }
-
- # Get findings data
- passed_findings = requirement["attributes"].get("passed_findings", 0)
- total_findings = requirement["attributes"].get("total_findings", 0)
-
- if total_findings > 0:
- sections_data[section]["has_findings"] = True
- risk_level = _safe_getattr(m, "LevelOfRisk", 0)
- weight = _safe_getattr(m, "Weight", 0)
-
- # Calculate using ThreatScore formula from UI
- rate_i = passed_findings / total_findings
- rfac_i = 1 + 0.25 * risk_level
-
- sections_data[section]["numerator"] += (
- rate_i * total_findings * weight * rfac_i
- )
- sections_data[section]["denominator"] += total_findings * weight * rfac_i
-
- # Calculate percentages
- section_names = []
- compliance_percentages = []
-
- for section, data in sections_data.items():
- if data["has_findings"] and data["denominator"] > 0:
- compliance_percentage = (data["numerator"] / data["denominator"]) * 100
- else:
- compliance_percentage = 100 # No findings = 100% (PASS)
-
- section_names.append(section)
- compliance_percentages.append(compliance_percentage)
-
- # Sort alphabetically by section name
- sorted_data = sorted(zip(section_names, compliance_percentages), key=lambda x: x[0])
- if not sorted_data:
- section_names, compliance_percentages = [], []
- else:
- section_names, compliance_percentages = zip(*sorted_data)
-
- # Generate chart
- fig, ax = plt.subplots(figsize=(12, 8))
-
- # Use helper function for color selection
- colors_list = [_get_chart_color_for_percentage(p) for p in compliance_percentages]
-
- bars = ax.bar(section_names, compliance_percentages, color=colors_list)
-
- ax.set_ylabel("Compliance Score (%)", fontsize=12)
- ax.set_xlabel("Section", fontsize=12)
- ax.set_ylim(0, 100)
-
- for bar, percentage in zip(bars, compliance_percentages):
- height = bar.get_height()
- ax.text(
- bar.get_x() + bar.get_width() / 2.0,
- height + 1,
- f"{percentage:.1f}%",
- ha="center",
- va="bottom",
- fontweight="bold",
- )
-
- plt.xticks(rotation=45, ha="right")
- ax.grid(True, alpha=0.3, axis="y")
- plt.tight_layout()
-
- buffer = io.BytesIO()
- try:
- plt.savefig(buffer, format="png", dpi=300, bbox_inches="tight")
- buffer.seek(0)
- finally:
- plt.close(fig)
-
- return buffer
-
-
-def _add_pdf_footer(
- canvas_obj: canvas.Canvas, doc: SimpleDocTemplate, compliance_name: str
-) -> None:
- """
- Add footer with page number and branding to each page of the PDF.
-
- Args:
- canvas_obj (canvas.Canvas): The ReportLab canvas object for drawing.
- doc (SimpleDocTemplate): The document template containing page information.
- """
- canvas_obj.saveState()
- width, height = doc.pagesize
- page_num_text = (
- f"{'Página' if 'ens' in compliance_name.lower() else 'Page'} {doc.page}"
- )
- canvas_obj.setFont("PlusJakartaSans", 9)
- canvas_obj.setFillColorRGB(0.4, 0.4, 0.4)
- canvas_obj.drawString(30, 20, page_num_text)
- powered_text = "Powered by Prowler"
- text_width = canvas_obj.stringWidth(powered_text, "PlusJakartaSans", 9)
- canvas_obj.drawString(width - text_width - 30, 20, powered_text)
- canvas_obj.restoreState()
-
-
-def _create_marco_category_chart(
- requirements_list: list[dict], attributes_by_requirement_id: dict
-) -> io.BytesIO:
- """
- Create a bar chart showing compliance percentage by Marco (Section) and Categoría.
-
- Args:
- requirements_list (list[dict]): List of requirement dictionaries with status and findings data.
- attributes_by_requirement_id (dict): Mapping of requirement IDs to their attributes.
-
- Returns:
- io.BytesIO: A BytesIO buffer containing the chart image in PNG format.
- """
- # Collect data by Marco and Categoría
- marco_categoria_data = defaultdict(lambda: {"passed": 0, "total": 0})
-
- for requirement in requirements_list:
- requirement_id = requirement["id"]
- requirement_attributes = attributes_by_requirement_id.get(requirement_id, {})
- requirement_status = requirement["attributes"].get(
- "status", StatusChoices.MANUAL
- )
-
- metadata = requirement_attributes.get("attributes", {}).get(
- "req_attributes", []
- )
- if not metadata:
- continue
-
- m = metadata[0]
- marco = _safe_getattr(m, "Marco")
- categoria = _safe_getattr(m, "Categoria")
-
- key = f"{marco} - {categoria}"
- marco_categoria_data[key]["total"] += 1
- if requirement_status == StatusChoices.PASS:
- marco_categoria_data[key]["passed"] += 1
-
- # Calculate percentages
- categories = []
- percentages = []
-
- for category, data in sorted(marco_categoria_data.items()):
- percentage = (data["passed"] / data["total"] * 100) if data["total"] > 0 else 0
- categories.append(category)
- percentages.append(percentage)
-
- if not categories:
- # Return empty chart if no data
- fig, ax = plt.subplots(figsize=(12, 6))
- ax.text(0.5, 0.5, "No data available", ha="center", va="center", fontsize=14)
- ax.set_xlim(0, 1)
- ax.set_ylim(0, 1)
- ax.axis("off")
- buffer = io.BytesIO()
- try:
- plt.savefig(buffer, format="png", dpi=300, bbox_inches="tight")
- buffer.seek(0)
- finally:
- plt.close(fig)
- return buffer
-
- # Create horizontal bar chart
- fig, ax = plt.subplots(figsize=(12, max(8, len(categories) * 0.4)))
-
- # Use helper function for color selection
- colors_list = [_get_chart_color_for_percentage(p) for p in percentages]
-
- y_pos = range(len(categories))
- bars = ax.barh(y_pos, percentages, color=colors_list)
-
- ax.set_yticks(y_pos)
- ax.set_yticklabels(categories, fontsize=16)
- ax.set_xlabel("Porcentaje de Cumplimiento (%)", fontsize=14)
- ax.set_xlim(0, 100)
-
- # Add percentage labels
- for bar, percentage in zip(bars, percentages):
- width = bar.get_width()
- ax.text(
- width + 1,
- bar.get_y() + bar.get_height() / 2.0,
- f"{percentage:.1f}%",
- ha="left",
- va="center",
- fontweight="bold",
- fontsize=10,
- )
-
- ax.grid(True, alpha=0.3, axis="x")
- plt.tight_layout()
-
- buffer = io.BytesIO()
- try:
- # Render canvas and save explicitly from the figure to avoid state bleed
- fig.canvas.draw()
- fig.savefig(buffer, format="png", dpi=300, bbox_inches="tight")
- buffer.seek(0, io.SEEK_END)
- finally:
- plt.close(fig)
-
- return buffer
-
-
-def _create_dimensions_radar_chart(
- requirements_list: list[dict], attributes_by_requirement_id: dict
-) -> io.BytesIO:
- """
- Create a radar/spider chart showing compliance percentage by security dimension.
-
- Args:
- requirements_list (list[dict]): List of requirement dictionaries with status and findings data.
- attributes_by_requirement_id (dict): Mapping of requirement IDs to their attributes.
-
- Returns:
- io.BytesIO: A BytesIO buffer containing the chart image in PNG format.
- """
- dimension_data = {key: {"passed": 0, "total": 0} for key in DIMENSION_KEYS}
-
- # Collect data for each dimension
- for requirement in requirements_list:
- requirement_id = requirement["id"]
- requirement_attributes = attributes_by_requirement_id.get(requirement_id, {})
- requirement_status = requirement["attributes"].get(
- "status", StatusChoices.MANUAL
- )
-
- metadata = requirement_attributes.get("attributes", {}).get(
- "req_attributes", []
- )
- if not metadata:
- continue
-
- m = metadata[0]
- dimensiones_attr = getattr(m, "Dimensiones", None)
- dimensiones = dimensiones_attr or []
- if isinstance(dimensiones, str):
- dimensiones = [dimensiones]
-
- for dimension in dimensiones:
- dimension_lower = dimension.lower()
- if dimension_lower in dimension_data:
- dimension_data[dimension_lower]["total"] += 1
- if requirement_status == StatusChoices.PASS:
- dimension_data[dimension_lower]["passed"] += 1
-
- # Calculate percentages
- percentages = [
- (
- (dimension_data[key]["passed"] / dimension_data[key]["total"] * 100)
- if dimension_data[key]["total"] > 0
- else 100
- ) # No requirements = 100% (no failures)
- for key in DIMENSION_KEYS
- ]
-
- # Create radar chart
- num_dims = len(DIMENSION_NAMES)
- angles = [n / float(num_dims) * 2 * 3.14159 for n in range(num_dims)]
- percentages += percentages[:1]
- angles += angles[:1]
-
- fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(projection="polar"))
-
- ax.plot(angles, percentages, "o-", linewidth=2, color=CHART_COLOR_BLUE)
- ax.fill(angles, percentages, alpha=0.25, color=CHART_COLOR_BLUE)
- ax.set_xticks(angles[:-1])
- ax.set_xticklabels(DIMENSION_NAMES, fontsize=14)
- ax.set_ylim(0, 100)
- ax.set_yticks([20, 40, 60, 80, 100])
- ax.set_yticklabels(["20%", "40%", "60%", "80%", "100%"], fontsize=12)
- ax.grid(True, alpha=0.3)
-
- plt.tight_layout()
-
- buffer = io.BytesIO()
- try:
- fig.canvas.draw()
- fig.savefig(buffer, format="png", dpi=300, bbox_inches="tight")
- buffer.seek(0, io.SEEK_END)
- finally:
- plt.close(fig)
-
- return buffer
-
def generate_threatscore_report(
tenant_id: str,
@@ -1027,911 +29,39 @@ def generate_threatscore_report(
provider_id: str,
only_failed: bool = True,
min_risk_level: int = 4,
- provider_obj=None,
+ provider_obj: Provider | None = None,
requirement_statistics: dict[str, dict[str, int]] | None = None,
findings_cache: dict[str, list[FindingOutput]] | None = None,
) -> None:
"""
Generate a PDF compliance report based on Prowler ThreatScore framework.
- This function creates a comprehensive PDF report containing:
- - Compliance overview and metadata
- - Section-by-section compliance scores with charts
- - Overall ThreatScore calculation
- - Critical failed requirements
- - Detailed findings for each requirement
-
Args:
- tenant_id (str): The tenant ID for Row-Level Security context.
- scan_id (str): ID of the scan executed by Prowler.
- compliance_id (str): ID of the compliance framework (e.g., "prowler_threatscore_aws").
- output_path (str): Output PDF file path (e.g., "/tmp/threatscore_report.pdf").
- provider_id (str): Provider ID for the scan.
- only_failed (bool): If True, only requirements with status "FAIL" will be included
- in the detailed requirements section. Defaults to True.
- min_risk_level (int): Minimum risk level for critical failed requirements. Defaults to 4.
- provider_obj (Provider, optional): Pre-fetched Provider object to avoid duplicate queries.
- If None, the provider will be fetched from the database.
- requirement_statistics (dict, optional): Pre-aggregated requirement statistics to avoid
- duplicate database aggregations. If None, statistics will be aggregated from the database.
- findings_cache (dict, optional): Cache of already loaded findings to avoid duplicate queries.
- If None, findings will be loaded from the database. When provided, reduces database
- queries and transformation overhead when generating multiple reports.
-
- Raises:
- Exception: If any error occurs during PDF generation, it will be logged and re-raised.
+ tenant_id: The tenant ID for Row-Level Security context.
+ scan_id: ID of the scan executed by Prowler.
+ compliance_id: ID of the compliance framework (e.g., "prowler_threatscore_aws").
+ output_path: Output PDF file path.
+ provider_id: Provider ID for the scan.
+ only_failed: If True, only include failed requirements in detailed section.
+ min_risk_level: Minimum risk level for critical failed requirements.
+ provider_obj: Pre-fetched Provider object to avoid duplicate queries.
+ requirement_statistics: Pre-aggregated requirement statistics.
+ findings_cache: Cache of already loaded findings to avoid duplicate queries.
"""
- logger.info(
- f"Generating the report for the scan {scan_id} with provider {provider_id}"
+ generator = ThreatScoreReportGenerator(FRAMEWORK_REGISTRY["prowler_threatscore"])
+ generator._min_risk_level = min_risk_level
+
+ generator.generate(
+ tenant_id=tenant_id,
+ scan_id=scan_id,
+ compliance_id=compliance_id,
+ output_path=output_path,
+ provider_id=provider_id,
+ provider_obj=provider_obj,
+ requirement_statistics=requirement_statistics,
+ findings_cache=findings_cache,
+ only_failed=only_failed,
)
- try:
- # Get PDF styles
- pdf_styles = _create_pdf_styles()
- title_style = pdf_styles["title"]
- h1 = pdf_styles["h1"]
- h2 = pdf_styles["h2"]
- h3 = pdf_styles["h3"]
- normal = pdf_styles["normal"]
- normal_center = pdf_styles["normal_center"]
-
- # Get compliance and provider information
- with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
- # Use provided provider_obj or fetch from database
- if provider_obj is None:
- provider_obj = Provider.objects.get(id=provider_id)
-
- prowler_provider = initialize_prowler_provider(provider_obj)
- provider_type = provider_obj.provider
-
- frameworks_bulk = Compliance.get_bulk(provider_type)
- compliance_obj = frameworks_bulk[compliance_id]
- compliance_framework = _safe_getattr(compliance_obj, "Framework")
- compliance_version = _safe_getattr(compliance_obj, "Version")
- compliance_name = _safe_getattr(compliance_obj, "Name")
- compliance_description = _safe_getattr(compliance_obj, "Description", "")
-
- # Aggregate requirement statistics from database (memory-efficient)
- # Use provided requirement_statistics or fetch from database
- if requirement_statistics is None:
- logger.info(f"Aggregating requirement statistics for scan {scan_id}")
- requirement_statistics_by_check_id = (
- _aggregate_requirement_statistics_from_database(tenant_id, scan_id)
- )
- else:
- logger.info(
- f"Reusing pre-aggregated requirement statistics for scan {scan_id}"
- )
- requirement_statistics_by_check_id = requirement_statistics
-
- # Calculate requirements data using aggregated statistics
- attributes_by_requirement_id, requirements_list = (
- _calculate_requirements_data_from_statistics(
- compliance_obj, requirement_statistics_by_check_id
- )
- )
-
- # Initialize PDF document
- doc = SimpleDocTemplate(
- output_path,
- pagesize=letter,
- title=f"Prowler ThreatScore Report - {compliance_framework}",
- author="Prowler",
- subject=f"Compliance Report for {compliance_framework}",
- creator="Prowler Engineering Team",
- keywords=f"compliance,{compliance_framework},security,framework,prowler",
- )
-
- elements = []
-
- # Add logo
- img_path = os.path.join(
- os.path.dirname(__file__), "../assets/img/prowler_logo.png"
- )
- logo = Image(
- img_path,
- width=5 * inch,
- height=1 * inch,
- )
- elements.append(logo)
-
- elements.append(Spacer(1, 0.5 * inch))
- elements.append(Paragraph("Prowler ThreatScore Report", title_style))
- elements.append(Spacer(1, 0.5 * inch))
-
- # Add compliance information table
- provider_alias = provider_obj.alias or "N/A"
- info_data = [
- ["Framework:", compliance_framework],
- ["ID:", compliance_id],
- ["Name:", Paragraph(compliance_name, normal_center)],
- ["Version:", compliance_version],
- ["Provider:", provider_type.upper()],
- ["Account ID:", provider_obj.uid],
- ["Alias:", provider_alias],
- ["Scan ID:", scan_id],
- ["Description:", Paragraph(compliance_description, normal_center)],
- ]
- info_table = Table(info_data, colWidths=[COL_WIDTH_XLARGE, 4 * inch])
- info_table.setStyle(_create_info_table_style())
-
- elements.append(info_table)
- elements.append(PageBreak())
-
- # Add compliance score chart
- elements.append(Paragraph("Compliance Score by Sections", h1))
- elements.append(Spacer(1, 0.2 * inch))
-
- chart_buffer = _create_section_score_chart(
- requirements_list, attributes_by_requirement_id
- )
- chart_image = Image(chart_buffer, width=7 * inch, height=5.5 * inch)
- elements.append(chart_image)
-
- # Calculate overall ThreatScore using the same formula as the UI
- numerator = 0
- denominator = 0
- has_findings = False
-
- for requirement in requirements_list:
- requirement_id = requirement["id"]
- requirement_attributes = attributes_by_requirement_id.get(
- requirement_id, {}
- )
-
- # Get findings data
- passed_findings = requirement["attributes"].get("passed_findings", 0)
- total_findings = requirement["attributes"].get("total_findings", 0)
-
- # Skip if no findings (avoid division by zero)
- if total_findings == 0:
- continue
-
- has_findings = True
- metadata = requirement_attributes.get("attributes", {}).get(
- "req_attributes", []
- )
- if metadata and len(metadata) > 0:
- m = metadata[0]
- risk_level = getattr(m, "LevelOfRisk", 0)
- weight = getattr(m, "Weight", 0)
-
- # Calculate using ThreatScore formula from UI
- rate_i = passed_findings / total_findings
- rfac_i = 1 + 0.25 * risk_level
-
- numerator += rate_i * total_findings * weight * rfac_i
- denominator += total_findings * weight * rfac_i
-
- # Calculate ThreatScore (percentualScore)
- # If no findings exist, consider it 100% (PASS)
- if not has_findings:
- overall_compliance = 100
- elif denominator > 0:
- overall_compliance = (numerator / denominator) * 100
- else:
- overall_compliance = 0
-
- elements.append(Spacer(1, 0.3 * inch))
-
- summary_data = [
- ["ThreatScore:", f"{overall_compliance:.2f}%"],
- ]
-
- compliance_color = _get_color_for_compliance(overall_compliance)
-
- summary_table = Table(summary_data, colWidths=[2.5 * inch, 2 * inch])
- summary_table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (0, 0), colors.Color(0.1, 0.3, 0.5)),
- ("TEXTCOLOR", (0, 0), (0, 0), colors.white),
- ("FONTNAME", (0, 0), (0, 0), "FiraCode"),
- ("FONTSIZE", (0, 0), (0, 0), 12),
- ("BACKGROUND", (1, 0), (1, 0), compliance_color),
- ("TEXTCOLOR", (1, 0), (1, 0), colors.white),
- ("FONTNAME", (1, 0), (1, 0), "FiraCode"),
- ("FONTSIZE", (1, 0), (1, 0), 16),
- ("ALIGN", (0, 0), (-1, -1), "CENTER"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("GRID", (0, 0), (-1, -1), 1.5, colors.Color(0.5, 0.6, 0.7)),
- ("LEFTPADDING", (0, 0), (-1, -1), 12),
- ("RIGHTPADDING", (0, 0), (-1, -1), 12),
- ("TOPPADDING", (0, 0), (-1, -1), 10),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 10),
- ]
- )
- )
-
- elements.append(summary_table)
- elements.append(PageBreak())
-
- # Add requirements index
- elements.append(Paragraph("Requirements Index", h1))
-
- sections = {}
- for (
- requirement_id,
- requirement_attributes,
- ) in attributes_by_requirement_id.items():
- meta = requirement_attributes["attributes"]["req_attributes"][0]
- section = getattr(meta, "Section", "N/A")
- subsection = getattr(meta, "SubSection", "N/A")
- title = getattr(meta, "Title", "N/A")
-
- if section not in sections:
- sections[section] = {}
- if subsection not in sections[section]:
- sections[section][subsection] = []
-
- sections[section][subsection].append({"id": requirement_id, "title": title})
-
- section_num = 1
- for section_name, subsections in sections.items():
- elements.append(Paragraph(f"{section_num}. {section_name}", h2))
-
- subsection_num = 1
- for subsection_name, requirements in subsections.items():
- elements.append(Paragraph(f"{subsection_name}", h3))
-
- req_num = 1
- for req in requirements:
- elements.append(Paragraph(f"{req['id']} - {req['title']}", normal))
- req_num += 1
-
- subsection_num += 1
-
- section_num += 1
- elements.append(Spacer(1, 0.1 * inch))
-
- elements.append(PageBreak())
-
- # Add critical failed requirements section
- elements.append(Paragraph("Top Requirements by Level of Risk", h1))
- elements.append(Spacer(1, 0.1 * inch))
- elements.append(
- Paragraph(
- f"Critical Failed Requirements (Risk Level ≥ {min_risk_level})", h2
- )
- )
- elements.append(Spacer(1, 0.2 * inch))
-
- critical_failed_requirements = []
- for requirement in requirements_list:
- requirement_status = requirement["attributes"]["status"]
- if requirement_status == StatusChoices.FAIL:
- requirement_id = requirement["id"]
- metadata = (
- attributes_by_requirement_id.get(requirement_id, {})
- .get("attributes", {})
- .get("req_attributes", [{}])[0]
- )
- if metadata:
- risk_level = getattr(metadata, "LevelOfRisk", 0)
- weight = getattr(metadata, "Weight", 0)
-
- if risk_level >= min_risk_level:
- critical_failed_requirements.append(
- {
- "requirement": requirement,
- "attributes": attributes_by_requirement_id[
- requirement_id
- ],
- "risk_level": risk_level,
- "weight": weight,
- "metadata": metadata,
- }
- )
-
- critical_failed_requirements.sort(
- key=lambda x: (x["risk_level"], x["weight"]), reverse=True
- )
-
- if not critical_failed_requirements:
- elements.append(
- Paragraph(
- "✅ No critical failed requirements found. Great job!", normal
- )
- )
- else:
- elements.append(
- Paragraph(
- f"Found {len(critical_failed_requirements)} critical failed requirements that require immediate attention:",
- normal,
- )
- )
- elements.append(Spacer(1, 0.5 * inch))
-
- table_data = [["Risk", "Weight", "Requirement ID", "Title", "Section"]]
-
- for idx, critical_failed_requirement in enumerate(
- critical_failed_requirements
- ):
- requirement_id = critical_failed_requirement["requirement"]["id"]
- risk_level = critical_failed_requirement["risk_level"]
- weight = critical_failed_requirement["weight"]
- title = getattr(critical_failed_requirement["metadata"], "Title", "N/A")
- section = getattr(
- critical_failed_requirement["metadata"], "Section", "N/A"
- )
-
- if len(title) > 50:
- title = title[:47] + "..."
-
- table_data.append(
- [str(risk_level), str(weight), requirement_id, title, section]
- )
-
- critical_table = Table(
- table_data,
- colWidths=[0.7 * inch, 0.9 * inch, 1.3 * inch, 3.1 * inch, 1.5 * inch],
- )
-
- critical_table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (-1, 0), colors.Color(0.8, 0.2, 0.2)),
- ("TEXTCOLOR", (0, 0), (-1, 0), colors.white),
- ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
- ("FONTSIZE", (0, 0), (-1, 0), 10),
- ("BACKGROUND", (0, 1), (0, -1), colors.Color(0.8, 0.2, 0.2)),
- ("TEXTCOLOR", (0, 1), (0, -1), colors.white),
- ("FONTNAME", (0, 1), (0, -1), "FiraCode"),
- ("ALIGN", (0, 1), (0, -1), "CENTER"),
- ("FONTSIZE", (0, 1), (0, -1), 12),
- ("ALIGN", (1, 1), (1, -1), "CENTER"),
- ("FONTNAME", (1, 1), (1, -1), "FiraCode"),
- ("FONTNAME", (2, 1), (2, -1), "FiraCode"),
- ("FONTSIZE", (2, 1), (2, -1), 9),
- ("FONTNAME", (3, 1), (-1, -1), "PlusJakartaSans"),
- ("FONTSIZE", (3, 1), (-1, -1), 8),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("GRID", (0, 0), (-1, -1), 1, colors.Color(0.7, 0.7, 0.7)),
- ("LEFTPADDING", (0, 0), (-1, -1), 6),
- ("RIGHTPADDING", (0, 0), (-1, -1), 6),
- ("TOPPADDING", (0, 0), (-1, -1), 8),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 8),
- (
- "BACKGROUND",
- (1, 1),
- (-1, -1),
- colors.Color(0.98, 0.98, 0.98),
- ),
- ]
- )
- )
-
- for idx, critical_failed_requirement in enumerate(
- critical_failed_requirements
- ):
- row_idx = idx + 1
- weight = critical_failed_requirement["weight"]
-
- if weight >= 150:
- weight_color = colors.Color(0.8, 0.2, 0.2)
- elif weight >= 100:
- weight_color = colors.Color(0.9, 0.6, 0.2)
- else:
- weight_color = colors.Color(0.9, 0.9, 0.2)
-
- critical_table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (1, row_idx), (1, row_idx), weight_color),
- ("TEXTCOLOR", (1, row_idx), (1, row_idx), colors.white),
- ]
- )
- )
-
- elements.append(critical_table)
- elements.append(Spacer(1, 0.2 * inch))
-
- # Get styles for warning
- styles = getSampleStyleSheet()
- warning_text = """
- IMMEDIATE ACTION REQUIRED:
- These requirements have the highest risk levels and have failed compliance checks.
- Please prioritize addressing these issues to improve your security posture.
- """
-
- warning_style = ParagraphStyle(
- "Warning",
- parent=styles["Normal"],
- fontSize=11,
- textColor=colors.Color(0.8, 0.2, 0.2),
- spaceBefore=10,
- spaceAfter=10,
- leftIndent=20,
- rightIndent=20,
- fontName="PlusJakartaSans",
- backColor=colors.Color(1.0, 0.95, 0.95),
- borderWidth=2,
- borderColor=colors.Color(0.8, 0.2, 0.2),
- borderPadding=10,
- )
-
- elements.append(Paragraph(warning_text, warning_style))
-
- elements.append(PageBreak())
-
- # Add detailed requirements section
- def get_weight_for_requirement(requirement_dict):
- requirement_id = requirement_dict["id"]
- requirement_attributes = attributes_by_requirement_id.get(
- requirement_id, {}
- )
- metadata = requirement_attributes.get("attributes", {}).get(
- "req_attributes", []
- )
- if metadata:
- return getattr(metadata[0], "Weight", 0)
- return 0
-
- sorted_requirements = sorted(
- requirements_list, key=get_weight_for_requirement, reverse=True
- )
-
- if only_failed:
- sorted_requirements = [
- requirement
- for requirement in sorted_requirements
- if requirement["attributes"]["status"] == StatusChoices.FAIL
- ]
-
- # Collect all check IDs for requirements that will be displayed
- # This allows us to load only the findings we actually need (memory optimization)
- check_ids_to_load = []
- for requirement in sorted_requirements:
- requirement_id = requirement["id"]
- requirement_attributes = attributes_by_requirement_id.get(
- requirement_id, {}
- )
- check_ids = requirement_attributes.get("attributes", {}).get("checks", [])
- check_ids_to_load.extend(check_ids)
-
- # Load findings on-demand only for the checks that will be displayed
- logger.info(
- f"Loading findings on-demand for {len(sorted_requirements)} requirements"
- )
- findings_by_check_id = _load_findings_for_requirement_checks(
- tenant_id, scan_id, check_ids_to_load, prowler_provider, findings_cache
- )
-
- for requirement in sorted_requirements:
- requirement_id = requirement["id"]
- requirement_attributes = attributes_by_requirement_id.get(
- requirement_id, {}
- )
- requirement_description = requirement["attributes"]["description"]
- requirement_status = requirement["attributes"]["status"]
-
- elements.append(
- Paragraph(
- f"{requirement_id}: {requirement_attributes.get('description', requirement_description)}",
- h1,
- )
- )
-
- status_component = _create_status_component(requirement_status)
- elements.append(status_component)
- elements.append(Spacer(1, 0.1 * inch))
-
- metadata = requirement_attributes.get("attributes", {}).get(
- "req_attributes", []
- )
- if metadata and len(metadata) > 0:
- m = metadata[0]
- elements.append(Paragraph("Title: ", h3))
- elements.append(Paragraph(f"{getattr(m, 'Title', 'N/A')}", normal))
- elements.append(Paragraph("Section: ", h3))
- elements.append(Paragraph(f"{getattr(m, 'Section', 'N/A')}", normal))
- elements.append(Paragraph("SubSection: ", h3))
- elements.append(Paragraph(f"{getattr(m, 'SubSection', 'N/A')}", normal))
- elements.append(Paragraph("Description: ", h3))
- elements.append(
- Paragraph(f"{getattr(m, 'AttributeDescription', 'N/A')}", normal)
- )
- elements.append(Paragraph("Additional Information: ", h3))
- elements.append(
- Paragraph(f"{getattr(m, 'AdditionalInformation', 'N/A')}", normal)
- )
- elements.append(Spacer(1, 0.1 * inch))
-
- risk_level = getattr(m, "LevelOfRisk", 0)
- weight = getattr(m, "Weight", 0)
-
- if requirement_status == StatusChoices.PASS:
- score = risk_level * weight
- else:
- score = 0
-
- risk_component = _create_risk_component(risk_level, weight, score)
- elements.append(risk_component)
- elements.append(Spacer(1, 0.1 * inch))
-
- # Get findings for this requirement's checks (loaded on-demand earlier)
- requirement_check_ids = requirement_attributes.get("attributes", {}).get(
- "checks", []
- )
- for check_id in requirement_check_ids:
- elements.append(Paragraph(f"Check: {check_id}", h2))
- elements.append(Spacer(1, 0.1 * inch))
-
- # Get findings for this check (already loaded on-demand)
- check_findings = findings_by_check_id.get(check_id, [])
-
- if not check_findings:
- elements.append(
- Paragraph("- No information for this finding currently", normal)
- )
- else:
- findings_table_data = [
- [
- "Finding",
- "Resource name",
- "Severity",
- "Status",
- "Region",
- ]
- ]
- for finding_output in check_findings:
- check_metadata = getattr(finding_output, "metadata", {})
- finding_title = getattr(
- check_metadata,
- "CheckTitle",
- getattr(finding_output, "check_id", ""),
- )
- resource_name = getattr(finding_output, "resource_name", "")
- if not resource_name:
- resource_name = getattr(finding_output, "resource_uid", "")
- severity = getattr(check_metadata, "Severity", "").capitalize()
- finding_status = getattr(finding_output, "status", "").upper()
- region = getattr(finding_output, "region", "global")
-
- findings_table_data.append(
- [
- Paragraph(finding_title, normal_center),
- Paragraph(resource_name, normal_center),
- Paragraph(severity, normal_center),
- Paragraph(finding_status, normal_center),
- Paragraph(region, normal_center),
- ]
- )
- findings_table = Table(
- findings_table_data,
- colWidths=[
- 2.5 * inch,
- 3 * inch,
- 0.9 * inch,
- 0.9 * inch,
- 0.9 * inch,
- ],
- )
- findings_table.setStyle(
- TableStyle(
- [
- (
- "BACKGROUND",
- (0, 0),
- (-1, 0),
- colors.Color(0.2, 0.4, 0.6),
- ),
- ("TEXTCOLOR", (0, 0), (-1, 0), colors.white),
- ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
- ("ALIGN", (0, 0), (0, 0), "CENTER"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("FONTSIZE", (0, 0), (-1, -1), 9),
- (
- "GRID",
- (0, 0),
- (-1, -1),
- 0.1,
- colors.Color(0.7, 0.8, 0.9),
- ),
- ("LEFTPADDING", (0, 0), (0, 0), 0),
- ("RIGHTPADDING", (0, 0), (0, 0), 0),
- ("TOPPADDING", (0, 0), (-1, -1), 4),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 4),
- ]
- )
- )
- elements.append(findings_table)
- elements.append(Spacer(1, 0.1 * inch))
-
- elements.append(PageBreak())
-
- # Build the PDF
- doc.build(
- elements,
- onFirstPage=partial(_add_pdf_footer, compliance_name=compliance_name),
- onLaterPages=partial(_add_pdf_footer, compliance_name=compliance_name),
- )
- except Exception as e:
- tb_lineno = e.__traceback__.tb_lineno if e.__traceback__ else "unknown"
- logger.info(f"Error building the document, line {tb_lineno} -- {e}")
- raise e
-
-
-def _create_nis2_section_chart(
- requirements_list: list[dict], attributes_by_requirement_id: dict
-) -> io.BytesIO:
- """
- Create a horizontal bar chart showing compliance percentage by NIS2 section.
-
- Args:
- requirements_list (list[dict]): List of requirement dictionaries with status and findings data.
- attributes_by_requirement_id (dict): Mapping of requirement IDs to their attributes.
-
- Returns:
- io.BytesIO: A BytesIO buffer containing the chart image in PNG format.
- """
- # Initialize sections data
- sections_data = defaultdict(lambda: {"passed": 0, "total": 0})
-
- # Collect data from requirements
- for requirement in requirements_list:
- requirement_id = requirement["id"]
- requirement_attributes = attributes_by_requirement_id.get(requirement_id, {})
-
- metadata = requirement_attributes.get("attributes", {}).get(
- "req_attributes", []
- )
- if not metadata:
- continue
-
- m = metadata[0]
- section_full = _safe_getattr(m, "Section", "")
-
- # Extract section number (e.g., "1" from "1 POLICY ON...")
- section_number = section_full.split()[0] if section_full else "Unknown"
-
- # Get findings data
- passed_findings = requirement["attributes"].get("passed_findings", 0)
- total_findings = requirement["attributes"].get("total_findings", 0)
-
- if total_findings > 0:
- sections_data[section_number]["passed"] += passed_findings
- sections_data[section_number]["total"] += total_findings
-
- # Calculate percentages and prepare data for chart
- section_names = []
- compliance_percentages = []
-
- # Get section titles for display
- section_titles = {
- "1": "1. Policy on Security",
- "2": "2. Risk Management",
- "3": "3. Incident Handling",
- "4": "4. Business Continuity",
- "5": "5. Supply Chain",
- "6": "6. Acquisition & Dev",
- "7": "7. Effectiveness",
- "9": "9. Cryptography",
- "11": "11. Access Control",
- "12": "12. Asset Management",
- }
-
- # Sort by section number
- for section_num in sorted(
- sections_data.keys(), key=lambda x: int(x) if x.isdigit() else 999
- ):
- data = sections_data[section_num]
- if data["total"] > 0:
- compliance_percentage = (data["passed"] / data["total"]) * 100
- else:
- compliance_percentage = 100 # No findings = 100% (PASS)
-
- section_title = section_titles.get(section_num, f"{section_num}. Unknown")
- section_names.append(section_title)
- compliance_percentages.append(compliance_percentage)
-
- # Generate horizontal bar chart
- fig, ax = plt.subplots(figsize=(10, 8))
-
- # Use color helper for compliance percentage
- colors_list = [_get_chart_color_for_percentage(p) for p in compliance_percentages]
-
- bars = ax.barh(section_names, compliance_percentages, color=colors_list)
-
- ax.set_xlabel("Compliance (%)", fontsize=12)
- ax.set_xlim(0, 100)
-
- # Add percentage labels
- for bar, percentage in zip(bars, compliance_percentages):
- width = bar.get_width()
- ax.text(
- width + 1,
- bar.get_y() + bar.get_height() / 2.0,
- f"{percentage:.1f}%",
- ha="left",
- va="center",
- fontweight="bold",
- )
-
- ax.grid(True, alpha=0.3, axis="x")
- plt.tight_layout()
-
- buffer = io.BytesIO()
- try:
- fig.canvas.draw()
- fig.savefig(buffer, format="png", dpi=300, bbox_inches="tight")
- buffer.seek(0, io.SEEK_END)
- finally:
- plt.close(fig)
-
- return buffer
-
-
-def _create_nis2_subsection_table(
- requirements_list: list[dict], attributes_by_requirement_id: dict
-) -> Table:
- """
- Create a table showing compliance by subsection.
-
- Args:
- requirements_list (list[dict]): List of requirement dictionaries.
- attributes_by_requirement_id (dict): Mapping of requirement IDs to their attributes.
-
- Returns:
- Table: A ReportLab table showing subsection breakdown.
- """
- # Collect data by subsection
- subsections_data = defaultdict(lambda: {"passed": 0, "failed": 0, "manual": 0})
-
- for requirement in requirements_list:
- requirement_id = requirement["id"]
- requirement_attributes = attributes_by_requirement_id.get(requirement_id, {})
-
- metadata = requirement_attributes.get("attributes", {}).get(
- "req_attributes", []
- )
- if not metadata:
- continue
-
- m = metadata[0]
- subsection = _safe_getattr(m, "SubSection", "Unknown")
- status = requirement["attributes"].get("status", StatusChoices.MANUAL)
-
- if status == StatusChoices.PASS:
- subsections_data[subsection]["passed"] += 1
- elif status == StatusChoices.FAIL:
- subsections_data[subsection]["failed"] += 1
- else:
- subsections_data[subsection]["manual"] += 1
-
- # Create table data
- table_data = [["SubSection", "Total", "Pass", "Fail", "Manual", "Compliance %"]]
-
- for subsection in sorted(subsections_data.keys()):
- data = subsections_data[subsection]
- total = data["passed"] + data["failed"] + data["manual"]
- compliance = (
- (data["passed"] / (data["passed"] + data["failed"]) * 100)
- if (data["passed"] + data["failed"]) > 0
- else 100
- )
-
- if len(subsection) > 100:
- subsection = subsection[:80] + "..."
-
- table_data.append(
- [
- subsection, # No truncate - let it wrap naturally
- str(total),
- str(data["passed"]),
- str(data["failed"]),
- str(data["manual"]),
- f"{compliance:.1f}%",
- ]
- )
-
- # Create table with wider SubSection column
- table = Table(
- table_data,
- colWidths=[
- 4.5 * inch,
- 0.6 * inch,
- 0.6 * inch,
- 0.6 * inch,
- 0.7 * inch,
- 1 * inch,
- ],
- )
- table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (-1, 0), COLOR_NIS2_PRIMARY),
- ("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
- ("ALIGN", (0, 0), (-1, -1), "CENTER"),
- ("ALIGN", (0, 1), (0, -1), "LEFT"),
- ("FONTNAME", (0, 0), (-1, 0), "PlusJakartaSans"),
- ("FONTSIZE", (0, 0), (-1, 0), 10),
- ("FONTSIZE", (0, 1), (-1, -1), 9),
- ("BOTTOMPADDING", (0, 0), (-1, 0), 8),
- ("TOPPADDING", (0, 0), (-1, 0), 8),
- ("GRID", (0, 0), (-1, -1), 0.5, COLOR_BORDER_GRAY),
- ("ROWBACKGROUNDS", (0, 1), (-1, -1), [COLOR_WHITE, COLOR_NIS2_BG_BLUE]),
- ]
- )
- )
-
- return table
-
-
-def _create_nis2_requirements_index(
- requirements_list: list[dict], attributes_by_requirement_id: dict, h2, h3, normal
-) -> list:
- """
- Create a hierarchical requirements index organized by Section and SubSection.
-
- Args:
- requirements_list (list[dict]): List of requirement dictionaries.
- attributes_by_requirement_id (dict): Mapping of requirement IDs to their attributes.
- h2, h3, normal: Paragraph styles.
-
- Returns:
- list: List of ReportLab elements for the index.
- """
- elements = []
-
- # Organize requirements by section and subsection
- sections_hierarchy = defaultdict(lambda: defaultdict(list))
-
- for requirement in requirements_list:
- requirement_id = requirement["id"]
- requirement_attributes = attributes_by_requirement_id.get(requirement_id, {})
-
- metadata = requirement_attributes.get("attributes", {}).get(
- "req_attributes", []
- )
- if not metadata:
- continue
-
- m = metadata[0]
- section = _safe_getattr(m, "Section", "Unknown")
- subsection = _safe_getattr(m, "SubSection", "Unknown")
- status = requirement["attributes"].get("status", StatusChoices.MANUAL)
-
- # Status indicator
- if status == StatusChoices.PASS:
- status_indicator = "✓"
- elif status == StatusChoices.FAIL:
- status_indicator = "✗"
- else:
- status_indicator = "⊙"
-
- description = requirement["attributes"].get(
- "description", "No description available"
- )
- sections_hierarchy[section][subsection].append(
- {
- "id": requirement_id,
- "description": (
- description[:100] + "..." if len(description) > 100 else description
- ),
- "status_indicator": status_indicator,
- }
- )
-
- # Build the index
- for section in sorted(sections_hierarchy.keys()):
- # Section header
- elements.append(Paragraph(section, h2))
-
- subsections = sections_hierarchy[section]
- for subsection in sorted(subsections.keys()):
- # Subsection header
- elements.append(Paragraph(f" {subsection}", h3))
-
- # Requirements
- for req in subsections[subsection]:
- req_text = (
- f" {req['status_indicator']} {req['id']} - {req['description']}"
- )
- elements.append(Paragraph(req_text, normal))
-
- elements.append(Spacer(1, 0.1 * inch))
-
- return elements
def generate_ens_report(
@@ -1941,952 +71,37 @@ def generate_ens_report(
output_path: str,
provider_id: str,
include_manual: bool = True,
- provider_obj=None,
+ provider_obj: Provider | None = None,
requirement_statistics: dict[str, dict[str, int]] | None = None,
findings_cache: dict[str, list[FindingOutput]] | None = None,
) -> None:
"""
Generate a PDF compliance report for ENS RD2022 framework.
- This function creates a comprehensive PDF report containing:
- - Compliance overview and metadata
- - Executive summary with overall compliance score
- - Marco/Categoría analysis with charts
- - Security dimensions radar chart
- - Requirement type distribution
- - Execution mode distribution
- - Critical failed requirements (nivel alto)
- - Requirements index
- - Detailed findings for failed and manual requirements
-
Args:
- tenant_id (str): The tenant ID for Row-Level Security context.
- scan_id (str): ID of the scan executed by Prowler.
- compliance_id (str): ID of the compliance framework (e.g., "ens_rd2022_aws").
- output_path (str): Output PDF file path (e.g., "/tmp/ens_report.pdf").
- provider_id (str): Provider ID for the scan.
- include_manual (bool): If True, include requirements with manual execution mode
- in the detailed requirements section. Defaults to True.
- provider_obj (Provider, optional): Pre-fetched Provider object to avoid duplicate queries.
- If None, the provider will be fetched from the database.
- requirement_statistics (dict, optional): Pre-aggregated requirement statistics to avoid
- duplicate database aggregations. If None, statistics will be aggregated from the database.
- findings_cache (dict, optional): Cache of already loaded findings to avoid duplicate queries.
- If None, findings will be loaded from the database. When provided, reduces database
- queries and transformation overhead when generating multiple reports.
-
- Raises:
- Exception: If any error occurs during PDF generation, it will be logged and re-raised.
+ tenant_id: The tenant ID for Row-Level Security context.
+ scan_id: ID of the scan executed by Prowler.
+ compliance_id: ID of the compliance framework (e.g., "ens_rd2022_aws").
+ output_path: Output PDF file path.
+ provider_id: Provider ID for the scan.
+ include_manual: If True, include manual requirements in detailed section.
+ provider_obj: Pre-fetched Provider object to avoid duplicate queries.
+ requirement_statistics: Pre-aggregated requirement statistics.
+ findings_cache: Cache of already loaded findings to avoid duplicate queries.
"""
- logger.info(f"Generating ENS report for scan {scan_id} with provider {provider_id}")
- try:
- # Get PDF styles
- pdf_styles = _create_pdf_styles()
- title_style = pdf_styles["title"]
- h1 = pdf_styles["h1"]
- h2 = pdf_styles["h2"]
- h3 = pdf_styles["h3"]
- normal = pdf_styles["normal"]
- normal_center = pdf_styles["normal_center"]
-
- # Get compliance and provider information
- with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
- # Use provided provider_obj or fetch from database
- if provider_obj is None:
- provider_obj = Provider.objects.get(id=provider_id)
-
- prowler_provider = initialize_prowler_provider(provider_obj)
- provider_type = provider_obj.provider
-
- frameworks_bulk = Compliance.get_bulk(provider_type)
- compliance_obj = frameworks_bulk[compliance_id]
- compliance_framework = _safe_getattr(compliance_obj, "Framework")
- compliance_version = _safe_getattr(compliance_obj, "Version")
- compliance_name = _safe_getattr(compliance_obj, "Name")
- compliance_description = _safe_getattr(compliance_obj, "Description", "")
-
- # Aggregate requirement statistics from database (memory-efficient)
- # Use provided requirement_statistics or fetch from database
- if requirement_statistics is None:
- logger.info(f"Aggregating requirement statistics for scan {scan_id}")
- requirement_statistics_by_check_id = (
- _aggregate_requirement_statistics_from_database(tenant_id, scan_id)
- )
- else:
- logger.info(
- f"Reusing pre-aggregated requirement statistics for scan {scan_id}"
- )
- requirement_statistics_by_check_id = requirement_statistics
-
- # Calculate requirements data using aggregated statistics
- attributes_by_requirement_id, requirements_list = (
- _calculate_requirements_data_from_statistics(
- compliance_obj, requirement_statistics_by_check_id
- )
- )
-
- # Count manual requirements before filtering
- manual_requirements_count = sum(
- 1
- for req in requirements_list
- if req["attributes"]["status"] == StatusChoices.MANUAL
- )
- total_requirements_count = len(requirements_list)
-
- # Filter out manual requirements for the report
- requirements_list = [
- req
- for req in requirements_list
- if req["attributes"]["status"] != StatusChoices.MANUAL
- ]
-
- logger.info(
- f"Filtered {manual_requirements_count} manual requirements out of {total_requirements_count} total requirements"
- )
-
- # Initialize PDF document
- doc = SimpleDocTemplate(
- output_path,
- pagesize=letter,
- title="Informe de Cumplimiento ENS - Prowler",
- author="Prowler",
- subject=f"Informe de Cumplimiento para {compliance_framework}",
- creator="Prowler Engineering Team",
- keywords=f"compliance,{compliance_framework},security,ens,prowler",
- )
-
- elements = []
-
- # SECTION 1: PORTADA (Cover Page)
- # Create logos side by side
- prowler_logo_path = os.path.join(
- os.path.dirname(__file__), "../assets/img/prowler_logo.png"
- )
- ens_logo_path = os.path.join(
- os.path.dirname(__file__), "../assets/img/ens_logo.png"
- )
-
- prowler_logo = Image(
- prowler_logo_path,
- width=3.5 * inch,
- height=0.7 * inch,
- )
- ens_logo = Image(
- ens_logo_path,
- width=1.5 * inch,
- height=2 * inch,
- )
-
- # Create table with both logos
- logos_table = Table(
- [[prowler_logo, ens_logo]], colWidths=[4 * inch, 2.5 * inch]
- )
- logos_table.setStyle(
- TableStyle(
- [
- ("ALIGN", (0, 0), (0, 0), "LEFT"),
- ("ALIGN", (1, 0), (1, 0), "RIGHT"),
- ("VALIGN", (0, 0), (0, 0), "MIDDLE"), # Prowler logo middle
- ("VALIGN", (1, 0), (1, 0), "TOP"), # ENS logo top
- ]
- )
- )
- elements.append(logos_table)
- elements.append(Spacer(1, 0.3 * inch))
- elements.append(
- Paragraph("Informe de Cumplimiento ENS RD 311/2022", title_style)
- )
- elements.append(Spacer(1, 0.5 * inch))
-
- # Add compliance information table
- provider_alias = provider_obj.alias or "N/A"
- info_data = [
- ["Framework:", compliance_framework],
- ["ID:", compliance_id],
- ["Nombre:", Paragraph(compliance_name, normal_center)],
- ["Versión:", compliance_version],
- ["Proveedor:", provider_type.upper()],
- ["Account ID:", provider_obj.uid],
- ["Alias:", provider_alias],
- ["Scan ID:", scan_id],
- ["Descripción:", Paragraph(compliance_description, normal_center)],
- ]
- info_table = Table(info_data, colWidths=[2 * inch, 4 * inch])
- info_table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (0, -1), colors.Color(0.2, 0.4, 0.6)),
- ("TEXTCOLOR", (0, 0), (0, -1), colors.white),
- ("FONTNAME", (0, 0), (0, -1), "FiraCode"),
- ("BACKGROUND", (1, 0), (1, -1), colors.Color(0.95, 0.97, 1.0)),
- ("TEXTCOLOR", (1, 0), (1, -1), colors.Color(0.2, 0.2, 0.2)),
- ("FONTNAME", (1, 0), (1, -1), "PlusJakartaSans"),
- ("ALIGN", (0, 0), (-1, -1), "LEFT"),
- ("VALIGN", (0, 0), (-1, -1), "TOP"),
- ("FONTSIZE", (0, 0), (-1, -1), 11),
- ("GRID", (0, 0), (-1, -1), 1, colors.Color(0.7, 0.8, 0.9)),
- ("LEFTPADDING", (0, 0), (-1, -1), 10),
- ("RIGHTPADDING", (0, 0), (-1, -1), 10),
- ("TOPPADDING", (0, 0), (-1, -1), 8),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 8),
- ]
- )
- )
- elements.append(info_table)
- elements.append(Spacer(1, 0.5 * inch))
-
- # Add warning about excluded manual requirements
- warning_text = (
- f"AVISO: Este informe no incluye los requisitos de ejecución manual. "
- f"El compliance {compliance_id} contiene un total de "
- f"{manual_requirements_count} requisitos manuales que no han sido evaluados "
- f"automáticamente y por tanto no están reflejados en las estadísticas de este reporte. "
- f"El análisis se basa únicamente en los {len(requirements_list)} requisitos automatizados."
- )
- warning_paragraph = Paragraph(warning_text, normal)
- warning_table = Table([[warning_paragraph]], colWidths=[6 * inch])
- warning_table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (0, 0), colors.Color(1.0, 0.95, 0.7)),
- ("TEXTCOLOR", (0, 0), (0, 0), colors.Color(0.4, 0.3, 0.0)),
- ("ALIGN", (0, 0), (0, 0), "LEFT"),
- ("VALIGN", (0, 0), (0, 0), "MIDDLE"),
- ("BOX", (0, 0), (-1, -1), 2, colors.Color(0.9, 0.7, 0.0)),
- ("LEFTPADDING", (0, 0), (-1, -1), 15),
- ("RIGHTPADDING", (0, 0), (-1, -1), 15),
- ("TOPPADDING", (0, 0), (-1, -1), 12),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 12),
- ]
- )
- )
- elements.append(warning_table)
- elements.append(Spacer(1, 0.5 * inch))
-
- # Add legend explaining ENS values
- elements.append(Paragraph("Leyenda de Valores ENS", h2))
- elements.append(Spacer(1, 0.2 * inch))
-
- legend_text = """
- Nivel (Criticidad del requisito):
- • Alto: Requisitos críticos que deben cumplirse prioritariamente
- • Medio: Requisitos importantes con impacto moderado
- • Bajo: Requisitos complementarios de menor criticidad
- • Opcional: Recomendaciones adicionales no obligatorias
-
- Tipo (Clasificación del requisito):
- • Requisito: Obligación establecida por el ENS
- • Refuerzo: Medida adicional que refuerza un requisito
- • Recomendación: Buena práctica sugerida
- • Medida: Acción concreta de implementación
-
- Modo de Ejecución:
- • Automático: El requisito puede verificarse automáticamente mediante escaneo
- • Manual: Requiere verificación manual por parte de un auditor
-
- Dimensiones de Seguridad:
- • C (Confidencialidad): Protección contra accesos no autorizados a la información
- • I (Integridad): Garantía de exactitud y completitud de la información
- • T (Trazabilidad): Capacidad de rastrear acciones y eventos
- • A (Autenticidad): Verificación de identidad de usuarios y sistemas
- • D (Disponibilidad): Acceso a la información cuando se necesita
-
- Estados de Cumplimiento:
- • CUMPLE (PASS): El requisito se cumple satisfactoriamente
- • NO CUMPLE (FAIL): El requisito no se cumple y requiere corrección
- • MANUAL: Requiere revisión manual para determinar cumplimiento
- """
- legend_paragraph = Paragraph(legend_text, normal)
- legend_table = Table([[legend_paragraph]], colWidths=[6.5 * inch])
- legend_table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (0, 0), colors.Color(0.95, 0.97, 1.0)),
- ("TEXTCOLOR", (0, 0), (0, 0), colors.Color(0.2, 0.2, 0.2)),
- ("ALIGN", (0, 0), (0, 0), "LEFT"),
- ("VALIGN", (0, 0), (0, 0), "TOP"),
- ("BOX", (0, 0), (-1, -1), 1.5, colors.Color(0.5, 0.6, 0.8)),
- ("LEFTPADDING", (0, 0), (-1, -1), 15),
- ("RIGHTPADDING", (0, 0), (-1, -1), 15),
- ("TOPPADDING", (0, 0), (-1, -1), 12),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 12),
- ]
- )
- )
- elements.append(legend_table)
- elements.append(PageBreak())
-
- # SECTION 2: RESUMEN EJECUTIVO (Executive Summary)
- elements.append(Paragraph("Resumen Ejecutivo", h1))
- elements.append(Spacer(1, 0.2 * inch))
-
- # Calculate overall compliance (simple PASS/TOTAL)
- total_requirements = len(requirements_list)
- passed_requirements = sum(
- 1
- for req in requirements_list
- if req["attributes"]["status"] == StatusChoices.PASS
- )
- failed_requirements = sum(
- 1
- for req in requirements_list
- if req["attributes"]["status"] == StatusChoices.FAIL
- )
-
- overall_compliance = (
- (passed_requirements / total_requirements * 100)
- if total_requirements > 0
- else 0
- )
-
- if overall_compliance >= 80:
- compliance_color = colors.Color(0.2, 0.8, 0.2)
- elif overall_compliance >= 60:
- compliance_color = colors.Color(0.8, 0.8, 0.2)
- else:
- compliance_color = colors.Color(0.8, 0.2, 0.2)
-
- summary_data = [
- ["Nivel de Cumplimiento Global:", f"{overall_compliance:.2f}%"],
- ]
-
- summary_table = Table(summary_data, colWidths=[3 * inch, 2 * inch])
- summary_table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (0, 0), colors.Color(0.1, 0.3, 0.5)),
- ("TEXTCOLOR", (0, 0), (0, 0), colors.white),
- ("FONTNAME", (0, 0), (0, 0), "FiraCode"),
- ("FONTSIZE", (0, 0), (0, 0), 12),
- ("BACKGROUND", (1, 0), (1, 0), compliance_color),
- ("TEXTCOLOR", (1, 0), (1, 0), colors.white),
- ("FONTNAME", (1, 0), (1, 0), "FiraCode"),
- ("FONTSIZE", (1, 0), (1, 0), 16),
- ("ALIGN", (0, 0), (-1, -1), "CENTER"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("GRID", (0, 0), (-1, -1), 1.5, colors.Color(0.5, 0.6, 0.7)),
- ("LEFTPADDING", (0, 0), (-1, -1), 12),
- ("RIGHTPADDING", (0, 0), (-1, -1), 12),
- ("TOPPADDING", (0, 0), (-1, -1), 10),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 10),
- ]
- )
- )
- elements.append(summary_table)
- elements.append(Spacer(1, 0.3 * inch))
-
- # Summary counts table
- counts_data = [
- ["Estado", "Cantidad", "Porcentaje"],
- [
- "CUMPLE",
- str(passed_requirements),
- (
- f"{(passed_requirements / total_requirements * 100):.1f}%"
- if total_requirements > 0
- else "0.0%"
- ),
- ],
- [
- "NO CUMPLE",
- str(failed_requirements),
- (
- f"{(failed_requirements / total_requirements * 100):.1f}%"
- if total_requirements > 0
- else "0.0%"
- ),
- ],
- ["TOTAL", str(total_requirements), "100%"],
- ]
-
- counts_table = Table(counts_data, colWidths=[2 * inch, 1.5 * inch, 1.5 * inch])
- counts_table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (-1, 0), colors.Color(0.2, 0.4, 0.6)),
- ("TEXTCOLOR", (0, 0), (-1, 0), colors.white),
- ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
- ("BACKGROUND", (0, 1), (0, 1), colors.Color(0.2, 0.8, 0.2)),
- ("TEXTCOLOR", (0, 1), (0, 1), colors.white),
- ("BACKGROUND", (0, 2), (0, 2), colors.Color(0.8, 0.2, 0.2)),
- ("TEXTCOLOR", (0, 2), (0, 2), colors.white),
- ("BACKGROUND", (0, 3), (0, 3), colors.Color(0.4, 0.4, 0.4)),
- ("TEXTCOLOR", (0, 3), (0, 3), colors.white),
- ("ALIGN", (0, 0), (-1, -1), "CENTER"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("FONTSIZE", (0, 0), (-1, -1), 10),
- ("GRID", (0, 0), (-1, -1), 1, colors.Color(0.7, 0.7, 0.7)),
- ("LEFTPADDING", (0, 0), (-1, -1), 8),
- ("RIGHTPADDING", (0, 0), (-1, -1), 8),
- ("TOPPADDING", (0, 0), (-1, -1), 6),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 6),
- ]
- )
- )
- elements.append(counts_table)
- elements.append(Spacer(1, 0.3 * inch))
-
- # Summary by Nivel
- nivel_data = defaultdict(lambda: {"passed": 0, "total": 0})
- for requirement in requirements_list:
- requirement_id = requirement["id"]
- requirement_attributes = attributes_by_requirement_id.get(
- requirement_id, {}
- )
- requirement_status = requirement["attributes"]["status"]
-
- metadata = requirement_attributes.get("attributes", {}).get(
- "req_attributes", []
- )
- if not metadata:
- continue
-
- m = metadata[0]
- nivel = _safe_getattr(m, "Nivel")
- nivel_data[nivel]["total"] += 1
- if requirement_status == StatusChoices.PASS:
- nivel_data[nivel]["passed"] += 1
-
- elements.append(Paragraph("Cumplimiento por Nivel", h2))
- nivel_table_data = [["Nivel", "Cumplidos", "Total", "Porcentaje"]]
- for nivel in ENS_NIVEL_ORDER:
- if nivel in nivel_data:
- data = nivel_data[nivel]
- percentage = (
- (data["passed"] / data["total"] * 100) if data["total"] > 0 else 0
- )
- nivel_table_data.append(
- [
- nivel.capitalize(),
- str(data["passed"]),
- str(data["total"]),
- f"{percentage:.1f}%",
- ]
- )
-
- nivel_table = Table(
- nivel_table_data, colWidths=[1.5 * inch, 1.5 * inch, 1.5 * inch, 1.5 * inch]
- )
- nivel_table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (-1, 0), colors.Color(0.2, 0.4, 0.6)),
- ("TEXTCOLOR", (0, 0), (-1, 0), colors.white),
- ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
- ("ALIGN", (0, 0), (-1, -1), "CENTER"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("FONTSIZE", (0, 0), (-1, -1), 10),
- ("GRID", (0, 0), (-1, -1), 1, colors.Color(0.7, 0.7, 0.7)),
- ("LEFTPADDING", (0, 0), (-1, -1), 8),
- ("RIGHTPADDING", (0, 0), (-1, -1), 8),
- ("TOPPADDING", (0, 0), (-1, -1), 6),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 6),
- ]
- )
- )
- elements.append(nivel_table)
- elements.append(PageBreak())
-
- # SECTION 3: ANÁLISIS POR MARCOS (Marco Analysis)
- elements.append(Paragraph("Análisis por Marcos y Categorías", h1))
- elements.append(Spacer(1, 0.2 * inch))
-
- chart_buffer = _create_marco_category_chart(
- requirements_list, attributes_by_requirement_id
- )
- chart_image = Image(chart_buffer, width=7 * inch, height=5 * inch)
- elements.append(chart_image)
- elements.append(PageBreak())
-
- # SECTION 4: DIMENSIONES DE SEGURIDAD (Security Dimensions)
- elements.append(Paragraph("Análisis por Dimensiones de Seguridad", h1))
- elements.append(Spacer(1, 0.2 * inch))
-
- radar_buffer = _create_dimensions_radar_chart(
- requirements_list, attributes_by_requirement_id
- )
- radar_image = Image(radar_buffer, width=6 * inch, height=6 * inch)
- elements.append(radar_image)
- elements.append(PageBreak())
-
- # SECTION 5: DISTRIBUCIÓN POR TIPO (Type Distribution)
- elements.append(Paragraph("Distribución por Tipo de Requisito", h1))
- elements.append(Spacer(1, 0.2 * inch))
-
- tipo_data = defaultdict(lambda: {"passed": 0, "total": 0})
- for requirement in requirements_list:
- requirement_id = requirement["id"]
- requirement_attributes = attributes_by_requirement_id.get(
- requirement_id, {}
- )
- requirement_status = requirement["attributes"]["status"]
-
- metadata = requirement_attributes.get("attributes", {}).get(
- "req_attributes", []
- )
- if not metadata:
- continue
-
- m = metadata[0]
- tipo = _safe_getattr(m, "Tipo")
- tipo_data[tipo]["total"] += 1
- if requirement_status == StatusChoices.PASS:
- tipo_data[tipo]["passed"] += 1
-
- tipo_table_data = [["Tipo", "Cumplidos", "Total", "Porcentaje"]]
- for tipo in ENS_TIPO_ORDER:
- if tipo in tipo_data:
- data = tipo_data[tipo]
- percentage = (
- (data["passed"] / data["total"] * 100) if data["total"] > 0 else 0
- )
- tipo_table_data.append(
- [
- tipo.capitalize(),
- str(data["passed"]),
- str(data["total"]),
- f"{percentage:.1f}%",
- ]
- )
-
- tipo_table = Table(
- tipo_table_data, colWidths=[2 * inch, 1.5 * inch, 1.5 * inch, 1.5 * inch]
- )
- tipo_table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (-1, 0), colors.Color(0.2, 0.4, 0.6)),
- ("TEXTCOLOR", (0, 0), (-1, 0), colors.white),
- ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
- ("ALIGN", (0, 0), (-1, -1), "CENTER"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("FONTSIZE", (0, 0), (-1, -1), 10),
- ("GRID", (0, 0), (-1, -1), 1, colors.Color(0.7, 0.7, 0.7)),
- ("LEFTPADDING", (0, 0), (-1, -1), 8),
- ("RIGHTPADDING", (0, 0), (-1, -1), 8),
- ("TOPPADDING", (0, 0), (-1, -1), 6),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 6),
- ]
- )
- )
- elements.append(tipo_table)
- elements.append(PageBreak())
-
- # SECTION 6: REQUISITOS CRÍTICOS NO CUMPLIDOS (Critical Failed Requirements)
- elements.append(Paragraph("Requisitos Críticos No Cumplidos", h1))
- elements.append(Spacer(1, 0.2 * inch))
-
- critical_failed = []
- for requirement in requirements_list:
- requirement_status = requirement["attributes"]["status"]
- if requirement_status == StatusChoices.FAIL:
- requirement_id = requirement["id"]
- req_attributes = attributes_by_requirement_id.get(
- requirement_id, {}
- ).get("attributes", {})
- metadata_list = req_attributes.get("req_attributes", [])
- if metadata_list:
- metadata = metadata_list[0]
- nivel = _safe_getattr(metadata, "Nivel", "")
- if nivel.lower() == "alto":
- critical_failed.append(
- {
- "requirement": requirement,
- "metadata": metadata,
- }
- )
-
- if not critical_failed:
- elements.append(
- Paragraph(
- "✅ No se encontraron requisitos críticos no cumplidos.", normal
- )
- )
- else:
- elements.append(
- Paragraph(
- f"Se encontraron {len(critical_failed)} requisitos de nivel Alto que no cumplen:",
- normal,
- )
- )
- elements.append(Spacer(1, 0.3 * inch))
-
- critical_table_data = [["ID", "Descripción", "Marco", "Categoría"]]
- for item in critical_failed:
- requirement_id = item["requirement"]["id"]
- description = item["requirement"]["attributes"]["description"]
- marco = _safe_getattr(item["metadata"], "Marco")
- categoria = _safe_getattr(item["metadata"], "Categoria")
-
- if len(description) > 60:
- description = description[:57] + "..."
-
- critical_table_data.append(
- [requirement_id, description, marco, categoria]
- )
-
- critical_table = Table(
- critical_table_data,
- colWidths=[1.5 * inch, 3.3 * inch, 1.5 * inch, 2 * inch],
- )
- critical_table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (-1, 0), colors.Color(0.8, 0.2, 0.2)),
- ("TEXTCOLOR", (0, 0), (-1, 0), colors.white),
- ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
- ("FONTSIZE", (0, 0), (-1, 0), 9),
- ("FONTNAME", (0, 1), (0, -1), "FiraCode"),
- ("FONTSIZE", (0, 1), (-1, -1), 8),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("GRID", (0, 0), (-1, -1), 1, colors.Color(0.7, 0.7, 0.7)),
- ("LEFTPADDING", (0, 0), (-1, -1), 6),
- ("RIGHTPADDING", (0, 0), (-1, -1), 6),
- ("TOPPADDING", (0, 0), (-1, -1), 6),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 6),
- (
- "BACKGROUND",
- (1, 1),
- (-1, -1),
- colors.Color(0.98, 0.98, 0.98),
- ),
- ]
- )
- )
- elements.append(critical_table)
-
- elements.append(PageBreak())
-
- # SECTION 7: ÍNDICE DE REQUISITOS (Requirements Index)
- elements.append(Paragraph("Índice de Requisitos", h1))
- elements.append(Spacer(1, 0.2 * inch))
-
- # Group by Marco → Categoría
- marco_categoria_index = defaultdict(lambda: defaultdict(list))
- for (
- requirement_id,
- requirement_attributes,
- ) in attributes_by_requirement_id.items():
- metadata = requirement_attributes["attributes"]["req_attributes"][0]
- marco = getattr(metadata, "Marco", "N/A")
- categoria = getattr(metadata, "Categoria", "N/A")
- id_grupo = getattr(metadata, "IdGrupoControl", "N/A")
-
- marco_categoria_index[marco][categoria].append(
- {
- "id": requirement_id,
- "id_grupo": id_grupo,
- "description": requirement_attributes["description"],
- }
- )
-
- for marco, categorias in sorted(marco_categoria_index.items()):
- elements.append(Paragraph(f"Marco: {marco.capitalize()}", h2))
- for categoria, requirements in sorted(categorias.items()):
- elements.append(Paragraph(f"Categoría: {categoria.capitalize()}", h3))
- for req in requirements:
- desc = req["description"]
- if len(desc) > 80:
- desc = desc[:77] + "..."
- elements.append(Paragraph(f"{req['id']} - {desc}", normal))
- elements.append(Spacer(1, 0.05 * inch))
-
- elements.append(PageBreak())
-
- # SECTION 8: DETALLE DE REQUISITOS (Detailed Requirements)
- elements.append(Paragraph("Detalle de Requisitos", h1))
- elements.append(Spacer(1, 0.2 * inch))
-
- # Filter: NO CUMPLE + MANUAL (if include_manual)
- filtered_requirements = [
- req
- for req in requirements_list
- if req["attributes"]["status"] == StatusChoices.FAIL
- or (include_manual and req["attributes"]["status"] == StatusChoices.MANUAL)
- ]
-
- if not filtered_requirements:
- elements.append(
- Paragraph("✅ Todos los requisitos automáticos cumplen.", normal)
- )
- else:
- elements.append(
- Paragraph(
- f"Se muestran {len(filtered_requirements)} requisitos que requieren atención:",
- normal,
- )
- )
- elements.append(Spacer(1, 0.2 * inch))
-
- # Collect check IDs to load
- check_ids_to_load = []
- for requirement in filtered_requirements:
- requirement_id = requirement["id"]
- requirement_attributes = attributes_by_requirement_id.get(
- requirement_id, {}
- )
- check_ids = requirement_attributes.get("attributes", {}).get(
- "checks", []
- )
- check_ids_to_load.extend(check_ids)
-
- # Load findings on-demand
- logger.info(
- f"Loading findings on-demand for {len(filtered_requirements)} requirements"
- )
- findings_by_check_id = _load_findings_for_requirement_checks(
- tenant_id, scan_id, check_ids_to_load, prowler_provider, findings_cache
- )
-
- for requirement in filtered_requirements:
- requirement_id = requirement["id"]
- requirement_attributes = attributes_by_requirement_id.get(
- requirement_id, {}
- )
- requirement_status = requirement["attributes"]["status"]
- requirement_description = requirement_attributes.get("description", "")
-
- # Requirement ID header in a box
- req_id_paragraph = Paragraph(requirement_id, h2)
- req_id_table = Table([[req_id_paragraph]], colWidths=[6.5 * inch])
- req_id_table.setStyle(
- TableStyle(
- [
- (
- "BACKGROUND",
- (0, 0),
- (0, 0),
- colors.Color(0.15, 0.35, 0.55),
- ),
- ("TEXTCOLOR", (0, 0), (0, 0), colors.white),
- ("ALIGN", (0, 0), (0, 0), "CENTER"),
- ("VALIGN", (0, 0), (0, 0), "MIDDLE"),
- ("LEFTPADDING", (0, 0), (-1, -1), 15),
- ("RIGHTPADDING", (0, 0), (-1, -1), 15),
- ("TOPPADDING", (0, 0), (-1, -1), 10),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 10),
- ("BOX", (0, 0), (-1, -1), 2, colors.Color(0.2, 0.4, 0.6)),
- ]
- )
- )
- elements.append(req_id_table)
- elements.append(Spacer(1, 0.15 * inch))
-
- metadata = requirement_attributes.get("attributes", {}).get(
- "req_attributes", []
- )
- if metadata and len(metadata) > 0:
- m = metadata[0]
-
- # Create all badges
- status_component = _create_status_component(requirement_status)
- nivel = getattr(m, "Nivel", "N/A")
- nivel_badge = _create_ens_nivel_badge(nivel)
- tipo = getattr(m, "Tipo", "N/A")
- tipo_badge = _create_ens_tipo_badge(tipo)
-
- # Organize badges in a horizontal table (2 rows x 2 cols)
- badges_table = Table(
- [[status_component, nivel_badge], [tipo_badge]],
- colWidths=[3.25 * inch, 3.25 * inch],
- )
- badges_table.setStyle(
- TableStyle(
- [
- ("ALIGN", (0, 0), (-1, -1), "CENTER"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("LEFTPADDING", (0, 0), (-1, -1), 5),
- ("RIGHTPADDING", (0, 0), (-1, -1), 5),
- ("TOPPADDING", (0, 0), (-1, -1), 5),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 5),
- ]
- )
- )
- elements.append(badges_table)
- elements.append(Spacer(1, 0.15 * inch))
-
- # Dimensiones badges (if present)
- dimensiones = getattr(m, "Dimensiones", [])
- if dimensiones:
- dim_label = Paragraph("Dimensiones:", normal)
- dim_badges = _create_ens_dimension_badges(dimensiones)
- dim_table = Table(
- [[dim_label, dim_badges]], colWidths=[1.5 * inch, 5 * inch]
- )
- dim_table.setStyle(
- TableStyle(
- [
- ("ALIGN", (0, 0), (0, 0), "LEFT"),
- ("ALIGN", (1, 0), (1, 0), "LEFT"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ]
- )
- )
- elements.append(dim_table)
- elements.append(Spacer(1, 0.15 * inch))
-
- # Requirement details in a clean table
- details_data = [
- ["Descripción:", Paragraph(requirement_description, normal)],
- ["Marco:", Paragraph(getattr(m, "Marco", "N/A"), normal)],
- [
- "Categoría:",
- Paragraph(getattr(m, "Categoria", "N/A"), normal),
- ],
- [
- "ID Grupo Control:",
- Paragraph(getattr(m, "IdGrupoControl", "N/A"), normal),
- ],
- [
- "Descripción del Control:",
- Paragraph(getattr(m, "DescripcionControl", "N/A"), normal),
- ],
- ]
- details_table = Table(
- details_data, colWidths=[2.2 * inch, 4.5 * inch]
- )
- details_table.setStyle(
- TableStyle(
- [
- (
- "BACKGROUND",
- (0, 0),
- (0, -1),
- colors.Color(0.9, 0.93, 0.96),
- ),
- (
- "TEXTCOLOR",
- (0, 0),
- (0, -1),
- colors.Color(0.2, 0.2, 0.2),
- ),
- ("FONTNAME", (0, 0), (0, -1), "FiraCode"),
- ("FONTSIZE", (0, 0), (-1, -1), 10),
- ("ALIGN", (0, 0), (0, -1), "LEFT"),
- ("VALIGN", (0, 0), (-1, -1), "TOP"),
- (
- "GRID",
- (0, 0),
- (-1, -1),
- 0.5,
- colors.Color(0.7, 0.8, 0.9),
- ),
- ("LEFTPADDING", (0, 0), (-1, -1), 8),
- ("RIGHTPADDING", (0, 0), (-1, -1), 8),
- ("TOPPADDING", (0, 0), (-1, -1), 6),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 6),
- ]
- )
- )
- elements.append(details_table)
- elements.append(Spacer(1, 0.2 * inch))
-
- # Findings for checks
- requirement_check_ids = requirement_attributes.get(
- "attributes", {}
- ).get("checks", [])
- for check_id in requirement_check_ids:
- elements.append(Paragraph(f"Check: {check_id}", h2))
- elements.append(Spacer(1, 0.1 * inch))
-
- check_findings = findings_by_check_id.get(check_id, [])
-
- if not check_findings:
- elements.append(
- Paragraph(
- "- No hay información disponible para este check",
- normal,
- )
- )
- else:
- findings_table_data = [
- ["Finding", "Resource name", "Severity", "Status", "Region"]
- ]
- for finding_output in check_findings:
- check_metadata = getattr(finding_output, "metadata", {})
- finding_title = getattr(
- check_metadata,
- "CheckTitle",
- getattr(finding_output, "check_id", ""),
- )
- resource_name = getattr(finding_output, "resource_name", "")
- if not resource_name:
- resource_name = getattr(
- finding_output, "resource_uid", ""
- )
- severity = getattr(
- check_metadata, "Severity", ""
- ).capitalize()
- finding_status = getattr(
- finding_output, "status", ""
- ).upper()
- region = getattr(finding_output, "region", "global")
-
- findings_table_data.append(
- [
- Paragraph(finding_title, normal_center),
- Paragraph(resource_name, normal_center),
- Paragraph(severity, normal_center),
- Paragraph(finding_status, normal_center),
- Paragraph(region, normal_center),
- ]
- )
-
- findings_table = Table(
- findings_table_data,
- colWidths=[
- 2.5 * inch,
- 3 * inch,
- 0.9 * inch,
- 0.9 * inch,
- 0.9 * inch,
- ],
- )
- findings_table.setStyle(
- TableStyle(
- [
- (
- "BACKGROUND",
- (0, 0),
- (-1, 0),
- colors.Color(0.2, 0.4, 0.6),
- ),
- ("TEXTCOLOR", (0, 0), (-1, 0), colors.white),
- ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
- ("ALIGN", (0, 0), (0, 0), "CENTER"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("FONTSIZE", (0, 0), (-1, -1), 9),
- (
- "GRID",
- (0, 0),
- (-1, -1),
- 0.1,
- colors.Color(0.7, 0.8, 0.9),
- ),
- ("LEFTPADDING", (0, 0), (0, 0), 0),
- ("RIGHTPADDING", (0, 0), (0, 0), 0),
- ("TOPPADDING", (0, 0), (-1, -1), 4),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 4),
- ]
- )
- )
- elements.append(findings_table)
-
- elements.append(Spacer(1, 0.1 * inch))
-
- elements.append(PageBreak())
-
- # Build the PDF
- logger.info("Building PDF...")
- doc.build(
- elements,
- onFirstPage=partial(_add_pdf_footer, compliance_name=compliance_name),
- onLaterPages=partial(_add_pdf_footer, compliance_name=compliance_name),
- )
- except Exception as e:
- tb_lineno = e.__traceback__.tb_lineno if e.__traceback__ else "unknown"
- logger.error(f"Error building ENS report, line {tb_lineno} -- {e}")
- raise e
+ generator = ENSReportGenerator(FRAMEWORK_REGISTRY["ens"])
+
+ generator.generate(
+ tenant_id=tenant_id,
+ scan_id=scan_id,
+ compliance_id=compliance_id,
+ output_path=output_path,
+ provider_id=provider_id,
+ provider_obj=provider_obj,
+ requirement_statistics=requirement_statistics,
+ findings_cache=findings_cache,
+ include_manual=include_manual,
+ )
def generate_nis2_report(
@@ -2897,552 +112,39 @@ def generate_nis2_report(
provider_id: str,
only_failed: bool = True,
include_manual: bool = False,
- provider_obj=None,
+ provider_obj: Provider | None = None,
requirement_statistics: dict[str, dict[str, int]] | None = None,
findings_cache: dict[str, list[FindingOutput]] | None = None,
) -> None:
"""
Generate a PDF compliance report for NIS2 Directive (EU) 2022/2555.
- This function creates a comprehensive PDF report containing:
- - Compliance overview and metadata
- - Executive summary with overall compliance score
- - Section analysis with horizontal bar chart
- - SubSection breakdown table
- - Critical failed requirements
- - Requirements index organized by section and subsection
- - Detailed findings for failed requirements
-
Args:
- tenant_id (str): The tenant ID for Row-Level Security context.
- scan_id (str): ID of the scan executed by Prowler.
- compliance_id (str): ID of the compliance framework (e.g., "nis2_aws").
- output_path (str): Output PDF file path (e.g., "/tmp/nis2_report.pdf").
- provider_id (str): Provider ID for the scan.
- only_failed (bool): If True, only requirements with status "FAIL" will be included
- in the detailed requirements section. Defaults to True.
- include_manual (bool): If True, includes MANUAL requirements in the detailed findings
- section along with FAIL requirements. Defaults to True.
- provider_obj (Provider, optional): Pre-fetched Provider object to avoid duplicate queries.
- If None, the provider will be fetched from the database.
- requirement_statistics (dict, optional): Pre-aggregated requirement statistics to avoid
- duplicate database aggregations. If None, statistics will be aggregated from the database.
- findings_cache (dict, optional): Cache of already loaded findings to avoid duplicate queries.
- If None, findings will be loaded from the database.
-
- Raises:
- Exception: If any error occurs during PDF generation, it will be logged and re-raised.
+ tenant_id: The tenant ID for Row-Level Security context.
+ scan_id: ID of the scan executed by Prowler.
+ compliance_id: ID of the compliance framework (e.g., "nis2_aws").
+ output_path: Output PDF file path.
+ provider_id: Provider ID for the scan.
+ only_failed: If True, only include failed requirements in detailed section.
+ include_manual: If True, include manual requirements in detailed section.
+ provider_obj: Pre-fetched Provider object to avoid duplicate queries.
+ requirement_statistics: Pre-aggregated requirement statistics.
+ findings_cache: Cache of already loaded findings to avoid duplicate queries.
"""
- logger.info(
- f"Generating NIS2 report for scan {scan_id} with provider {provider_id}"
+ generator = NIS2ReportGenerator(FRAMEWORK_REGISTRY["nis2"])
+
+ generator.generate(
+ tenant_id=tenant_id,
+ scan_id=scan_id,
+ compliance_id=compliance_id,
+ output_path=output_path,
+ provider_id=provider_id,
+ provider_obj=provider_obj,
+ requirement_statistics=requirement_statistics,
+ findings_cache=findings_cache,
+ only_failed=only_failed,
+ include_manual=include_manual,
)
- try:
- # Get PDF styles
- pdf_styles = _create_pdf_styles()
- title_style = pdf_styles["title"]
- h1 = pdf_styles["h1"]
- h2 = pdf_styles["h2"]
- h3 = pdf_styles["h3"]
- normal = pdf_styles["normal"]
- normal_center = pdf_styles["normal_center"]
-
- # Get compliance and provider information
- with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
- # Use provided provider_obj or fetch from database
- if provider_obj is None:
- provider_obj = Provider.objects.get(id=provider_id)
-
- prowler_provider = initialize_prowler_provider(provider_obj)
- provider_type = provider_obj.provider
-
- frameworks_bulk = Compliance.get_bulk(provider_type)
- compliance_obj = frameworks_bulk[compliance_id]
- compliance_framework = _safe_getattr(compliance_obj, "Framework")
- compliance_version = _safe_getattr(compliance_obj, "Version")
- compliance_name = _safe_getattr(compliance_obj, "Name")
- compliance_description = _safe_getattr(compliance_obj, "Description", "")
-
- # Aggregate requirement statistics from database
- if requirement_statistics is None:
- logger.info(f"Aggregating requirement statistics for scan {scan_id}")
- requirement_statistics_by_check_id = (
- _aggregate_requirement_statistics_from_database(tenant_id, scan_id)
- )
- else:
- logger.info(
- f"Reusing pre-aggregated requirement statistics for scan {scan_id}"
- )
- requirement_statistics_by_check_id = requirement_statistics
-
- # Calculate requirements data using aggregated statistics
- attributes_by_requirement_id, requirements_list = (
- _calculate_requirements_data_from_statistics(
- compliance_obj, requirement_statistics_by_check_id
- )
- )
-
- # Initialize PDF document
- doc = SimpleDocTemplate(
- output_path,
- pagesize=letter,
- title="NIS2 Compliance Report - Prowler",
- author="Prowler",
- subject=f"Compliance Report for {compliance_framework}",
- creator="Prowler Engineering Team",
- keywords=f"compliance,{compliance_framework},security,nis2,prowler,eu",
- )
-
- elements = []
-
- # SECTION 1: Cover Page
- # Create logos side by side
- prowler_logo_path = os.path.join(
- os.path.dirname(__file__), "../assets/img/prowler_logo.png"
- )
- nis2_logo_path = os.path.join(
- os.path.dirname(__file__), "../assets/img/nis2_logo.png"
- )
-
- prowler_logo = Image(
- prowler_logo_path,
- width=3.5 * inch,
- height=0.7 * inch,
- )
- nis2_logo = Image(
- nis2_logo_path,
- width=2.3 * inch,
- height=1.5 * inch,
- )
-
- # Create table with both logos
- logos_table = Table(
- [[prowler_logo, nis2_logo]], colWidths=[4 * inch, 2.5 * inch]
- )
- logos_table.setStyle(
- TableStyle(
- [
- ("ALIGN", (0, 0), (0, 0), "LEFT"),
- ("ALIGN", (1, 0), (1, 0), "RIGHT"),
- ("VALIGN", (0, 0), (0, 0), "MIDDLE"), # Prowler logo middle
- ("VALIGN", (1, 0), (1, 0), "MIDDLE"), # NIS2 logo middle
- ]
- )
- )
- elements.append(logos_table)
- elements.append(Spacer(1, 0.3 * inch))
-
- # Title
- title = Paragraph(
-            "NIS2 Compliance Report<br/>Directive (EU) 2022/2555",
- title_style,
- )
- elements.append(title)
- elements.append(Spacer(1, 0.3 * inch))
-
- # Compliance metadata table
- provider_alias = provider_obj.alias or "N/A"
- metadata_data = [
- ["Framework:", compliance_framework],
- ["Name:", Paragraph(compliance_name, normal_center)],
- ["Version:", compliance_version or "N/A"],
- ["Provider:", provider_type.upper()],
- ["Account ID:", provider_obj.uid],
- ["Alias:", provider_alias],
- ["Scan ID:", scan_id],
- ["Description:", Paragraph(compliance_description, normal_center)],
- ]
-
- metadata_table = Table(metadata_data, colWidths=[COL_WIDTH_XLARGE, 4 * inch])
- metadata_table.setStyle(_create_info_table_style())
- elements.append(metadata_table)
- elements.append(PageBreak())
-
- # SECTION 2: Executive Summary
- elements.append(Paragraph("Executive Summary", h1))
- elements.append(Spacer(1, 0.1 * inch))
-
- # Calculate overall statistics
- total_requirements = len(requirements_list)
- passed_requirements = sum(
- 1
- for req in requirements_list
- if req["attributes"].get("status") == StatusChoices.PASS
- )
- failed_requirements = sum(
- 1
- for req in requirements_list
- if req["attributes"].get("status") == StatusChoices.FAIL
- )
- manual_requirements = sum(
- 1
- for req in requirements_list
- if req["attributes"].get("status") == StatusChoices.MANUAL
- )
-
- overall_compliance = (
- (passed_requirements / (passed_requirements + failed_requirements) * 100)
- if (passed_requirements + failed_requirements) > 0
- else 100
- )
-
- # Summary statistics table
- summary_data = [
- ["Metric", "Value"],
- ["Total Requirements", str(total_requirements)],
- ["Passed ✓", str(passed_requirements)],
- ["Failed ✗", str(failed_requirements)],
- ["Manual ⊙", str(manual_requirements)],
- ["Overall Compliance", f"{overall_compliance:.1f}%"],
- ]
-
- summary_table = Table(summary_data, colWidths=[3 * inch, 2 * inch])
- summary_table.setStyle(
- TableStyle(
- [
- # Header row
- ("BACKGROUND", (0, 0), (-1, 0), COLOR_NIS2_PRIMARY),
- ("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
- # Status-specific colors for left column
- ("BACKGROUND", (0, 2), (0, 2), COLOR_SAFE), # Passed row
- ("TEXTCOLOR", (0, 2), (0, 2), COLOR_WHITE),
- ("BACKGROUND", (0, 3), (0, 3), COLOR_HIGH_RISK), # Failed row
- ("TEXTCOLOR", (0, 3), (0, 3), COLOR_WHITE),
- ("BACKGROUND", (0, 4), (0, 4), COLOR_DARK_GRAY), # Manual row
- ("TEXTCOLOR", (0, 4), (0, 4), COLOR_WHITE),
- # General styling
- ("ALIGN", (0, 0), (-1, -1), "CENTER"),
- ("FONTNAME", (0, 0), (-1, 0), "PlusJakartaSans"),
- ("FONTSIZE", (0, 0), (-1, 0), 12),
- ("FONTSIZE", (0, 1), (-1, -1), 10),
- ("BOTTOMPADDING", (0, 0), (-1, 0), 10),
- ("GRID", (0, 0), (-1, -1), 0.5, COLOR_BORDER_GRAY),
- # Alternating backgrounds for right column
- (
- "ROWBACKGROUNDS",
- (1, 1),
- (1, -1),
- [COLOR_WHITE, COLOR_NIS2_BG_BLUE],
- ),
- ]
- )
- )
- elements.append(summary_table)
- elements.append(PageBreak())
-
- # SECTION 3: Compliance by Section Analysis
- elements.append(Paragraph("Compliance by Section", h1))
- elements.append(Spacer(1, 0.1 * inch))
-
- elements.append(
- Paragraph(
- "The following chart shows compliance percentage for each main section of the NIS2 directive:",
- normal_center,
- )
- )
- elements.append(Spacer(1, 0.1 * inch))
-
- # Create section chart
- section_chart_buffer = _create_nis2_section_chart(
- requirements_list, attributes_by_requirement_id
- )
- section_chart_buffer.seek(0)
- section_chart = Image(section_chart_buffer, width=6.5 * inch, height=5 * inch)
- elements.append(section_chart)
- elements.append(PageBreak())
-
- # SECTION 4: SubSection Breakdown
- elements.append(Paragraph("SubSection Breakdown", h1))
- elements.append(Spacer(1, 0.1 * inch))
-
- subsection_table = _create_nis2_subsection_table(
- requirements_list, attributes_by_requirement_id
- )
- elements.append(subsection_table)
- elements.append(PageBreak())
-
- # SECTION 5: Requirements Index
- elements.append(Paragraph("Requirements Index", h1))
- elements.append(Spacer(1, 0.1 * inch))
-
- index_elements = _create_nis2_requirements_index(
- requirements_list, attributes_by_requirement_id, h2, h3, normal
- )
- elements.extend(index_elements)
- elements.append(PageBreak())
-
- # SECTION 6: Detailed Findings
- elements.append(Paragraph("Detailed Findings", h1))
- elements.append(Spacer(1, 0.2 * inch))
-
- # Filter requirements for detailed findings (FAIL + MANUAL if include_manual)
- filtered_requirements = [
- req
- for req in requirements_list
- if req["attributes"]["status"] == StatusChoices.FAIL
- or (include_manual and req["attributes"]["status"] == StatusChoices.MANUAL)
- ]
-
- if not filtered_requirements:
- elements.append(
- Paragraph("✅ All automatic requirements are compliant.", normal)
- )
- else:
- elements.append(
- Paragraph(
- f"Showing {len(filtered_requirements)} requirements that need attention:",
- normal,
- )
- )
- elements.append(Spacer(1, 0.2 * inch))
-
- # Collect check IDs to load
- check_ids_to_load = []
- for requirement in filtered_requirements:
- requirement_id = requirement["id"]
- requirement_attributes = attributes_by_requirement_id.get(
- requirement_id, {}
- )
- check_ids = requirement_attributes.get("attributes", {}).get(
- "checks", []
- )
- check_ids_to_load.extend(check_ids)
-
- # Load findings on-demand
- logger.info(
- f"Loading findings on-demand for {len(filtered_requirements)} NIS2 requirements"
- )
- findings_by_check_id = _load_findings_for_requirement_checks(
- tenant_id, scan_id, check_ids_to_load, prowler_provider, findings_cache
- )
-
- for requirement in filtered_requirements:
- requirement_id = requirement["id"]
- requirement_attributes = attributes_by_requirement_id.get(
- requirement_id, {}
- )
- requirement_status = requirement["attributes"]["status"]
- requirement_description = requirement_attributes.get("description", "")
-
- # Requirement ID header in a box
- req_id_paragraph = Paragraph(f"Requirement: {requirement_id}", h2)
- req_id_table = Table([[req_id_paragraph]], colWidths=[6.5 * inch])
- req_id_table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (0, 0), COLOR_NIS2_PRIMARY),
- ("TEXTCOLOR", (0, 0), (0, 0), colors.white),
- ("ALIGN", (0, 0), (0, 0), "CENTER"),
- ("VALIGN", (0, 0), (0, 0), "MIDDLE"),
- ("LEFTPADDING", (0, 0), (-1, -1), 15),
- ("RIGHTPADDING", (0, 0), (-1, -1), 15),
- ("TOPPADDING", (0, 0), (-1, -1), 10),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 10),
- ("BOX", (0, 0), (-1, -1), 2, COLOR_NIS2_SECONDARY),
- ]
- )
- )
- elements.append(req_id_table)
- elements.append(Spacer(1, 0.15 * inch))
-
- metadata = requirement_attributes.get("attributes", {}).get(
- "req_attributes", []
- )
- if metadata:
- m = metadata[0]
- section = _safe_getattr(m, "Section", "Unknown")
- subsection = _safe_getattr(m, "SubSection", "Unknown")
- service = _safe_getattr(m, "Service", "generic")
-
- # Status badge
- status_text = (
- "✓ PASS"
- if requirement_status == StatusChoices.PASS
- else (
- "✗ FAIL"
- if requirement_status == StatusChoices.FAIL
- else "⊙ MANUAL"
- )
- )
- status_color = (
- COLOR_SAFE
- if requirement_status == StatusChoices.PASS
- else (
- COLOR_HIGH_RISK
- if requirement_status == StatusChoices.FAIL
- else COLOR_DARK_GRAY
- )
- )
-
- status_badge = Paragraph(
- f"{status_text}",
- ParagraphStyle(
- "status_badge",
- parent=normal,
- alignment=1,
- textColor=colors.white,
- fontSize=14,
- ),
- )
- status_table = Table([[status_badge]], colWidths=[6.5 * inch])
- status_table.setStyle(
- TableStyle(
- [
- ("BACKGROUND", (0, 0), (0, 0), status_color),
- ("ALIGN", (0, 0), (0, 0), "CENTER"),
- ("VALIGN", (0, 0), (0, 0), "MIDDLE"),
- ("TOPPADDING", (0, 0), (-1, -1), 8),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 8),
- ]
- )
- )
- elements.append(status_table)
- elements.append(Spacer(1, 0.15 * inch))
-
- # Requirement details table
- details_data = [
- [
- "Description:",
- Paragraph(requirement_description, normal_center),
- ],
- ["Section:", Paragraph(section, normal_center)],
- ["SubSection:", Paragraph(subsection, normal_center)],
- ["Service:", service],
- ]
- details_table = Table(
- details_data, colWidths=[2.2 * inch, 4.5 * inch]
- )
- details_table.setStyle(
- TableStyle(
- [
- (
- "BACKGROUND",
- (0, 0),
- (0, -1),
- COLOR_NIS2_BG_BLUE,
- ),
- ("TEXTCOLOR", (0, 0), (0, -1), COLOR_GRAY),
- ("FONTNAME", (0, 0), (0, -1), "FiraCode"),
- ("FONTSIZE", (0, 0), (-1, -1), 10),
- ("ALIGN", (0, 0), (0, -1), "LEFT"),
- ("VALIGN", (0, 0), (-1, -1), "TOP"),
- ("GRID", (0, 0), (-1, -1), 0.5, COLOR_BORDER_GRAY),
- ("LEFTPADDING", (0, 0), (-1, -1), 8),
- ("RIGHTPADDING", (0, 0), (-1, -1), 8),
- ("TOPPADDING", (0, 0), (-1, -1), 6),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 6),
- ]
- )
- )
- elements.append(details_table)
- elements.append(Spacer(1, 0.2 * inch))
-
- # Findings for checks
- requirement_check_ids = requirement_attributes.get(
- "attributes", {}
- ).get("checks", [])
- for check_id in requirement_check_ids:
- elements.append(Paragraph(f"Check: {check_id}", h3))
- elements.append(Spacer(1, 0.1 * inch))
-
- check_findings = findings_by_check_id.get(check_id, [])
-
- if not check_findings:
- elements.append(
- Paragraph(
- "- No information available for this check", normal
- )
- )
- else:
- findings_table_data = [
- ["Finding", "Resource name", "Severity", "Status", "Region"]
- ]
- for finding_output in check_findings:
- check_metadata = getattr(finding_output, "metadata", {})
- finding_title = getattr(
- check_metadata,
- "CheckTitle",
- getattr(finding_output, "check_id", ""),
- )
- resource_name = getattr(finding_output, "resource_name", "")
- if not resource_name:
- resource_name = getattr(
- finding_output, "resource_uid", ""
- )
- severity = getattr(
- check_metadata, "Severity", ""
- ).capitalize()
- finding_status = getattr(
- finding_output, "status", ""
- ).upper()
- region = getattr(finding_output, "region", "global")
-
- findings_table_data.append(
- [
- Paragraph(finding_title, normal_center),
- Paragraph(resource_name, normal_center),
- Paragraph(severity, normal_center),
- Paragraph(finding_status, normal_center),
- Paragraph(region, normal_center),
- ]
- )
-
- findings_table = Table(
- findings_table_data,
- colWidths=[
- 2.5 * inch,
- 3 * inch,
- 0.9 * inch,
- 0.9 * inch,
- 0.9 * inch,
- ],
- )
- findings_table.setStyle(
- TableStyle(
- [
- (
- "BACKGROUND",
- (0, 0),
- (-1, 0),
- COLOR_NIS2_PRIMARY,
- ),
- ("TEXTCOLOR", (0, 0), (-1, 0), colors.white),
- ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
- ("ALIGN", (0, 0), (0, 0), "CENTER"),
- ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
- ("FONTSIZE", (0, 0), (-1, -1), 9),
- ("GRID", (0, 0), (-1, -1), 0.5, COLOR_BORDER_GRAY),
- (
- "ROWBACKGROUNDS",
- (0, 1),
- (-1, -1),
- [colors.white, COLOR_NIS2_BG_BLUE],
- ),
- ("LEFTPADDING", (0, 0), (-1, -1), 5),
- ("RIGHTPADDING", (0, 0), (-1, -1), 5),
- ("TOPPADDING", (0, 0), (-1, -1), 5),
- ("BOTTOMPADDING", (0, 0), (-1, -1), 5),
- ]
- )
- )
- elements.append(findings_table)
-
- elements.append(Spacer(1, 0.15 * inch))
-
- elements.append(Spacer(1, 0.2 * inch))
-
- # Build the PDF
- logger.info("Building NIS2 PDF...")
- doc.build(
- elements,
- onFirstPage=partial(_add_pdf_footer, compliance_name=compliance_name),
- onLaterPages=partial(_add_pdf_footer, compliance_name=compliance_name),
- )
- logger.info(f"NIS2 report successfully generated at {output_path}")
-
- except Exception as e:
- tb_lineno = e.__traceback__.tb_lineno if e.__traceback__ else "unknown"
- logger.error(f"Error building NIS2 report, line {tb_lineno} -- {e}")
- raise e
def generate_compliance_reports(
@@ -3459,58 +161,45 @@ def generate_compliance_reports(
only_failed_nis2: bool = True,
) -> dict[str, dict[str, bool | str]]:
"""
- Generate multiple compliance reports (ThreatScore, ENS, and/or NIS2) with shared database queries.
+ Generate multiple compliance reports with shared database queries.
This function optimizes the generation of multiple reports by:
- Fetching the provider object once
- Aggregating requirement statistics once (shared across all reports)
- Reusing compliance framework data when possible
- This can reduce database queries by up to 50-70% when generating multiple reports.
-
Args:
- tenant_id (str): The tenant ID for Row-Level Security context.
- scan_id (str): The ID of the scan to generate reports for.
- provider_id (str): The ID of the provider used in the scan.
- generate_threatscore (bool): Whether to generate ThreatScore report. Defaults to True.
- generate_ens (bool): Whether to generate ENS report. Defaults to True.
- generate_nis2 (bool): Whether to generate NIS2 report. Defaults to True.
- only_failed_threatscore (bool): For ThreatScore, only include failed requirements. Defaults to True.
- min_risk_level_threatscore (int): Minimum risk level for ThreatScore critical requirements. Defaults to 4.
- include_manual_ens (bool): For ENS, include manual requirements. Defaults to True.
- only_failed_nis2 (bool): For NIS2, only include failed requirements. Defaults to True.
+ tenant_id: The tenant ID for Row-Level Security context.
+ scan_id: The ID of the scan to generate reports for.
+ provider_id: The ID of the provider used in the scan.
+ generate_threatscore: Whether to generate ThreatScore report.
+ generate_ens: Whether to generate ENS report.
+ generate_nis2: Whether to generate NIS2 report.
+ only_failed_threatscore: For ThreatScore, only include failed requirements.
+ min_risk_level_threatscore: Minimum risk level for ThreatScore critical requirements.
+ include_manual_ens: For ENS, include manual requirements.
+ include_manual_nis2: For NIS2, include manual requirements.
+ only_failed_nis2: For NIS2, only include failed requirements.
Returns:
- dict[str, dict[str, bool | str]]: Dictionary with results for each report:
- {
- 'threatscore': {'upload': bool, 'path': str, 'error': str (optional)},
- 'ens': {'upload': bool, 'path': str, 'error': str (optional)},
- 'nis2': {'upload': bool, 'path': str, 'error': str (optional)}
- }
-
- Example:
- >>> results = generate_compliance_reports(
- ... tenant_id="tenant-123",
- ... scan_id="scan-456",
- ... provider_id="provider-789",
- ... generate_threatscore=True,
- ... generate_ens=True,
- ... generate_nis2=True
- ... )
- >>> print(results['threatscore']['upload'])
- True
+ Dictionary with results for each report type.
"""
logger.info(
- f"Generating compliance reports for scan {scan_id} with provider {provider_id}"
- f" (ThreatScore: {generate_threatscore}, ENS: {generate_ens}, NIS2: {generate_nis2})"
+ "Generating compliance reports for scan %s with provider %s"
+ " (ThreatScore: %s, ENS: %s, NIS2: %s)",
+ scan_id,
+ provider_id,
+ generate_threatscore,
+ generate_ens,
+ generate_nis2,
)
results = {}
- # Validate that the scan has findings and get provider info (shared query)
+ # Validate that the scan has findings and get provider info
with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
if not ScanSummary.objects.filter(scan_id=scan_id).exists():
- logger.info(f"No findings found for scan {scan_id}")
+ logger.info("No findings found for scan %s", scan_id)
if generate_threatscore:
results["threatscore"] = {"upload": False, "path": ""}
if generate_ens:
@@ -3519,7 +208,6 @@ def generate_compliance_reports(
results["nis2"] = {"upload": False, "path": ""}
return results
- # Fetch provider once (optimization)
provider_obj = Provider.objects.get(id=provider_id)
provider_uid = provider_obj.uid
provider_type = provider_obj.provider
@@ -3533,43 +221,36 @@ def generate_compliance_reports(
"kubernetes",
"alibabacloud",
]:
- logger.info(
- f"Provider {provider_id} ({provider_type}) is not supported for ThreatScore report"
- )
+ logger.info("Provider %s not supported for ThreatScore report", provider_type)
results["threatscore"] = {"upload": False, "path": ""}
generate_threatscore = False
if generate_ens and provider_type not in ["aws", "azure", "gcp"]:
- logger.info(
- f"Provider {provider_id} ({provider_type}) is not supported for ENS report"
- )
+ logger.info("Provider %s not supported for ENS report", provider_type)
results["ens"] = {"upload": False, "path": ""}
generate_ens = False
if generate_nis2 and provider_type not in ["aws", "azure", "gcp"]:
- logger.info(
- f"Provider {provider_id} ({provider_type}) is not supported for NIS2 report"
- )
+ logger.info("Provider %s not supported for NIS2 report", provider_type)
results["nis2"] = {"upload": False, "path": ""}
generate_nis2 = False
- # If no reports to generate, return early
if not generate_threatscore and not generate_ens and not generate_nis2:
return results
- # Aggregate requirement statistics once (major optimization)
+ # Aggregate requirement statistics once
logger.info(
- f"Aggregating requirement statistics once for all reports (scan {scan_id})"
+ "Aggregating requirement statistics once for all reports (scan %s)", scan_id
)
requirement_statistics = _aggregate_requirement_statistics_from_database(
tenant_id, scan_id
)
- # Create shared findings cache (major optimization for findings queries)
+ # Create shared findings cache
findings_cache = {}
- logger.info("Created shared findings cache for both reports")
+ logger.info("Created shared findings cache for all reports")
- # Generate output directories for each compliance framework
+ # Generate output directories
try:
logger.info("Generating output directories")
threatscore_path = _generate_compliance_output_directory(
@@ -3593,10 +274,9 @@ def generate_compliance_reports(
scan_id,
compliance_framework="nis2",
)
- # Extract base scan directory for cleanup (parent of threatscore directory)
out_dir = str(Path(threatscore_path).parent.parent)
except Exception as e:
- logger.error(f"Error generating output directory: {e}")
+ logger.error("Error generating output directory: %s", e)
error_dict = {"error": str(e), "upload": False, "path": ""}
if generate_threatscore:
results["threatscore"] = error_dict.copy()
@@ -3611,7 +291,8 @@ def generate_compliance_reports(
compliance_id_threatscore = f"prowler_threatscore_{provider_type}"
pdf_path_threatscore = f"{threatscore_path}_threatscore_report.pdf"
logger.info(
- f"Generating ThreatScore report with compliance {compliance_id_threatscore}"
+ "Generating ThreatScore report with compliance %s",
+ compliance_id_threatscore,
)
try:
@@ -3623,13 +304,13 @@ def generate_compliance_reports(
provider_id=provider_id,
only_failed=only_failed_threatscore,
min_risk_level=min_risk_level_threatscore,
- provider_obj=provider_obj, # Reuse provider object
- requirement_statistics=requirement_statistics, # Reuse statistics
- findings_cache=findings_cache, # Share findings cache
+ provider_obj=provider_obj,
+ requirement_statistics=requirement_statistics,
+ findings_cache=findings_cache,
)
# Compute and store ThreatScore metrics snapshot
- logger.info(f"Computing ThreatScore metrics for scan {scan_id}")
+ logger.info("Computing ThreatScore metrics for scan %s", scan_id)
try:
metrics = compute_threatscore_metrics(
tenant_id=tenant_id,
@@ -3639,9 +320,7 @@ def generate_compliance_reports(
min_risk_level=min_risk_level_threatscore,
)
- # Create snapshot in database
with rls_transaction(tenant_id):
- # Get previous snapshot for the same provider to calculate delta
previous_snapshot = (
ThreatScoreSnapshot.objects.filter(
tenant_id=tenant_id,
@@ -3652,7 +331,6 @@ def generate_compliance_reports(
.first()
)
- # Calculate score delta (improvement)
score_delta = None
if previous_snapshot:
score_delta = metrics["overall_score"] - float(
@@ -3683,12 +361,10 @@ def generate_compliance_reports(
else ""
)
logger.info(
- f"ThreatScore snapshot created with ID {snapshot.id} "
- f"(score: {snapshot.overall_score}%{delta_msg})"
+                "ThreatScore snapshot created with ID %s (score: %s%%%s)", snapshot.id, snapshot.overall_score, delta_msg
)
except Exception as e:
- # Log error but don't fail the job if snapshot creation fails
- logger.error(f"Error creating ThreatScore snapshot: {e}")
+ logger.error("Error creating ThreatScore snapshot: %s", e)
upload_uri_threatscore = _upload_to_s3(
tenant_id,
@@ -3702,20 +378,20 @@ def generate_compliance_reports(
"upload": True,
"path": upload_uri_threatscore,
}
- logger.info(f"ThreatScore report uploaded to {upload_uri_threatscore}")
+ logger.info("ThreatScore report uploaded to %s", upload_uri_threatscore)
else:
results["threatscore"] = {"upload": False, "path": out_dir}
- logger.warning(f"ThreatScore report saved locally at {out_dir}")
+ logger.warning("ThreatScore report saved locally at %s", out_dir)
except Exception as e:
- logger.error(f"Error generating ThreatScore report: {e}")
+ logger.error("Error generating ThreatScore report: %s", e)
results["threatscore"] = {"upload": False, "path": "", "error": str(e)}
# Generate ENS report
if generate_ens:
compliance_id_ens = f"ens_rd2022_{provider_type}"
pdf_path_ens = f"{ens_path}_ens_report.pdf"
- logger.info(f"Generating ENS report with compliance {compliance_id_ens}")
+ logger.info("Generating ENS report with compliance %s", compliance_id_ens)
try:
generate_ens_report(
@@ -3725,34 +401,31 @@ def generate_compliance_reports(
output_path=pdf_path_ens,
provider_id=provider_id,
include_manual=include_manual_ens,
- provider_obj=provider_obj, # Reuse provider object
- requirement_statistics=requirement_statistics, # Reuse statistics
- findings_cache=findings_cache, # Share findings cache
+ provider_obj=provider_obj,
+ requirement_statistics=requirement_statistics,
+ findings_cache=findings_cache,
)
upload_uri_ens = _upload_to_s3(
- tenant_id,
- scan_id,
- pdf_path_ens,
- f"ens/{Path(pdf_path_ens).name}",
+ tenant_id, scan_id, pdf_path_ens, f"ens/{Path(pdf_path_ens).name}"
)
if upload_uri_ens:
results["ens"] = {"upload": True, "path": upload_uri_ens}
- logger.info(f"ENS report uploaded to {upload_uri_ens}")
+ logger.info("ENS report uploaded to %s", upload_uri_ens)
else:
results["ens"] = {"upload": False, "path": out_dir}
- logger.warning(f"ENS report saved locally at {out_dir}")
+ logger.warning("ENS report saved locally at %s", out_dir)
except Exception as e:
- logger.error(f"Error generating ENS report: {e}")
+ logger.error("Error generating ENS report: %s", e)
results["ens"] = {"upload": False, "path": "", "error": str(e)}
# Generate NIS2 report
if generate_nis2:
compliance_id_nis2 = f"nis2_{provider_type}"
pdf_path_nis2 = f"{nis2_path}_nis2_report.pdf"
- logger.info(f"Generating NIS2 report with compliance {compliance_id_nis2}")
+ logger.info("Generating NIS2 report with compliance %s", compliance_id_nis2)
try:
generate_nis2_report(
@@ -3763,27 +436,24 @@ def generate_compliance_reports(
provider_id=provider_id,
only_failed=only_failed_nis2,
include_manual=include_manual_nis2,
- provider_obj=provider_obj, # Reuse provider object
- requirement_statistics=requirement_statistics, # Reuse statistics
- findings_cache=findings_cache, # Share findings cache
+ provider_obj=provider_obj,
+ requirement_statistics=requirement_statistics,
+ findings_cache=findings_cache,
)
upload_uri_nis2 = _upload_to_s3(
- tenant_id,
- scan_id,
- pdf_path_nis2,
- f"nis2/{Path(pdf_path_nis2).name}",
+ tenant_id, scan_id, pdf_path_nis2, f"nis2/{Path(pdf_path_nis2).name}"
)
if upload_uri_nis2:
results["nis2"] = {"upload": True, "path": upload_uri_nis2}
- logger.info(f"NIS2 report uploaded to {upload_uri_nis2}")
+ logger.info("NIS2 report uploaded to %s", upload_uri_nis2)
else:
results["nis2"] = {"upload": False, "path": out_dir}
- logger.warning(f"NIS2 report saved locally at {out_dir}")
+ logger.warning("NIS2 report saved locally at %s", out_dir)
except Exception as e:
- logger.error(f"Error generating NIS2 report: {e}")
+ logger.error("Error generating NIS2 report: %s", e)
results["nis2"] = {"upload": False, "path": "", "error": str(e)}
# Clean up temporary files if all reports were uploaded successfully
@@ -3796,11 +466,11 @@ def generate_compliance_reports(
if all_uploaded:
try:
rmtree(Path(out_dir), ignore_errors=True)
- logger.info(f"Cleaned up temporary files at {out_dir}")
+ logger.info("Cleaned up temporary files at %s", out_dir)
except Exception as e:
- logger.error(f"Error deleting output files: {e}")
+ logger.error("Error deleting output files: %s", e)
- logger.info(f"Compliance reports generation completed. Results: {results}")
+ logger.info("Compliance reports generation completed. Results: %s", results)
return results
@@ -3813,75 +483,24 @@ def generate_compliance_reports_job(
generate_nis2: bool = True,
) -> dict[str, dict[str, bool | str]]:
"""
- Job function to generate ThreatScore, ENS, and/or NIS2 compliance reports with optimized database queries.
-
- This function efficiently generates compliance reports by:
- - Fetching the provider object once (shared across all reports)
- - Aggregating requirement statistics once (shared across all reports)
- - Sharing findings cache between reports to avoid duplicate queries
- - Reducing total database queries by 50-70% compared to generating reports separately
-
- Use this job when you need to generate compliance reports for a scan.
+ Celery task wrapper for generate_compliance_reports.
Args:
- tenant_id (str): The tenant ID for Row-Level Security context.
- scan_id (str): The ID of the scan to generate reports for.
- provider_id (str): The ID of the provider used in the scan.
- generate_threatscore (bool): Whether to generate ThreatScore report. Defaults to True.
- generate_ens (bool): Whether to generate ENS report. Defaults to True.
- generate_nis2 (bool): Whether to generate NIS2 report. Defaults to True.
+ tenant_id: The tenant ID for Row-Level Security context.
+ scan_id: The ID of the scan to generate reports for.
+ provider_id: The ID of the provider used in the scan.
+ generate_threatscore: Whether to generate ThreatScore report.
+ generate_ens: Whether to generate ENS report.
+ generate_nis2: Whether to generate NIS2 report.
Returns:
- dict[str, dict[str, bool | str]]: Dictionary with results for each report:
- {
- 'threatscore': {'upload': bool, 'path': str, 'error': str (optional)},
- 'ens': {'upload': bool, 'path': str, 'error': str (optional)},
- 'nis2': {'upload': bool, 'path': str, 'error': str (optional)}
- }
-
- Example:
- >>> results = generate_compliance_reports_job(
- ... tenant_id="tenant-123",
- ... scan_id="scan-456",
- ... provider_id="provider-789"
- ... )
- >>> if results['threatscore']['upload']:
- ... print(f"ThreatScore uploaded to {results['threatscore']['path']}")
- >>> if results['ens']['upload']:
- ... print(f"ENS uploaded to {results['ens']['path']}")
- >>> if results['nis2']['upload']:
- ... print(f"NIS2 uploaded to {results['nis2']['path']}")
+ Dictionary with results for each report type.
"""
- logger.info(
- f"Starting optimized compliance reports job for scan {scan_id} "
- f"(ThreatScore: {generate_threatscore}, ENS: {generate_ens}, NIS2: {generate_nis2})"
+ return generate_compliance_reports(
+ tenant_id=tenant_id,
+ scan_id=scan_id,
+ provider_id=provider_id,
+ generate_threatscore=generate_threatscore,
+ generate_ens=generate_ens,
+ generate_nis2=generate_nis2,
)
-
- try:
- results = generate_compliance_reports(
- tenant_id=tenant_id,
- scan_id=scan_id,
- provider_id=provider_id,
- generate_threatscore=generate_threatscore,
- generate_ens=generate_ens,
- generate_nis2=generate_nis2,
- only_failed_threatscore=True,
- min_risk_level_threatscore=4,
- include_manual_ens=True,
- include_manual_nis2=False,
- only_failed_nis2=True,
- )
- logger.info("Optimized compliance reports job completed successfully")
- return results
-
- except Exception as e:
- logger.error(f"Error in optimized compliance reports job: {e}")
- error_result = {"upload": False, "path": "", "error": str(e)}
- results = {}
- if generate_threatscore:
- results["threatscore"] = error_result.copy()
- if generate_ens:
- results["ens"] = error_result.copy()
- if generate_nis2:
- results["nis2"] = error_result.copy()
- return results
diff --git a/api/src/backend/tasks/jobs/reports/__init__.py b/api/src/backend/tasks/jobs/reports/__init__.py
new file mode 100644
index 0000000000..60602b93ab
--- /dev/null
+++ b/api/src/backend/tasks/jobs/reports/__init__.py
@@ -0,0 +1,186 @@
+# Base classes and data structures
+from .base import (
+ BaseComplianceReportGenerator,
+ ComplianceData,
+ RequirementData,
+ create_pdf_styles,
+ get_requirement_metadata,
+)
+
+# Chart functions
+from .charts import (
+ create_horizontal_bar_chart,
+ create_pie_chart,
+ create_radar_chart,
+ create_stacked_bar_chart,
+ create_vertical_bar_chart,
+ get_chart_color_for_percentage,
+)
+
+# Reusable components: color helpers, badge components, risk component,
+# table components, and section components
+from .components import (
+ ColumnConfig,
+ create_badge,
+ create_data_table,
+ create_findings_table,
+ create_info_table,
+ create_multi_badge_row,
+ create_risk_component,
+ create_section_header,
+ create_status_badge,
+ create_summary_table,
+ get_color_for_compliance,
+ get_color_for_risk_level,
+ get_color_for_weight,
+ get_status_color,
+)
+
+# Framework configuration: Main configuration, Color constants, ENS colors,
+# NIS2 colors, Chart colors, ENS constants, Section constants, Layout constants
+from .config import (
+ CHART_COLOR_BLUE,
+ CHART_COLOR_GREEN_1,
+ CHART_COLOR_GREEN_2,
+ CHART_COLOR_ORANGE,
+ CHART_COLOR_RED,
+ CHART_COLOR_YELLOW,
+ COL_WIDTH_LARGE,
+ COL_WIDTH_MEDIUM,
+ COL_WIDTH_SMALL,
+ COL_WIDTH_XLARGE,
+ COL_WIDTH_XXLARGE,
+ COLOR_BG_BLUE,
+ COLOR_BG_LIGHT_BLUE,
+ COLOR_BLUE,
+ COLOR_DARK_GRAY,
+ COLOR_ENS_ALTO,
+ COLOR_ENS_BAJO,
+ COLOR_ENS_MEDIO,
+ COLOR_ENS_OPCIONAL,
+ COLOR_GRAY,
+ COLOR_HIGH_RISK,
+ COLOR_LIGHT_BLUE,
+ COLOR_LIGHT_GRAY,
+ COLOR_LIGHTER_BLUE,
+ COLOR_LOW_RISK,
+ COLOR_MEDIUM_RISK,
+ COLOR_NIS2_PRIMARY,
+ COLOR_NIS2_SECONDARY,
+ COLOR_PROWLER_DARK_GREEN,
+ COLOR_SAFE,
+ COLOR_WHITE,
+ DIMENSION_KEYS,
+ DIMENSION_MAPPING,
+ DIMENSION_NAMES,
+ ENS_NIVEL_ORDER,
+ ENS_TIPO_ORDER,
+ FRAMEWORK_REGISTRY,
+ NIS2_SECTION_TITLES,
+ NIS2_SECTIONS,
+ PADDING_LARGE,
+ PADDING_MEDIUM,
+ PADDING_SMALL,
+ PADDING_XLARGE,
+ THREATSCORE_SECTIONS,
+ TIPO_ICONS,
+ FrameworkConfig,
+ get_framework_config,
+)
+
+# Framework-specific generators
+from .ens import ENSReportGenerator
+from .nis2 import NIS2ReportGenerator
+from .threatscore import ThreatScoreReportGenerator
+
# Explicit public API of the reports package. Keeping this list explicit makes
# `from tasks.jobs.reports import *` predictable and documents what consumers
# are expected to import from this package.
__all__ = [
    # Base classes
    "BaseComplianceReportGenerator",
    "ComplianceData",
    "RequirementData",
    "create_pdf_styles",
    "get_requirement_metadata",
    # Framework-specific generators
    "ThreatScoreReportGenerator",
    "ENSReportGenerator",
    "NIS2ReportGenerator",
    # Configuration
    "FrameworkConfig",
    "FRAMEWORK_REGISTRY",
    "get_framework_config",
    # Color constants
    "COLOR_BLUE",
    "COLOR_LIGHT_BLUE",
    "COLOR_LIGHTER_BLUE",
    "COLOR_BG_BLUE",
    "COLOR_BG_LIGHT_BLUE",
    "COLOR_GRAY",
    "COLOR_LIGHT_GRAY",
    "COLOR_DARK_GRAY",
    "COLOR_WHITE",
    "COLOR_HIGH_RISK",
    "COLOR_MEDIUM_RISK",
    "COLOR_LOW_RISK",
    "COLOR_SAFE",
    "COLOR_PROWLER_DARK_GREEN",
    "COLOR_ENS_ALTO",
    "COLOR_ENS_MEDIO",
    "COLOR_ENS_BAJO",
    "COLOR_ENS_OPCIONAL",
    "COLOR_NIS2_PRIMARY",
    "COLOR_NIS2_SECONDARY",
    "CHART_COLOR_BLUE",
    "CHART_COLOR_GREEN_1",
    "CHART_COLOR_GREEN_2",
    "CHART_COLOR_YELLOW",
    "CHART_COLOR_ORANGE",
    "CHART_COLOR_RED",
    # ENS constants
    "DIMENSION_MAPPING",
    "DIMENSION_NAMES",
    "DIMENSION_KEYS",
    "ENS_NIVEL_ORDER",
    "ENS_TIPO_ORDER",
    "TIPO_ICONS",
    # Section constants
    "THREATSCORE_SECTIONS",
    "NIS2_SECTIONS",
    "NIS2_SECTION_TITLES",
    # Layout constants
    "COL_WIDTH_SMALL",
    "COL_WIDTH_MEDIUM",
    "COL_WIDTH_LARGE",
    "COL_WIDTH_XLARGE",
    "COL_WIDTH_XXLARGE",
    "PADDING_SMALL",
    "PADDING_MEDIUM",
    "PADDING_LARGE",
    "PADDING_XLARGE",
    # Color helpers
    "get_color_for_risk_level",
    "get_color_for_weight",
    "get_color_for_compliance",
    "get_status_color",
    # Badge components
    "create_badge",
    "create_status_badge",
    "create_multi_badge_row",
    # Risk component
    "create_risk_component",
    # Table components
    "create_info_table",
    "create_data_table",
    "create_findings_table",
    "ColumnConfig",
    # Section components
    "create_section_header",
    "create_summary_table",
    # Chart functions
    "get_chart_color_for_percentage",
    "create_vertical_bar_chart",
    "create_horizontal_bar_chart",
    "create_radar_chart",
    "create_pie_chart",
    "create_stacked_bar_chart",
]
diff --git a/api/src/backend/tasks/jobs/reports/base.py b/api/src/backend/tasks/jobs/reports/base.py
new file mode 100644
index 0000000000..42776681fb
--- /dev/null
+++ b/api/src/backend/tasks/jobs/reports/base.py
@@ -0,0 +1,911 @@
+import gc
+import os
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from typing import Any
+
+from celery.utils.log import get_task_logger
+from reportlab.lib.enums import TA_CENTER
+from reportlab.lib.pagesizes import letter
+from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet
+from reportlab.lib.units import inch
+from reportlab.pdfbase import pdfmetrics
+from reportlab.pdfbase.ttfonts import TTFont
+from reportlab.pdfgen import canvas
+from reportlab.platypus import Image, PageBreak, Paragraph, SimpleDocTemplate, Spacer
+from tasks.jobs.threatscore_utils import (
+ _aggregate_requirement_statistics_from_database,
+ _calculate_requirements_data_from_statistics,
+ _load_findings_for_requirement_checks,
+)
+
+from api.db_router import READ_REPLICA_ALIAS
+from api.db_utils import rls_transaction
+from api.models import Provider, StatusChoices
+from api.utils import initialize_prowler_provider
+from prowler.lib.check.compliance_models import Compliance
+from prowler.lib.outputs.finding import Finding as FindingOutput
+
+from .components import (
+ ColumnConfig,
+ create_data_table,
+ create_info_table,
+ create_status_badge,
+)
+from .config import (
+ COLOR_BG_BLUE,
+ COLOR_BG_LIGHT_BLUE,
+ COLOR_BLUE,
+ COLOR_BORDER_GRAY,
+ COLOR_GRAY,
+ COLOR_LIGHT_BLUE,
+ COLOR_LIGHTER_BLUE,
+ COLOR_PROWLER_DARK_GREEN,
+ PADDING_LARGE,
+ PADDING_SMALL,
+ FrameworkConfig,
+)
+
+logger = get_task_logger(__name__)
+
# Module-level guard so reportlab font registration happens at most once.
_fonts_registered: bool = False


def _register_fonts() -> None:
    """Register the custom TrueType fonts used for PDF generation.

    Safe to call repeatedly: a module-level flag short-circuits subsequent
    calls, avoiding duplicate registration errors from reportlab.
    """
    global _fonts_registered
    if _fonts_registered:
        return

    fonts_dir = os.path.join(os.path.dirname(__file__), "../../assets/fonts")

    # Register each (font name, file name) pair with reportlab.
    for font_name, font_file in (
        ("PlusJakartaSans", "PlusJakartaSans-Regular.ttf"),
        ("FiraCode", "FiraCode-Regular.ttf"),
    ):
        pdfmetrics.registerFont(TTFont(font_name, os.path.join(fonts_dir, font_file)))

    _fonts_registered = True
+
+
+# =============================================================================
+# Data Classes
+# =============================================================================
+
+
@dataclass
class RequirementData:
    """Per-requirement compliance summary used when rendering a report."""

    # Requirement identifier within the compliance framework.
    id: str
    # Human-readable requirement description.
    description: str
    # Compliance status (PASS, FAIL, MANUAL).
    status: str
    # Finding counters for this requirement.
    passed_findings: int = 0
    failed_findings: int = 0
    total_findings: int = 0
    # Check IDs associated with this requirement.
    checks: list[str] = field(default_factory=list)
    # Framework-specific requirement attributes (opaque to this class).
    attributes: Any = None
+
+
@dataclass
class ComplianceData:
    """Everything needed to render one compliance report.

    Bundles framework metadata, per-requirement data, the shared findings
    cache, and the provider objects so generators work from a single payload.
    """

    # Identifiers for the report context.
    tenant_id: str
    scan_id: str
    provider_id: str
    compliance_id: str
    # Compliance framework metadata.
    framework: str
    name: str
    version: str
    description: str
    # Per-requirement data and lookup tables.
    requirements: list[RequirementData] = field(default_factory=list)
    attributes_by_requirement_id: dict[str, dict] = field(default_factory=dict)
    # Shared findings cache keyed by check ID.
    findings_by_check_id: dict[str, list[FindingOutput]] = field(default_factory=dict)
    # Django Provider model and the initialized Prowler provider.
    provider_obj: Provider | None = None
    prowler_provider: Any = None
+
+
+def get_requirement_metadata(
+ requirement_id: str,
+ attributes_by_requirement_id: dict[str, dict],
+) -> Any | None:
+ """Get the first requirement metadata object from attributes.
+
+ This helper function extracts the requirement metadata (req_attributes)
+ from the attributes dictionary. It's a common pattern used across all
+ report generators.
+
+ Args:
+ requirement_id: The requirement ID to look up.
+ attributes_by_requirement_id: Mapping of requirement IDs to their attributes.
+
+ Returns:
+ The first requirement attribute object, or None if not found.
+
+ Example:
+ >>> meta = get_requirement_metadata(req.id, data.attributes_by_requirement_id)
+ >>> if meta:
+ ... section = getattr(meta, "Section", "Unknown")
+ """
+ req_attrs = attributes_by_requirement_id.get(requirement_id, {})
+ meta_list = req_attrs.get("attributes", {}).get("req_attributes", [])
+ if meta_list:
+ return meta_list[0]
+ return None
+
+
# =============================================================================
# PDF Styles Cache
# =============================================================================

_PDF_STYLES_CACHE: dict[str, ParagraphStyle] | None = None


def create_pdf_styles() -> dict[str, ParagraphStyle]:
    """Build (once) and return the PDF paragraph styles used by all reports.

    The style dictionary is created lazily on the first call and cached at
    module level, so repeated report generations share the same objects.

    Returns:
        Dictionary containing the following styles:
        - 'title': Title style with prowler green color
        - 'h1': Heading 1 style with blue color and background
        - 'h2': Heading 2 style with light blue color
        - 'h3': Heading 3 style for sub-headings
        - 'normal': Normal text style with left indent
        - 'normal_center': Normal text style without indent
    """
    global _PDF_STYLES_CACHE

    if _PDF_STYLES_CACHE is None:
        # Fonts must be registered before any style references them.
        _register_fonts()
        base = getSampleStyleSheet()

        _PDF_STYLES_CACHE = {
            "title": ParagraphStyle(
                "CustomTitle",
                parent=base["Title"],
                fontSize=24,
                textColor=COLOR_PROWLER_DARK_GREEN,
                spaceAfter=20,
                fontName="PlusJakartaSans",
                alignment=TA_CENTER,
            ),
            "h1": ParagraphStyle(
                "CustomH1",
                parent=base["Heading1"],
                fontSize=18,
                textColor=COLOR_BLUE,
                spaceBefore=20,
                spaceAfter=12,
                fontName="PlusJakartaSans",
                leftIndent=0,
                borderWidth=2,
                borderColor=COLOR_BLUE,
                borderPadding=PADDING_LARGE,
                backColor=COLOR_BG_BLUE,
            ),
            "h2": ParagraphStyle(
                "CustomH2",
                parent=base["Heading2"],
                fontSize=14,
                textColor=COLOR_LIGHT_BLUE,
                spaceBefore=15,
                spaceAfter=8,
                fontName="PlusJakartaSans",
                leftIndent=10,
                borderWidth=1,
                borderColor=COLOR_BORDER_GRAY,
                borderPadding=5,
                backColor=COLOR_BG_LIGHT_BLUE,
            ),
            "h3": ParagraphStyle(
                "CustomH3",
                parent=base["Heading3"],
                fontSize=12,
                textColor=COLOR_LIGHTER_BLUE,
                spaceBefore=10,
                spaceAfter=6,
                fontName="PlusJakartaSans",
                leftIndent=20,
            ),
            "normal": ParagraphStyle(
                "CustomNormal",
                parent=base["Normal"],
                fontSize=10,
                textColor=COLOR_GRAY,
                spaceBefore=PADDING_SMALL,
                spaceAfter=PADDING_SMALL,
                leftIndent=30,
                fontName="PlusJakartaSans",
            ),
            "normal_center": ParagraphStyle(
                "CustomNormalCenter",
                parent=base["Normal"],
                fontSize=10,
                textColor=COLOR_GRAY,
                fontName="PlusJakartaSans",
            ),
        }

    return _PDF_STYLES_CACHE
+
+
+# =============================================================================
+# Base Report Generator
+# =============================================================================
+
+
class BaseComplianceReportGenerator(ABC):
    """Abstract base class for compliance PDF report generators.

    This class implements the Template Method pattern, providing a common
    structure for all compliance reports while allowing subclasses to
    customize specific sections.

    Subclasses must implement:
    - create_executive_summary()
    - create_charts_section()
    - create_requirements_index()

    Optionally, subclasses can override:
    - create_cover_page()
    - create_detailed_findings()
    - get_footer_text()
    """

    def __init__(self, config: FrameworkConfig):
        """Initialize the report generator.

        Args:
            config: Framework configuration
        """
        self.config = config
        # Styles are cached at module level, so this is cheap after first use.
        self.styles = create_pdf_styles()

    # =========================================================================
    # Template Method
    # =========================================================================

    def generate(
        self,
        tenant_id: str,
        scan_id: str,
        compliance_id: str,
        output_path: str,
        provider_id: str,
        provider_obj: Provider | None = None,
        requirement_statistics: dict[str, dict[str, int]] | None = None,
        findings_cache: dict[str, list[FindingOutput]] | None = None,
        **kwargs,
    ) -> None:
        """Generate the PDF compliance report.

        This is the template method that orchestrates the report generation.
        It calls abstract methods that subclasses must implement.

        Args:
            tenant_id: Tenant identifier for RLS context
            scan_id: Scan identifier
            compliance_id: Compliance framework identifier
            output_path: Path where the PDF will be saved
            provider_id: Provider identifier
            provider_obj: Optional pre-fetched Provider object
            requirement_statistics: Optional pre-aggregated statistics
            findings_cache: Optional pre-loaded findings cache
            **kwargs: Additional framework-specific arguments
        """
        logger.info(
            "Generating %s report for scan %s", self.config.display_name, scan_id
        )

        try:
            # 1. Load compliance data
            data = self._load_compliance_data(
                tenant_id=tenant_id,
                scan_id=scan_id,
                compliance_id=compliance_id,
                provider_id=provider_id,
                provider_obj=provider_obj,
                requirement_statistics=requirement_statistics,
                findings_cache=findings_cache,
            )

            # 2. Create PDF document
            doc = self._create_document(output_path, data)

            # 3. Build report elements incrementally to manage memory
            # We collect garbage after heavy sections to prevent OOM on large reports
            elements = []

            # Cover page (lightweight)
            elements.extend(self.create_cover_page(data))
            elements.append(PageBreak())

            # Executive summary (framework-specific)
            elements.extend(self.create_executive_summary(data))

            # Body sections (charts + requirements index)
            # Override _build_body_sections() in subclasses to change section order
            elements.extend(self._build_body_sections(data))

            # Detailed findings - heaviest section, loads findings on-demand
            logger.info("Building detailed findings section...")
            elements.extend(self.create_detailed_findings(data, **kwargs))
            gc.collect()  # Free findings data after processing

            # 4. Build the PDF
            logger.info("Building PDF document with %d elements...", len(elements))
            self._build_pdf(doc, elements, data)

            # Final cleanup
            del elements
            gc.collect()

            logger.info("Successfully generated report at %s", output_path)

        except Exception as e:
            import traceback

            # Include the failing line number to ease debugging from task logs.
            tb_lineno = e.__traceback__.tb_lineno if e.__traceback__ else "unknown"
            logger.error("Error generating report, line %s -- %s", tb_lineno, e)
            logger.error("Full traceback:\n%s", traceback.format_exc())
            raise

    def _build_body_sections(self, data: ComplianceData) -> list:
        """Build the body sections between executive summary and detailed findings.

        Override in subclasses to change section order.

        Args:
            data: Aggregated compliance data.

        Returns:
            List of ReportLab elements.
        """
        elements = []

        # Charts section (framework-specific) - heavy on memory due to matplotlib
        elements.extend(self.create_charts_section(data))
        elements.append(PageBreak())
        gc.collect()  # Free matplotlib resources

        # Requirements index (framework-specific)
        elements.extend(self.create_requirements_index(data))
        elements.append(PageBreak())

        return elements

    # =========================================================================
    # Abstract Methods (must be implemented by subclasses)
    # =========================================================================

    @abstractmethod
    def create_executive_summary(self, data: ComplianceData) -> list:
        """Create the executive summary section.

        This section typically includes:
        - Overall compliance score/metrics
        - High-level statistics
        - Critical findings summary

        Args:
            data: Aggregated compliance data

        Returns:
            List of ReportLab elements
        """

    @abstractmethod
    def create_charts_section(self, data: ComplianceData) -> list:
        """Create the charts and visualizations section.

        This section typically includes:
        - Compliance score charts by section
        - Distribution charts
        - Trend visualizations

        Args:
            data: Aggregated compliance data

        Returns:
            List of ReportLab elements
        """

    @abstractmethod
    def create_requirements_index(self, data: ComplianceData) -> list:
        """Create the requirements index/table of contents.

        This section typically includes:
        - Hierarchical list of requirements
        - Status indicators
        - Section groupings

        Args:
            data: Aggregated compliance data

        Returns:
            List of ReportLab elements
        """

    # =========================================================================
    # Common Methods (can be overridden by subclasses)
    # =========================================================================

    def create_cover_page(self, data: ComplianceData) -> list:
        """Create the report cover page.

        Args:
            data: Aggregated compliance data

        Returns:
            List of ReportLab elements
        """
        elements = []

        # Prowler logo (skipped silently if the asset is missing)
        logo_path = os.path.join(
            os.path.dirname(__file__), "../../assets/img/prowler_logo.png"
        )
        if os.path.exists(logo_path):
            logo = Image(logo_path, width=5 * inch, height=1 * inch)
            elements.append(logo)

        elements.append(Spacer(1, 0.5 * inch))

        # Title
        title_text = f"{self.config.display_name} Report"
        elements.append(Paragraph(title_text, self.styles["title"]))
        elements.append(Spacer(1, 0.5 * inch))

        # Compliance info table
        info_rows = self._build_info_rows(data, language=self.config.language)

        info_table = create_info_table(
            rows=info_rows,
            label_width=2 * inch,
            value_width=4 * inch,
            normal_style=self.styles["normal_center"],
        )
        elements.append(info_table)

        return elements

    def _build_info_rows(
        self, data: ComplianceData, language: str = "en"
    ) -> list[tuple[str, str]]:
        """Build the standard info rows for the cover page table.

        This helper method creates the common metadata rows used in all
        report cover pages. Subclasses can use this to maintain consistency
        while customizing other aspects of the cover page.

        Args:
            data: Aggregated compliance data.
            language: Language for labels ("en" or "es").

        Returns:
            List of (label, value) tuples for the info table.
        """
        # Labels based on language
        labels = {
            "en": {
                "framework": "Framework:",
                "id": "ID:",
                "name": "Name:",
                "version": "Version:",
                "provider": "Provider:",
                "account_id": "Account ID:",
                "alias": "Alias:",
                "scan_id": "Scan ID:",
                "description": "Description:",
            },
            "es": {
                "framework": "Framework:",
                "id": "ID:",
                "name": "Nombre:",
                "version": "Versión:",
                "provider": "Proveedor:",
                "account_id": "Account ID:",
                "alias": "Alias:",
                "scan_id": "Scan ID:",
                "description": "Descripción:",
            },
        }
        # Unknown languages fall back to English labels.
        lang_labels = labels.get(language, labels["en"])

        info_rows = [
            (lang_labels["framework"], data.framework),
            (lang_labels["id"], data.compliance_id),
            (lang_labels["name"], data.name),
            (lang_labels["version"], data.version),
        ]

        # Add provider info if available
        if data.provider_obj:
            info_rows.append(
                (lang_labels["provider"], data.provider_obj.provider.upper())
            )
            info_rows.append(
                (lang_labels["account_id"], data.provider_obj.uid or "N/A")
            )
            info_rows.append((lang_labels["alias"], data.provider_obj.alias or "N/A"))

        info_rows.append((lang_labels["scan_id"], data.scan_id))

        if data.description:
            info_rows.append((lang_labels["description"], data.description))

        return info_rows

    def create_detailed_findings(self, data: ComplianceData, **kwargs) -> list:
        """Create the detailed findings section.

        This default implementation creates a requirement-by-requirement
        breakdown with findings tables. Subclasses can override for
        framework-specific presentation.

        This method implements on-demand loading of findings using the shared
        findings cache to minimize database queries and memory usage.

        Args:
            data: Aggregated compliance data
            **kwargs: Framework-specific options (e.g., only_failed)

        Returns:
            List of ReportLab elements
        """
        elements = []
        only_failed = kwargs.get("only_failed", True)
        include_manual = kwargs.get("include_manual", False)

        # Filter requirements if needed
        requirements = data.requirements
        if only_failed:
            # Include FAIL requirements, and optionally MANUAL if include_manual is True
            if include_manual:
                requirements = [
                    r
                    for r in requirements
                    if r.status in (StatusChoices.FAIL, StatusChoices.MANUAL)
                ]
            else:
                requirements = [
                    r for r in requirements if r.status == StatusChoices.FAIL
                ]

        # Collect all check IDs for requirements that will be displayed
        # This allows us to load only the findings we actually need (memory optimization)
        check_ids_to_load = []
        for req in requirements:
            check_ids_to_load.extend(req.checks)

        # Load findings on-demand only for the checks that will be displayed
        # Uses the shared findings cache to avoid duplicate queries across reports
        logger.info("Loading findings on-demand for %d requirements", len(requirements))
        findings_by_check_id = _load_findings_for_requirement_checks(
            data.tenant_id,
            data.scan_id,
            check_ids_to_load,
            data.prowler_provider,
            data.findings_by_check_id,  # Pass the cache to update it
        )

        for req in requirements:
            # Requirement header
            elements.append(
                Paragraph(
                    f"{req.id}: {req.description}",
                    self.styles["h1"],
                )
            )

            # Status badge
            elements.append(create_status_badge(req.status))
            elements.append(Spacer(1, 0.1 * inch))

            # Findings for this requirement
            for check_id in req.checks:
                elements.append(Paragraph(f"Check: {check_id}", self.styles["h2"]))

                findings = findings_by_check_id.get(check_id, [])
                if not findings:
                    elements.append(
                        Paragraph(
                            "- No information for this finding currently",
                            self.styles["normal"],
                        )
                    )
                else:
                    # Create findings table
                    findings_table = self._create_findings_table(findings)
                    elements.append(findings_table)

                elements.append(Spacer(1, 0.1 * inch))

            elements.append(PageBreak())

        return elements

    def get_footer_text(self, page_num: int) -> tuple[str, str]:
        """Get footer text for a page.

        Args:
            page_num: Current page number

        Returns:
            Tuple of (left_text, right_text) for the footer
        """
        if self.config.language == "es":
            page_text = f"Página {page_num}"
        else:
            page_text = f"Page {page_num}"

        return page_text, "Powered by Prowler"

    # =========================================================================
    # Private Helper Methods
    # =========================================================================

    def _load_compliance_data(
        self,
        tenant_id: str,
        scan_id: str,
        compliance_id: str,
        provider_id: str,
        provider_obj: Provider | None,
        requirement_statistics: dict | None,
        findings_cache: dict | None,
    ) -> ComplianceData:
        """Load and aggregate compliance data from the database.

        Args:
            tenant_id: Tenant identifier
            scan_id: Scan identifier
            compliance_id: Compliance framework identifier
            provider_id: Provider identifier
            provider_obj: Optional pre-fetched Provider
            requirement_statistics: Optional pre-aggregated statistics
            findings_cache: Optional pre-loaded findings

        Returns:
            Aggregated ComplianceData object
        """
        # All database reads below run against the read replica under RLS.
        with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
            # Load provider
            if provider_obj is None:
                provider_obj = Provider.objects.get(id=provider_id)

            prowler_provider = initialize_prowler_provider(provider_obj)
            provider_type = provider_obj.provider

            # Load compliance framework
            frameworks_bulk = Compliance.get_bulk(provider_type)
            compliance_obj = frameworks_bulk.get(compliance_id)

            if not compliance_obj:
                raise ValueError(f"Compliance framework not found: {compliance_id}")

            framework = getattr(compliance_obj, "Framework", "N/A")
            name = getattr(compliance_obj, "Name", "N/A")
            version = getattr(compliance_obj, "Version", "N/A")
            description = getattr(compliance_obj, "Description", "")

            # Aggregate requirement statistics
            if requirement_statistics is None:
                logger.info("Aggregating requirement statistics for scan %s", scan_id)
                requirement_statistics = _aggregate_requirement_statistics_from_database(
                    tenant_id, scan_id
                )
            else:
                logger.info("Reusing pre-aggregated statistics for scan %s", scan_id)

            # Calculate requirements data
            attributes_by_requirement_id, requirements_list = (
                _calculate_requirements_data_from_statistics(
                    compliance_obj, requirement_statistics
                )
            )

            # Convert to RequirementData objects
            requirements = []
            for req_dict in requirements_list:
                req = RequirementData(
                    id=req_dict["id"],
                    description=req_dict["attributes"].get("description", ""),
                    status=req_dict["attributes"].get("status", StatusChoices.MANUAL),
                    passed_findings=req_dict["attributes"].get("passed_findings", 0),
                    failed_findings=req_dict["attributes"].get("failed_findings", 0),
                    total_findings=req_dict["attributes"].get("total_findings", 0),
                    checks=attributes_by_requirement_id.get(req_dict["id"], {})
                    .get("attributes", {})
                    .get("checks", []),
                )
                requirements.append(req)

        # Pure construction from already-fetched data; no DB access past this point.
        return ComplianceData(
            tenant_id=tenant_id,
            scan_id=scan_id,
            provider_id=provider_id,
            compliance_id=compliance_id,
            framework=framework,
            name=name,
            version=version,
            description=description,
            requirements=requirements,
            attributes_by_requirement_id=attributes_by_requirement_id,
            findings_by_check_id=findings_cache if findings_cache is not None else {},
            provider_obj=provider_obj,
            prowler_provider=prowler_provider,
        )

    def _create_document(
        self, output_path: str, data: ComplianceData
    ) -> SimpleDocTemplate:
        """Create the PDF document template.

        Args:
            output_path: Path for the output PDF
            data: Compliance data for metadata

        Returns:
            Configured SimpleDocTemplate
        """
        return SimpleDocTemplate(
            output_path,
            pagesize=letter,
            title=f"{self.config.display_name} Report - {data.framework}",
            author="Prowler",
            subject=f"Compliance Report for {data.framework}",
            creator="Prowler Engineering Team",
            keywords=f"compliance,{data.framework},security,framework,prowler",
        )

    def _build_pdf(
        self,
        doc: SimpleDocTemplate,
        elements: list,
        data: ComplianceData,
    ) -> None:
        """Build the final PDF with footers.

        Args:
            doc: Document template
            elements: List of ReportLab elements
            data: Compliance data
        """

        def add_footer(
            canvas_obj: canvas.Canvas,
            doc_template: SimpleDocTemplate,
        ) -> None:
            # Footer: page label on the left, branding on the right.
            canvas_obj.saveState()
            width, _ = doc_template.pagesize
            left_text, right_text = self.get_footer_text(doc_template.page)

            canvas_obj.setFont("PlusJakartaSans", 9)
            canvas_obj.setFillColorRGB(0.4, 0.4, 0.4)
            canvas_obj.drawString(30, 20, left_text)

            # Right-align by measuring the rendered string width.
            text_width = canvas_obj.stringWidth(right_text, "PlusJakartaSans", 9)
            canvas_obj.drawString(width - text_width - 30, 20, right_text)
            canvas_obj.restoreState()

        doc.build(
            elements,
            onFirstPage=add_footer,
            onLaterPages=add_footer,
        )

    def _create_findings_table(self, findings: list[FindingOutput]) -> Any:
        """Create a findings table.

        Args:
            findings: List of finding objects

        Returns:
            ReportLab Table element
        """

        def get_finding_title(f):
            # Prefer the human-readable check title; fall back to the check ID.
            metadata = getattr(f, "metadata", None)
            if metadata:
                return getattr(metadata, "CheckTitle", getattr(f, "check_id", ""))
            return getattr(f, "check_id", "")

        def get_resource_name(f):
            # Fall back to the resource UID when no name is set.
            name = getattr(f, "resource_name", "")
            if not name:
                name = getattr(f, "resource_uid", "")
            return name

        def get_severity(f):
            metadata = getattr(f, "metadata", None)
            if metadata:
                return getattr(metadata, "Severity", "").capitalize()
            return ""

        # Convert findings to dicts for the table
        data = []
        for f in findings:
            item = {
                "title": get_finding_title(f),
                "resource_name": get_resource_name(f),
                "severity": get_severity(f),
                "status": getattr(f, "status", "").upper(),
                "region": getattr(f, "region", "global"),
            }
            data.append(item)

        columns = [
            ColumnConfig("Finding", 2.5 * inch, "title"),
            ColumnConfig("Resource", 3 * inch, "resource_name"),
            ColumnConfig("Severity", 0.9 * inch, "severity"),
            ColumnConfig("Status", 0.9 * inch, "status"),
            ColumnConfig("Region", 0.9 * inch, "region"),
        ]

        return create_data_table(
            data=data,
            columns=columns,
            header_color=self.config.primary_color,
            normal_style=self.styles["normal_center"],
        )
diff --git a/api/src/backend/tasks/jobs/reports/charts.py b/api/src/backend/tasks/jobs/reports/charts.py
new file mode 100644
index 0000000000..0f0338acab
--- /dev/null
+++ b/api/src/backend/tasks/jobs/reports/charts.py
@@ -0,0 +1,404 @@
+import gc
+import io
+import math
+from typing import Callable
+
+import matplotlib
+
+# Use non-interactive Agg backend for memory efficiency in server environments
+# This MUST be set before importing pyplot
+matplotlib.use("Agg")
+import matplotlib.pyplot as plt # noqa: E402
+
+from .config import ( # noqa: E402
+ CHART_COLOR_BLUE,
+ CHART_COLOR_GREEN_1,
+ CHART_COLOR_GREEN_2,
+ CHART_COLOR_ORANGE,
+ CHART_COLOR_RED,
+ CHART_COLOR_YELLOW,
+ CHART_DPI_DEFAULT,
+)
+
+# Use centralized DPI setting from config
+DEFAULT_CHART_DPI = CHART_DPI_DEFAULT
+
+
+def get_chart_color_for_percentage(percentage: float) -> str:
+ """Get chart color string based on percentage.
+
+ Args:
+ percentage: Value between 0 and 100
+
+ Returns:
+ Hex color string for matplotlib
+ """
+ if percentage >= 80:
+ return CHART_COLOR_GREEN_1
+ if percentage >= 60:
+ return CHART_COLOR_GREEN_2
+ if percentage >= 40:
+ return CHART_COLOR_YELLOW
+ if percentage >= 20:
+ return CHART_COLOR_ORANGE
+ return CHART_COLOR_RED
+
+
+def create_vertical_bar_chart(
+ labels: list[str],
+ values: list[float],
+ ylabel: str = "Compliance Score (%)",
+ xlabel: str = "Section",
+ title: str | None = None,
+ color_func: Callable[[float], str] | None = None,
+ colors: list[str] | None = None,
+ figsize: tuple[int, int] = (10, 6),
+ dpi: int = DEFAULT_CHART_DPI,
+ y_limit: tuple[float, float] = (0, 100),
+ show_labels: bool = True,
+ rotation: int = 45,
+) -> io.BytesIO:
+ """Create a vertical bar chart.
+
+ Args:
+ labels: X-axis labels
+ values: Bar heights (numeric values)
+ ylabel: Y-axis label
+ xlabel: X-axis label
+ title: Optional chart title
+ color_func: Function to determine bar color based on value
+ colors: Explicit list of colors (overrides color_func)
+ figsize: Figure size (width, height) in inches
+ dpi: Resolution for output image
+ y_limit: Y-axis limits (min, max)
+ show_labels: Whether to show value labels on bars
+ rotation: X-axis label rotation angle
+
+ Returns:
+ BytesIO buffer containing the PNG image
+ """
+ if color_func is None:
+ color_func = get_chart_color_for_percentage
+
+ fig, ax = plt.subplots(figsize=figsize)
+
+ # Determine colors
+ if colors is None:
+ colors_list = [color_func(v) for v in values]
+ else:
+ colors_list = colors
+
+ bars = ax.bar(labels, values, color=colors_list)
+
+ ax.set_ylabel(ylabel, fontsize=12)
+ ax.set_xlabel(xlabel, fontsize=12)
+ ax.set_ylim(*y_limit)
+
+ if title:
+ ax.set_title(title, fontsize=14, fontweight="bold")
+
+ # Add value labels on bars
+ if show_labels:
+ for bar_item, value in zip(bars, values):
+ height = bar_item.get_height()
+ ax.text(
+ bar_item.get_x() + bar_item.get_width() / 2.0,
+ height + 1,
+ f"{value:.1f}%",
+ ha="center",
+ va="bottom",
+ fontweight="bold",
+ )
+
+ plt.xticks(rotation=rotation, ha="right")
+ ax.grid(True, alpha=0.3, axis="y")
+ plt.tight_layout()
+
+ buffer = io.BytesIO()
+ try:
+ fig.savefig(buffer, format="png", dpi=dpi, bbox_inches="tight")
+ buffer.seek(0)
+ finally:
+ plt.close(fig)
+ gc.collect() # Force garbage collection after heavy matplotlib operation
+
+ return buffer
+
+
+def create_horizontal_bar_chart(
+ labels: list[str],
+ values: list[float],
+ xlabel: str = "Compliance (%)",
+ title: str | None = None,
+ color_func: Callable[[float], str] | None = None,
+ colors: list[str] | None = None,
+ figsize: tuple[int, int] | None = None,
+ dpi: int = DEFAULT_CHART_DPI,
+ x_limit: tuple[float, float] = (0, 100),
+ show_labels: bool = True,
+ label_fontsize: int = 16,
+) -> io.BytesIO:
+ """Create a horizontal bar chart.
+
+ Args:
+ labels: Y-axis labels (bar names)
+ values: Bar widths (numeric values)
+ xlabel: X-axis label
+ title: Optional chart title
+ color_func: Function to determine bar color based on value
+ colors: Explicit list of colors (overrides color_func)
+ figsize: Figure size (auto-calculated if None based on label count)
+ dpi: Resolution for output image
+ x_limit: X-axis limits (min, max)
+ show_labels: Whether to show value labels on bars
+ label_fontsize: Font size for y-axis labels
+
+ Returns:
+ BytesIO buffer containing the PNG image
+ """
+ if color_func is None:
+ color_func = get_chart_color_for_percentage
+
+ # Auto-calculate figure size based on number of items
+ if figsize is None:
+ figsize = (10, max(6, int(len(labels) * 0.4)))
+
+ fig, ax = plt.subplots(figsize=figsize)
+
+ # Determine colors
+ if colors is None:
+ colors_list = [color_func(v) for v in values]
+ else:
+ colors_list = colors
+
+ y_pos = range(len(labels))
+ bars = ax.barh(y_pos, values, color=colors_list)
+
+ ax.set_yticks(y_pos)
+ ax.set_yticklabels(labels, fontsize=label_fontsize)
+ ax.set_xlabel(xlabel, fontsize=14)
+ ax.set_xlim(*x_limit)
+
+ if title:
+ ax.set_title(title, fontsize=14, fontweight="bold")
+
+ # Add value labels
+ if show_labels:
+ for bar_item, value in zip(bars, values):
+ width = bar_item.get_width()
+ ax.text(
+ width + 1,
+ bar_item.get_y() + bar_item.get_height() / 2.0,
+ f"{value:.1f}%",
+ ha="left",
+ va="center",
+ fontweight="bold",
+ fontsize=10,
+ )
+
+ ax.grid(True, alpha=0.3, axis="x")
+ plt.tight_layout()
+
+ buffer = io.BytesIO()
+ try:
+ fig.savefig(buffer, format="png", dpi=dpi, bbox_inches="tight")
+ buffer.seek(0)
+ finally:
+ plt.close(fig)
+ gc.collect() # Force garbage collection after heavy matplotlib operation
+
+ return buffer
+
+
+def create_radar_chart(
+ labels: list[str],
+ values: list[float],
+ color: str = CHART_COLOR_BLUE,
+ fill_alpha: float = 0.25,
+ figsize: tuple[int, int] = (8, 8),
+ dpi: int = DEFAULT_CHART_DPI,
+ y_limit: tuple[float, float] = (0, 100),
+ y_ticks: list[int] | None = None,
+ label_fontsize: int = 14,
+ title: str | None = None,
+) -> io.BytesIO:
+ """Create a radar/spider chart.
+
+ Args:
+ labels: Category names around the chart
+ values: Values for each category (should have same length as labels)
+ color: Line and fill color
+ fill_alpha: Transparency of the fill (0-1)
+ figsize: Figure size (width, height) in inches
+ dpi: Resolution for output image
+ y_limit: Radial axis limits (min, max)
+ y_ticks: Custom tick values for radial axis
+ label_fontsize: Font size for category labels
+ title: Optional chart title
+
+ Returns:
+ BytesIO buffer containing the PNG image
+ """
+ num_vars = len(labels)
+ angles = [n / float(num_vars) * 2 * math.pi for n in range(num_vars)]
+
+ # Close the polygon
+ values_closed = list(values) + [values[0]]
+ angles_closed = angles + [angles[0]]
+
+ fig, ax = plt.subplots(figsize=figsize, subplot_kw={"projection": "polar"})
+
+ ax.plot(angles_closed, values_closed, "o-", linewidth=2, color=color)
+ ax.fill(angles_closed, values_closed, alpha=fill_alpha, color=color)
+
+ ax.set_xticks(angles)
+ ax.set_xticklabels(labels, fontsize=label_fontsize)
+ ax.set_ylim(*y_limit)
+
+ if y_ticks is None:
+ y_ticks = [20, 40, 60, 80, 100]
+ ax.set_yticks(y_ticks)
+ ax.set_yticklabels([f"{t}%" for t in y_ticks], fontsize=12)
+
+ ax.grid(True, alpha=0.3)
+
+ if title:
+ ax.set_title(title, fontsize=14, fontweight="bold", y=1.08)
+
+ plt.tight_layout()
+
+ buffer = io.BytesIO()
+ try:
+ fig.savefig(buffer, format="png", dpi=dpi, bbox_inches="tight")
+ buffer.seek(0)
+ finally:
+ plt.close(fig)
+ gc.collect() # Force garbage collection after heavy matplotlib operation
+
+ return buffer
+
+
+def create_pie_chart(
+ labels: list[str],
+ values: list[float],
+ colors: list[str] | None = None,
+ figsize: tuple[int, int] = (6, 6),
+ dpi: int = DEFAULT_CHART_DPI,
+ autopct: str = "%1.1f%%",
+ startangle: int = 90,
+ title: str | None = None,
+) -> io.BytesIO:
+ """Create a pie chart.
+
+ Args:
+ labels: Slice labels
+ values: Slice values
+ colors: Optional list of colors for slices
+ figsize: Figure size (width, height) in inches
+ dpi: Resolution for output image
+ autopct: Format string for percentage labels
+ startangle: Starting angle for first slice
+ title: Optional chart title
+
+ Returns:
+ BytesIO buffer containing the PNG image
+ """
+ fig, ax = plt.subplots(figsize=figsize)
+
+ _, _, autotexts = ax.pie(
+ values,
+ labels=labels,
+ colors=colors,
+ autopct=autopct,
+ startangle=startangle,
+ )
+
+ # Style the text
+ for autotext in autotexts:
+ autotext.set_fontweight("bold")
+
+ if title:
+ ax.set_title(title, fontsize=14, fontweight="bold")
+
+ plt.tight_layout()
+
+ buffer = io.BytesIO()
+ try:
+ fig.savefig(buffer, format="png", dpi=dpi, bbox_inches="tight")
+ buffer.seek(0)
+ finally:
+ plt.close(fig)
+ gc.collect() # Force garbage collection after heavy matplotlib operation
+
+ return buffer
+
+
+def create_stacked_bar_chart(
+ labels: list[str],
+ data_series: dict[str, list[float]],
+ colors: dict[str, str] | None = None,
+ xlabel: str = "",
+ ylabel: str = "Count",
+ title: str | None = None,
+ figsize: tuple[int, int] = (10, 6),
+ dpi: int = DEFAULT_CHART_DPI,
+ rotation: int = 45,
+ show_legend: bool = True,
+) -> io.BytesIO:
+ """Create a stacked bar chart.
+
+ Args:
+ labels: X-axis labels
+ data_series: Dictionary mapping series name to list of values
+ colors: Dictionary mapping series name to color
+ xlabel: X-axis label
+ ylabel: Y-axis label
+ title: Optional chart title
+ figsize: Figure size (width, height) in inches
+ dpi: Resolution for output image
+ rotation: X-axis label rotation angle
+ show_legend: Whether to show the legend
+
+ Returns:
+ BytesIO buffer containing the PNG image
+ """
+ fig, ax = plt.subplots(figsize=figsize)
+
+ # Default colors if not provided
+ default_colors = {
+ "Pass": CHART_COLOR_GREEN_1,
+ "Fail": CHART_COLOR_RED,
+ "Manual": CHART_COLOR_YELLOW,
+ }
+ if colors is None:
+ colors = default_colors
+
+ bottom = [0] * len(labels)
+ for series_name, values in data_series.items():
+ color = colors.get(series_name, CHART_COLOR_BLUE)
+ ax.bar(labels, values, bottom=bottom, label=series_name, color=color)
+ bottom = [b + v for b, v in zip(bottom, values)]
+
+ ax.set_xlabel(xlabel, fontsize=12)
+ ax.set_ylabel(ylabel, fontsize=12)
+
+ if title:
+ ax.set_title(title, fontsize=14, fontweight="bold")
+
+ plt.xticks(rotation=rotation, ha="right")
+
+ if show_legend:
+ ax.legend()
+
+ ax.grid(True, alpha=0.3, axis="y")
+ plt.tight_layout()
+
+ buffer = io.BytesIO()
+ try:
+ fig.savefig(buffer, format="png", dpi=dpi, bbox_inches="tight")
+ buffer.seek(0)
+ finally:
+ plt.close(fig)
+ gc.collect() # Force garbage collection after heavy matplotlib operation
+
+ return buffer
diff --git a/api/src/backend/tasks/jobs/reports/components.py b/api/src/backend/tasks/jobs/reports/components.py
new file mode 100644
index 0000000000..323c4547e6
--- /dev/null
+++ b/api/src/backend/tasks/jobs/reports/components.py
@@ -0,0 +1,599 @@
+from dataclasses import dataclass
+from typing import Any, Callable
+
+from reportlab.lib import colors
+from reportlab.lib.styles import ParagraphStyle
+from reportlab.lib.units import inch
+from reportlab.platypus import LongTable, Paragraph, Spacer, Table, TableStyle
+
+from .config import (
+ ALTERNATE_ROWS_MAX_SIZE,
+ COLOR_BLUE,
+ COLOR_BORDER_GRAY,
+ COLOR_DARK_GRAY,
+ COLOR_GRID_GRAY,
+ COLOR_HIGH_RISK,
+ COLOR_LIGHT_GRAY,
+ COLOR_LOW_RISK,
+ COLOR_MEDIUM_RISK,
+ COLOR_SAFE,
+ COLOR_WHITE,
+ LONG_TABLE_THRESHOLD,
+ PADDING_LARGE,
+ PADDING_MEDIUM,
+ PADDING_SMALL,
+ PADDING_XLARGE,
+)
+
+
+def get_color_for_risk_level(risk_level: int) -> colors.Color:
+ """
+ Get color based on risk level.
+
+ Args:
+ risk_level (int): Numeric risk level (0-5).
+
+ Returns:
+ colors.Color: Appropriate color for the risk level.
+ """
+ if risk_level >= 4:
+ return COLOR_HIGH_RISK
+ if risk_level >= 3:
+ return COLOR_MEDIUM_RISK
+ if risk_level >= 2:
+ return COLOR_LOW_RISK
+ return COLOR_SAFE
+
+
+def get_color_for_weight(weight: int) -> colors.Color:
+ """
+ Get color based on weight value.
+
+ Args:
+ weight (int): Numeric weight value.
+
+ Returns:
+ colors.Color: Appropriate color for the weight.
+ """
+ if weight > 100:
+ return COLOR_HIGH_RISK
+ if weight > 50:
+ return COLOR_LOW_RISK
+ return COLOR_SAFE
+
+
+def get_color_for_compliance(percentage: float) -> colors.Color:
+ """
+ Get color based on compliance percentage.
+
+ Args:
+ percentage (float): Compliance percentage (0-100).
+
+ Returns:
+ colors.Color: Appropriate color for the compliance level.
+ """
+ if percentage >= 80:
+ return COLOR_SAFE
+ if percentage >= 60:
+ return COLOR_LOW_RISK
+ return COLOR_HIGH_RISK
+
+
+def get_status_color(status: str) -> colors.Color:
+ """
+ Get color for a status value.
+
+ Args:
+ status (str): Status string (PASS, FAIL, MANUAL, etc.).
+
+ Returns:
+ colors.Color: Appropriate color for the status.
+ """
+ status_upper = status.upper()
+ if status_upper == "PASS":
+ return COLOR_SAFE
+ if status_upper == "FAIL":
+ return COLOR_HIGH_RISK
+ return COLOR_DARK_GRAY
+
+
+def create_badge(
+ text: str,
+ bg_color: colors.Color,
+ text_color: colors.Color = COLOR_WHITE,
+ width: float = 1.4 * inch,
+ font: str = "FiraCode",
+ font_size: int = 11,
+) -> Table:
+ """
+ Create a generic colored badge component.
+
+ Args:
+ text (str): Text to display in the badge.
+ bg_color (colors.Color): Background color.
+ text_color (colors.Color): Text color (default white).
+ width (float): Badge width in inches.
+ font (str): Font name to use.
+ font_size (int): Font size.
+
+ Returns:
+ Table: A Table object styled as a badge.
+ """
+ data = [[text]]
+ table = Table(data, colWidths=[width])
+
+ table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, 0), bg_color),
+ ("TEXTCOLOR", (0, 0), (0, 0), text_color),
+ ("FONTNAME", (0, 0), (0, 0), font),
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("FONTSIZE", (0, 0), (-1, -1), font_size),
+ ("GRID", (0, 0), (-1, -1), 0.5, colors.black),
+ ("LEFTPADDING", (0, 0), (-1, -1), PADDING_LARGE),
+ ("RIGHTPADDING", (0, 0), (-1, -1), PADDING_LARGE),
+ ("TOPPADDING", (0, 0), (-1, -1), PADDING_LARGE),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_LARGE),
+ ]
+ )
+ )
+
+ return table
+
+
+def create_status_badge(status: str) -> Table:
+ """
+ Create a PASS/FAIL/MANUAL status badge.
+
+ Args:
+ status (str): Status value (e.g., "PASS", "FAIL", "MANUAL").
+
+ Returns:
+ Table: A styled Table badge for the status.
+ """
+ status_upper = status.upper()
+ status_color = get_status_color(status_upper)
+
+ data = [["State:", status_upper]]
+ table = Table(data, colWidths=[0.6 * inch, 0.8 * inch])
+
+ table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, 0), COLOR_LIGHT_GRAY),
+ ("FONTNAME", (0, 0), (0, 0), "PlusJakartaSans"),
+ ("BACKGROUND", (1, 0), (1, 0), status_color),
+ ("TEXTCOLOR", (1, 0), (1, 0), COLOR_WHITE),
+ ("FONTNAME", (1, 0), (1, 0), "FiraCode"),
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("FONTSIZE", (0, 0), (-1, -1), 12),
+ ("GRID", (0, 0), (-1, -1), 0.5, colors.black),
+ ("LEFTPADDING", (0, 0), (-1, -1), PADDING_LARGE),
+ ("RIGHTPADDING", (0, 0), (-1, -1), PADDING_LARGE),
+ ("TOPPADDING", (0, 0), (-1, -1), PADDING_XLARGE),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_XLARGE),
+ ]
+ )
+ )
+
+ return table
+
+
+def create_multi_badge_row(
+ badges: list[tuple[str, colors.Color]],
+ badge_width: float = 0.4 * inch,
+ font: str = "FiraCode",
+) -> Table:
+ """
+ Create a row of multiple small badges.
+
+ Args:
+ badges (list[tuple[str, colors.Color]]): List of (text, color) tuples for each badge.
+ badge_width (float): Width of each badge.
+ font (str): Font name to use.
+
+ Returns:
+ Table: A Table with multiple colored badges in a row.
+ """
+ if not badges:
+ data = [["N/A"]]
+ table = Table(data, colWidths=[1 * inch])
+ table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, 0), COLOR_LIGHT_GRAY),
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("FONTSIZE", (0, 0), (-1, -1), 10),
+ ]
+ )
+ )
+ return table
+
+ data = [[text for text, _ in badges]]
+ col_widths = [badge_width] * len(badges)
+ table = Table(data, colWidths=col_widths)
+
+ styles = [
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("FONTNAME", (0, 0), (-1, -1), font),
+ ("FONTSIZE", (0, 0), (-1, -1), 10),
+ ("TEXTCOLOR", (0, 0), (-1, -1), COLOR_WHITE),
+ ("GRID", (0, 0), (-1, -1), 0.5, colors.black),
+ ("LEFTPADDING", (0, 0), (-1, -1), PADDING_SMALL),
+ ("RIGHTPADDING", (0, 0), (-1, -1), PADDING_SMALL),
+ ("TOPPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
+ ]
+
+ for idx, (_, badge_color) in enumerate(badges):
+ styles.append(("BACKGROUND", (idx, 0), (idx, 0), badge_color))
+
+ table.setStyle(TableStyle(styles))
+ return table
+
+
+def create_risk_component(
+ risk_level: int,
+ weight: int,
+ score: int = 0,
+) -> Table:
+ """
+ Create a visual risk component showing risk level, weight, and score.
+
+ Args:
+ risk_level (int): The risk level (0-5).
+ weight (int): The weight value.
+ score (int): The calculated score (default 0).
+
+ Returns:
+ Table: A styled Table showing risk metrics.
+ """
+ risk_color = get_color_for_risk_level(risk_level)
+ weight_color = get_color_for_weight(weight)
+
+ data = [
+ [
+ "Risk Level:",
+ str(risk_level),
+ "Weight:",
+ str(weight),
+ "Score:",
+ str(score),
+ ]
+ ]
+
+ table = Table(
+ data,
+ colWidths=[
+ 0.8 * inch,
+ 0.4 * inch,
+ 0.6 * inch,
+ 0.4 * inch,
+ 0.5 * inch,
+ 0.4 * inch,
+ ],
+ )
+
+ table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, 0), COLOR_LIGHT_GRAY),
+ ("BACKGROUND", (1, 0), (1, 0), risk_color),
+ ("TEXTCOLOR", (1, 0), (1, 0), COLOR_WHITE),
+ ("FONTNAME", (1, 0), (1, 0), "FiraCode"),
+ ("BACKGROUND", (2, 0), (2, 0), COLOR_LIGHT_GRAY),
+ ("BACKGROUND", (3, 0), (3, 0), weight_color),
+ ("TEXTCOLOR", (3, 0), (3, 0), COLOR_WHITE),
+ ("FONTNAME", (3, 0), (3, 0), "FiraCode"),
+ ("BACKGROUND", (4, 0), (4, 0), COLOR_LIGHT_GRAY),
+ ("BACKGROUND", (5, 0), (5, 0), COLOR_DARK_GRAY),
+ ("TEXTCOLOR", (5, 0), (5, 0), COLOR_WHITE),
+ ("FONTNAME", (5, 0), (5, 0), "FiraCode"),
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("FONTSIZE", (0, 0), (-1, -1), 10),
+ ("GRID", (0, 0), (-1, -1), 0.5, colors.black),
+ ("LEFTPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
+ ("RIGHTPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
+ ("TOPPADDING", (0, 0), (-1, -1), PADDING_LARGE),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_LARGE),
+ ]
+ )
+ )
+
+ return table
+
+
+def create_info_table(
+ rows: list[tuple[str, Any]],
+ label_width: float = 2 * inch,
+ value_width: float = 4 * inch,
+ label_color: colors.Color = COLOR_BLUE,
+ value_bg_color: colors.Color | None = None,
+ normal_style: ParagraphStyle | None = None,
+) -> Table:
+ """
+ Create a key-value information table.
+
+ Args:
+ rows (list[tuple[str, Any]]): List of (label, value) tuples.
+ label_width (float): Width of the label column.
+ value_width (float): Width of the value column.
+ label_color (colors.Color): Background color for labels.
+ value_bg_color (colors.Color | None): Background color for values (optional).
+ normal_style (ParagraphStyle | None): ParagraphStyle for wrapping long values.
+
+ Returns:
+ Table: A styled Table with key-value pairs.
+ """
+ from .config import COLOR_BG_BLUE
+
+ if value_bg_color is None:
+ value_bg_color = COLOR_BG_BLUE
+
+ # Handle empty rows case - Table requires at least one row
+ if not rows:
+ table = Table([["", ""]], colWidths=[label_width, value_width])
+ table.setStyle(TableStyle([("FONTSIZE", (0, 0), (-1, -1), 0)]))
+ return table
+
+ # Process rows - wrap long values in Paragraph if style provided
+ table_data = []
+ for label, value in rows:
+ if normal_style and isinstance(value, str) and len(value) > 50:
+ value = Paragraph(value, normal_style)
+ table_data.append([label, value])
+
+ table = Table(table_data, colWidths=[label_width, value_width])
+
+ table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, -1), label_color),
+ ("TEXTCOLOR", (0, 0), (0, -1), COLOR_WHITE),
+ ("FONTNAME", (0, 0), (0, -1), "FiraCode"),
+ ("BACKGROUND", (1, 0), (1, -1), value_bg_color),
+ ("TEXTCOLOR", (1, 0), (1, -1), COLOR_DARK_GRAY),
+ ("FONTNAME", (1, 0), (1, -1), "PlusJakartaSans"),
+ ("ALIGN", (0, 0), (-1, -1), "LEFT"),
+ ("VALIGN", (0, 0), (-1, -1), "TOP"),
+ ("FONTSIZE", (0, 0), (-1, -1), 11),
+ ("GRID", (0, 0), (-1, -1), 1, COLOR_BORDER_GRAY),
+ ("LEFTPADDING", (0, 0), (-1, -1), PADDING_XLARGE),
+ ("RIGHTPADDING", (0, 0), (-1, -1), PADDING_XLARGE),
+ ("TOPPADDING", (0, 0), (-1, -1), PADDING_LARGE),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_LARGE),
+ ]
+ )
+ )
+
+ return table
+
+
+@dataclass
+class ColumnConfig:
+    """
+    Configuration for a table column.
+
+    Attributes:
+        header (str): Column header text.
+        width (float): Column width in inches.
+        field (str | Callable[[Any], str]): Field name used as a dict key on the
+            row item, or a callable that extracts the display value from it.
+        align (str): Text alignment (LEFT, CENTER, RIGHT).
+    """
+
+    header: str
+    width: float
+    field: str | Callable[[Any], str]
+    align: str = "CENTER"
+
+
+def create_data_table(
+ data: list[dict[str, Any]],
+ columns: list[ColumnConfig],
+ header_color: colors.Color = COLOR_BLUE,
+ alternate_rows: bool = True,
+ normal_style: ParagraphStyle | None = None,
+) -> Table | LongTable:
+ """
+ Create a data table with configurable columns.
+
+ Uses LongTable for large datasets (>50 rows) for better memory efficiency
+ and page splitting. LongTable repeats headers on each page and has
+ optimized memory handling for large tables.
+
+ Args:
+ data (list[dict[str, Any]]): List of data dictionaries.
+ columns (list[ColumnConfig]): Column configuration list.
+ header_color (colors.Color): Background color for header row.
+ alternate_rows (bool): Whether to alternate row backgrounds.
+ normal_style (ParagraphStyle | None): ParagraphStyle for cell values.
+
+ Returns:
+ Table or LongTable: A styled table with data.
+ """
+ # Build header row
+ header_row = [col.header for col in columns]
+ table_data = [header_row]
+
+ # Build data rows
+ for item in data:
+ row = []
+ for col in columns:
+ if callable(col.field):
+ value = col.field(item)
+ else:
+ value = item.get(col.field, "")
+
+ if normal_style and isinstance(value, str):
+ value = Paragraph(value, normal_style)
+ row.append(value)
+ table_data.append(row)
+
+ col_widths = [col.width for col in columns]
+
+ # Use LongTable for large datasets - it handles page breaks better
+ # and has optimized memory handling for tables with many rows
+ use_long_table = len(data) > LONG_TABLE_THRESHOLD
+ if use_long_table:
+ table = LongTable(table_data, colWidths=col_widths, repeatRows=1)
+ else:
+ table = Table(table_data, colWidths=col_widths)
+
+ styles = [
+ ("BACKGROUND", (0, 0), (-1, 0), header_color),
+ ("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
+ ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
+ ("FONTSIZE", (0, 0), (-1, 0), 10),
+ ("FONTSIZE", (0, 1), (-1, -1), 9),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("GRID", (0, 0), (-1, -1), 1, COLOR_GRID_GRAY),
+ ("LEFTPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
+ ("RIGHTPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
+ ("TOPPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
+ ]
+
+ # Apply column alignments
+ for idx, col in enumerate(columns):
+ styles.append(("ALIGN", (idx, 0), (idx, -1), col.align))
+
+ # Alternate row backgrounds - skip for very large tables as it adds memory overhead
+ if (
+ alternate_rows
+ and len(table_data) > 1
+ and len(table_data) <= ALTERNATE_ROWS_MAX_SIZE
+ ):
+ for i in range(1, len(table_data)):
+ if i % 2 == 0:
+ styles.append(
+ ("BACKGROUND", (0, i), (-1, i), colors.Color(0.98, 0.98, 0.98))
+ )
+
+ table.setStyle(TableStyle(styles))
+ return table
+
+
+def create_findings_table(
+ findings: list[Any],
+ columns: list[ColumnConfig] | None = None,
+ header_color: colors.Color = COLOR_BLUE,
+ normal_style: ParagraphStyle | None = None,
+) -> Table:
+ """
+ Create a findings table with default or custom columns.
+
+ Args:
+ findings (list[Any]): List of finding objects.
+ columns (list[ColumnConfig] | None): Optional column configuration (defaults to standard columns).
+ header_color (colors.Color): Background color for header row.
+ normal_style (ParagraphStyle | None): ParagraphStyle for cell values.
+
+ Returns:
+ Table: A styled Table with findings data.
+ """
+ if columns is None:
+ columns = [
+ ColumnConfig("Finding", 2.5 * inch, "title"),
+ ColumnConfig("Resource", 3 * inch, "resource_name"),
+ ColumnConfig("Severity", 0.9 * inch, "severity"),
+ ColumnConfig("Status", 0.9 * inch, "status"),
+ ColumnConfig("Region", 0.9 * inch, "region"),
+ ]
+
+ # Convert findings to dicts
+ data = []
+ for finding in findings:
+ item = {}
+ for col in columns:
+ if callable(col.field):
+ item[col.header.lower()] = col.field(finding)
+ elif hasattr(finding, col.field):
+ item[col.field] = getattr(finding, col.field, "")
+ elif isinstance(finding, dict):
+ item[col.field] = finding.get(col.field, "")
+ data.append(item)
+
+ return create_data_table(
+ data=data,
+ columns=columns,
+ header_color=header_color,
+ alternate_rows=True,
+ normal_style=normal_style,
+ )
+
+
+def create_section_header(
+ text: str,
+ style: ParagraphStyle,
+ add_spacer: bool = True,
+ spacer_height: float = 0.2,
+) -> list:
+ """
+ Create a section header with optional spacer.
+
+ Args:
+ text (str): Header text.
+ style (ParagraphStyle): ParagraphStyle to apply.
+ add_spacer (bool): Whether to add a spacer after the header.
+ spacer_height (float): Height of the spacer in inches.
+
+ Returns:
+ list: List of elements (Paragraph and optional Spacer).
+ """
+ elements = [Paragraph(text, style)]
+ if add_spacer:
+ elements.append(Spacer(1, spacer_height * inch))
+ return elements
+
+
+def create_summary_table(
+ label: str,
+ value: str,
+ value_color: colors.Color,
+ label_width: float = 2.5 * inch,
+ value_width: float = 2 * inch,
+) -> Table:
+ """
+ Create a summary metric table (e.g., for ThreatScore display).
+
+ Args:
+ label (str): Label text (e.g., "ThreatScore:").
+ value (str): Value text (e.g., "85.5%").
+ value_color (colors.Color): Background color for the value cell.
+ label_width (float): Width of the label column.
+ value_width (float): Width of the value column.
+
+ Returns:
+ Table: A styled summary Table.
+ """
+ data = [[label, value]]
+ table = Table(data, colWidths=[label_width, value_width])
+
+ table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, 0), colors.Color(0.1, 0.3, 0.5)),
+ ("TEXTCOLOR", (0, 0), (0, 0), COLOR_WHITE),
+ ("FONTNAME", (0, 0), (0, 0), "FiraCode"),
+ ("FONTSIZE", (0, 0), (0, 0), 12),
+ ("BACKGROUND", (1, 0), (1, 0), value_color),
+ ("TEXTCOLOR", (1, 0), (1, 0), COLOR_WHITE),
+ ("FONTNAME", (1, 0), (1, 0), "FiraCode"),
+ ("FONTSIZE", (1, 0), (1, 0), 16),
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("GRID", (0, 0), (-1, -1), 1.5, colors.Color(0.5, 0.6, 0.7)),
+ ("LEFTPADDING", (0, 0), (-1, -1), 12),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 12),
+ ("TOPPADDING", (0, 0), (-1, -1), 10),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 10),
+ ]
+ )
+ )
+
+ return table
diff --git a/api/src/backend/tasks/jobs/reports/config.py b/api/src/backend/tasks/jobs/reports/config.py
new file mode 100644
index 0000000000..0785505820
--- /dev/null
+++ b/api/src/backend/tasks/jobs/reports/config.py
@@ -0,0 +1,286 @@
+from dataclasses import dataclass, field
+
+from reportlab.lib import colors
+from reportlab.lib.units import inch
+
+# =============================================================================
+# Performance & Memory Optimization Settings
+# =============================================================================
+# These settings control memory usage and performance for large reports.
+# Adjust these values if workers are running out of memory.
+
+# Chart settings - lower DPI = less memory, 150 is good quality for PDF
+CHART_DPI_DEFAULT = 150
+
+# LongTable threshold - use LongTable for tables with more rows than this
+# LongTable handles page breaks better and has optimized memory for large tables
+LONG_TABLE_THRESHOLD = 50
+
+# Skip alternating row colors for tables larger than this (reduces memory)
+ALTERNATE_ROWS_MAX_SIZE = 200
+
+# Database query batch size for findings.
+# NOTE(review): the Django setting DJANGO_FINDINGS_BATCH_SIZE defaults to 1000,
+# so this local value (2000) does not match it — confirm which is authoritative.
+# Larger = fewer queries but more memory per batch
+FINDINGS_BATCH_SIZE = 2000
+
+
+# =============================================================================
+# Base colors
+# =============================================================================
+COLOR_PROWLER_DARK_GREEN = colors.Color(0.1, 0.5, 0.2)
+COLOR_BLUE = colors.Color(0.2, 0.4, 0.6)
+COLOR_LIGHT_BLUE = colors.Color(0.3, 0.5, 0.7)
+COLOR_LIGHTER_BLUE = colors.Color(0.4, 0.6, 0.8)
+COLOR_BG_BLUE = colors.Color(0.95, 0.97, 1.0)
+COLOR_BG_LIGHT_BLUE = colors.Color(0.98, 0.99, 1.0)
+COLOR_GRAY = colors.Color(0.2, 0.2, 0.2)
+COLOR_LIGHT_GRAY = colors.Color(0.9, 0.9, 0.9)
+COLOR_BORDER_GRAY = colors.Color(0.7, 0.8, 0.9)
+COLOR_GRID_GRAY = colors.Color(0.7, 0.7, 0.7)
+COLOR_DARK_GRAY = colors.Color(0.4, 0.4, 0.4)
+COLOR_HEADER_DARK = colors.Color(0.1, 0.3, 0.5)
+COLOR_HEADER_MEDIUM = colors.Color(0.15, 0.35, 0.55)
+COLOR_WHITE = colors.white
+
+# Risk and status colors
+COLOR_HIGH_RISK = colors.Color(0.8, 0.2, 0.2)
+COLOR_MEDIUM_RISK = colors.Color(0.9, 0.6, 0.2)
+COLOR_LOW_RISK = colors.Color(0.9, 0.9, 0.2)
+COLOR_SAFE = colors.Color(0.2, 0.8, 0.2)
+
+# ENS specific colors
+COLOR_ENS_ALTO = colors.Color(0.8, 0.2, 0.2)
+COLOR_ENS_MEDIO = colors.Color(0.98, 0.75, 0.13)
+COLOR_ENS_BAJO = colors.Color(0.06, 0.72, 0.51)
+COLOR_ENS_OPCIONAL = colors.Color(0.42, 0.45, 0.50)
+COLOR_ENS_TIPO = colors.Color(0.2, 0.4, 0.6)
+COLOR_ENS_AUTO = colors.Color(0.30, 0.69, 0.31)
+COLOR_ENS_MANUAL = colors.Color(0.96, 0.60, 0.0)
+
+# NIS2 specific colors
+COLOR_NIS2_PRIMARY = colors.Color(0.12, 0.23, 0.54)
+COLOR_NIS2_SECONDARY = colors.Color(0.23, 0.51, 0.96)
+COLOR_NIS2_BG_BLUE = colors.Color(0.96, 0.97, 0.99)
+
+# Chart colors (hex strings for matplotlib)
+CHART_COLOR_GREEN_1 = "#4CAF50"
+CHART_COLOR_GREEN_2 = "#8BC34A"
+CHART_COLOR_YELLOW = "#FFEB3B"
+CHART_COLOR_ORANGE = "#FF9800"
+CHART_COLOR_RED = "#F44336"
+CHART_COLOR_BLUE = "#2196F3"
+
+# ENS dimension mappings: dimension name -> (abbreviation, color)
+DIMENSION_MAPPING = {
+ "trazabilidad": ("T", colors.Color(0.26, 0.52, 0.96)),
+ "autenticidad": ("A", colors.Color(0.30, 0.69, 0.31)),
+ "integridad": ("I", colors.Color(0.61, 0.15, 0.69)),
+ "confidencialidad": ("C", colors.Color(0.96, 0.26, 0.21)),
+ "disponibilidad": ("D", colors.Color(1.0, 0.60, 0.0)),
+}
+
+# ENS tipo icons
+TIPO_ICONS = {
+ "requisito": "\u26a0\ufe0f",
+ "refuerzo": "\U0001f6e1\ufe0f",
+ "recomendacion": "\U0001f4a1",
+ "medida": "\U0001f4cb",
+}
+
+# Dimension names for charts (Spanish)
+DIMENSION_NAMES = [
+ "Trazabilidad",
+ "Autenticidad",
+ "Integridad",
+ "Confidencialidad",
+ "Disponibilidad",
+]
+
+DIMENSION_KEYS = [
+ "trazabilidad",
+ "autenticidad",
+ "integridad",
+ "confidencialidad",
+ "disponibilidad",
+]
+
+# ENS nivel and tipo order
+ENS_NIVEL_ORDER = ["alto", "medio", "bajo", "opcional"]
+ENS_TIPO_ORDER = ["requisito", "refuerzo", "recomendacion", "medida"]
+
+# ThreatScore sections
+THREATSCORE_SECTIONS = [
+ "1. IAM",
+ "2. Attack Surface",
+ "3. Logging and Monitoring",
+ "4. Encryption",
+]
+
+# NIS2 sections
+NIS2_SECTIONS = [
+ "1",
+ "2",
+ "3",
+ "4",
+ "5",
+ "6",
+ "7",
+ "9",
+ "11",
+ "12",
+]
+
+NIS2_SECTION_TITLES = {
+ "1": "1. Policy on Security",
+ "2": "2. Risk Management",
+ "3": "3. Incident Handling",
+ "4": "4. Business Continuity",
+ "5": "5. Supply Chain",
+ "6": "6. Acquisition & Dev",
+ "7": "7. Effectiveness",
+ "9": "9. Cryptography",
+ "11": "11. Access Control",
+ "12": "12. Asset Management",
+}
+
+# Table column widths
+COL_WIDTH_SMALL = 0.4 * inch
+COL_WIDTH_MEDIUM = 0.9 * inch
+COL_WIDTH_LARGE = 1.5 * inch
+COL_WIDTH_XLARGE = 2 * inch
+COL_WIDTH_XXLARGE = 3 * inch
+
+# Common padding values
+PADDING_SMALL = 4
+PADDING_MEDIUM = 6
+PADDING_LARGE = 8
+PADDING_XLARGE = 10
+
+
+@dataclass
+class FrameworkConfig:
+    """
+    Configuration for a compliance framework PDF report.
+
+    This dataclass defines all the configurable aspects of a compliance framework
+    report, including visual styling, metadata fields, and feature flags.
+    Color fields use default_factory so each instance gets its own default
+    color objects rather than sharing module-level mutable defaults.
+
+    Attributes:
+        name (str): Internal framework identifier (e.g., "prowler_threatscore").
+        display_name (str): Human-readable framework name for the report title.
+        logo_filename (str | None): Optional filename of the framework logo in assets/img/.
+        primary_color (colors.Color): Main color used for headers and important elements.
+        secondary_color (colors.Color): Secondary color for sub-headers and accents.
+        bg_color (colors.Color): Background color for highlighted sections.
+        attribute_fields (list[str]): List of metadata field names to extract from requirements.
+        sections (list[str] | None): Optional ordered list of section names for grouping.
+        language (str): Report language ("en" for English, "es" for Spanish).
+        has_risk_levels (bool): Whether the framework uses numeric risk levels.
+        has_dimensions (bool): Whether the framework uses security dimensions (ENS).
+        has_niveles (bool): Whether the framework uses nivel classification (ENS).
+        has_weight (bool): Whether requirements have weight values.
+    """
+
+    name: str
+    display_name: str
+    logo_filename: str | None = None
+    primary_color: colors.Color = field(default_factory=lambda: COLOR_BLUE)
+    secondary_color: colors.Color = field(default_factory=lambda: COLOR_LIGHT_BLUE)
+    bg_color: colors.Color = field(default_factory=lambda: COLOR_BG_BLUE)
+    attribute_fields: list[str] = field(default_factory=list)
+    sections: list[str] | None = None
+    language: str = "en"
+    has_risk_levels: bool = False
+    has_dimensions: bool = False
+    has_niveles: bool = False
+    has_weight: bool = False
+
+
+FRAMEWORK_REGISTRY: dict[str, FrameworkConfig] = {
+ "prowler_threatscore": FrameworkConfig(
+ name="prowler_threatscore",
+ display_name="Prowler ThreatScore",
+ logo_filename=None,
+ primary_color=COLOR_BLUE,
+ secondary_color=COLOR_LIGHT_BLUE,
+ bg_color=COLOR_BG_BLUE,
+ attribute_fields=[
+ "Title",
+ "Section",
+ "SubSection",
+ "LevelOfRisk",
+ "Weight",
+ "AttributeDescription",
+ "AdditionalInformation",
+ ],
+ sections=THREATSCORE_SECTIONS,
+ language="en",
+ has_risk_levels=True,
+ has_weight=True,
+ ),
+ "ens": FrameworkConfig(
+ name="ens",
+ display_name="ENS RD2022",
+ logo_filename="ens_logo.png",
+ primary_color=COLOR_ENS_ALTO,
+ secondary_color=COLOR_ENS_MEDIO,
+ bg_color=COLOR_BG_BLUE,
+ attribute_fields=[
+ "IdGrupoControl",
+ "Marco",
+ "Categoria",
+ "DescripcionControl",
+ "Tipo",
+ "Nivel",
+ "Dimensiones",
+ "ModoEjecucion",
+ ],
+ sections=None,
+ language="es",
+ has_risk_levels=False,
+ has_dimensions=True,
+ has_niveles=True,
+ has_weight=False,
+ ),
+ "nis2": FrameworkConfig(
+ name="nis2",
+ display_name="NIS2 Directive",
+ logo_filename="nis2_logo.png",
+ primary_color=COLOR_NIS2_PRIMARY,
+ secondary_color=COLOR_NIS2_SECONDARY,
+ bg_color=COLOR_NIS2_BG_BLUE,
+ attribute_fields=[
+ "Section",
+ "SubSection",
+ "Description",
+ ],
+ sections=NIS2_SECTIONS,
+ language="en",
+ has_risk_levels=False,
+ has_dimensions=False,
+ has_niveles=False,
+ has_weight=False,
+ ),
+}
+
+
+def get_framework_config(compliance_id: str) -> FrameworkConfig | None:
+ """
+ Get framework configuration based on compliance ID.
+
+ Args:
+ compliance_id (str): The compliance framework identifier (e.g., "prowler_threatscore_aws").
+
+ Returns:
+ FrameworkConfig | None: The framework configuration if found, None otherwise.
+ """
+ compliance_lower = compliance_id.lower()
+
+ if "threatscore" in compliance_lower:
+ return FRAMEWORK_REGISTRY["prowler_threatscore"]
+ if "ens" in compliance_lower:
+ return FRAMEWORK_REGISTRY["ens"]
+ if "nis2" in compliance_lower:
+ return FRAMEWORK_REGISTRY["nis2"]
+
+ return None
diff --git a/api/src/backend/tasks/jobs/reports/ens.py b/api/src/backend/tasks/jobs/reports/ens.py
new file mode 100644
index 0000000000..56ee4bc40f
--- /dev/null
+++ b/api/src/backend/tasks/jobs/reports/ens.py
@@ -0,0 +1,1004 @@
+import os
+from collections import defaultdict
+
+from reportlab.lib import colors
+from reportlab.lib.styles import ParagraphStyle
+from reportlab.lib.units import inch
+from reportlab.platypus import Image, PageBreak, Paragraph, Spacer, Table, TableStyle
+
+from api.models import StatusChoices
+
+from .base import (
+ BaseComplianceReportGenerator,
+ ComplianceData,
+ get_requirement_metadata,
+)
+from .charts import create_horizontal_bar_chart, create_radar_chart
+from .components import get_color_for_compliance
+from .config import (
+ COLOR_BG_BLUE,
+ COLOR_BLUE,
+ COLOR_BORDER_GRAY,
+ COLOR_ENS_ALTO,
+ COLOR_ENS_AUTO,
+ COLOR_ENS_BAJO,
+ COLOR_ENS_MANUAL,
+ COLOR_ENS_MEDIO,
+ COLOR_ENS_OPCIONAL,
+ COLOR_ENS_TIPO,
+ COLOR_GRAY,
+ COLOR_GRID_GRAY,
+ COLOR_HIGH_RISK,
+ COLOR_SAFE,
+ COLOR_WHITE,
+ DIMENSION_KEYS,
+ DIMENSION_MAPPING,
+ DIMENSION_NAMES,
+ ENS_NIVEL_ORDER,
+ ENS_TIPO_ORDER,
+)
+
+
+class ENSReportGenerator(BaseComplianceReportGenerator):
+ """
+ PDF report generator for ENS RD2022 framework.
+
+ This generator creates comprehensive PDF reports containing:
+ - Cover page with both Prowler and ENS logos
+ - Executive summary with overall compliance score
+ - Marco/Categoría analysis with charts
+ - Security dimensions radar chart
+ - Requirement type distribution
+ - Execution mode distribution
+ - Critical failed requirements (nivel alto)
+ - Requirements index
+ - Detailed findings for failed and manual requirements
+ """
+
+ def create_cover_page(self, data: ComplianceData) -> list:
+ """
+ Create the ENS report cover page with both logos and legend.
+
+ Args:
+ data: Aggregated compliance data.
+
+ Returns:
+ List of ReportLab elements.
+ """
+ elements = []
+
+ # Create logos side by side
+ prowler_logo_path = os.path.join(
+ os.path.dirname(__file__), "../../assets/img/prowler_logo.png"
+ )
+ ens_logo_path = os.path.join(
+ os.path.dirname(__file__), "../../assets/img/ens_logo.png"
+ )
+
+ prowler_logo = Image(prowler_logo_path, width=3.5 * inch, height=0.7 * inch)
+ ens_logo = Image(ens_logo_path, width=1.5 * inch, height=2 * inch)
+
+ logos_table = Table(
+ [[prowler_logo, ens_logo]], colWidths=[4 * inch, 2.5 * inch]
+ )
+ logos_table.setStyle(
+ TableStyle(
+ [
+ ("ALIGN", (0, 0), (0, 0), "LEFT"),
+ ("ALIGN", (1, 0), (1, 0), "RIGHT"),
+ ("VALIGN", (0, 0), (0, 0), "MIDDLE"),
+ ("VALIGN", (1, 0), (1, 0), "TOP"),
+ ]
+ )
+ )
+ elements.append(logos_table)
+ elements.append(Spacer(1, 0.3 * inch))
+ elements.append(
+ Paragraph("Informe de Cumplimiento ENS RD 311/2022", self.styles["title"])
+ )
+ elements.append(Spacer(1, 0.5 * inch))
+
+ # Compliance info table - use base class helper for consistency
+ info_rows = self._build_info_rows(data, language="es")
+ # Convert tuples to lists and wrap long text in Paragraphs
+ info_data = []
+ for label, value in info_rows:
+ if label in ("Nombre:", "Descripción:") and value:
+ info_data.append(
+ [label, Paragraph(value, self.styles["normal_center"])]
+ )
+ else:
+ info_data.append([label, value])
+
+ info_table = Table(info_data, colWidths=[2 * inch, 4 * inch])
+ info_table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, -1), COLOR_BLUE),
+ ("TEXTCOLOR", (0, 0), (0, -1), COLOR_WHITE),
+ ("FONTNAME", (0, 0), (0, -1), "FiraCode"),
+ ("BACKGROUND", (1, 0), (1, -1), COLOR_BG_BLUE),
+ ("TEXTCOLOR", (1, 0), (1, -1), COLOR_GRAY),
+ ("FONTNAME", (1, 0), (1, -1), "PlusJakartaSans"),
+ ("ALIGN", (0, 0), (-1, -1), "LEFT"),
+ ("VALIGN", (0, 0), (-1, -1), "TOP"),
+ ("FONTSIZE", (0, 0), (-1, -1), 11),
+ ("GRID", (0, 0), (-1, -1), 1, colors.Color(0.7, 0.8, 0.9)),
+ ("LEFTPADDING", (0, 0), (-1, -1), 10),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 10),
+ ("TOPPADDING", (0, 0), (-1, -1), 8),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 8),
+ ]
+ )
+ )
+ elements.append(info_table)
+ elements.append(Spacer(1, 0.5 * inch))
+
+ # Warning about excluded manual requirements
+ manual_count = self._count_manual_requirements(data)
+ auto_count = len(
+ [r for r in data.requirements if r.status != StatusChoices.MANUAL]
+ )
+
+ warning_text = (
+ f"AVISO: Este informe no incluye los requisitos de ejecución manual. "
+ f"El compliance {data.compliance_id} contiene un total de "
+ f"{manual_count} requisitos manuales que no han sido evaluados "
+ f"automáticamente y por tanto no están reflejados en las estadísticas de este reporte. "
+ f"El análisis se basa únicamente en los {auto_count} requisitos automatizados."
+ )
+ warning_paragraph = Paragraph(warning_text, self.styles["normal"])
+ warning_table = Table([[warning_paragraph]], colWidths=[6 * inch])
+ warning_table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, 0), colors.Color(1.0, 0.95, 0.7)),
+ ("TEXTCOLOR", (0, 0), (0, 0), colors.Color(0.4, 0.3, 0.0)),
+ ("ALIGN", (0, 0), (0, 0), "LEFT"),
+ ("VALIGN", (0, 0), (0, 0), "MIDDLE"),
+ ("BOX", (0, 0), (-1, -1), 2, colors.Color(0.9, 0.7, 0.0)),
+ ("LEFTPADDING", (0, 0), (-1, -1), 15),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 15),
+ ("TOPPADDING", (0, 0), (-1, -1), 12),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 12),
+ ]
+ )
+ )
+ elements.append(warning_table)
+ elements.append(Spacer(1, 0.5 * inch))
+
+ # Legend
+ elements.append(self._create_legend())
+
+ return elements
+
+ def create_executive_summary(self, data: ComplianceData) -> list:
+ """
+ Create the executive summary with compliance metrics.
+
+ Args:
+ data: Aggregated compliance data.
+
+ Returns:
+ List of ReportLab elements.
+ """
+ elements = []
+
+ elements.append(Paragraph("Resumen Ejecutivo", self.styles["h1"]))
+ elements.append(Spacer(1, 0.2 * inch))
+
+ # Filter out manual requirements
+ auto_requirements = [
+ r for r in data.requirements if r.status != StatusChoices.MANUAL
+ ]
+ total = len(auto_requirements)
+ passed = sum(1 for r in auto_requirements if r.status == StatusChoices.PASS)
+ failed = sum(1 for r in auto_requirements if r.status == StatusChoices.FAIL)
+
+ overall_compliance = (passed / total * 100) if total > 0 else 0
+ compliance_color = get_color_for_compliance(overall_compliance)
+
+ # Summary table
+ summary_data = [["Nivel de Cumplimiento Global:", f"{overall_compliance:.2f}%"]]
+ summary_table = Table(summary_data, colWidths=[3 * inch, 2 * inch])
+ summary_table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, 0), colors.Color(0.1, 0.3, 0.5)),
+ ("TEXTCOLOR", (0, 0), (0, 0), COLOR_WHITE),
+ ("FONTNAME", (0, 0), (0, 0), "FiraCode"),
+ ("FONTSIZE", (0, 0), (0, 0), 12),
+ ("BACKGROUND", (1, 0), (1, 0), compliance_color),
+ ("TEXTCOLOR", (1, 0), (1, 0), COLOR_WHITE),
+ ("FONTNAME", (1, 0), (1, 0), "FiraCode"),
+ ("FONTSIZE", (1, 0), (1, 0), 16),
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("GRID", (0, 0), (-1, -1), 1.5, colors.Color(0.5, 0.6, 0.7)),
+ ("LEFTPADDING", (0, 0), (-1, -1), 12),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 12),
+ ("TOPPADDING", (0, 0), (-1, -1), 10),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 10),
+ ]
+ )
+ )
+ elements.append(summary_table)
+ elements.append(Spacer(1, 0.3 * inch))
+
+ # Counts table
+ counts_data = [
+ ["Estado", "Cantidad", "Porcentaje"],
+ [
+ "CUMPLE",
+ str(passed),
+ f"{(passed / total * 100):.1f}%" if total > 0 else "0%",
+ ],
+ [
+ "NO CUMPLE",
+ str(failed),
+ f"{(failed / total * 100):.1f}%" if total > 0 else "0%",
+ ],
+ ["TOTAL", str(total), "100%"],
+ ]
+ counts_table = Table(counts_data, colWidths=[2 * inch, 1.5 * inch, 1.5 * inch])
+ counts_table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (-1, 0), COLOR_BLUE),
+ ("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
+ ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
+ ("BACKGROUND", (0, 1), (0, 1), COLOR_SAFE),
+ ("TEXTCOLOR", (0, 1), (0, 1), COLOR_WHITE),
+ ("BACKGROUND", (0, 2), (0, 2), COLOR_HIGH_RISK),
+ ("TEXTCOLOR", (0, 2), (0, 2), COLOR_WHITE),
+ ("BACKGROUND", (0, 3), (0, 3), colors.Color(0.4, 0.4, 0.4)),
+ ("TEXTCOLOR", (0, 3), (0, 3), COLOR_WHITE),
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("FONTSIZE", (0, 0), (-1, -1), 10),
+ ("GRID", (0, 0), (-1, -1), 1, COLOR_GRID_GRAY),
+ ("LEFTPADDING", (0, 0), (-1, -1), 8),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 8),
+ ("TOPPADDING", (0, 0), (-1, -1), 6),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 6),
+ ]
+ )
+ )
+ elements.append(counts_table)
+ elements.append(Spacer(1, 0.3 * inch))
+
+ # Compliance by Nivel
+ elements.extend(self._create_nivel_table(data))
+
+ return elements
+
+ def create_charts_section(self, data: ComplianceData) -> list:
+ """
+ Create the charts section with Marco analysis and radar chart.
+
+ Args:
+ data: Aggregated compliance data.
+
+ Returns:
+ List of ReportLab elements.
+ """
+ elements = []
+
+ # Critical failed requirements section (nivel alto) - new page
+ elements.append(PageBreak())
+ elements.extend(self._create_critical_failed_section(data))
+
+ # Marco y Categorías chart - new page
+ elements.append(PageBreak())
+ elements.append(
+ Paragraph("Análisis por Marcos y Categorías", self.styles["h1"])
+ )
+ elements.append(Spacer(1, 0.2 * inch))
+
+ marco_cat_chart = self._create_marco_category_chart(data)
+ marco_cat_image = Image(marco_cat_chart, width=7 * inch, height=5.5 * inch)
+ elements.append(marco_cat_image)
+
+ # Security dimensions radar chart - new page
+ elements.append(PageBreak())
+ elements.append(
+ Paragraph("Análisis por Dimensiones de Seguridad", self.styles["h1"])
+ )
+ elements.append(Spacer(1, 0.2 * inch))
+
+ radar_buffer = self._create_dimensions_radar_chart(data)
+ radar_image = Image(radar_buffer, width=6 * inch, height=6 * inch)
+ elements.append(radar_image)
+ elements.append(PageBreak())
+
+ # Type distribution
+ elements.extend(self._create_tipo_section(data))
+
+ return elements
+
+ def create_requirements_index(self, data: ComplianceData) -> list:
+ """
+ Create the requirements index organized by Marco and Categoria.
+
+ Args:
+ data: Aggregated compliance data.
+
+ Returns:
+ List of ReportLab elements.
+ """
+ elements = []
+
+ elements.append(Paragraph("Índice de Requisitos", self.styles["h1"]))
+ elements.append(Spacer(1, 0.2 * inch))
+
+ # Organize by Marco and Categoria
+ marcos = {}
+ for req in data.requirements:
+ if req.status == StatusChoices.MANUAL:
+ continue
+
+ m = get_requirement_metadata(req.id, data.attributes_by_requirement_id)
+ if m:
+ marco = getattr(m, "Marco", "Otros")
+ categoria = getattr(m, "Categoria", "Sin categoría")
+ descripcion = getattr(m, "DescripcionControl", req.description)
+ nivel = getattr(m, "Nivel", "")
+
+ if marco not in marcos:
+ marcos[marco] = {}
+ if categoria not in marcos[marco]:
+ marcos[marco][categoria] = []
+
+ marcos[marco][categoria].append(
+ {
+ "id": req.id,
+ "descripcion": descripcion,
+ "nivel": nivel,
+ "status": req.status,
+ }
+ )
+
+ for marco_name, categorias in marcos.items():
+ elements.append(Paragraph(f"Marco: {marco_name}", self.styles["h2"]))
+
+ for categoria_name, reqs in categorias.items():
+ elements.append(Paragraph(f"{categoria_name}", self.styles["h3"]))
+
+ for req in reqs:
+ status_indicator = (
+ "✓" if req["status"] == StatusChoices.PASS else "✗"
+ )
+ nivel_badge = f"[{req['nivel'].upper()}]" if req["nivel"] else ""
+ elements.append(
+ Paragraph(
+ f"{status_indicator} {req['id']} {nivel_badge}",
+ self.styles["normal"],
+ )
+ )
+
+ elements.append(Spacer(1, 0.1 * inch))
+
+ return elements
+
+ def get_footer_text(self, page_num: int) -> tuple[str, str]:
+ """
+ Get Spanish footer text for ENS report.
+
+ Args:
+ page_num: Current page number.
+
+ Returns:
+ Tuple of (left_text, right_text) for the footer.
+ """
+ return f"Página {page_num}", "Powered by Prowler"
+
+ def _count_manual_requirements(self, data: ComplianceData) -> int:
+ """Count requirements with manual execution mode."""
+ return sum(1 for r in data.requirements if r.status == StatusChoices.MANUAL)
+
+ def _create_legend(self) -> Table:
+ """Create the ENS values legend table."""
+ legend_text = """
+ Nivel (Criticidad del requisito):
+ • Alto: Requisitos críticos que deben cumplirse prioritariamente
+ • Medio: Requisitos importantes con impacto moderado
+ • Bajo: Requisitos complementarios de menor criticidad
+ • Opcional: Recomendaciones adicionales no obligatorias
+
+ Tipo (Clasificación del requisito):
+ • Requisito: Obligación establecida por el ENS
+ • Refuerzo: Medida adicional que refuerza un requisito
+ • Recomendación: Buena práctica sugerida
+ • Medida: Acción concreta de implementación
+
+ Dimensiones de Seguridad:
+ • C (Confidencialidad): Protección contra accesos no autorizados
+ • I (Integridad): Garantía de exactitud y completitud
+ • T (Trazabilidad): Capacidad de rastrear acciones
+ • A (Autenticidad): Verificación de identidad
+ • D (Disponibilidad): Acceso cuando se necesita
+ """
+ legend_paragraph = Paragraph(legend_text, self.styles["normal"])
+ legend_table = Table([[legend_paragraph]], colWidths=[6.5 * inch])
+ legend_table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, 0), COLOR_BG_BLUE),
+ ("TEXTCOLOR", (0, 0), (0, 0), COLOR_GRAY),
+ ("ALIGN", (0, 0), (0, 0), "LEFT"),
+ ("VALIGN", (0, 0), (0, 0), "TOP"),
+ ("BOX", (0, 0), (-1, -1), 1.5, colors.Color(0.5, 0.6, 0.8)),
+ ("LEFTPADDING", (0, 0), (-1, -1), 15),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 15),
+ ("TOPPADDING", (0, 0), (-1, -1), 12),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 12),
+ ]
+ )
+ )
+ return legend_table
+
+ def _create_nivel_table(self, data: ComplianceData) -> list:
+ """Create compliance by nivel table."""
+ elements = []
+ elements.append(Paragraph("Cumplimiento por Nivel", self.styles["h2"]))
+
+ nivel_data = defaultdict(lambda: {"passed": 0, "total": 0})
+ for req in data.requirements:
+ if req.status == StatusChoices.MANUAL:
+ continue
+
+ m = get_requirement_metadata(req.id, data.attributes_by_requirement_id)
+ if m:
+ nivel = getattr(m, "Nivel", "").lower()
+ nivel_data[nivel]["total"] += 1
+ if req.status == StatusChoices.PASS:
+ nivel_data[nivel]["passed"] += 1
+
+ table_data = [["Nivel", "Cumplidos", "Total", "Porcentaje"]]
+ nivel_colors = {
+ "alto": COLOR_ENS_ALTO,
+ "medio": COLOR_ENS_MEDIO,
+ "bajo": COLOR_ENS_BAJO,
+ "opcional": COLOR_ENS_OPCIONAL,
+ }
+
+ for nivel in ENS_NIVEL_ORDER:
+ if nivel in nivel_data:
+ d = nivel_data[nivel]
+ pct = (d["passed"] / d["total"] * 100) if d["total"] > 0 else 0
+ table_data.append(
+ [
+ nivel.capitalize(),
+ str(d["passed"]),
+ str(d["total"]),
+ f"{pct:.1f}%",
+ ]
+ )
+
+ table = Table(
+ table_data, colWidths=[1.5 * inch, 1.5 * inch, 1.5 * inch, 1.5 * inch]
+ )
+ table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (-1, 0), COLOR_BLUE),
+ ("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
+ ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("FONTSIZE", (0, 0), (-1, -1), 10),
+ ("GRID", (0, 0), (-1, -1), 1, COLOR_GRID_GRAY),
+ ("LEFTPADDING", (0, 0), (-1, -1), 8),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 8),
+ ("TOPPADDING", (0, 0), (-1, -1), 6),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 6),
+ ]
+ )
+ )
+
+ # Color nivel column
+ for idx, nivel in enumerate(ENS_NIVEL_ORDER):
+ if nivel in nivel_data:
+ row_idx = idx + 1
+ if row_idx < len(table_data):
+ color = nivel_colors.get(nivel, COLOR_GRAY)
+ table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, row_idx), (0, row_idx), color),
+ ("TEXTCOLOR", (0, row_idx), (0, row_idx), COLOR_WHITE),
+ ]
+ )
+ )
+
+ elements.append(table)
+ return elements
+
+ def _create_marco_category_chart(self, data: ComplianceData):
+ """Create Marco - Categoría combined compliance chart."""
+ # Group by marco + categoria combination
+ marco_cat_scores = defaultdict(lambda: {"passed": 0, "total": 0})
+
+ for req in data.requirements:
+ if req.status == StatusChoices.MANUAL:
+ continue
+
+ m = get_requirement_metadata(req.id, data.attributes_by_requirement_id)
+ if m:
+ marco = getattr(m, "Marco", "otros")
+ categoria = getattr(m, "Categoria", "sin categoría")
+ # Combined key: "marco - categoría"
+ key = f"{marco} - {categoria}"
+ marco_cat_scores[key]["total"] += 1
+ if req.status == StatusChoices.PASS:
+ marco_cat_scores[key]["passed"] += 1
+
+ labels = []
+ values = []
+ for key, scores in sorted(marco_cat_scores.items()):
+ if scores["total"] > 0:
+ pct = (scores["passed"] / scores["total"]) * 100
+ labels.append(key)
+ values.append(pct)
+
+ return create_horizontal_bar_chart(
+ labels=labels,
+ values=values,
+ xlabel="Porcentaje de Cumplimiento (%)",
+ )
+
+ def _create_dimensions_radar_chart(self, data: ComplianceData):
+ """Create security dimensions radar chart."""
+ dimension_scores = {dim: {"passed": 0, "total": 0} for dim in DIMENSION_KEYS}
+
+ for req in data.requirements:
+ if req.status == StatusChoices.MANUAL:
+ continue
+
+ m = get_requirement_metadata(req.id, data.attributes_by_requirement_id)
+ if m:
+ dimensiones = getattr(m, "Dimensiones", [])
+ if isinstance(dimensiones, str):
+ dimensiones = [d.strip().lower() for d in dimensiones.split(",")]
+ elif isinstance(dimensiones, list):
+ dimensiones = [
+ d.lower() if isinstance(d, str) else d for d in dimensiones
+ ]
+
+ for dim in dimensiones:
+ if dim in dimension_scores:
+ dimension_scores[dim]["total"] += 1
+ if req.status == StatusChoices.PASS:
+ dimension_scores[dim]["passed"] += 1
+
+ values = []
+ for dim in DIMENSION_KEYS:
+ scores = dimension_scores[dim]
+ if scores["total"] > 0:
+ pct = (scores["passed"] / scores["total"]) * 100
+ else:
+ pct = 100
+ values.append(pct)
+
+ return create_radar_chart(
+ labels=DIMENSION_NAMES,
+ values=values,
+ color="#2196F3",
+ )
+
+ def _create_tipo_section(self, data: ComplianceData) -> list:
+ """Create type distribution section."""
+ elements = []
+ elements.append(
+ Paragraph("Distribución por Tipo de Requisito", self.styles["h1"])
+ )
+ elements.append(Spacer(1, 0.2 * inch))
+
+ tipo_data = defaultdict(lambda: {"passed": 0, "total": 0})
+ for req in data.requirements:
+ if req.status == StatusChoices.MANUAL:
+ continue
+
+ m = get_requirement_metadata(req.id, data.attributes_by_requirement_id)
+ if m:
+ tipo = getattr(m, "Tipo", "").lower()
+ tipo_data[tipo]["total"] += 1
+ if req.status == StatusChoices.PASS:
+ tipo_data[tipo]["passed"] += 1
+
+ table_data = [["Tipo", "Cumplidos", "Total", "Porcentaje"]]
+ for tipo in ENS_TIPO_ORDER:
+ if tipo in tipo_data:
+ d = tipo_data[tipo]
+ pct = (d["passed"] / d["total"] * 100) if d["total"] > 0 else 0
+ table_data.append(
+ [
+ tipo.capitalize(),
+ str(d["passed"]),
+ str(d["total"]),
+ f"{pct:.1f}%",
+ ]
+ )
+
+ table = Table(
+ table_data, colWidths=[2 * inch, 1.5 * inch, 1.5 * inch, 1.5 * inch]
+ )
+ table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (-1, 0), COLOR_BLUE),
+ ("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
+ ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("FONTSIZE", (0, 0), (-1, -1), 10),
+ ("GRID", (0, 0), (-1, -1), 1, COLOR_GRID_GRAY),
+ ("LEFTPADDING", (0, 0), (-1, -1), 8),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 8),
+ ("TOPPADDING", (0, 0), (-1, -1), 6),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 6),
+ ]
+ )
+ )
+ elements.append(table)
+ return elements
+
+ def _create_critical_failed_section(self, data: ComplianceData) -> list:
+ """Create section for critical failed requirements (nivel alto)."""
+ elements = []
+
+ elements.append(
+ Paragraph("Requisitos Críticos No Cumplidos", self.styles["h1"])
+ )
+ elements.append(Spacer(1, 0.2 * inch))
+
+ # Get failed requirements with nivel alto
+ critical_failed = []
+ for req in data.requirements:
+ if req.status != StatusChoices.FAIL:
+ continue
+
+ m = get_requirement_metadata(req.id, data.attributes_by_requirement_id)
+ if m:
+ nivel = getattr(m, "Nivel", "").lower()
+ if nivel == "alto":
+ critical_failed.append(
+ {
+ "id": req.id,
+ "descripcion": getattr(
+ m, "DescripcionControl", req.description
+ ),
+ "marco": getattr(m, "Marco", ""),
+ "categoria": getattr(m, "Categoria", ""),
+ "tipo": getattr(m, "Tipo", ""),
+ }
+ )
+
+ if not critical_failed:
+ elements.append(
+ Paragraph(
+ "✅ No hay requisitos críticos (nivel ALTO) que hayan fallado.",
+ self.styles["normal"],
+ )
+ )
+ return elements
+
+ elements.append(
+ Paragraph(
+ f"Se encontraron {len(critical_failed)} requisitos de nivel ALTO "
+ "que no cumplen y requieren atención inmediata:",
+ self.styles["normal"],
+ )
+ )
+ elements.append(Spacer(1, 0.2 * inch))
+
+ # Create table - use a cell style without leftIndent for proper alignment
+ cell_style = ParagraphStyle(
+ "CellStyle",
+ parent=self.styles["normal"],
+ leftIndent=0,
+ spaceBefore=0,
+ spaceAfter=0,
+ )
+ table_data: list = [["ID Requisito", "Marco", "Categoría", "Tipo"]]
+ for req in critical_failed:
+ table_data.append(
+ [
+ req["id"],
+ req["marco"],
+ Paragraph(req["categoria"], cell_style),
+ req["tipo"].capitalize() if req["tipo"] else "",
+ ]
+ )
+
+ table = Table(
+ table_data,
+ colWidths=[2 * inch, 1.5 * inch, 1.8 * inch, 1.2 * inch],
+ )
+ table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (-1, 0), COLOR_ENS_ALTO),
+ ("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
+ ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
+ ("FONTSIZE", (0, 0), (-1, 0), 10),
+ ("ALIGN", (0, 0), (-1, -1), "LEFT"),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("FONTSIZE", (0, 1), (-1, -1), 9),
+ ("GRID", (0, 0), (-1, -1), 0.5, COLOR_GRID_GRAY),
+ ("LEFTPADDING", (0, 0), (-1, -1), 6),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 6),
+ ("TOPPADDING", (0, 0), (-1, -1), 4),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 4),
+ (
+ "ROWBACKGROUNDS",
+ (0, 1),
+ (-1, -1),
+ [COLOR_WHITE, colors.Color(1.0, 0.95, 0.95)],
+ ),
+ ]
+ )
+ )
+ elements.append(table)
+
+ return elements
+
+ def create_detailed_findings(self, data: ComplianceData, **kwargs) -> list:
+ """
+ Create detailed findings section with ENS-specific format.
+
+        Shows each failed (and, when include_manual is True, each manual) requirement with:
+ - Requirement ID as title
+ - Status, Nivel, Tipo, ModoEjecucion badges
+ - Dimensiones badges
+ - Info table with Descripción, Marco, Categoría, etc.
+
+ Args:
+ data: Aggregated compliance data.
+ **kwargs: Additional options.
+
+ Returns:
+ List of ReportLab elements.
+ """
+ elements = []
+ include_manual = kwargs.get("include_manual", True)
+
+ elements.append(Paragraph("Detalle de Requisitos", self.styles["h1"]))
+ elements.append(Spacer(1, 0.2 * inch))
+
+ # Get failed requirements, and optionally manual requirements
+ if include_manual:
+ failed_requirements = [
+ r
+ for r in data.requirements
+ if r.status in (StatusChoices.FAIL, StatusChoices.MANUAL)
+ ]
+ else:
+ failed_requirements = [
+ r for r in data.requirements if r.status == StatusChoices.FAIL
+ ]
+
+ if not failed_requirements:
+ elements.append(
+ Paragraph(
+ "No hay requisitos fallidos para mostrar.",
+ self.styles["normal"],
+ )
+ )
+ return elements
+
+ elements.append(
+ Paragraph(
+ f"Se muestran {len(failed_requirements)} requisitos que requieren "
+ "atención:",
+ self.styles["normal"],
+ )
+ )
+ elements.append(Spacer(1, 0.3 * inch))
+
+ # Nivel colors mapping
+ nivel_colors = {
+ "alto": COLOR_ENS_ALTO,
+ "medio": COLOR_ENS_MEDIO,
+ "bajo": COLOR_ENS_BAJO,
+ "opcional": COLOR_ENS_OPCIONAL,
+ }
+
+ for req in failed_requirements:
+ m = get_requirement_metadata(req.id, data.attributes_by_requirement_id)
+
+ if not m:
+ continue
+
+ nivel = getattr(m, "Nivel", "").lower()
+ tipo = getattr(m, "Tipo", "")
+ modo = getattr(m, "ModoEjecucion", "")
+ dimensiones = getattr(m, "Dimensiones", [])
+ descripcion = getattr(m, "DescripcionControl", req.description)
+ marco = getattr(m, "Marco", "")
+ categoria = getattr(m, "Categoria", "")
+ id_grupo = getattr(m, "IdGrupoControl", "")
+
+ # Requirement ID title
+ req_title = Table([[req.id]], colWidths=[6.5 * inch])
+ req_title.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, 0), COLOR_BG_BLUE),
+ ("TEXTCOLOR", (0, 0), (0, 0), COLOR_BLUE),
+ ("FONTNAME", (0, 0), (0, 0), "FiraCode"),
+ ("FONTSIZE", (0, 0), (0, 0), 14),
+ ("ALIGN", (0, 0), (0, 0), "LEFT"),
+ ("BOX", (0, 0), (-1, -1), 2, COLOR_BLUE),
+ ("LEFTPADDING", (0, 0), (-1, -1), 12),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 12),
+ ("TOPPADDING", (0, 0), (-1, -1), 10),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 10),
+ ]
+ )
+ )
+ elements.append(req_title)
+ elements.append(Spacer(1, 0.15 * inch))
+
+ # Status and Nivel badges row
+            status_color = COLOR_HIGH_RISK  # FAIL styling (also applied to MANUAL requirements when included)
+ nivel_color = nivel_colors.get(nivel, COLOR_GRAY)
+
+ badges_row1 = [
+                ["Estado:", "FAIL", "", f"Nivel: {nivel.upper()}"],
+ ]
+ badges_table1 = Table(
+ badges_row1,
+ colWidths=[0.7 * inch, 0.8 * inch, 1.5 * inch, 1.5 * inch],
+ )
+ badges_table1.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, 0), colors.Color(0.9, 0.9, 0.9)),
+ ("FONTNAME", (0, 0), (0, 0), "PlusJakartaSans"),
+ ("BACKGROUND", (1, 0), (1, 0), status_color),
+ ("TEXTCOLOR", (1, 0), (1, 0), COLOR_WHITE),
+ ("FONTNAME", (1, 0), (1, 0), "FiraCode"),
+ ("BACKGROUND", (3, 0), (3, 0), nivel_color),
+ ("TEXTCOLOR", (3, 0), (3, 0), COLOR_WHITE),
+ ("FONTNAME", (3, 0), (3, 0), "FiraCode"),
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("FONTSIZE", (0, 0), (-1, -1), 11),
+ ("GRID", (0, 0), (1, 0), 0.5, colors.black),
+ ("GRID", (3, 0), (3, 0), 0.5, colors.black),
+ ("LEFTPADDING", (0, 0), (-1, -1), 8),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 8),
+ ("TOPPADDING", (0, 0), (-1, -1), 8),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 8),
+ ]
+ )
+ )
+ elements.append(badges_table1)
+ elements.append(Spacer(1, 0.1 * inch))
+
+ # Tipo and Modo badges row
+ tipo_display = f"☰ {tipo.capitalize()}" if tipo else "N/A"
+ modo_display = f"☰ {modo.capitalize()}" if modo else "N/A"
+ modo_color = (
+ COLOR_ENS_AUTO if modo.lower() == "automatico" else COLOR_ENS_MANUAL
+ )
+
+ badges_row2 = [[tipo_display, "", modo_display]]
+ badges_table2 = Table(
+ badges_row2, colWidths=[2.2 * inch, 0.5 * inch, 2.2 * inch]
+ )
+ badges_table2.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, 0), COLOR_ENS_TIPO),
+ ("TEXTCOLOR", (0, 0), (0, 0), COLOR_WHITE),
+ ("FONTNAME", (0, 0), (0, 0), "PlusJakartaSans"),
+ ("BACKGROUND", (2, 0), (2, 0), modo_color),
+ ("TEXTCOLOR", (2, 0), (2, 0), COLOR_WHITE),
+ ("FONTNAME", (2, 0), (2, 0), "PlusJakartaSans"),
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("FONTSIZE", (0, 0), (-1, -1), 11),
+ ("GRID", (0, 0), (0, 0), 0.5, colors.black),
+ ("GRID", (2, 0), (2, 0), 0.5, colors.black),
+ ("LEFTPADDING", (0, 0), (-1, -1), 10),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 10),
+ ("TOPPADDING", (0, 0), (-1, -1), 8),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 8),
+ ]
+ )
+ )
+ elements.append(badges_table2)
+ elements.append(Spacer(1, 0.1 * inch))
+
+ # Dimensiones badges
+ if dimensiones:
+ if isinstance(dimensiones, str):
+ dim_list = [d.strip().lower() for d in dimensiones.split(",")]
+ else:
+ dim_list = [
+ d.lower() if isinstance(d, str) else str(d) for d in dimensiones
+ ]
+
+ dim_badges = []
+ for dim in dim_list:
+ if dim in DIMENSION_MAPPING:
+ abbrev, dim_color = DIMENSION_MAPPING[dim]
+ dim_badges.append((abbrev, dim_color))
+
+ if dim_badges:
+ dim_label = [["Dimensiones:"] + [b[0] for b in dim_badges]]
+ dim_widths = [1.2 * inch] + [0.4 * inch] * len(dim_badges)
+ dim_table = Table(dim_label, colWidths=dim_widths)
+
+ dim_styles = [
+ ("FONTNAME", (0, 0), (0, 0), "PlusJakartaSans"),
+ ("FONTSIZE", (0, 0), (-1, -1), 11),
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("LEFTPADDING", (0, 0), (-1, -1), 4),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 4),
+ ("TOPPADDING", (0, 0), (-1, -1), 6),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 6),
+ ]
+ for idx, (_, dim_color) in enumerate(dim_badges):
+ col_idx = idx + 1
+ dim_styles.extend(
+ [
+ (
+ "BACKGROUND",
+ (col_idx, 0),
+ (col_idx, 0),
+ dim_color,
+ ),
+ ("TEXTCOLOR", (col_idx, 0), (col_idx, 0), COLOR_WHITE),
+ ("FONTNAME", (col_idx, 0), (col_idx, 0), "FiraCode"),
+ ("GRID", (col_idx, 0), (col_idx, 0), 0.5, colors.black),
+ ]
+ )
+
+ dim_table.setStyle(TableStyle(dim_styles))
+ elements.append(dim_table)
+ elements.append(Spacer(1, 0.15 * inch))
+
+ # Info table - use Paragraph for text wrapping
+ info_data = [
+ [
+ "Descripción:",
+ Paragraph(descripcion, self.styles["normal_center"]),
+ ],
+ ["Marco:", marco],
+ [
+ "Categoría:",
+ Paragraph(categoria, self.styles["normal_center"]),
+ ],
+ ["ID Grupo Control:", id_grupo],
+ ]
+ info_table = Table(info_data, colWidths=[2 * inch, 4.5 * inch])
+ info_table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, -1), COLOR_BLUE),
+ ("TEXTCOLOR", (0, 0), (0, -1), COLOR_WHITE),
+ ("FONTNAME", (0, 0), (0, -1), "FiraCode"),
+ ("FONTSIZE", (0, 0), (0, -1), 10),
+ ("BACKGROUND", (1, 0), (1, -1), COLOR_BG_BLUE),
+ ("TEXTCOLOR", (1, 0), (1, -1), COLOR_GRAY),
+ ("FONTNAME", (1, 0), (1, -1), "PlusJakartaSans"),
+ ("FONTSIZE", (1, 0), (1, -1), 10),
+ ("ALIGN", (0, 0), (0, -1), "LEFT"),
+ ("ALIGN", (1, 0), (1, -1), "LEFT"),
+ ("VALIGN", (0, 0), (-1, -1), "TOP"),
+ ("GRID", (0, 0), (-1, -1), 1, COLOR_BORDER_GRAY),
+ ("LEFTPADDING", (0, 0), (-1, -1), 8),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 8),
+ ("TOPPADDING", (0, 0), (-1, -1), 6),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 6),
+ ]
+ )
+ )
+ elements.append(info_table)
+ elements.append(Spacer(1, 0.3 * inch))
+
+ return elements
diff --git a/api/src/backend/tasks/jobs/reports/nis2.py b/api/src/backend/tasks/jobs/reports/nis2.py
new file mode 100644
index 0000000000..4ac5fa3d15
--- /dev/null
+++ b/api/src/backend/tasks/jobs/reports/nis2.py
@@ -0,0 +1,471 @@
+import os
+from collections import defaultdict
+
+from reportlab.lib.units import inch
+from reportlab.platypus import Image, PageBreak, Paragraph, Spacer, Table, TableStyle
+
+from api.models import StatusChoices
+
+from .base import (
+ BaseComplianceReportGenerator,
+ ComplianceData,
+ get_requirement_metadata,
+)
+from .charts import create_horizontal_bar_chart, get_chart_color_for_percentage
+from .config import (
+ COLOR_BORDER_GRAY,
+ COLOR_DARK_GRAY,
+ COLOR_GRAY,
+ COLOR_GRID_GRAY,
+ COLOR_HIGH_RISK,
+ COLOR_NIS2_BG_BLUE,
+ COLOR_NIS2_PRIMARY,
+ COLOR_SAFE,
+ COLOR_WHITE,
+ NIS2_SECTION_TITLES,
+ NIS2_SECTIONS,
+)
+
+
+def _extract_section_number(section_string: str) -> str:
+ """Extract the section number from a full NIS2 section title.
+
+ NIS2 section strings are formatted like:
+ "1 POLICY ON THE SECURITY OF NETWORK AND INFORMATION SYSTEMS..."
+
+ This function extracts just the leading number.
+
+ Args:
+ section_string: Full section title string.
+
+ Returns:
+ Section number as string (e.g., "1", "2", "11").
+ """
+ if not section_string:
+ return "Other"
+ parts = section_string.split()
+ if parts and parts[0].isdigit():
+ return parts[0]
+ return "Other"
+
+
+class NIS2ReportGenerator(BaseComplianceReportGenerator):
+ """
+ PDF report generator for NIS2 Directive (EU) 2022/2555.
+
+ This generator creates comprehensive PDF reports containing:
+ - Cover page with both Prowler and NIS2 logos
+ - Executive summary with overall compliance score
+ - Section analysis with horizontal bar chart
+ - SubSection breakdown table
+ - Critical failed requirements
+ - Requirements index organized by section and subsection
+ - Detailed findings for failed requirements
+ """
+
+ def create_cover_page(self, data: ComplianceData) -> list:
+ """
+ Create the NIS2 report cover page with both logos.
+
+ Args:
+ data: Aggregated compliance data.
+
+ Returns:
+ List of ReportLab elements.
+ """
+ elements = []
+
+ # Create logos side by side
+ prowler_logo_path = os.path.join(
+ os.path.dirname(__file__), "../../assets/img/prowler_logo.png"
+ )
+ nis2_logo_path = os.path.join(
+ os.path.dirname(__file__), "../../assets/img/nis2_logo.png"
+ )
+
+ prowler_logo = Image(prowler_logo_path, width=3.5 * inch, height=0.7 * inch)
+ nis2_logo = Image(nis2_logo_path, width=2.3 * inch, height=1.5 * inch)
+
+ logos_table = Table(
+ [[prowler_logo, nis2_logo]], colWidths=[4 * inch, 2.5 * inch]
+ )
+ logos_table.setStyle(
+ TableStyle(
+ [
+ ("ALIGN", (0, 0), (0, 0), "LEFT"),
+ ("ALIGN", (1, 0), (1, 0), "RIGHT"),
+ ("VALIGN", (0, 0), (0, 0), "MIDDLE"),
+ ("VALIGN", (1, 0), (1, 0), "MIDDLE"),
+ ]
+ )
+ )
+ elements.append(logos_table)
+ elements.append(Spacer(1, 0.3 * inch))
+
+ # Title
+ title = Paragraph(
+            "NIS2 Compliance Report<br/>Directive (EU) 2022/2555",
+ self.styles["title"],
+ )
+ elements.append(title)
+ elements.append(Spacer(1, 0.3 * inch))
+
+ # Compliance metadata table - use base class helper for consistency
+ info_rows = self._build_info_rows(data, language="en")
+ # Convert tuples to lists and wrap long text in Paragraphs
+ metadata_data = []
+ for label, value in info_rows:
+ if label in ("Name:", "Description:") and value:
+ metadata_data.append(
+ [label, Paragraph(value, self.styles["normal_center"])]
+ )
+ else:
+ metadata_data.append([label, value])
+
+ metadata_table = Table(metadata_data, colWidths=[2 * inch, 4 * inch])
+ metadata_table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, -1), COLOR_NIS2_PRIMARY),
+ ("TEXTCOLOR", (0, 0), (0, -1), COLOR_WHITE),
+ ("FONTNAME", (0, 0), (0, -1), "FiraCode"),
+ ("BACKGROUND", (1, 0), (1, -1), COLOR_NIS2_BG_BLUE),
+ ("TEXTCOLOR", (1, 0), (1, -1), COLOR_GRAY),
+ ("FONTNAME", (1, 0), (1, -1), "PlusJakartaSans"),
+ ("ALIGN", (0, 0), (-1, -1), "LEFT"),
+ ("VALIGN", (0, 0), (-1, -1), "TOP"),
+ ("FONTSIZE", (0, 0), (-1, -1), 11),
+ ("GRID", (0, 0), (-1, -1), 1, COLOR_BORDER_GRAY),
+ ("LEFTPADDING", (0, 0), (-1, -1), 10),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 10),
+ ("TOPPADDING", (0, 0), (-1, -1), 8),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 8),
+ ]
+ )
+ )
+ elements.append(metadata_table)
+
+ return elements
+
+ def create_executive_summary(self, data: ComplianceData) -> list:
+ """
+ Create the executive summary with compliance metrics.
+
+ Args:
+ data: Aggregated compliance data.
+
+ Returns:
+ List of ReportLab elements.
+ """
+ elements = []
+
+ elements.append(Paragraph("Executive Summary", self.styles["h1"]))
+ elements.append(Spacer(1, 0.1 * inch))
+
+ # Calculate statistics
+ total = len(data.requirements)
+ passed = sum(1 for r in data.requirements if r.status == StatusChoices.PASS)
+ failed = sum(1 for r in data.requirements if r.status == StatusChoices.FAIL)
+ manual = sum(1 for r in data.requirements if r.status == StatusChoices.MANUAL)
+
+ # Calculate compliance excluding manual
+ evaluated = passed + failed
+ overall_compliance = (passed / evaluated * 100) if evaluated > 0 else 100
+
+ # Summary statistics table
+ summary_data = [
+ ["Metric", "Value"],
+ ["Total Requirements", str(total)],
+ ["Passed ✓", str(passed)],
+ ["Failed ✗", str(failed)],
+ ["Manual ⊙", str(manual)],
+ ["Overall Compliance", f"{overall_compliance:.1f}%"],
+ ]
+
+ summary_table = Table(summary_data, colWidths=[3 * inch, 2 * inch])
+ summary_table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (-1, 0), COLOR_NIS2_PRIMARY),
+ ("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
+ ("BACKGROUND", (0, 2), (0, 2), COLOR_SAFE),
+ ("TEXTCOLOR", (0, 2), (0, 2), COLOR_WHITE),
+ ("BACKGROUND", (0, 3), (0, 3), COLOR_HIGH_RISK),
+ ("TEXTCOLOR", (0, 3), (0, 3), COLOR_WHITE),
+ ("BACKGROUND", (0, 4), (0, 4), COLOR_DARK_GRAY),
+ ("TEXTCOLOR", (0, 4), (0, 4), COLOR_WHITE),
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("FONTNAME", (0, 0), (-1, 0), "PlusJakartaSans"),
+ ("FONTSIZE", (0, 0), (-1, 0), 12),
+ ("FONTSIZE", (0, 1), (-1, -1), 10),
+ ("BOTTOMPADDING", (0, 0), (-1, 0), 10),
+ ("GRID", (0, 0), (-1, -1), 0.5, COLOR_BORDER_GRAY),
+ (
+ "ROWBACKGROUNDS",
+ (1, 1),
+ (1, -1),
+ [COLOR_WHITE, COLOR_NIS2_BG_BLUE],
+ ),
+ ]
+ )
+ )
+ elements.append(summary_table)
+
+ return elements
+
+ def create_charts_section(self, data: ComplianceData) -> list:
+ """
+ Create the charts section with section analysis.
+
+ Args:
+ data: Aggregated compliance data.
+
+ Returns:
+ List of ReportLab elements.
+ """
+ elements = []
+
+ # Section chart
+ elements.append(Paragraph("Compliance by Section", self.styles["h1"]))
+ elements.append(Spacer(1, 0.1 * inch))
+ elements.append(
+ Paragraph(
+ "The following chart shows compliance percentage for each main section "
+ "of the NIS2 directive:",
+ self.styles["normal_center"],
+ )
+ )
+ elements.append(Spacer(1, 0.1 * inch))
+
+ chart_buffer = self._create_section_chart(data)
+ chart_buffer.seek(0)
+ chart_image = Image(chart_buffer, width=6.5 * inch, height=5 * inch)
+ elements.append(chart_image)
+ elements.append(PageBreak())
+
+ # SubSection breakdown table
+ elements.append(Paragraph("SubSection Breakdown", self.styles["h1"]))
+ elements.append(Spacer(1, 0.1 * inch))
+
+ subsection_table = self._create_subsection_table(data)
+ elements.append(subsection_table)
+
+ return elements
+
+ def create_requirements_index(self, data: ComplianceData) -> list:
+ """
+ Create the requirements index organized by section and subsection.
+
+ Args:
+ data: Aggregated compliance data.
+
+ Returns:
+ List of ReportLab elements.
+ """
+ elements = []
+
+ elements.append(Paragraph("Requirements Index", self.styles["h1"]))
+ elements.append(Spacer(1, 0.1 * inch))
+
+ # Organize by section number and subsection
+ sections = {}
+ for req in data.requirements:
+ m = get_requirement_metadata(req.id, data.attributes_by_requirement_id)
+ if m:
+ full_section = getattr(m, "Section", "Other")
+ # Extract section number from full title (e.g., "1 POLICY..." -> "1")
+ section_num = _extract_section_number(full_section)
+ subsection = getattr(m, "SubSection", "")
+ description = getattr(m, "Description", req.description)
+
+ if section_num not in sections:
+ sections[section_num] = {}
+ if subsection not in sections[section_num]:
+ sections[section_num][subsection] = []
+
+ sections[section_num][subsection].append(
+ {
+ "id": req.id,
+ "description": description,
+ "status": req.status,
+ }
+ )
+
+ # Sort by NIS2 section order
+ for section in NIS2_SECTIONS:
+ if section not in sections:
+ continue
+
+ section_title = NIS2_SECTION_TITLES.get(section, f"Section {section}")
+ elements.append(Paragraph(section_title, self.styles["h2"]))
+
+ for subsection_name, reqs in sections[section].items():
+ if subsection_name:
+ # Truncate long subsection names for display
+ display_subsection = (
+ subsection_name[:80] + "..."
+ if len(subsection_name) > 80
+ else subsection_name
+ )
+ elements.append(Paragraph(display_subsection, self.styles["h3"]))
+
+ for req in reqs:
+ status_indicator = (
+ "✓" if req["status"] == StatusChoices.PASS else "✗"
+ )
+ if req["status"] == StatusChoices.MANUAL:
+ status_indicator = "⊙"
+
+ desc = (
+ req["description"][:60] + "..."
+ if len(req["description"]) > 60
+ else req["description"]
+ )
+ elements.append(
+ Paragraph(
+ f"{status_indicator} {req['id']}: {desc}",
+ self.styles["normal"],
+ )
+ )
+
+ elements.append(Spacer(1, 0.1 * inch))
+
+ return elements
+
+ def _create_section_chart(self, data: ComplianceData):
+ """
+ Create the section compliance chart.
+
+ Args:
+ data: Aggregated compliance data.
+
+ Returns:
+ BytesIO buffer containing the chart image.
+ """
+ section_scores = defaultdict(lambda: {"passed": 0, "total": 0})
+
+ for req in data.requirements:
+ if req.status == StatusChoices.MANUAL:
+ continue
+
+ m = get_requirement_metadata(req.id, data.attributes_by_requirement_id)
+ if m:
+ full_section = getattr(m, "Section", "Other")
+ # Extract section number from full title (e.g., "1 POLICY..." -> "1")
+ section_num = _extract_section_number(full_section)
+ section_scores[section_num]["total"] += 1
+ if req.status == StatusChoices.PASS:
+ section_scores[section_num]["passed"] += 1
+
+ # Build labels and values in NIS2 section order
+ labels = []
+ values = []
+ for section in NIS2_SECTIONS:
+ if section in section_scores and section_scores[section]["total"] > 0:
+ scores = section_scores[section]
+ pct = (scores["passed"] / scores["total"]) * 100
+ section_title = NIS2_SECTION_TITLES.get(section, f"Section {section}")
+ labels.append(section_title)
+ values.append(pct)
+
+ return create_horizontal_bar_chart(
+ labels=labels,
+ values=values,
+ xlabel="Compliance (%)",
+ color_func=get_chart_color_for_percentage,
+ )
+
+ def _create_subsection_table(self, data: ComplianceData) -> Table:
+ """
+ Create the subsection breakdown table.
+
+ Args:
+ data: Aggregated compliance data.
+
+ Returns:
+ ReportLab Table element.
+ """
+ subsection_scores = defaultdict(lambda: {"passed": 0, "failed": 0, "manual": 0})
+
+ for req in data.requirements:
+ m = get_requirement_metadata(req.id, data.attributes_by_requirement_id)
+ if m:
+ full_section = getattr(m, "Section", "")
+ subsection = getattr(m, "SubSection", "")
+ # Use section number + subsection for grouping
+ section_num = _extract_section_number(full_section)
+ # Create a shorter key using section number
+ if subsection:
+ # Extract subsection number if present (e.g., "1.1 Policy..." -> "1.1")
+ subsection_parts = subsection.split()
+ if subsection_parts:
+ key = subsection_parts[0] # Just the number like "1.1"
+ else:
+ key = f"{section_num}"
+ else:
+ key = section_num
+
+ if req.status == StatusChoices.PASS:
+ subsection_scores[key]["passed"] += 1
+ elif req.status == StatusChoices.FAIL:
+ subsection_scores[key]["failed"] += 1
+ else:
+ subsection_scores[key]["manual"] += 1
+
+ table_data = [["Section", "Passed", "Failed", "Manual", "Compliance"]]
+ for key, scores in sorted(
+ subsection_scores.items(), key=lambda x: self._sort_section_key(x[0])
+ ):
+ total = scores["passed"] + scores["failed"]
+ pct = (scores["passed"] / total * 100) if total > 0 else 100
+ table_data.append(
+ [
+ key,
+ str(scores["passed"]),
+ str(scores["failed"]),
+ str(scores["manual"]),
+ f"{pct:.1f}%",
+ ]
+ )
+
+ table = Table(
+ table_data,
+ colWidths=[1.2 * inch, 0.9 * inch, 0.9 * inch, 0.9 * inch, 1.2 * inch],
+ )
+ table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (-1, 0), COLOR_NIS2_PRIMARY),
+ ("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
+ ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
+ ("FONTSIZE", (0, 0), (-1, 0), 10),
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("FONTSIZE", (0, 1), (-1, -1), 9),
+ ("GRID", (0, 0), (-1, -1), 0.5, COLOR_GRID_GRAY),
+ ("LEFTPADDING", (0, 0), (-1, -1), 6),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 6),
+ ("TOPPADDING", (0, 0), (-1, -1), 4),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 4),
+ (
+ "ROWBACKGROUNDS",
+ (0, 1),
+ (-1, -1),
+ [COLOR_WHITE, COLOR_NIS2_BG_BLUE],
+ ),
+ ]
+ )
+ )
+
+ return table
+
+ def _sort_section_key(self, key: str) -> tuple:
+ """Sort section keys numerically (e.g., 1, 1.1, 1.2, 2, 11)."""
+ parts = key.split(".")
+ result = []
+ for part in parts:
+ try:
+ result.append(int(part))
+ except ValueError:
+ result.append(float("inf"))
+ return tuple(result)
diff --git a/api/src/backend/tasks/jobs/reports/threatscore.py b/api/src/backend/tasks/jobs/reports/threatscore.py
new file mode 100644
index 0000000000..e23085b1c3
--- /dev/null
+++ b/api/src/backend/tasks/jobs/reports/threatscore.py
@@ -0,0 +1,509 @@
+import gc
+
+from reportlab.lib import colors
+from reportlab.lib.styles import ParagraphStyle
+from reportlab.lib.units import inch
+from reportlab.platypus import Image, PageBreak, Paragraph, Spacer, Table, TableStyle
+
+from api.models import StatusChoices
+
+from .base import (
+ BaseComplianceReportGenerator,
+ ComplianceData,
+ get_requirement_metadata,
+)
+from .charts import create_vertical_bar_chart, get_chart_color_for_percentage
+from .components import get_color_for_compliance, get_color_for_weight
+from .config import COLOR_HIGH_RISK, COLOR_WHITE
+
+
+class ThreatScoreReportGenerator(BaseComplianceReportGenerator):
+ """
+ PDF report generator for Prowler ThreatScore framework.
+
+ This generator creates comprehensive PDF reports containing:
+ - Compliance overview and metadata
+ - Section-by-section compliance scores with charts
+ - Overall ThreatScore calculation
+ - Critical failed requirements
+ - Detailed findings for each requirement
+ """
+
+ def create_executive_summary(self, data: ComplianceData) -> list:
+ """
+ Create the executive summary section with ThreatScore calculation.
+
+ Args:
+ data: Aggregated compliance data.
+
+ Returns:
+ List of ReportLab elements.
+ """
+ elements = []
+
+ elements.append(Paragraph("Compliance Score by Sections", self.styles["h1"]))
+ elements.append(Spacer(1, 0.2 * inch))
+
+ # Create section score chart
+ chart_buffer = self._create_section_score_chart(data)
+ chart_image = Image(chart_buffer, width=7 * inch, height=5.5 * inch)
+ elements.append(chart_image)
+
+ # Calculate overall ThreatScore
+ overall_compliance = self._calculate_threatscore(data)
+
+ elements.append(Spacer(1, 0.3 * inch))
+
+ # Summary table
+ summary_data = [["ThreatScore:", f"{overall_compliance:.2f}%"]]
+ compliance_color = get_color_for_compliance(overall_compliance)
+
+ summary_table = Table(summary_data, colWidths=[2.5 * inch, 2 * inch])
+ summary_table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (0, 0), colors.Color(0.1, 0.3, 0.5)),
+ ("TEXTCOLOR", (0, 0), (0, 0), colors.white),
+ ("FONTNAME", (0, 0), (0, 0), "FiraCode"),
+ ("FONTSIZE", (0, 0), (0, 0), 12),
+ ("BACKGROUND", (1, 0), (1, 0), compliance_color),
+ ("TEXTCOLOR", (1, 0), (1, 0), colors.white),
+ ("FONTNAME", (1, 0), (1, 0), "FiraCode"),
+ ("FONTSIZE", (1, 0), (1, 0), 16),
+ ("ALIGN", (0, 0), (-1, -1), "CENTER"),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("GRID", (0, 0), (-1, -1), 1.5, colors.Color(0.5, 0.6, 0.7)),
+ ("LEFTPADDING", (0, 0), (-1, -1), 12),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 12),
+ ("TOPPADDING", (0, 0), (-1, -1), 10),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 10),
+ ]
+ )
+ )
+
+ elements.append(summary_table)
+
+ return elements
+
+ def _build_body_sections(self, data: ComplianceData) -> list:
+ """Override section order: Requirements Index before Critical Requirements."""
+ elements = []
+
+ # Page break to separate from executive summary
+ elements.append(PageBreak())
+
+ # Requirements index first
+ elements.extend(self.create_requirements_index(data))
+
+ # Critical requirements section (already starts with PageBreak internally)
+ elements.extend(self.create_charts_section(data))
+ elements.append(PageBreak())
+ gc.collect()
+
+ return elements
+
+ def create_charts_section(self, data: ComplianceData) -> list:
+ """
+ Create the critical failed requirements section.
+
+ Args:
+ data: Aggregated compliance data.
+
+ Returns:
+ List of ReportLab elements.
+ """
+ elements = []
+ min_risk_level = getattr(self, "_min_risk_level", 4)
+
+ # Start on a new page
+ elements.append(PageBreak())
+ elements.append(
+ Paragraph("Top Requirements by Level of Risk", self.styles["h1"])
+ )
+ elements.append(Spacer(1, 0.1 * inch))
+ elements.append(
+ Paragraph(
+ f"Critical Failed Requirements (Risk Level ≥ {min_risk_level})",
+ self.styles["h2"],
+ )
+ )
+ elements.append(Spacer(1, 0.2 * inch))
+
+ critical_failed = self._get_critical_failed_requirements(data, min_risk_level)
+
+ if not critical_failed:
+ elements.append(
+ Paragraph(
+ "✅ No critical failed requirements found. Great job!",
+ self.styles["normal"],
+ )
+ )
+ else:
+ elements.append(
+ Paragraph(
+ f"Found {len(critical_failed)} critical failed requirements "
+ "that require immediate attention:",
+ self.styles["normal"],
+ )
+ )
+ elements.append(Spacer(1, 0.5 * inch))
+
+ table = self._create_critical_requirements_table(critical_failed)
+ elements.append(table)
+
+ # Immediate action required banner
+ elements.append(Spacer(1, 0.3 * inch))
+ elements.append(self._create_action_required_banner())
+
+ return elements
+
+ def create_requirements_index(self, data: ComplianceData) -> list:
+ """
+ Create the requirements index organized by section and subsection.
+
+ Args:
+ data: Aggregated compliance data.
+
+ Returns:
+ List of ReportLab elements.
+ """
+ elements = []
+
+ elements.append(Paragraph("Requirements Index", self.styles["h1"]))
+
+ # Organize requirements by section and subsection
+ sections = {}
+ for req_id in data.attributes_by_requirement_id:
+ m = get_requirement_metadata(req_id, data.attributes_by_requirement_id)
+ if m:
+ section = getattr(m, "Section", "N/A")
+ subsection = getattr(m, "SubSection", "N/A")
+ title = getattr(m, "Title", "N/A")
+
+ if section not in sections:
+ sections[section] = {}
+ if subsection not in sections[section]:
+ sections[section][subsection] = []
+
+ sections[section][subsection].append({"id": req_id, "title": title})
+
+ section_num = 1
+ for section_name, subsections in sections.items():
+ elements.append(
+ Paragraph(f"{section_num}. {section_name}", self.styles["h2"])
+ )
+
+ for subsection_name, requirements in subsections.items():
+ elements.append(Paragraph(f"{subsection_name}", self.styles["h3"]))
+
+ for req in requirements:
+ elements.append(
+ Paragraph(
+ f"{req['id']} - {req['title']}", self.styles["normal"]
+ )
+ )
+
+ section_num += 1
+ elements.append(Spacer(1, 0.1 * inch))
+
+ return elements
+
+ def _create_section_score_chart(self, data: ComplianceData):
+ """
+ Create the section compliance score chart using weighted ThreatScore formula.
+
+ The section score uses the same weighted formula as the overall ThreatScore:
+ Score = Σ(rate_i * total_findings_i * weight_i * rfac_i) / Σ(total_findings_i * weight_i * rfac_i)
+ Where rfac_i = 1 + 0.25 * risk_level
+
+ Sections without findings are shown with 100% score.
+
+ Args:
+ data: Aggregated compliance data.
+
+ Returns:
+ BytesIO buffer containing the chart image.
+ """
+ # First, collect ALL sections from requirements (including those without findings)
+ all_sections = set()
+ sections_data = {}
+
+ for req in data.requirements:
+ m = get_requirement_metadata(req.id, data.attributes_by_requirement_id)
+ if m:
+ section = getattr(m, "Section", "Other")
+ all_sections.add(section)
+
+ # Only calculate scores for requirements with findings
+ if req.total_findings == 0:
+ continue
+
+ risk_level_raw = getattr(m, "LevelOfRisk", 0)
+ weight_raw = getattr(m, "Weight", 0)
+ # Ensure numeric types for calculations (compliance data may have str)
+ try:
+ risk_level = int(risk_level_raw) if risk_level_raw else 0
+ except (ValueError, TypeError):
+ risk_level = 0
+ try:
+ weight = int(weight_raw) if weight_raw else 0
+ except (ValueError, TypeError):
+ weight = 0
+
+ # ThreatScore formula components
+ rate_i = req.passed_findings / req.total_findings
+ rfac_i = 1 + 0.25 * risk_level
+
+ if section not in sections_data:
+ sections_data[section] = {
+ "numerator": 0,
+ "denominator": 0,
+ }
+
+ sections_data[section]["numerator"] += (
+ rate_i * req.total_findings * weight * rfac_i
+ )
+ sections_data[section]["denominator"] += (
+ req.total_findings * weight * rfac_i
+ )
+
+ # Calculate percentages for all sections
+ labels = []
+ values = []
+ for section in sorted(all_sections):
+ if section in sections_data and sections_data[section]["denominator"] > 0:
+ pct = (
+ sections_data[section]["numerator"]
+ / sections_data[section]["denominator"]
+ ) * 100
+ else:
+ # Sections without findings get 100%
+ pct = 100.0
+ labels.append(section)
+ values.append(pct)
+
+ return create_vertical_bar_chart(
+ labels=labels,
+ values=values,
+ ylabel="Compliance Score (%)",
+ xlabel="",
+ color_func=get_chart_color_for_percentage,
+ rotation=0,
+ )
+
+ def _calculate_threatscore(self, data: ComplianceData) -> float:
+ """
+ Calculate the overall ThreatScore using the weighted formula.
+
+ Args:
+ data: Aggregated compliance data.
+
+ Returns:
+ Overall ThreatScore percentage.
+ """
+ numerator = 0
+ denominator = 0
+ has_findings = False
+
+ for req in data.requirements:
+ if req.total_findings == 0:
+ continue
+
+ has_findings = True
+ m = get_requirement_metadata(req.id, data.attributes_by_requirement_id)
+
+ if m:
+ risk_level_raw = getattr(m, "LevelOfRisk", 0)
+ weight_raw = getattr(m, "Weight", 0)
+ # Ensure numeric types for calculations (compliance data may have str)
+ try:
+ risk_level = int(risk_level_raw) if risk_level_raw else 0
+ except (ValueError, TypeError):
+ risk_level = 0
+ try:
+ weight = int(weight_raw) if weight_raw else 0
+ except (ValueError, TypeError):
+ weight = 0
+
+ rate_i = req.passed_findings / req.total_findings
+ rfac_i = 1 + 0.25 * risk_level
+
+ numerator += rate_i * req.total_findings * weight * rfac_i
+ denominator += req.total_findings * weight * rfac_i
+
+ if not has_findings:
+ return 100.0
+ if denominator > 0:
+ return (numerator / denominator) * 100
+ return 0.0
+
+ def _get_critical_failed_requirements(
+ self, data: ComplianceData, min_risk_level: int
+ ) -> list[dict]:
+ """
+ Get critical failed requirements sorted by risk level and weight.
+
+ Args:
+ data: Aggregated compliance data.
+ min_risk_level: Minimum risk level threshold.
+
+ Returns:
+ List of critical failed requirement dictionaries.
+ """
+ critical = []
+
+ for req in data.requirements:
+ if req.status != StatusChoices.FAIL:
+ continue
+
+ m = get_requirement_metadata(req.id, data.attributes_by_requirement_id)
+
+ if m:
+ risk_level_raw = getattr(m, "LevelOfRisk", 0)
+ weight_raw = getattr(m, "Weight", 0)
+ # Ensure numeric types for calculations (compliance data may have str)
+ try:
+ risk_level = int(risk_level_raw) if risk_level_raw else 0
+ except (ValueError, TypeError):
+ risk_level = 0
+ try:
+ weight = int(weight_raw) if weight_raw else 0
+ except (ValueError, TypeError):
+ weight = 0
+
+ if risk_level >= min_risk_level:
+ critical.append(
+ {
+ "id": req.id,
+ "risk_level": risk_level,
+ "weight": weight,
+ "title": getattr(m, "Title", "N/A"),
+ "section": getattr(m, "Section", "N/A"),
+ }
+ )
+
+ critical.sort(key=lambda x: (x["risk_level"], x["weight"]), reverse=True)
+ return critical
+
+ def _create_critical_requirements_table(self, critical_requirements: list) -> Table:
+ """
+ Create the critical requirements table.
+
+ Args:
+ critical_requirements: List of critical requirement dictionaries.
+
+ Returns:
+ ReportLab Table element.
+ """
+ table_data = [["Risk", "Weight", "Requirement ID", "Title", "Section"]]
+
+ for req in critical_requirements:
+ title = req["title"]
+ if len(title) > 50:
+ title = title[:47] + "..."
+
+ table_data.append(
+ [
+ str(req["risk_level"]),
+ str(req["weight"]),
+ req["id"],
+ title,
+ req["section"],
+ ]
+ )
+
+ table = Table(
+ table_data,
+ colWidths=[0.7 * inch, 0.9 * inch, 1.3 * inch, 3.1 * inch, 1.5 * inch],
+ )
+
+ table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (0, 0), (-1, 0), COLOR_HIGH_RISK),
+ ("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
+ ("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
+ ("FONTSIZE", (0, 0), (-1, 0), 10),
+ ("BACKGROUND", (0, 1), (0, -1), COLOR_HIGH_RISK),
+ ("TEXTCOLOR", (0, 1), (0, -1), COLOR_WHITE),
+ ("FONTNAME", (0, 1), (0, -1), "FiraCode"),
+ ("ALIGN", (0, 1), (0, -1), "CENTER"),
+ ("FONTSIZE", (0, 1), (0, -1), 12),
+ ("ALIGN", (1, 1), (1, -1), "CENTER"),
+ ("FONTNAME", (1, 1), (1, -1), "FiraCode"),
+ ("FONTNAME", (2, 1), (2, -1), "FiraCode"),
+ ("FONTSIZE", (2, 1), (2, -1), 9),
+ ("FONTNAME", (3, 1), (-1, -1), "PlusJakartaSans"),
+ ("FONTSIZE", (3, 1), (-1, -1), 8),
+ ("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
+ ("GRID", (0, 0), (-1, -1), 1, colors.Color(0.7, 0.7, 0.7)),
+ ("LEFTPADDING", (0, 0), (-1, -1), 6),
+ ("RIGHTPADDING", (0, 0), (-1, -1), 6),
+ ("TOPPADDING", (0, 0), (-1, -1), 8),
+ ("BOTTOMPADDING", (0, 0), (-1, -1), 8),
+ ("BACKGROUND", (1, 1), (-1, -1), colors.Color(0.98, 0.98, 0.98)),
+ ]
+ )
+ )
+
+ # Color weight column based on value
+ for idx, req in enumerate(critical_requirements):
+ row_idx = idx + 1
+ weight_color = get_color_for_weight(req["weight"])
+ table.setStyle(
+ TableStyle(
+ [
+ ("BACKGROUND", (1, row_idx), (1, row_idx), weight_color),
+ ("TEXTCOLOR", (1, row_idx), (1, row_idx), COLOR_WHITE),
+ ]
+ )
+ )
+
+ return table
+
+ def _create_action_required_banner(self) -> Table:
+ """
+ Create the 'Immediate Action Required' banner for critical requirements.
+
+ Returns:
+ ReportLab Table element styled as a red-bordered alert banner.
+ """
+ banner_style = ParagraphStyle(
+ "ActionRequired",
+ fontName="PlusJakartaSans",
+ fontSize=11,
+ textColor=COLOR_HIGH_RISK,
+ leading=16,
+ )
+
+ banner_content = Paragraph(
+            "IMMEDIATE ACTION REQUIRED:<br/>"
+ "These requirements have the highest risk levels and have failed "
+ "compliance checks. Please prioritize addressing these issues to "
+ "improve your security posture.",
+ banner_style,
+ )
+
+ banner_table = Table(
+ [[banner_content]],
+ colWidths=[6.5 * inch],
+ )
+ banner_table.setStyle(
+ TableStyle(
+ [
+ (
+ "BACKGROUND",
+ (0, 0),
+ (0, 0),
+ colors.Color(0.98, 0.92, 0.92),
+ ),
+ ("BOX", (0, 0), (0, 0), 2, COLOR_HIGH_RISK),
+ ("LEFTPADDING", (0, 0), (0, 0), 20),
+ ("RIGHTPADDING", (0, 0), (0, 0), 20),
+ ("TOPPADDING", (0, 0), (0, 0), 15),
+ ("BOTTOMPADDING", (0, 0), (0, 0), 15),
+ ]
+ )
+ )
+
+ return banner_table
diff --git a/api/src/backend/tasks/jobs/threatscore.py b/api/src/backend/tasks/jobs/threatscore.py
index 414f2d20f2..a9a7516e55 100644
--- a/api/src/backend/tasks/jobs/threatscore.py
+++ b/api/src/backend/tasks/jobs/threatscore.py
@@ -131,9 +131,11 @@ def compute_threatscore_metrics(
continue
m = metadata[0]
- risk_level = getattr(m, "LevelOfRisk", 0)
- weight = getattr(m, "Weight", 0)
+ risk_level_raw = getattr(m, "LevelOfRisk", 0)
+ weight_raw = getattr(m, "Weight", 0)
section = getattr(m, "Section", "Unknown")
+ risk_level = int(risk_level_raw) if risk_level_raw else 0
+ weight = int(weight_raw) if weight_raw else 0
# Calculate ThreatScore components using formula from UI
rate_i = req_passed_findings / req_total_findings
diff --git a/api/src/backend/tasks/jobs/threatscore_utils.py b/api/src/backend/tasks/jobs/threatscore_utils.py
index 78adb7842b..c46c279bf4 100644
--- a/api/src/backend/tasks/jobs/threatscore_utils.py
+++ b/api/src/backend/tasks/jobs/threatscore_utils.py
@@ -1,9 +1,6 @@
-from collections import defaultdict
-
from celery.utils.log import get_task_logger
from config.django.base import DJANGO_FINDINGS_BATCH_SIZE
from django.db.models import Count, Q
-from tasks.utils import batched
from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
@@ -154,6 +151,12 @@ def _load_findings_for_requirement_checks(
Supports optional caching to avoid duplicate queries when generating multiple
reports for the same scan.
+ Memory optimizations:
+ - Uses database iterator with chunk_size for streaming large result sets
+ - Shares references between cache and return dict (no duplication)
+ - Only selects required fields from database
+ - Processes findings in batches to reduce memory pressure
+
Args:
tenant_id (str): The tenant ID for Row-Level Security context.
scan_id (str): The ID of the scan to retrieve findings for.
@@ -171,69 +174,73 @@ def _load_findings_for_requirement_checks(
'aws_s3_bucket_public_access': [FindingOutput(...)]
}
"""
- findings_by_check_id = defaultdict(list)
-
if not check_ids:
- return dict(findings_by_check_id)
+ return {}
# Initialize cache if not provided
if findings_cache is None:
findings_cache = {}
+ # Deduplicate check_ids to avoid redundant processing
+ unique_check_ids = list(set(check_ids))
+
# Separate cached and non-cached check_ids
check_ids_to_load = []
cache_hits = 0
- cache_misses = 0
- for check_id in check_ids:
+ for check_id in unique_check_ids:
if check_id in findings_cache:
- # Reuse from cache
- findings_by_check_id[check_id] = findings_cache[check_id]
cache_hits += 1
else:
- # Need to load from database
check_ids_to_load.append(check_id)
- cache_misses += 1
if cache_hits > 0:
+ total_checks = len(unique_check_ids)
logger.info(
- f"Findings cache: {cache_hits} hits, {cache_misses} misses "
- f"({cache_hits / (cache_hits + cache_misses) * 100:.1f}% hit rate)"
+ f"Findings cache: {cache_hits}/{total_checks} hits "
+ f"({cache_hits / total_checks * 100:.1f}% hit rate)"
)
- # If all check_ids were in cache, return early
- if not check_ids_to_load:
- return dict(findings_by_check_id)
-
- logger.info(f"Loading findings for {len(check_ids_to_load)} checks on-demand")
-
- findings_queryset = (
- Finding.all_objects.filter(
- tenant_id=tenant_id, scan_id=scan_id, check_id__in=check_ids_to_load
+ # Load missing check_ids from database
+ if check_ids_to_load:
+ logger.info(
+ f"Loading findings for {len(check_ids_to_load)} checks from database"
)
- .order_by("uid")
- .iterator()
- )
- with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
- for batch, is_last_batch in batched(
- findings_queryset, DJANGO_FINDINGS_BATCH_SIZE
- ):
- for finding_model in batch:
+ with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
+ # Use iterator with chunk_size for memory-efficient streaming
+ # chunk_size controls how many rows Django fetches from DB at once
+ findings_queryset = (
+ Finding.all_objects.filter(
+ tenant_id=tenant_id,
+ scan_id=scan_id,
+ check_id__in=check_ids_to_load,
+ )
+ .order_by("check_id", "uid")
+ .iterator(chunk_size=DJANGO_FINDINGS_BATCH_SIZE)
+ )
+
+ # Pre-initialize empty lists for all check_ids to load
+ # This avoids repeated dict lookups and 'if not in' checks
+ for check_id in check_ids_to_load:
+ findings_cache[check_id] = []
+
+ findings_count = 0
+ for finding_model in findings_queryset:
finding_output = FindingOutput.transform_api_finding(
finding_model, prowler_provider
)
- findings_by_check_id[finding_output.check_id].append(finding_output)
- # Update cache with newly loaded findings
- if finding_output.check_id not in findings_cache:
- findings_cache[finding_output.check_id] = []
findings_cache[finding_output.check_id].append(finding_output)
+ findings_count += 1
- total_findings_loaded = sum(
- len(findings) for findings in findings_by_check_id.values()
- )
- logger.info(
- f"Loaded {total_findings_loaded} findings for {len(findings_by_check_id)} checks"
- )
+ logger.info(
+ f"Loaded {findings_count} findings for {len(check_ids_to_load)} checks"
+ )
- return dict(findings_by_check_id)
+ # Build result dict using cache references (no data duplication)
+ # This shares the same list objects between cache and result
+ result = {
+ check_id: findings_cache.get(check_id, []) for check_id in unique_check_ids
+ }
+
+ return result
diff --git a/api/src/backend/tasks/tests/test_report.py b/api/src/backend/tasks/tests/test_report.py
deleted file mode 100644
index 16dd19e0ab..0000000000
--- a/api/src/backend/tasks/tests/test_report.py
+++ /dev/null
@@ -1,1807 +0,0 @@
-import io
-import uuid
-from unittest.mock import MagicMock, Mock, patch
-
-import matplotlib
-import pytest
-from reportlab.lib import colors
-from reportlab.platypus import Table, TableStyle
-from tasks.jobs.report import (
- CHART_COLOR_GREEN_1,
- CHART_COLOR_GREEN_2,
- CHART_COLOR_ORANGE,
- CHART_COLOR_RED,
- CHART_COLOR_YELLOW,
- COLOR_BLUE,
- COLOR_ENS_ALTO,
- COLOR_ENS_BAJO,
- COLOR_ENS_MEDIO,
- COLOR_ENS_OPCIONAL,
- COLOR_HIGH_RISK,
- COLOR_LOW_RISK,
- COLOR_MEDIUM_RISK,
- COLOR_NIS2_PRIMARY,
- COLOR_SAFE,
- _create_dimensions_radar_chart,
- _create_ens_dimension_badges,
- _create_ens_nivel_badge,
- _create_ens_tipo_badge,
- _create_findings_table_style,
- _create_header_table_style,
- _create_info_table_style,
- _create_marco_category_chart,
- _create_nis2_requirements_index,
- _create_nis2_section_chart,
- _create_nis2_subsection_table,
- _create_pdf_styles,
- _create_risk_component,
- _create_section_score_chart,
- _create_status_component,
- _get_chart_color_for_percentage,
- _get_color_for_compliance,
- _get_color_for_risk_level,
- _get_color_for_weight,
- _get_ens_nivel_color,
- _load_findings_for_requirement_checks,
- _safe_getattr,
- generate_compliance_reports_job,
- generate_nis2_report,
- generate_threatscore_report,
-)
-from tasks.jobs.threatscore_utils import (
- _aggregate_requirement_statistics_from_database,
- _calculate_requirements_data_from_statistics,
-)
-
-from api.models import Finding, StatusChoices
-from prowler.lib.check.models import Severity
-
-matplotlib.use("Agg") # Use non-interactive backend for tests
-
-
-@pytest.mark.django_db
-class TestAggregateRequirementStatistics:
- """Test suite for _aggregate_requirement_statistics_from_database function."""
-
- def test_aggregates_findings_correctly(self, tenants_fixture, scans_fixture):
- """Verify correct pass/total counts per check are aggregated from database."""
- tenant = tenants_fixture[0]
- scan = scans_fixture[0]
-
- # Create findings with different check_ids and statuses
- Finding.objects.create(
- tenant_id=tenant.id,
- scan=scan,
- uid="finding-1",
- check_id="check_1",
- status=StatusChoices.PASS,
- severity=Severity.high,
- impact=Severity.high,
- check_metadata={},
- raw_result={},
- )
- Finding.objects.create(
- tenant_id=tenant.id,
- scan=scan,
- uid="finding-2",
- check_id="check_1",
- status=StatusChoices.FAIL,
- severity=Severity.high,
- impact=Severity.high,
- check_metadata={},
- raw_result={},
- )
- Finding.objects.create(
- tenant_id=tenant.id,
- scan=scan,
- uid="finding-3",
- check_id="check_2",
- status=StatusChoices.PASS,
- severity=Severity.medium,
- impact=Severity.medium,
- check_metadata={},
- raw_result={},
- )
-
- result = _aggregate_requirement_statistics_from_database(
- str(tenant.id), str(scan.id)
- )
-
- assert result == {
- "check_1": {"passed": 1, "total": 2},
- "check_2": {"passed": 1, "total": 1},
- }
-
- def test_handles_empty_scan(self, tenants_fixture, scans_fixture):
- """Return empty dict when no findings exist for the scan."""
- tenant = tenants_fixture[0]
- scan = scans_fixture[0]
-
- result = _aggregate_requirement_statistics_from_database(
- str(tenant.id), str(scan.id)
- )
-
- assert result == {}
-
- def test_multiple_findings_same_check(self, tenants_fixture, scans_fixture):
- """Aggregate multiple findings for same check_id correctly."""
- tenant = tenants_fixture[0]
- scan = scans_fixture[0]
-
- # Create 5 findings for same check, 3 passed
- for i in range(3):
- Finding.objects.create(
- tenant_id=tenant.id,
- scan=scan,
- uid=f"finding-pass-{i}",
- check_id="check_same",
- status=StatusChoices.PASS,
- severity=Severity.medium,
- impact=Severity.medium,
- check_metadata={},
- raw_result={},
- )
-
- for i in range(2):
- Finding.objects.create(
- tenant_id=tenant.id,
- scan=scan,
- uid=f"finding-fail-{i}",
- check_id="check_same",
- status=StatusChoices.FAIL,
- severity=Severity.medium,
- impact=Severity.medium,
- check_metadata={},
- raw_result={},
- )
-
- result = _aggregate_requirement_statistics_from_database(
- str(tenant.id), str(scan.id)
- )
-
- assert result == {"check_same": {"passed": 3, "total": 5}}
-
- def test_only_failed_findings(self, tenants_fixture, scans_fixture):
- """Correctly count when all findings are FAIL status."""
- tenant = tenants_fixture[0]
- scan = scans_fixture[0]
-
- Finding.objects.create(
- tenant_id=tenant.id,
- scan=scan,
- uid="finding-fail-1",
- check_id="check_fail",
- status=StatusChoices.FAIL,
- severity=Severity.medium,
- impact=Severity.medium,
- check_metadata={},
- raw_result={},
- )
- Finding.objects.create(
- tenant_id=tenant.id,
- scan=scan,
- uid="finding-fail-2",
- check_id="check_fail",
- status=StatusChoices.FAIL,
- severity=Severity.medium,
- impact=Severity.medium,
- check_metadata={},
- raw_result={},
- )
-
- result = _aggregate_requirement_statistics_from_database(
- str(tenant.id), str(scan.id)
- )
-
- assert result == {"check_fail": {"passed": 0, "total": 2}}
-
- def test_mixed_statuses(self, tenants_fixture, scans_fixture):
- """Test with PASS, FAIL, and MANUAL statuses mixed."""
- tenant = tenants_fixture[0]
- scan = scans_fixture[0]
-
- Finding.objects.create(
- tenant_id=tenant.id,
- scan=scan,
- uid="finding-pass",
- check_id="check_mixed",
- status=StatusChoices.PASS,
- severity=Severity.medium,
- impact=Severity.medium,
- check_metadata={},
- raw_result={},
- )
- Finding.objects.create(
- tenant_id=tenant.id,
- scan=scan,
- uid="finding-fail",
- check_id="check_mixed",
- status=StatusChoices.FAIL,
- severity=Severity.medium,
- impact=Severity.medium,
- check_metadata={},
- raw_result={},
- )
- Finding.objects.create(
- tenant_id=tenant.id,
- scan=scan,
- uid="finding-manual",
- check_id="check_mixed",
- status=StatusChoices.MANUAL,
- severity=Severity.medium,
- impact=Severity.medium,
- check_metadata={},
- raw_result={},
- )
-
- result = _aggregate_requirement_statistics_from_database(
- str(tenant.id), str(scan.id)
- )
-
- # Only PASS status is counted as passed, MANUAL findings are excluded from total
- assert result == {"check_mixed": {"passed": 1, "total": 2}}
-
-
-@pytest.mark.django_db
-class TestLoadFindingsForChecks:
- """Test suite for _load_findings_for_requirement_checks function."""
-
- def test_loads_only_requested_checks(
- self, tenants_fixture, scans_fixture, providers_fixture
- ):
- """Verify only findings for specified check_ids are loaded."""
- tenant = tenants_fixture[0]
- scan = scans_fixture[0]
- providers_fixture[0]
-
- # Create findings with different check_ids
- Finding.objects.create(
- tenant_id=tenant.id,
- scan=scan,
- uid="finding-1",
- check_id="check_requested",
- status=StatusChoices.PASS,
- severity=Severity.medium,
- impact=Severity.medium,
- check_metadata={},
- raw_result={},
- )
- Finding.objects.create(
- tenant_id=tenant.id,
- scan=scan,
- uid="finding-2",
- check_id="check_not_requested",
- status=StatusChoices.FAIL,
- severity=Severity.medium,
- impact=Severity.medium,
- check_metadata={},
- raw_result={},
- )
-
- mock_provider = MagicMock()
-
- with patch(
- "tasks.jobs.threatscore_utils.FindingOutput.transform_api_finding"
- ) as mock_transform:
- mock_finding_output = MagicMock()
- mock_finding_output.check_id = "check_requested"
- mock_transform.return_value = mock_finding_output
-
- result = _load_findings_for_requirement_checks(
- str(tenant.id), str(scan.id), ["check_requested"], mock_provider
- )
-
- # Only one finding should be loaded
- assert "check_requested" in result
- assert "check_not_requested" not in result
- assert len(result["check_requested"]) == 1
- assert mock_transform.call_count == 1
-
- def test_empty_check_ids_returns_empty(
- self, tenants_fixture, scans_fixture, providers_fixture
- ):
- """Return empty dict when check_ids list is empty."""
- tenant = tenants_fixture[0]
- scan = scans_fixture[0]
- mock_provider = MagicMock()
-
- result = _load_findings_for_requirement_checks(
- str(tenant.id), str(scan.id), [], mock_provider
- )
-
- assert result == {}
-
- def test_groups_by_check_id(
- self, tenants_fixture, scans_fixture, providers_fixture
- ):
- """Multiple findings for same check are grouped correctly."""
- tenant = tenants_fixture[0]
- scan = scans_fixture[0]
-
- # Create multiple findings for same check
- for i in range(3):
- Finding.objects.create(
- tenant_id=tenant.id,
- scan=scan,
- uid=f"finding-{i}",
- check_id="check_group",
- status=StatusChoices.PASS,
- severity=Severity.medium,
- impact=Severity.medium,
- check_metadata={},
- raw_result={},
- )
-
- mock_provider = MagicMock()
-
- with patch(
- "tasks.jobs.threatscore_utils.FindingOutput.transform_api_finding"
- ) as mock_transform:
- mock_finding_output = MagicMock()
- mock_finding_output.check_id = "check_group"
- mock_transform.return_value = mock_finding_output
-
- result = _load_findings_for_requirement_checks(
- str(tenant.id), str(scan.id), ["check_group"], mock_provider
- )
-
- assert len(result["check_group"]) == 3
-
- def test_transforms_to_finding_output(
- self, tenants_fixture, scans_fixture, providers_fixture
- ):
- """Findings are transformed using FindingOutput.transform_api_finding."""
- tenant = tenants_fixture[0]
- scan = scans_fixture[0]
-
- Finding.objects.create(
- tenant_id=tenant.id,
- scan=scan,
- uid="finding-transform",
- check_id="check_transform",
- status=StatusChoices.PASS,
- severity=Severity.medium,
- impact=Severity.medium,
- check_metadata={},
- raw_result={},
- )
-
- mock_provider = MagicMock()
-
- with patch(
- "tasks.jobs.threatscore_utils.FindingOutput.transform_api_finding"
- ) as mock_transform:
- mock_finding_output = MagicMock()
- mock_finding_output.check_id = "check_transform"
- mock_transform.return_value = mock_finding_output
-
- result = _load_findings_for_requirement_checks(
- str(tenant.id), str(scan.id), ["check_transform"], mock_provider
- )
-
- # Verify transform was called
- mock_transform.assert_called_once()
- # Verify the transformed output is in the result
- assert result["check_transform"][0] == mock_finding_output
-
- def test_batched_iteration(self, tenants_fixture, scans_fixture, providers_fixture):
- """Works correctly with multiple batches of findings."""
- tenant = tenants_fixture[0]
- scan = scans_fixture[0]
-
- # Create enough findings to ensure batching (assuming batch size > 1)
- for i in range(10):
- Finding.objects.create(
- tenant_id=tenant.id,
- scan=scan,
- uid=f"finding-batch-{i}",
- check_id="check_batch",
- status=StatusChoices.PASS,
- severity=Severity.medium,
- impact=Severity.medium,
- check_metadata={},
- raw_result={},
- )
-
- mock_provider = MagicMock()
-
- with patch(
- "tasks.jobs.threatscore_utils.FindingOutput.transform_api_finding"
- ) as mock_transform:
- mock_finding_output = MagicMock()
- mock_finding_output.check_id = "check_batch"
- mock_transform.return_value = mock_finding_output
-
- result = _load_findings_for_requirement_checks(
- str(tenant.id), str(scan.id), ["check_batch"], mock_provider
- )
-
- # All 10 findings should be loaded regardless of batching
- assert len(result["check_batch"]) == 10
- assert mock_transform.call_count == 10
-
-
-@pytest.mark.django_db
-class TestCalculateRequirementsData:
- """Test suite for _calculate_requirements_data_from_statistics function."""
-
- def test_requirement_status_all_pass(self):
- """Status is PASS when all findings for requirement checks pass."""
- mock_compliance = MagicMock()
- mock_compliance.Framework = "TestFramework"
- mock_compliance.Version = "1.0"
-
- mock_requirement = MagicMock()
- mock_requirement.Id = "req_1"
- mock_requirement.Description = "Test requirement"
- mock_requirement.Checks = ["check_1", "check_2"]
- mock_requirement.Attributes = [MagicMock()]
-
- mock_compliance.Requirements = [mock_requirement]
-
- requirement_statistics = {
- "check_1": {"passed": 5, "total": 5},
- "check_2": {"passed": 3, "total": 3},
- }
-
- attributes_by_id, requirements_list = (
- _calculate_requirements_data_from_statistics(
- mock_compliance, requirement_statistics
- )
- )
-
- assert len(requirements_list) == 1
- assert requirements_list[0]["attributes"]["status"] == StatusChoices.PASS
- assert requirements_list[0]["attributes"]["passed_findings"] == 8
- assert requirements_list[0]["attributes"]["total_findings"] == 8
-
- def test_requirement_status_some_fail(self):
- """Status is FAIL when some findings fail."""
- mock_compliance = MagicMock()
- mock_compliance.Framework = "TestFramework"
- mock_compliance.Version = "1.0"
-
- mock_requirement = MagicMock()
- mock_requirement.Id = "req_2"
- mock_requirement.Description = "Test requirement with failures"
- mock_requirement.Checks = ["check_3"]
- mock_requirement.Attributes = [MagicMock()]
-
- mock_compliance.Requirements = [mock_requirement]
-
- requirement_statistics = {
- "check_3": {"passed": 2, "total": 5},
- }
-
- attributes_by_id, requirements_list = (
- _calculate_requirements_data_from_statistics(
- mock_compliance, requirement_statistics
- )
- )
-
- assert len(requirements_list) == 1
- assert requirements_list[0]["attributes"]["status"] == StatusChoices.FAIL
- assert requirements_list[0]["attributes"]["passed_findings"] == 2
- assert requirements_list[0]["attributes"]["total_findings"] == 5
-
- def test_requirement_status_no_findings(self):
- """Status is MANUAL when no findings exist for requirement."""
- mock_compliance = MagicMock()
- mock_compliance.Framework = "TestFramework"
- mock_compliance.Version = "1.0"
-
- mock_requirement = MagicMock()
- mock_requirement.Id = "req_3"
- mock_requirement.Description = "Manual requirement"
- mock_requirement.Checks = ["check_nonexistent"]
- mock_requirement.Attributes = [MagicMock()]
-
- mock_compliance.Requirements = [mock_requirement]
-
- requirement_statistics = {}
-
- attributes_by_id, requirements_list = (
- _calculate_requirements_data_from_statistics(
- mock_compliance, requirement_statistics
- )
- )
-
- assert len(requirements_list) == 1
- assert requirements_list[0]["attributes"]["status"] == StatusChoices.MANUAL
- assert requirements_list[0]["attributes"]["passed_findings"] == 0
- assert requirements_list[0]["attributes"]["total_findings"] == 0
-
- def test_aggregates_multiple_checks(self):
- """Correctly sum stats across multiple checks in requirement."""
- mock_compliance = MagicMock()
- mock_compliance.Framework = "TestFramework"
- mock_compliance.Version = "1.0"
-
- mock_requirement = MagicMock()
- mock_requirement.Id = "req_4"
- mock_requirement.Description = "Multi-check requirement"
- mock_requirement.Checks = ["check_a", "check_b", "check_c"]
- mock_requirement.Attributes = [MagicMock()]
-
- mock_compliance.Requirements = [mock_requirement]
-
- requirement_statistics = {
- "check_a": {"passed": 10, "total": 15},
- "check_b": {"passed": 5, "total": 10},
- "check_c": {"passed": 0, "total": 5},
- }
-
- attributes_by_id, requirements_list = (
- _calculate_requirements_data_from_statistics(
- mock_compliance, requirement_statistics
- )
- )
-
- assert len(requirements_list) == 1
- # 10 + 5 + 0 = 15 passed
- assert requirements_list[0]["attributes"]["passed_findings"] == 15
- # 15 + 10 + 5 = 30 total
- assert requirements_list[0]["attributes"]["total_findings"] == 30
- # Not all passed, so should be FAIL
- assert requirements_list[0]["attributes"]["status"] == StatusChoices.FAIL
-
- def test_returns_correct_structure(self):
- """Verify tuple structure and dict keys are correct."""
- mock_compliance = MagicMock()
- mock_compliance.Framework = "TestFramework"
- mock_compliance.Version = "1.0"
-
- mock_attribute = MagicMock()
- mock_requirement = MagicMock()
- mock_requirement.Id = "req_5"
- mock_requirement.Description = "Structure test"
- mock_requirement.Checks = ["check_struct"]
- mock_requirement.Attributes = [mock_attribute]
-
- mock_compliance.Requirements = [mock_requirement]
-
- requirement_statistics = {"check_struct": {"passed": 1, "total": 1}}
-
- attributes_by_id, requirements_list = (
- _calculate_requirements_data_from_statistics(
- mock_compliance, requirement_statistics
- )
- )
-
- # Verify attributes_by_id structure
- assert "req_5" in attributes_by_id
- assert "attributes" in attributes_by_id["req_5"]
- assert "description" in attributes_by_id["req_5"]
- assert "req_attributes" in attributes_by_id["req_5"]["attributes"]
- assert "checks" in attributes_by_id["req_5"]["attributes"]
-
- # Verify requirements_list structure
- assert len(requirements_list) == 1
- req = requirements_list[0]
- assert "id" in req
- assert "attributes" in req
- assert "framework" in req["attributes"]
- assert "version" in req["attributes"]
- assert "status" in req["attributes"]
- assert "description" in req["attributes"]
- assert "passed_findings" in req["attributes"]
- assert "total_findings" in req["attributes"]
-
-
-@pytest.mark.django_db
-class TestGenerateThreatscoreReportFunction:
- def setup_method(self):
- self.scan_id = str(uuid.uuid4())
- self.provider_id = str(uuid.uuid4())
- self.tenant_id = str(uuid.uuid4())
- self.compliance_id = "prowler_threatscore_aws"
- self.output_path = "/tmp/test_threatscore_report.pdf"
-
- @patch("tasks.jobs.report.initialize_prowler_provider")
- @patch("tasks.jobs.report.Provider.objects.get")
- @patch("tasks.jobs.report.Compliance.get_bulk")
- @patch("tasks.jobs.report._aggregate_requirement_statistics_from_database")
- @patch("tasks.jobs.report._calculate_requirements_data_from_statistics")
- @patch("tasks.jobs.report._load_findings_for_requirement_checks")
- @patch("tasks.jobs.report.SimpleDocTemplate")
- @patch("tasks.jobs.report.Image")
- @patch("tasks.jobs.report.Spacer")
- @patch("tasks.jobs.report.Paragraph")
- @patch("tasks.jobs.report.PageBreak")
- @patch("tasks.jobs.report.Table")
- @patch("tasks.jobs.report.TableStyle")
- @patch("tasks.jobs.report.plt.subplots")
- @patch("tasks.jobs.report.plt.savefig")
- @patch("tasks.jobs.report.io.BytesIO")
- def test_generate_threatscore_report_success(
- self,
- mock_bytesio,
- mock_savefig,
- mock_subplots,
- mock_table_style,
- mock_table,
- mock_page_break,
- mock_paragraph,
- mock_spacer,
- mock_image,
- mock_doc_template,
- mock_load_findings,
- mock_calculate_requirements,
- mock_aggregate_statistics,
- mock_compliance_get_bulk,
- mock_provider_get,
- mock_initialize_provider,
- ):
- """Test the updated generate_threatscore_report using new memory-efficient architecture."""
- mock_provider = MagicMock()
- mock_provider.provider = "aws"
- mock_provider_get.return_value = mock_provider
-
- prowler_provider = MagicMock()
- mock_initialize_provider.return_value = prowler_provider
-
- # Mock compliance object with requirements
- mock_compliance_obj = MagicMock()
- mock_compliance_obj.Framework = "ProwlerThreatScore"
- mock_compliance_obj.Version = "1.0"
- mock_compliance_obj.Description = "Test Description"
-
- # Configure requirement with properly set numeric attributes for chart generation
- mock_requirement = MagicMock()
- mock_requirement.Id = "req_1"
- mock_requirement.Description = "Test requirement"
- mock_requirement.Checks = ["check_1"]
-
- # Create a properly configured attribute mock with numeric values
- mock_requirement_attr = MagicMock()
- mock_requirement_attr.Section = "1. IAM"
- mock_requirement_attr.SubSection = "1.1 Identity"
- mock_requirement_attr.Title = "Test Requirement Title"
- mock_requirement_attr.LevelOfRisk = 3
- mock_requirement_attr.Weight = 100
- mock_requirement_attr.AttributeDescription = "Test requirement description"
- mock_requirement_attr.AdditionalInformation = "Additional test information"
-
- mock_requirement.Attributes = [mock_requirement_attr]
- mock_compliance_obj.Requirements = [mock_requirement]
-
- mock_compliance_get_bulk.return_value = {
- self.compliance_id: mock_compliance_obj
- }
-
- # Mock the aggregated statistics from database
- mock_aggregate_statistics.return_value = {"check_1": {"passed": 5, "total": 10}}
-
- # Mock the calculated requirements data with properly configured attributes
- mock_attributes_by_id = {
- "req_1": {
- "attributes": {
- "req_attributes": [mock_requirement_attr],
- "checks": ["check_1"],
- },
- "description": "Test requirement",
- }
- }
- mock_requirements_list = [
- {
- "id": "req_1",
- "attributes": {
- "framework": "ProwlerThreatScore",
- "version": "1.0",
- "status": StatusChoices.FAIL,
- "description": "Test requirement",
- "passed_findings": 5,
- "total_findings": 10,
- },
- }
- ]
- mock_calculate_requirements.return_value = (
- mock_attributes_by_id,
- mock_requirements_list,
- )
-
- # Mock the on-demand loaded findings
- mock_finding_output = MagicMock()
- mock_finding_output.check_id = "check_1"
- mock_finding_output.status = "FAIL"
- mock_finding_output.metadata = MagicMock()
- mock_finding_output.metadata.CheckTitle = "Test Check"
- mock_finding_output.metadata.Severity = "HIGH"
- mock_finding_output.resource_name = "test-resource"
- mock_finding_output.region = "us-east-1"
-
- mock_load_findings.return_value = {"check_1": [mock_finding_output]}
-
- # Mock PDF generation components
- mock_doc = MagicMock()
- mock_doc_template.return_value = mock_doc
-
- mock_fig, mock_ax = MagicMock(), MagicMock()
- mock_subplots.return_value = (mock_fig, mock_ax)
- mock_buffer = MagicMock()
- mock_bytesio.return_value = mock_buffer
-
- mock_image.return_value = MagicMock()
- mock_spacer.return_value = MagicMock()
- mock_paragraph.return_value = MagicMock()
- mock_page_break.return_value = MagicMock()
- mock_table.return_value = MagicMock()
- mock_table_style.return_value = MagicMock()
-
- # Execute the function
- generate_threatscore_report(
- tenant_id=self.tenant_id,
- scan_id=self.scan_id,
- compliance_id=self.compliance_id,
- output_path=self.output_path,
- provider_id=self.provider_id,
- only_failed=True,
- min_risk_level=4,
- )
-
- # Verify the new workflow was followed
- mock_provider_get.assert_called_once_with(id=self.provider_id)
- mock_initialize_provider.assert_called_once_with(mock_provider)
- mock_compliance_get_bulk.assert_called_once_with("aws")
-
- # Verify the new functions were called in correct order with correct parameters
- mock_aggregate_statistics.assert_called_once_with(self.tenant_id, self.scan_id)
- mock_calculate_requirements.assert_called_once_with(
- mock_compliance_obj, {"check_1": {"passed": 5, "total": 10}}
- )
- mock_load_findings.assert_called_once_with(
- self.tenant_id, self.scan_id, ["check_1"], prowler_provider, None
- )
-
- # Verify PDF was built
- mock_doc_template.assert_called_once()
- mock_doc.build.assert_called_once()
-
- @patch("tasks.jobs.report.initialize_prowler_provider")
- @patch("tasks.jobs.report.Provider.objects.get")
- @patch("tasks.jobs.report.Compliance.get_bulk")
- @patch("tasks.jobs.threatscore_utils.Finding.all_objects.filter")
- def test_generate_threatscore_report_exception_handling(
- self,
- mock_finding_filter,
- mock_compliance_get_bulk,
- mock_provider_get,
- mock_initialize_provider,
- ):
- mock_provider_get.side_effect = Exception("Provider not found")
-
- with pytest.raises(Exception, match="Provider not found"):
- generate_threatscore_report(
- tenant_id=self.tenant_id,
- scan_id=self.scan_id,
- compliance_id=self.compliance_id,
- output_path=self.output_path,
- provider_id=self.provider_id,
- only_failed=True,
- min_risk_level=4,
- )
-
-
-@pytest.mark.django_db
-class TestColorHelperFunctions:
- """Test suite for color selection helper functions."""
-
- def test_get_color_for_risk_level_high(self):
- """High risk level (>=4) returns red color."""
- assert _get_color_for_risk_level(4) == COLOR_HIGH_RISK
- assert _get_color_for_risk_level(5) == COLOR_HIGH_RISK
-
- def test_get_color_for_risk_level_medium_high(self):
- """Medium-high risk level (3) returns orange color."""
- assert _get_color_for_risk_level(3) == COLOR_MEDIUM_RISK
-
- def test_get_color_for_risk_level_medium(self):
- """Medium risk level (2) returns yellow color."""
- assert _get_color_for_risk_level(2) == COLOR_LOW_RISK
-
- def test_get_color_for_risk_level_low(self):
- """Low risk level (<2) returns green color."""
- assert _get_color_for_risk_level(0) == COLOR_SAFE
- assert _get_color_for_risk_level(1) == COLOR_SAFE
-
- def test_get_color_for_weight_high(self):
- """High weight (>100) returns red color."""
- assert _get_color_for_weight(101) == COLOR_HIGH_RISK
- assert _get_color_for_weight(200) == COLOR_HIGH_RISK
-
- def test_get_color_for_weight_medium(self):
- """Medium weight (51-100) returns yellow color."""
- assert _get_color_for_weight(51) == COLOR_LOW_RISK
- assert _get_color_for_weight(100) == COLOR_LOW_RISK
-
- def test_get_color_for_weight_low(self):
- """Low weight (<=50) returns green color."""
- assert _get_color_for_weight(0) == COLOR_SAFE
- assert _get_color_for_weight(50) == COLOR_SAFE
-
- def test_get_color_for_compliance_high(self):
- """High compliance (>=80%) returns green color."""
- assert _get_color_for_compliance(80.0) == COLOR_SAFE
- assert _get_color_for_compliance(100.0) == COLOR_SAFE
-
- def test_get_color_for_compliance_medium(self):
- """Medium compliance (60-79%) returns yellow color."""
- assert _get_color_for_compliance(60.0) == COLOR_LOW_RISK
- assert _get_color_for_compliance(79.9) == COLOR_LOW_RISK
-
- def test_get_color_for_compliance_low(self):
- """Low compliance (<60%) returns red color."""
- assert _get_color_for_compliance(0.0) == COLOR_HIGH_RISK
- assert _get_color_for_compliance(59.9) == COLOR_HIGH_RISK
-
- def test_get_chart_color_for_percentage_excellent(self):
- """Excellent percentage (>=80%) returns green."""
- assert _get_chart_color_for_percentage(80.0) == CHART_COLOR_GREEN_1
- assert _get_chart_color_for_percentage(100.0) == CHART_COLOR_GREEN_1
-
- def test_get_chart_color_for_percentage_good(self):
- """Good percentage (60-79%) returns light green."""
- assert _get_chart_color_for_percentage(60.0) == CHART_COLOR_GREEN_2
- assert _get_chart_color_for_percentage(79.9) == CHART_COLOR_GREEN_2
-
- def test_get_chart_color_for_percentage_fair(self):
- """Fair percentage (40-59%) returns yellow."""
- assert _get_chart_color_for_percentage(40.0) == CHART_COLOR_YELLOW
- assert _get_chart_color_for_percentage(59.9) == CHART_COLOR_YELLOW
-
- def test_get_chart_color_for_percentage_poor(self):
- """Poor percentage (20-39%) returns orange."""
- assert _get_chart_color_for_percentage(20.0) == CHART_COLOR_ORANGE
- assert _get_chart_color_for_percentage(39.9) == CHART_COLOR_ORANGE
-
- def test_get_chart_color_for_percentage_critical(self):
- """Critical percentage (<20%) returns red."""
- assert _get_chart_color_for_percentage(0.0) == CHART_COLOR_RED
- assert _get_chart_color_for_percentage(19.9) == CHART_COLOR_RED
-
- def test_get_ens_nivel_color_alto(self):
- """Alto nivel returns red color."""
- assert _get_ens_nivel_color("alto") == COLOR_ENS_ALTO
- assert _get_ens_nivel_color("ALTO") == COLOR_ENS_ALTO
-
- def test_get_ens_nivel_color_medio(self):
- """Medio nivel returns yellow/orange color."""
- assert _get_ens_nivel_color("medio") == COLOR_ENS_MEDIO
- assert _get_ens_nivel_color("MEDIO") == COLOR_ENS_MEDIO
-
- def test_get_ens_nivel_color_bajo(self):
- """Bajo nivel returns green color."""
- assert _get_ens_nivel_color("bajo") == COLOR_ENS_BAJO
- assert _get_ens_nivel_color("BAJO") == COLOR_ENS_BAJO
-
- def test_get_ens_nivel_color_opcional(self):
- """Opcional and unknown nivels return gray color."""
- assert _get_ens_nivel_color("opcional") == COLOR_ENS_OPCIONAL
- assert _get_ens_nivel_color("unknown") == COLOR_ENS_OPCIONAL
-
-
-class TestSafeGetattr:
- """Test suite for _safe_getattr helper function."""
-
- def test_safe_getattr_attribute_exists(self):
- """Returns attribute value when it exists."""
- obj = Mock()
- obj.test_attr = "value"
- assert _safe_getattr(obj, "test_attr") == "value"
-
- def test_safe_getattr_attribute_missing_default(self):
- """Returns default 'N/A' when attribute doesn't exist."""
- obj = Mock(spec=[])
- result = _safe_getattr(obj, "missing_attr")
- assert result == "N/A"
-
- def test_safe_getattr_custom_default(self):
- """Returns custom default when specified."""
- obj = Mock(spec=[])
- result = _safe_getattr(obj, "missing_attr", "custom")
- assert result == "custom"
-
- def test_safe_getattr_none_value(self):
- """Returns None if attribute value is None."""
- obj = Mock()
- obj.test_attr = None
- assert _safe_getattr(obj, "test_attr") is None
-
-
-class TestPDFStylesCreation:
- """Test suite for PDF styles creation and caching."""
-
- def test_create_pdf_styles_returns_dict(self):
- """Returns a dictionary with all required styles."""
- styles = _create_pdf_styles()
-
- assert isinstance(styles, dict)
- assert "title" in styles
- assert "h1" in styles
- assert "h2" in styles
- assert "h3" in styles
- assert "normal" in styles
- assert "normal_center" in styles
-
- def test_create_pdf_styles_caches_result(self):
- """Subsequent calls return cached styles."""
- styles1 = _create_pdf_styles()
- styles2 = _create_pdf_styles()
-
- # Should return the exact same object (not just equal)
- assert styles1 is styles2
-
- def test_pdf_styles_have_correct_fonts(self):
- """Styles use the correct fonts."""
- styles = _create_pdf_styles()
-
- assert styles["title"].fontName == "PlusJakartaSans"
- assert styles["h1"].fontName == "PlusJakartaSans"
- assert styles["normal"].fontName == "PlusJakartaSans"
-
-
-class TestTableStyleFactories:
- """Test suite for table style factory functions."""
-
- def test_create_info_table_style_returns_table_style(self):
- """Returns a TableStyle object."""
- style = _create_info_table_style()
- assert isinstance(style, TableStyle)
-
- def test_create_header_table_style_default_color(self):
- """Uses default blue color when not specified."""
- style = _create_header_table_style()
- assert isinstance(style, TableStyle)
- # Verify it has styling commands
- assert len(style.getCommands()) > 0
-
- def test_create_header_table_style_custom_color(self):
- """Uses custom color when specified."""
- custom_color = colors.red
- style = _create_header_table_style(custom_color)
- assert isinstance(style, TableStyle)
-
- def test_create_findings_table_style(self):
- """Returns appropriate style for findings tables."""
- style = _create_findings_table_style()
- assert isinstance(style, TableStyle)
- assert len(style.getCommands()) > 0
-
-
-class TestRiskComponent:
- """Test suite for _create_risk_component function."""
-
- def test_create_risk_component_returns_table(self):
- """Returns a Table object."""
- table = _create_risk_component(risk_level=3, weight=100, score=50)
- assert isinstance(table, Table)
-
- def test_create_risk_component_high_risk(self):
- """High risk level uses red color."""
- table = _create_risk_component(risk_level=4, weight=50, score=0)
- assert isinstance(table, Table)
- # Table is created successfully
-
- def test_create_risk_component_low_risk(self):
- """Low risk level uses green color."""
- table = _create_risk_component(risk_level=1, weight=30, score=100)
- assert isinstance(table, Table)
-
- def test_create_risk_component_default_score(self):
- """Uses default score of 0 when not specified."""
- table = _create_risk_component(risk_level=2, weight=50)
- assert isinstance(table, Table)
-
-
-class TestStatusComponent:
- """Test suite for _create_status_component function."""
-
- def test_create_status_component_pass(self):
- """PASS status uses green color."""
- table = _create_status_component("pass")
- assert isinstance(table, Table)
-
- def test_create_status_component_fail(self):
- """FAIL status uses red color."""
- table = _create_status_component("fail")
- assert isinstance(table, Table)
-
- def test_create_status_component_manual(self):
- """MANUAL status uses gray color."""
- table = _create_status_component("manual")
- assert isinstance(table, Table)
-
- def test_create_status_component_uppercase(self):
- """Handles uppercase status strings."""
- table = _create_status_component("PASS")
- assert isinstance(table, Table)
-
-
-class TestENSBadges:
- """Test suite for ENS-specific badge creation functions."""
-
- def test_create_ens_nivel_badge_alto(self):
- """Creates badge for alto nivel."""
- table = _create_ens_nivel_badge("alto")
- assert isinstance(table, Table)
-
- def test_create_ens_nivel_badge_medio(self):
- """Creates badge for medio nivel."""
- table = _create_ens_nivel_badge("medio")
- assert isinstance(table, Table)
-
- def test_create_ens_nivel_badge_bajo(self):
- """Creates badge for bajo nivel."""
- table = _create_ens_nivel_badge("bajo")
- assert isinstance(table, Table)
-
- def test_create_ens_nivel_badge_opcional(self):
- """Creates badge for opcional nivel."""
- table = _create_ens_nivel_badge("opcional")
- assert isinstance(table, Table)
-
- def test_create_ens_tipo_badge_requisito(self):
- """Creates badge for requisito type."""
- table = _create_ens_tipo_badge("requisito")
- assert isinstance(table, Table)
-
- def test_create_ens_tipo_badge_unknown(self):
- """Handles unknown tipo gracefully."""
- table = _create_ens_tipo_badge("unknown")
- assert isinstance(table, Table)
-
- def test_create_ens_dimension_badges_single(self):
- """Creates badges for single dimension."""
- table = _create_ens_dimension_badges(["trazabilidad"])
- assert isinstance(table, Table)
-
- def test_create_ens_dimension_badges_multiple(self):
- """Creates badges for multiple dimensions."""
- dimensiones = ["trazabilidad", "autenticidad", "integridad"]
- table = _create_ens_dimension_badges(dimensiones)
- assert isinstance(table, Table)
-
- def test_create_ens_dimension_badges_empty(self):
- """Returns N/A table for empty dimensions list."""
- table = _create_ens_dimension_badges([])
- assert isinstance(table, Table)
-
- def test_create_ens_dimension_badges_invalid(self):
- """Filters out invalid dimensions."""
- table = _create_ens_dimension_badges(["invalid", "trazabilidad"])
- assert isinstance(table, Table)
-
-
-class TestChartCreation:
- """Test suite for chart generation functions."""
-
- @patch("tasks.jobs.report.plt.close")
- @patch("tasks.jobs.report.plt.savefig")
- @patch("tasks.jobs.report.plt.subplots")
- def test_create_section_score_chart_with_data(
- self, mock_subplots, mock_savefig, mock_close
- ):
- """Creates chart successfully with valid data."""
- mock_fig, mock_ax = MagicMock(), MagicMock()
- mock_subplots.return_value = (mock_fig, mock_ax)
- mock_ax.bar.return_value = [MagicMock(), MagicMock()]
-
- requirements_list = [
- {
- "id": "req_1",
- "attributes": {
- "passed_findings": 10,
- "total_findings": 10,
- },
- }
- ]
-
- mock_metadata = MagicMock()
- mock_metadata.Section = "1. IAM"
- mock_metadata.LevelOfRisk = 3
- mock_metadata.Weight = 100
-
- attributes_by_id = {
- "req_1": {
- "attributes": {
- "req_attributes": [mock_metadata],
- }
- }
- }
-
- result = _create_section_score_chart(requirements_list, attributes_by_id)
-
- assert isinstance(result, io.BytesIO)
- mock_subplots.assert_called_once()
- mock_close.assert_called_once_with(mock_fig)
-
- @patch("tasks.jobs.report.plt.close")
- @patch("tasks.jobs.report.plt.savefig")
- @patch("tasks.jobs.report.plt.subplots")
- def test_create_marco_category_chart_with_data(
- self, mock_subplots, mock_savefig, mock_close
- ):
- """Creates marco/category chart successfully."""
- mock_fig, mock_ax = MagicMock(), MagicMock()
- mock_subplots.return_value = (mock_fig, mock_ax)
- mock_ax.barh.return_value = [MagicMock()]
-
- requirements_list = [
- {
- "id": "req_1",
- "attributes": {
- "status": StatusChoices.PASS,
- },
- }
- ]
-
- mock_metadata = MagicMock()
- mock_metadata.Marco = "Marco1"
- mock_metadata.Categoria = "Cat1"
-
- attributes_by_id = {
- "req_1": {
- "attributes": {
- "req_attributes": [mock_metadata],
- }
- }
- }
-
- result = _create_marco_category_chart(requirements_list, attributes_by_id)
-
- assert isinstance(result, io.BytesIO)
- mock_close.assert_called_once_with(mock_fig)
-
- @patch("tasks.jobs.report.plt.close")
- @patch("tasks.jobs.report.plt.savefig")
- @patch("tasks.jobs.report.plt.subplots")
- def test_create_dimensions_radar_chart(
- self, mock_subplots, mock_savefig, mock_close
- ):
- """Creates radar chart for dimensions."""
- mock_fig, mock_ax = MagicMock(), MagicMock()
- mock_ax.plot = MagicMock()
- mock_ax.fill = MagicMock()
- mock_subplots.return_value = (mock_fig, mock_ax)
-
- requirements_list = [
- {
- "id": "req_1",
- "attributes": {
- "status": StatusChoices.PASS,
- },
- }
- ]
-
- mock_metadata = MagicMock()
- mock_metadata.Dimensiones = ["trazabilidad", "integridad"]
-
- attributes_by_id = {
- "req_1": {
- "attributes": {
- "req_attributes": [mock_metadata],
- }
- }
- }
-
- result = _create_dimensions_radar_chart(requirements_list, attributes_by_id)
-
- assert isinstance(result, io.BytesIO)
- mock_close.assert_called_once_with(mock_fig)
-
- @patch("tasks.jobs.report.plt.close")
- @patch("tasks.jobs.report.plt.savefig")
- @patch("tasks.jobs.report.plt.subplots")
- def test_create_chart_closes_figure_on_error(
- self, mock_subplots, mock_savefig, mock_close
- ):
- """Ensures figure is closed even if savefig fails."""
- mock_fig, mock_ax = MagicMock(), MagicMock()
- mock_subplots.return_value = (mock_fig, mock_ax)
- mock_savefig.side_effect = Exception("Save failed")
-
- requirements_list = []
- attributes_by_id = {}
-
- with pytest.raises(Exception):
- _create_section_score_chart(requirements_list, attributes_by_id)
-
- # Verify figure was still closed
- mock_close.assert_called_with(mock_fig)
-
-
-@pytest.mark.django_db
-class TestOptimizationImprovements:
- """Test suite to verify optimization improvements work correctly."""
-
- def test_constants_are_color_objects(self):
- """Verify color constants are properly instantiated Color objects."""
- assert isinstance(COLOR_BLUE, colors.Color)
- assert isinstance(COLOR_HIGH_RISK, colors.Color)
- assert isinstance(COLOR_SAFE, colors.Color)
-
- def test_chart_color_constants_are_strings(self):
- """Verify chart color constants are hex strings."""
- assert isinstance(CHART_COLOR_GREEN_1, str)
- assert CHART_COLOR_GREEN_1.startswith("#")
- assert len(CHART_COLOR_GREEN_1) == 7
-
- def test_style_cache_persists_across_calls(self):
- """Verify style caching reduces object creation."""
- # Clear any existing cache by calling directly
- styles1 = _create_pdf_styles()
- styles2 = _create_pdf_styles()
-
- # Should be the exact same cached object
- assert id(styles1) == id(styles2)
-
- def test_helper_functions_return_consistent_results(self):
- """Verify helper functions return consistent results."""
- # Same input should always return same output
- assert _get_color_for_risk_level(3) == _get_color_for_risk_level(3)
- assert _get_color_for_weight(100) == _get_color_for_weight(100)
- assert _get_chart_color_for_percentage(75.0) == _get_chart_color_for_percentage(
- 75.0
- )
-
-
-@pytest.mark.django_db
-class TestGenerateComplianceReportsOptimized:
- """Test suite for the optimized generate_compliance_reports_job function."""
-
- def setup_method(self):
- self.scan_id = str(uuid.uuid4())
- self.provider_id = str(uuid.uuid4())
- self.tenant_id = str(uuid.uuid4())
-
- def test_no_findings_returns_early_for_both_reports(self):
- """Test that function returns early when no findings exist."""
- with patch("tasks.jobs.report.ScanSummary.objects.filter") as mock_filter:
- mock_filter.return_value.exists.return_value = False
-
- result = generate_compliance_reports_job(
- tenant_id=self.tenant_id,
- scan_id=self.scan_id,
- provider_id=self.provider_id,
- )
-
- assert result["threatscore"] == {"upload": False, "path": ""}
- assert result["ens"] == {"upload": False, "path": ""}
- mock_filter.assert_called_once_with(scan_id=self.scan_id)
-
- @patch("tasks.jobs.report.rmtree")
- @patch("tasks.jobs.report._upload_to_s3")
- @patch("tasks.jobs.report.generate_nis2_report")
- @patch("tasks.jobs.report.generate_ens_report")
- @patch("tasks.jobs.report.generate_threatscore_report")
- @patch("tasks.jobs.report._generate_compliance_output_directory")
- @patch("tasks.jobs.report._aggregate_requirement_statistics_from_database")
- @patch("tasks.jobs.report.Provider")
- @patch("tasks.jobs.report.ScanSummary")
- def test_generates_reports_with_shared_queries(
- self,
- mock_scan_summary,
- mock_provider,
- mock_aggregate_stats,
- mock_gen_dir,
- mock_gen_threatscore,
- mock_gen_ens,
- mock_gen_nis2,
- mock_upload,
- mock_rmtree,
- ):
- """Test that requested reports are generated with shared database queries."""
- # Setup mocks
- mock_scan_summary.objects.filter.return_value.exists.return_value = True
- mock_provider_obj = Mock()
- mock_provider_obj.uid = "test-uid"
- mock_provider_obj.provider = "aws"
- mock_provider.objects.get.return_value = mock_provider_obj
-
- mock_aggregate_stats.return_value = {"check-1": {"passed": 10, "total": 15}}
- # Mock returns different paths for different compliance_framework calls
- mock_gen_dir.side_effect = [
- "/tmp/reports/threatscore/output", # First call with compliance_framework="threatscore"
- "/tmp/reports/ens/output", # Second call with compliance_framework="ens"
- "/tmp/reports/nis2/output", # Third call with compliance_framework="nis2"
- ]
- mock_upload.side_effect = [
- "s3://bucket/threatscore.pdf",
- "s3://bucket/ens.pdf",
- "s3://bucket/nis2.pdf",
- ]
-
- result = generate_compliance_reports_job(
- tenant_id=self.tenant_id,
- scan_id=self.scan_id,
- provider_id=self.provider_id,
- generate_threatscore=True,
- generate_ens=True,
- )
-
- # Verify Provider fetched only ONCE (optimization)
- mock_provider.objects.get.assert_called_once_with(id=self.provider_id)
-
- # Verify aggregation called only ONCE (optimization)
- mock_aggregate_stats.assert_called_once_with(self.tenant_id, self.scan_id)
-
- # Verify both report generation functions were called with shared data
- assert mock_gen_threatscore.call_count == 1
- assert mock_gen_ens.call_count == 1
- assert mock_gen_nis2.call_count == 1
-
- # Verify provider_obj and requirement_statistics were passed to both
- threatscore_call_kwargs = mock_gen_threatscore.call_args[1]
- assert threatscore_call_kwargs["provider_obj"] == mock_provider_obj
- assert threatscore_call_kwargs["requirement_statistics"] == {
- "check-1": {"passed": 10, "total": 15}
- }
-
- ens_call_kwargs = mock_gen_ens.call_args[1]
- assert ens_call_kwargs["provider_obj"] == mock_provider_obj
- assert ens_call_kwargs["requirement_statistics"] == {
- "check-1": {"passed": 10, "total": 15}
- }
-
- nis2_call_kwargs = mock_gen_nis2.call_args[1]
- assert nis2_call_kwargs["provider_obj"] == mock_provider_obj
- assert nis2_call_kwargs["requirement_statistics"] == {
- "check-1": {"passed": 10, "total": 15}
- }
-
- # Verify both reports were uploaded successfully
- assert result["threatscore"]["upload"] is True
- assert result["threatscore"]["path"] == "s3://bucket/threatscore.pdf"
- assert result["ens"]["upload"] is True
- assert result["ens"]["path"] == "s3://bucket/ens.pdf"
- assert result["nis2"]["upload"] is True
- assert result["nis2"]["path"] == "s3://bucket/nis2.pdf"
-
- # Cleanup should remove the temporary parent directory when everything uploads
- mock_rmtree.assert_called_once()
- cleanup_path_arg = mock_rmtree.call_args[0][0]
- assert str(cleanup_path_arg) == "/tmp/reports"
-
- @patch("tasks.jobs.report._aggregate_requirement_statistics_from_database")
- @patch("tasks.jobs.report.Provider")
- @patch("tasks.jobs.report.ScanSummary")
- def test_skips_ens_for_unsupported_provider(
- self, mock_scan_summary, mock_provider, mock_aggregate_stats
- ):
- """Test that ENS report is skipped for M365 provider."""
- mock_scan_summary.objects.filter.return_value.exists.return_value = True
- mock_provider_obj = Mock()
- mock_provider_obj.uid = "test-uid"
- mock_provider_obj.provider = "m365" # Not supported for ENS
- mock_provider.objects.get.return_value = mock_provider_obj
-
- result = generate_compliance_reports_job(
- tenant_id=self.tenant_id,
- scan_id=self.scan_id,
- provider_id=self.provider_id,
- )
-
- # ENS should be skipped, only ThreatScore key should have error/status
- assert "ens" in result
- assert result["ens"]["upload"] is False
-
- def test_findings_cache_reuses_loaded_findings(self):
- """Test that findings cache properly reuses findings across calls."""
- # Create mock findings
- mock_finding1 = Mock()
- mock_finding1.check_id = "check-1"
- mock_finding2 = Mock()
- mock_finding2.check_id = "check-2"
- mock_finding3 = Mock()
- mock_finding3.check_id = "check-1"
-
- mock_output1 = Mock()
- mock_output1.check_id = "check-1"
- mock_output2 = Mock()
- mock_output2.check_id = "check-2"
- mock_output3 = Mock()
- mock_output3.check_id = "check-1"
-
- # Pre-populate cache
- findings_cache = {
- "check-1": [mock_output1, mock_output3],
- }
-
- with (
- patch("tasks.jobs.threatscore_utils.Finding") as mock_finding_class,
- patch("tasks.jobs.threatscore_utils.FindingOutput") as mock_finding_output,
- patch("tasks.jobs.threatscore_utils.rls_transaction"),
- patch("tasks.jobs.threatscore_utils.batched") as mock_batched,
- ):
- # Setup mocks
- mock_finding_class.all_objects.filter.return_value.order_by.return_value.iterator.return_value = [
- mock_finding2
- ]
- mock_batched.return_value = [([mock_finding2], True)]
- mock_finding_output.transform_api_finding.return_value = mock_output2
-
- mock_provider = Mock()
-
- # Call with cache containing check-1, requesting check-1 and check-2
- result = _load_findings_for_requirement_checks(
- tenant_id=self.tenant_id,
- scan_id=self.scan_id,
- check_ids=["check-1", "check-2"],
- prowler_provider=mock_provider,
- findings_cache=findings_cache,
- )
-
- # Verify check-1 was reused from cache (no DB query)
- assert len(result["check-1"]) == 2
- assert result["check-1"] == [mock_output1, mock_output3]
-
- # Verify check-2 was loaded from DB
- assert len(result["check-2"]) == 1
- assert result["check-2"][0] == mock_output2
-
- # Verify cache was updated with check-2
- assert "check-2" in findings_cache
- assert findings_cache["check-2"] == [mock_output2]
-
- # Verify DB was only queried for check-2 (not check-1)
- filter_call = mock_finding_class.all_objects.filter.call_args
- assert filter_call[1]["check_id__in"] == ["check-2"]
-
-
-class TestNIS2SectionChart:
- """Test suite for _create_nis2_section_chart function."""
-
- @pytest.fixture(autouse=True)
- def setup_matplotlib(self):
- """Setup matplotlib backend for tests."""
- matplotlib.use("Agg")
-
- def test_creates_chart_with_sections(self):
- """Verify chart is created with correct sections and compliance data."""
- # Mock requirement with NIS2 section attribute
- mock_attr = Mock()
- mock_attr.Section = (
- "1 POLICY ON THE SECURITY OF NETWORK AND INFORMATION SYSTEMS"
- )
-
- requirements_list = [
- {
- "id": "1.1.1.a",
- "description": "Test requirement",
- "attributes": {
- "passed_findings": 5,
- "total_findings": 10,
- "status": StatusChoices.FAIL,
- },
- }
- ]
-
- attributes_by_requirement_id = {
- "1.1.1.a": {
- "attributes": {
- "req_attributes": [mock_attr],
- }
- }
- }
-
- # Call function
- result = _create_nis2_section_chart(
- requirements_list, attributes_by_requirement_id
- )
-
- # Verify result is a BytesIO buffer
- assert isinstance(result, io.BytesIO)
- assert result.tell() > 0 # Buffer has content
-
- def test_handles_empty_requirements(self):
- """Verify chart handles empty requirements gracefully."""
- result = _create_nis2_section_chart([], {})
-
- # Verify result is still a valid BytesIO buffer
- assert isinstance(result, io.BytesIO)
-
- def test_calculates_compliance_percentage_correctly(self):
- """Verify compliance percentage calculation is correct."""
- mock_attr1 = Mock()
- mock_attr1.Section = "11 ACCESS CONTROL"
-
- mock_attr2 = Mock()
- mock_attr2.Section = "11 ACCESS CONTROL"
-
- requirements_list = [
- {
- "id": "11.1.1",
- "description": "Test 1",
- "attributes": {
- "passed_findings": 8,
- "total_findings": 10, # 80%
- "status": StatusChoices.PASS,
- },
- },
- {
- "id": "11.1.2",
- "description": "Test 2",
- "attributes": {
- "passed_findings": 10,
- "total_findings": 10, # 100%
- "status": StatusChoices.PASS,
- },
- },
- ]
-
- attributes_by_requirement_id = {
- "11.1.1": {"attributes": {"req_attributes": [mock_attr1]}},
- "11.1.2": {"attributes": {"req_attributes": [mock_attr2]}},
- }
-
- # Call function
- result = _create_nis2_section_chart(
- requirements_list, attributes_by_requirement_id
- )
-
- # Expected: (8+10)/(10+10) = 18/20 = 90%
- assert isinstance(result, io.BytesIO)
-
-
-class TestNIS2SubsectionTable:
- """Test suite for _create_nis2_subsection_table function."""
-
- def test_creates_table_with_subsections(self):
- """Verify table is created with correct subsection breakdown."""
- mock_attr1 = Mock()
- mock_attr1.SubSection = (
- "1.1 Policy on the security of network and information systems"
- )
-
- mock_attr2 = Mock()
- mock_attr2.SubSection = "1.2 Roles, responsibilities and authorities"
-
- requirements_list = [
- {
- "id": "1.1.1.a",
- "description": "Test 1",
- "attributes": {"status": StatusChoices.PASS},
- },
- {
- "id": "1.1.1.b",
- "description": "Test 2",
- "attributes": {"status": StatusChoices.FAIL},
- },
- {
- "id": "1.2.1",
- "description": "Test 3",
- "attributes": {"status": StatusChoices.MANUAL},
- },
- ]
-
- attributes_by_requirement_id = {
- "1.1.1.a": {"attributes": {"req_attributes": [mock_attr1]}},
- "1.1.1.b": {"attributes": {"req_attributes": [mock_attr1]}},
- "1.2.1": {"attributes": {"req_attributes": [mock_attr2]}},
- }
-
- # Call function
- result = _create_nis2_subsection_table(
- requirements_list, attributes_by_requirement_id
- )
-
- # Verify result is a Table
- assert isinstance(result, Table)
-
- # Verify table has correct structure (header + data rows)
- assert len(result._cellvalues) > 1 # At least header + 1 row
-
- # Verify header row
- assert result._cellvalues[0][0] == "SubSection"
- assert result._cellvalues[0][1] == "Total"
- assert result._cellvalues[0][2] == "Pass"
- assert result._cellvalues[0][3] == "Fail"
- assert result._cellvalues[0][4] == "Manual"
- assert result._cellvalues[0][5] == "Compliance %"
-
- def test_table_has_correct_styling(self):
- """Verify table has NIS2 styling applied."""
- mock_attr = Mock()
- mock_attr.SubSection = "Test SubSection"
-
- requirements_list = [
- {
- "id": "1.1.1.a",
- "description": "Test",
- "attributes": {"status": StatusChoices.PASS},
- }
- ]
-
- attributes_by_requirement_id = {
- "1.1.1.a": {"attributes": {"req_attributes": [mock_attr]}}
- }
-
- result = _create_nis2_subsection_table(
- requirements_list, attributes_by_requirement_id
- )
-
- # Verify styling is applied
- assert isinstance(result._cellStyles, list)
- assert len(result._cellStyles) > 0
-
-
-class TestNIS2RequirementsIndex:
- """Test suite for _create_nis2_requirements_index function."""
-
- def test_creates_hierarchical_index(self):
- """Verify index creates hierarchical structure by Section and SubSection."""
- pdf_styles = _create_pdf_styles()
-
- mock_attr1 = Mock()
- mock_attr1.Section = "1 POLICY ON SECURITY"
- mock_attr1.SubSection = "1.1 Policy definition"
-
- mock_attr2 = Mock()
- mock_attr2.Section = "1 POLICY ON SECURITY"
- mock_attr2.SubSection = "1.2 Roles and responsibilities"
-
- requirements_list = [
- {
- "id": "1.1.1.a",
- "description": "Define security policies",
- "attributes": {"status": StatusChoices.PASS},
- },
- {
- "id": "1.2.1",
- "description": "Assign security roles",
- "attributes": {"status": StatusChoices.FAIL},
- },
- ]
-
- attributes_by_requirement_id = {
- "1.1.1.a": {"attributes": {"req_attributes": [mock_attr1]}},
- "1.2.1": {"attributes": {"req_attributes": [mock_attr2]}},
- }
-
- # Call function
- result = _create_nis2_requirements_index(
- requirements_list,
- attributes_by_requirement_id,
- pdf_styles["h2"],
- pdf_styles["h3"],
- pdf_styles["normal"],
- )
-
- # Verify result is a list of elements
- assert isinstance(result, list)
- assert len(result) > 0
-
- def test_includes_status_indicators(self):
- """Verify index includes status indicators (✓, ✗, ⊙)."""
- pdf_styles = _create_pdf_styles()
-
- mock_attr = Mock()
- mock_attr.Section = "Test Section"
- mock_attr.SubSection = "Test SubSection"
-
- requirements_list = [
- {
- "id": "test.1",
- "description": "Passed requirement",
- "attributes": {"status": StatusChoices.PASS},
- },
- {
- "id": "test.2",
- "description": "Failed requirement",
- "attributes": {"status": StatusChoices.FAIL},
- },
- {
- "id": "test.3",
- "description": "Manual requirement",
- "attributes": {"status": StatusChoices.MANUAL},
- },
- ]
-
- attributes_by_requirement_id = {
- "test.1": {"attributes": {"req_attributes": [mock_attr]}},
- "test.2": {"attributes": {"req_attributes": [mock_attr]}},
- "test.3": {"attributes": {"req_attributes": [mock_attr]}},
- }
-
- result = _create_nis2_requirements_index(
- requirements_list,
- attributes_by_requirement_id,
- pdf_styles["h2"],
- pdf_styles["h3"],
- pdf_styles["normal"],
- )
-
- # Convert paragraphs to text and check for status indicators
- str(result)
- # Status indicators should be present in the generated content
- assert len(result) > 0
-
-
-@pytest.mark.django_db
-class TestGenerateNIS2Report:
- """Test suite for generate_nis2_report function."""
-
- @patch("tasks.jobs.report.initialize_prowler_provider")
- @patch("tasks.jobs.report.Provider.objects.get")
- @patch("tasks.jobs.report.ScanSummary.objects.filter")
- @patch("tasks.jobs.report.Compliance.get_bulk")
- @patch("tasks.jobs.report.SimpleDocTemplate")
- def test_generates_nis2_report_successfully(
- self,
- mock_doc,
- mock_compliance,
- mock_scan_summary,
- mock_provider_get,
- mock_init_provider,
- tenants_fixture,
- scans_fixture,
- ):
- """Verify NIS2 report generation completes successfully."""
- tenant = tenants_fixture[0]
- scan = scans_fixture[0]
-
- # Setup mocks
- mock_provider = Mock()
- mock_provider.provider = "aws"
- mock_provider.uid = "provider-123"
- mock_provider_get.return_value = mock_provider
-
- mock_scan_summary.return_value.exists.return_value = True
-
- # Mock compliance object
- mock_compliance_obj = Mock()
- mock_compliance_obj.Framework = "NIS2"
- mock_compliance_obj.Name = "Network and Information Security Directive"
- mock_compliance_obj.Version = ""
- mock_compliance_obj.Description = "NIS2 Directive"
- mock_compliance_obj.Requirements = []
-
- mock_compliance.return_value = {"nis2_aws": mock_compliance_obj}
-
- mock_init_provider.return_value = MagicMock()
- mock_doc_instance = Mock()
- mock_doc.return_value = mock_doc_instance
-
- expected_output_path = "/tmp/test_nis2.pdf"
-
- # Call function
- with patch("tasks.jobs.report.rls_transaction"):
- with patch(
- "tasks.jobs.report._aggregate_requirement_statistics_from_database"
- ) as mock_aggregate:
- mock_aggregate.return_value = {}
-
- with patch(
- "tasks.jobs.report._calculate_requirements_data_from_statistics"
- ) as mock_calculate:
- mock_calculate.return_value = ({}, [])
-
- # Should not raise exception
- generate_nis2_report(
- tenant_id=str(tenant.id),
- scan_id=str(scan.id),
- compliance_id="nis2_aws",
- output_path=expected_output_path,
- provider_id="provider-123",
- only_failed=True,
- )
-
- # Verify SimpleDocTemplate was initialized with correct output path
- mock_doc.assert_called_once()
- call_args = mock_doc.call_args
- assert call_args[0][0] == expected_output_path, (
- f"Expected SimpleDocTemplate to be called with {expected_output_path}, "
- f"but got {call_args[0][0]}"
- )
-
- # Verify PDF was built
- mock_doc_instance.build.assert_called_once()
-
- # Verify initialize_prowler_provider was called with the provider
- mock_init_provider.assert_called_once_with(mock_provider)
-
- def test_nis2_colors_are_defined(self):
- """Verify NIS2 specific colors are defined."""
- # Check that NIS2 primary color exists
- assert COLOR_NIS2_PRIMARY is not None
- assert isinstance(COLOR_NIS2_PRIMARY, colors.Color)
diff --git a/api/src/backend/tasks/tests/test_reports.py b/api/src/backend/tasks/tests/test_reports.py
new file mode 100644
index 0000000000..530e0af472
--- /dev/null
+++ b/api/src/backend/tasks/tests/test_reports.py
@@ -0,0 +1,410 @@
+import uuid
+from unittest.mock import Mock, patch
+
+import matplotlib
+import pytest
+from reportlab.lib import colors
+from tasks.jobs.report import generate_compliance_reports, generate_threatscore_report
+from tasks.jobs.reports import (
+ CHART_COLOR_GREEN_1,
+ CHART_COLOR_GREEN_2,
+ CHART_COLOR_ORANGE,
+ CHART_COLOR_RED,
+ CHART_COLOR_YELLOW,
+ COLOR_BLUE,
+ COLOR_ENS_ALTO,
+ COLOR_HIGH_RISK,
+ COLOR_LOW_RISK,
+ COLOR_MEDIUM_RISK,
+ COLOR_NIS2_PRIMARY,
+ COLOR_SAFE,
+ create_pdf_styles,
+ get_chart_color_for_percentage,
+ get_color_for_compliance,
+ get_color_for_risk_level,
+ get_color_for_weight,
+)
+from tasks.jobs.threatscore_utils import (
+ _aggregate_requirement_statistics_from_database,
+ _load_findings_for_requirement_checks,
+)
+
+from api.models import Finding, StatusChoices
+from prowler.lib.check.models import Severity
+
+matplotlib.use("Agg") # Use non-interactive backend for tests
+
+
+@pytest.mark.django_db
+class TestAggregateRequirementStatistics:
+ """Test suite for _aggregate_requirement_statistics_from_database function."""
+
+ def test_aggregates_findings_correctly(self, tenants_fixture, scans_fixture):
+ """Verify correct pass/total counts per check are aggregated from database."""
+ tenant = tenants_fixture[0]
+ scan = scans_fixture[0]
+
+ Finding.objects.create(
+ tenant_id=tenant.id,
+ scan=scan,
+ uid="finding-1",
+ check_id="check_1",
+ status=StatusChoices.PASS,
+ severity=Severity.high,
+ impact=Severity.high,
+ check_metadata={},
+ raw_result={},
+ )
+ Finding.objects.create(
+ tenant_id=tenant.id,
+ scan=scan,
+ uid="finding-2",
+ check_id="check_1",
+ status=StatusChoices.FAIL,
+ severity=Severity.high,
+ impact=Severity.high,
+ check_metadata={},
+ raw_result={},
+ )
+ Finding.objects.create(
+ tenant_id=tenant.id,
+ scan=scan,
+ uid="finding-3",
+ check_id="check_2",
+ status=StatusChoices.PASS,
+ severity=Severity.medium,
+ impact=Severity.medium,
+ check_metadata={},
+ raw_result={},
+ )
+
+ result = _aggregate_requirement_statistics_from_database(
+ str(tenant.id), str(scan.id)
+ )
+
+ assert "check_1" in result
+ assert result["check_1"]["passed"] == 1
+ assert result["check_1"]["total"] == 2
+
+ assert "check_2" in result
+ assert result["check_2"]["passed"] == 1
+ assert result["check_2"]["total"] == 1
+
+ def test_handles_empty_scan(self, tenants_fixture, scans_fixture):
+ """Verify empty result is returned for scan with no findings."""
+ tenant = tenants_fixture[0]
+ scan = scans_fixture[0]
+
+ result = _aggregate_requirement_statistics_from_database(
+ str(tenant.id), str(scan.id)
+ )
+
+ assert result == {}
+
+ def test_only_failed_findings(self, tenants_fixture, scans_fixture):
+ """Verify correct counts when all findings are FAIL."""
+ tenant = tenants_fixture[0]
+ scan = scans_fixture[0]
+
+ Finding.objects.create(
+ tenant_id=tenant.id,
+ scan=scan,
+ uid="finding-1",
+ check_id="check_1",
+ status=StatusChoices.FAIL,
+ severity=Severity.high,
+ impact=Severity.high,
+ check_metadata={},
+ raw_result={},
+ )
+ Finding.objects.create(
+ tenant_id=tenant.id,
+ scan=scan,
+ uid="finding-2",
+ check_id="check_1",
+ status=StatusChoices.FAIL,
+ severity=Severity.high,
+ impact=Severity.high,
+ check_metadata={},
+ raw_result={},
+ )
+
+ result = _aggregate_requirement_statistics_from_database(
+ str(tenant.id), str(scan.id)
+ )
+
+ assert result["check_1"]["passed"] == 0
+ assert result["check_1"]["total"] == 2
+
+ def test_multiple_findings_same_check(self, tenants_fixture, scans_fixture):
+ """Verify multiple findings for same check are correctly aggregated."""
+ tenant = tenants_fixture[0]
+ scan = scans_fixture[0]
+
+ for i in range(5):
+ Finding.objects.create(
+ tenant_id=tenant.id,
+ scan=scan,
+ uid=f"finding-{i}",
+ check_id="check_1",
+ status=StatusChoices.PASS if i % 2 == 0 else StatusChoices.FAIL,
+ severity=Severity.high,
+ impact=Severity.high,
+ check_metadata={},
+ raw_result={},
+ )
+
+ result = _aggregate_requirement_statistics_from_database(
+ str(tenant.id), str(scan.id)
+ )
+
+ assert result["check_1"]["passed"] == 3
+ assert result["check_1"]["total"] == 5
+
+ def test_mixed_statuses(self, tenants_fixture, scans_fixture):
+ """Verify MANUAL status is counted in total but not passed."""
+ tenant = tenants_fixture[0]
+ scan = scans_fixture[0]
+
+ Finding.objects.create(
+ tenant_id=tenant.id,
+ scan=scan,
+ uid="finding-1",
+ check_id="check_1",
+ status=StatusChoices.PASS,
+ severity=Severity.high,
+ impact=Severity.high,
+ check_metadata={},
+ raw_result={},
+ )
+ Finding.objects.create(
+ tenant_id=tenant.id,
+ scan=scan,
+ uid="finding-2",
+ check_id="check_1",
+ status=StatusChoices.MANUAL,
+ severity=Severity.high,
+ impact=Severity.high,
+ check_metadata={},
+ raw_result={},
+ )
+
+ result = _aggregate_requirement_statistics_from_database(
+ str(tenant.id), str(scan.id)
+ )
+
+ # MANUAL findings are excluded from the aggregation query
+ # since it only counts PASS and FAIL statuses
+ assert result["check_1"]["passed"] == 1
+ assert result["check_1"]["total"] == 1
+
+
+class TestColorHelperFunctions:
+ """Test suite for color helper functions."""
+
+ def test_get_color_for_risk_level_high(self):
+ """Test high risk level returns correct color."""
+ result = get_color_for_risk_level(5)
+ assert result == COLOR_HIGH_RISK
+
+ def test_get_color_for_risk_level_medium_high(self):
+ """Test risk level 4 returns high risk color."""
+ result = get_color_for_risk_level(4)
+ assert result == COLOR_HIGH_RISK # >= 4 is high risk
+
+ def test_get_color_for_risk_level_medium(self):
+ """Test risk level 3 returns medium risk color."""
+ result = get_color_for_risk_level(3)
+ assert result == COLOR_MEDIUM_RISK # >= 3 is medium risk
+
+ def test_get_color_for_risk_level_low(self):
+ """Test low risk level returns safe color."""
+ result = get_color_for_risk_level(1)
+ assert result == COLOR_SAFE # < 2 is safe
+
+ def test_get_color_for_weight_high(self):
+ """Test high weight returns correct color."""
+ result = get_color_for_weight(150)
+ assert result == COLOR_HIGH_RISK # > 100 is high risk
+
+ def test_get_color_for_weight_medium(self):
+ """Test medium weight returns low risk color."""
+ result = get_color_for_weight(100)
+ assert result == COLOR_LOW_RISK # 51-100 is low risk
+
+ def test_get_color_for_weight_low(self):
+ """Test low weight returns safe color."""
+ result = get_color_for_weight(50)
+ assert result == COLOR_SAFE # <= 50 is safe
+
+ def test_get_color_for_compliance_high(self):
+ """Test high compliance returns green color."""
+ result = get_color_for_compliance(85)
+ assert result == COLOR_SAFE
+
+ def test_get_color_for_compliance_medium(self):
+ """Test medium compliance returns yellow color."""
+ result = get_color_for_compliance(70)
+ assert result == COLOR_LOW_RISK
+
+ def test_get_color_for_compliance_low(self):
+ """Test low compliance returns red color."""
+ result = get_color_for_compliance(50)
+ assert result == COLOR_HIGH_RISK
+
+ def test_get_chart_color_for_percentage_excellent(self):
+ """Test excellent percentage returns correct chart color."""
+ result = get_chart_color_for_percentage(90)
+ assert result == CHART_COLOR_GREEN_1
+
+ def test_get_chart_color_for_percentage_good(self):
+ """Test good percentage returns correct chart color."""
+ result = get_chart_color_for_percentage(70)
+ assert result == CHART_COLOR_GREEN_2
+
+ def test_get_chart_color_for_percentage_fair(self):
+ """Test fair percentage returns correct chart color."""
+ result = get_chart_color_for_percentage(50)
+ assert result == CHART_COLOR_YELLOW
+
+ def test_get_chart_color_for_percentage_poor(self):
+ """Test poor percentage returns correct chart color."""
+ result = get_chart_color_for_percentage(30)
+ assert result == CHART_COLOR_ORANGE
+
+ def test_get_chart_color_for_percentage_critical(self):
+ """Test critical percentage returns correct chart color."""
+ result = get_chart_color_for_percentage(10)
+ assert result == CHART_COLOR_RED
+
+
+class TestPDFStylesCreation:
+ """Test suite for PDF styles creation."""
+
+ def test_create_pdf_styles_returns_dict(self):
+ """Test that create_pdf_styles returns a dictionary."""
+ result = create_pdf_styles()
+ assert isinstance(result, dict)
+
+ def test_create_pdf_styles_caches_result(self):
+ """Test that create_pdf_styles caches the result."""
+ result1 = create_pdf_styles()
+ result2 = create_pdf_styles()
+ assert result1 is result2
+
+ def test_pdf_styles_have_correct_keys(self):
+ """Test that PDF styles dictionary has expected keys."""
+ result = create_pdf_styles()
+ expected_keys = ["title", "h1", "h2", "h3", "normal", "normal_center"]
+ for key in expected_keys:
+ assert key in result
+
+
+@pytest.mark.django_db
+class TestLoadFindingsForChecks:
+ """Test suite for _load_findings_for_requirement_checks function."""
+
+ def test_empty_check_ids_returns_empty(self, tenants_fixture, providers_fixture):
+ """Test that empty check_ids list returns empty dict."""
+ tenant = tenants_fixture[0]
+
+ mock_prowler_provider = Mock()
+ mock_prowler_provider.identity.account = "test-account"
+
+ result = _load_findings_for_requirement_checks(
+ str(tenant.id), str(uuid.uuid4()), [], mock_prowler_provider
+ )
+
+ assert result == {}
+
+
+@pytest.mark.django_db
+class TestGenerateThreatscoreReportFunction:
+ """Test suite for generate_threatscore_report function."""
+
+ @patch("tasks.jobs.reports.base.initialize_prowler_provider")
+ def test_generate_threatscore_report_exception_handling(
+ self,
+ mock_initialize_provider,
+ tenants_fixture,
+ scans_fixture,
+ providers_fixture,
+ ):
+ """Test that exceptions during report generation are properly handled."""
+ tenant = tenants_fixture[0]
+ scan = scans_fixture[0]
+ provider = providers_fixture[0]
+
+ mock_initialize_provider.side_effect = Exception("Test exception")
+
+ with pytest.raises(Exception) as exc_info:
+ generate_threatscore_report(
+ tenant_id=str(tenant.id),
+ scan_id=str(scan.id),
+ compliance_id="prowler_threatscore_aws",
+ output_path="/tmp/test_report.pdf",
+ provider_id=str(provider.id),
+ )
+
+ assert "Test exception" in str(exc_info.value)
+
+
+@pytest.mark.django_db
+class TestGenerateComplianceReportsOptimized:
+ """Test suite for generate_compliance_reports function."""
+
+ @patch("tasks.jobs.report._upload_to_s3")
+ @patch("tasks.jobs.report.generate_threatscore_report")
+ @patch("tasks.jobs.report.generate_ens_report")
+ @patch("tasks.jobs.report.generate_nis2_report")
+ def test_no_findings_returns_early_for_both_reports(
+ self,
+ mock_nis2,
+ mock_ens,
+ mock_threatscore,
+ mock_upload,
+ tenants_fixture,
+ scans_fixture,
+ providers_fixture,
+ ):
+ """Test that function returns early when scan has no findings."""
+ tenant = tenants_fixture[0]
+ scan = scans_fixture[0]
+ provider = providers_fixture[0]
+
+ result = generate_compliance_reports(
+ tenant_id=str(tenant.id),
+ scan_id=str(scan.id),
+ provider_id=str(provider.id),
+ generate_threatscore=True,
+ generate_ens=True,
+ generate_nis2=True,
+ )
+
+ assert result["threatscore"]["upload"] is False
+ assert result["ens"]["upload"] is False
+ assert result["nis2"]["upload"] is False
+
+ mock_threatscore.assert_not_called()
+ mock_ens.assert_not_called()
+ mock_nis2.assert_not_called()
+
+
+class TestOptimizationImprovements:
+ """Test suite for optimization-related functionality."""
+
+ def test_chart_color_constants_are_strings(self):
+ """Verify chart color constants are valid hex color strings."""
+ assert CHART_COLOR_GREEN_1.startswith("#")
+ assert CHART_COLOR_GREEN_2.startswith("#")
+ assert CHART_COLOR_YELLOW.startswith("#")
+ assert CHART_COLOR_ORANGE.startswith("#")
+ assert CHART_COLOR_RED.startswith("#")
+
+ def test_color_constants_are_color_objects(self):
+ """Verify color constants are Color objects."""
+ assert isinstance(COLOR_BLUE, colors.Color)
+ assert isinstance(COLOR_HIGH_RISK, colors.Color)
+ assert isinstance(COLOR_SAFE, colors.Color)
+ assert isinstance(COLOR_ENS_ALTO, colors.Color)
+ assert isinstance(COLOR_NIS2_PRIMARY, colors.Color)
diff --git a/api/src/backend/tasks/tests/test_reports_base.py b/api/src/backend/tasks/tests/test_reports_base.py
new file mode 100644
index 0000000000..d2fda4f830
--- /dev/null
+++ b/api/src/backend/tasks/tests/test_reports_base.py
@@ -0,0 +1,1346 @@
+import io
+
+import pytest
+from reportlab.lib.units import inch
+from reportlab.platypus import Image, LongTable, Paragraph, Spacer, Table
+from tasks.jobs.reports import ( # Configuration; Colors; Components; Charts; Base
+ CHART_COLOR_GREEN_1,
+ CHART_COLOR_GREEN_2,
+ CHART_COLOR_ORANGE,
+ CHART_COLOR_RED,
+ CHART_COLOR_YELLOW,
+ COLOR_BLUE,
+ COLOR_DARK_GRAY,
+ COLOR_HIGH_RISK,
+ COLOR_LOW_RISK,
+ COLOR_MEDIUM_RISK,
+ COLOR_SAFE,
+ FRAMEWORK_REGISTRY,
+ BaseComplianceReportGenerator,
+ ColumnConfig,
+ ComplianceData,
+ FrameworkConfig,
+ RequirementData,
+ create_badge,
+ create_data_table,
+ create_findings_table,
+ create_horizontal_bar_chart,
+ create_info_table,
+ create_multi_badge_row,
+ create_pdf_styles,
+ create_pie_chart,
+ create_radar_chart,
+ create_risk_component,
+ create_section_header,
+ create_stacked_bar_chart,
+ create_status_badge,
+ create_summary_table,
+ create_vertical_bar_chart,
+ get_chart_color_for_percentage,
+ get_color_for_compliance,
+ get_color_for_risk_level,
+ get_color_for_weight,
+ get_framework_config,
+ get_status_color,
+)
+
+# =============================================================================
+# Configuration Tests
+# =============================================================================
+
+
+class TestFrameworkConfig:
+ """Tests for FrameworkConfig dataclass."""
+
+ def test_framework_config_creation(self):
+ """Test creating a FrameworkConfig with required fields."""
+ config = FrameworkConfig(
+ name="test_framework",
+ display_name="Test Framework",
+ )
+
+ assert config.name == "test_framework"
+ assert config.display_name == "Test Framework"
+ assert config.logo_filename is None
+ assert config.language == "en"
+ assert config.has_risk_levels is False
+
+ def test_framework_config_with_all_fields(self):
+ """Test creating a FrameworkConfig with all fields."""
+ config = FrameworkConfig(
+ name="custom",
+ display_name="Custom Framework",
+ logo_filename="custom_logo.png",
+ primary_color=COLOR_BLUE,
+ secondary_color=COLOR_SAFE,
+ attribute_fields=["Section", "SubSection"],
+ sections=["1. Security", "2. Compliance"],
+ language="es",
+ has_risk_levels=True,
+ has_dimensions=True,
+ has_niveles=True,
+ has_weight=True,
+ )
+
+ assert config.name == "custom"
+ assert config.logo_filename == "custom_logo.png"
+ assert config.language == "es"
+ assert config.has_risk_levels is True
+ assert config.has_dimensions is True
+ assert len(config.attribute_fields) == 2
+ assert len(config.sections) == 2
+
+
+class TestFrameworkRegistry:
+ """Tests for the framework registry."""
+
+ def test_registry_contains_threatscore(self):
+ """Test that ThreatScore is in the registry."""
+ assert "prowler_threatscore" in FRAMEWORK_REGISTRY
+ config = FRAMEWORK_REGISTRY["prowler_threatscore"]
+ assert config.has_risk_levels is True
+ assert config.has_weight is True
+
+ def test_registry_contains_ens(self):
+ """Test that ENS is in the registry."""
+ assert "ens" in FRAMEWORK_REGISTRY
+ config = FRAMEWORK_REGISTRY["ens"]
+ assert config.language == "es"
+ assert config.has_niveles is True
+ assert config.has_dimensions is True
+
+ def test_registry_contains_nis2(self):
+ """Test that NIS2 is in the registry."""
+ assert "nis2" in FRAMEWORK_REGISTRY
+ config = FRAMEWORK_REGISTRY["nis2"]
+ assert config.language == "en"
+
+ def test_get_framework_config_threatscore(self):
+ """Test getting ThreatScore config."""
+ config = get_framework_config("prowler_threatscore_aws")
+ assert config is not None
+ assert config.name == "prowler_threatscore"
+
+ def test_get_framework_config_ens(self):
+ """Test getting ENS config."""
+ config = get_framework_config("ens_rd2022_aws")
+ assert config is not None
+ assert config.name == "ens"
+
+ def test_get_framework_config_nis2(self):
+ """Test getting NIS2 config."""
+ config = get_framework_config("nis2_aws")
+ assert config is not None
+ assert config.name == "nis2"
+
+ def test_get_framework_config_unknown(self):
+ """Test getting unknown framework returns None."""
+ config = get_framework_config("unknown_framework")
+ assert config is None
+
+
+# =============================================================================
+# Color Helper Tests
+# =============================================================================
+
+
+class TestColorHelpers:
+ """Tests for color helper functions."""
+
+ def test_get_color_for_risk_level_high(self):
+ """Test high risk level returns red."""
+ assert get_color_for_risk_level(5) == COLOR_HIGH_RISK
+ assert get_color_for_risk_level(4) == COLOR_HIGH_RISK
+
+ def test_get_color_for_risk_level_very_high(self):
+ """Test very high risk level (>5) still returns high risk color."""
+ assert get_color_for_risk_level(10) == COLOR_HIGH_RISK
+ assert get_color_for_risk_level(100) == COLOR_HIGH_RISK
+
+ def test_get_color_for_risk_level_medium(self):
+ """Test medium risk level returns orange."""
+ assert get_color_for_risk_level(3) == COLOR_MEDIUM_RISK
+
+ def test_get_color_for_risk_level_low(self):
+ """Test low risk level returns yellow."""
+ assert get_color_for_risk_level(2) == COLOR_LOW_RISK
+
+ def test_get_color_for_risk_level_safe(self):
+ """Test safe risk level returns green."""
+ assert get_color_for_risk_level(1) == COLOR_SAFE
+ assert get_color_for_risk_level(0) == COLOR_SAFE
+
+ def test_get_color_for_risk_level_negative(self):
+ """Test negative risk level returns safe color."""
+ assert get_color_for_risk_level(-1) == COLOR_SAFE
+
+ def test_get_color_for_weight_high(self):
+ """Test high weight returns red."""
+ assert get_color_for_weight(150) == COLOR_HIGH_RISK
+ assert get_color_for_weight(101) == COLOR_HIGH_RISK
+
+ def test_get_color_for_weight_medium(self):
+ """Test medium weight returns yellow."""
+ assert get_color_for_weight(100) == COLOR_LOW_RISK
+ assert get_color_for_weight(51) == COLOR_LOW_RISK
+
+ def test_get_color_for_weight_low(self):
+ """Test low weight returns green."""
+ assert get_color_for_weight(50) == COLOR_SAFE
+ assert get_color_for_weight(0) == COLOR_SAFE
+
+ def test_get_color_for_compliance_high(self):
+ """Test high compliance returns green."""
+ assert get_color_for_compliance(100) == COLOR_SAFE
+ assert get_color_for_compliance(80) == COLOR_SAFE
+
+ def test_get_color_for_compliance_medium(self):
+ """Test medium compliance returns yellow."""
+ assert get_color_for_compliance(79) == COLOR_LOW_RISK
+ assert get_color_for_compliance(60) == COLOR_LOW_RISK
+
+ def test_get_color_for_compliance_low(self):
+ """Test low compliance returns red."""
+ assert get_color_for_compliance(59) == COLOR_HIGH_RISK
+ assert get_color_for_compliance(0) == COLOR_HIGH_RISK
+
+ def test_get_status_color_pass(self):
+ """Test PASS status returns green."""
+ assert get_status_color("PASS") == COLOR_SAFE
+ assert get_status_color("pass") == COLOR_SAFE
+
+ def test_get_status_color_fail(self):
+ """Test FAIL status returns red."""
+ assert get_status_color("FAIL") == COLOR_HIGH_RISK
+ assert get_status_color("fail") == COLOR_HIGH_RISK
+
+ def test_get_status_color_manual(self):
+ """Test MANUAL status returns gray."""
+ assert get_status_color("MANUAL") == COLOR_DARK_GRAY
+
+
+class TestChartColorHelpers:
+ """Tests for chart color functions."""
+
+ def test_chart_color_for_high_percentage(self):
+ """Test high percentage returns green."""
+ assert get_chart_color_for_percentage(100) == CHART_COLOR_GREEN_1
+ assert get_chart_color_for_percentage(80) == CHART_COLOR_GREEN_1
+
+ def test_chart_color_for_medium_high_percentage(self):
+ """Test medium-high percentage returns light green."""
+ assert get_chart_color_for_percentage(79) == CHART_COLOR_GREEN_2
+ assert get_chart_color_for_percentage(60) == CHART_COLOR_GREEN_2
+
+ def test_chart_color_for_medium_percentage(self):
+ """Test medium percentage returns yellow."""
+ assert get_chart_color_for_percentage(59) == CHART_COLOR_YELLOW
+ assert get_chart_color_for_percentage(40) == CHART_COLOR_YELLOW
+
+ def test_chart_color_for_medium_low_percentage(self):
+ """Test medium-low percentage returns orange."""
+ assert get_chart_color_for_percentage(39) == CHART_COLOR_ORANGE
+ assert get_chart_color_for_percentage(20) == CHART_COLOR_ORANGE
+
+ def test_chart_color_for_low_percentage(self):
+ """Test low percentage returns red."""
+ assert get_chart_color_for_percentage(19) == CHART_COLOR_RED
+ assert get_chart_color_for_percentage(0) == CHART_COLOR_RED
+
+ def test_chart_color_boundary_values(self):
+ """Test chart color at exact boundary values."""
+ # Exact boundaries
+ assert get_chart_color_for_percentage(80) == CHART_COLOR_GREEN_1
+ assert get_chart_color_for_percentage(60) == CHART_COLOR_GREEN_2
+ assert get_chart_color_for_percentage(40) == CHART_COLOR_YELLOW
+ assert get_chart_color_for_percentage(20) == CHART_COLOR_ORANGE
+
+
+# =============================================================================
+# Component Tests
+# =============================================================================
+
+
+class TestBadgeComponents:
+ """Tests for badge component functions."""
+
+ def test_create_badge_returns_table(self):
+ """Test create_badge returns a Table object."""
+ badge = create_badge("Test", COLOR_BLUE)
+ assert isinstance(badge, Table)
+
+ def test_create_badge_with_custom_width(self):
+ """Test create_badge with custom width."""
+ badge = create_badge("Test", COLOR_BLUE, width=2 * inch)
+ assert badge is not None
+
+ def test_create_status_badge_pass(self):
+ """Test status badge for PASS."""
+ badge = create_status_badge("PASS")
+ assert isinstance(badge, Table)
+
+ def test_create_status_badge_fail(self):
+ """Test status badge for FAIL."""
+ badge = create_status_badge("FAIL")
+ assert badge is not None
+
+ def test_create_multi_badge_row_with_badges(self):
+ """Test multi-badge row with data."""
+ badges = [
+ ("A", COLOR_BLUE),
+ ("B", COLOR_SAFE),
+ ]
+ table = create_multi_badge_row(badges)
+ assert isinstance(table, Table)
+
+ def test_create_multi_badge_row_empty(self):
+ """Test multi-badge row with empty list."""
+ table = create_multi_badge_row([])
+ assert table is not None
+
+
+class TestRiskComponent:
+ """Tests for risk component function."""
+
+ def test_create_risk_component_returns_table(self):
+ """Test risk component returns a Table."""
+ component = create_risk_component(risk_level=4, weight=100, score=50)
+ assert isinstance(component, Table)
+
+ def test_create_risk_component_high_risk(self):
+ """Test risk component with high risk level."""
+ component = create_risk_component(risk_level=5, weight=150, score=100)
+ assert component is not None
+
+ def test_create_risk_component_low_risk(self):
+ """Test risk component with low risk level."""
+ component = create_risk_component(risk_level=1, weight=10, score=10)
+ assert component is not None
+
+
+class TestTableComponents:
+ """Tests for table component functions."""
+
+ def test_create_info_table(self):
+ """Test info table creation."""
+ rows = [
+ ("Label 1:", "Value 1"),
+ ("Label 2:", "Value 2"),
+ ]
+ table = create_info_table(rows)
+ assert isinstance(table, Table)
+
+ def test_create_info_table_with_custom_widths(self):
+ """Test info table with custom column widths."""
+ rows = [("Test:", "Value")]
+ table = create_info_table(rows, label_width=3 * inch, value_width=3 * inch)
+ assert table is not None
+
+ def test_create_data_table(self):
+ """Test data table creation."""
+ data = [
+ {"name": "Item 1", "value": "100"},
+ {"name": "Item 2", "value": "200"},
+ ]
+ columns = [
+ ColumnConfig("Name", 2 * inch, "name"),
+ ColumnConfig("Value", 1 * inch, "value"),
+ ]
+ table = create_data_table(data, columns)
+ assert isinstance(table, Table)
+
+ def test_create_data_table_with_callable_field(self):
+ """Test data table with callable field."""
+ data = [{"raw_value": 100}]
+ columns = [
+ ColumnConfig("Formatted", 2 * inch, lambda x: f"${x['raw_value']}"),
+ ]
+ table = create_data_table(data, columns)
+ assert table is not None
+
+ def test_create_summary_table(self):
+ """Test summary table creation."""
+ table = create_summary_table(
+ label="Score:",
+ value="85%",
+ value_color=COLOR_SAFE,
+ )
+ assert isinstance(table, Table)
+
+ def test_create_summary_table_with_custom_widths(self):
+ """Test summary table with custom widths."""
+ table = create_summary_table(
+ label="ThreatScore:",
+ value="92.5%",
+ value_color=COLOR_SAFE,
+ label_width=3 * inch,
+ value_width=2.5 * inch,
+ )
+ assert isinstance(table, Table)
+
+
+class TestFindingsTable:
+ """Tests for findings table component."""
+
+ def test_create_findings_table_with_dicts(self):
+ """Test findings table creation with dict data."""
+ findings = [
+ {
+ "title": "Finding 1",
+ "resource_name": "resource-1",
+ "severity": "HIGH",
+ "status": "FAIL",
+ "region": "us-east-1",
+ },
+ {
+ "title": "Finding 2",
+ "resource_name": "resource-2",
+ "severity": "LOW",
+ "status": "PASS",
+ "region": "eu-west-1",
+ },
+ ]
+ table = create_findings_table(findings)
+ assert isinstance(table, Table)
+
+ def test_create_findings_table_with_custom_columns(self):
+ """Test findings table with custom column configuration."""
+ findings = [{"name": "Test", "value": "100"}]
+ columns = [
+ ColumnConfig("Name", 2 * inch, "name"),
+ ColumnConfig("Value", 1 * inch, "value"),
+ ]
+ table = create_findings_table(findings, columns=columns)
+ assert table is not None
+
+ def test_create_findings_table_empty(self):
+ """Test findings table with empty list."""
+ table = create_findings_table([])
+ assert table is not None
+
+
+class TestSectionHeader:
+ """Tests for section header component."""
+
+ def test_create_section_header_with_spacer(self):
+ """Test section header with spacer."""
+ styles = create_pdf_styles()
+ elements = create_section_header("Test Header", styles["h1"])
+
+ assert len(elements) == 2
+ assert isinstance(elements[0], Paragraph)
+ assert isinstance(elements[1], Spacer)
+
+ def test_create_section_header_without_spacer(self):
+ """Test section header without spacer."""
+ styles = create_pdf_styles()
+ elements = create_section_header("Test Header", styles["h1"], add_spacer=False)
+
+ assert len(elements) == 1
+ assert isinstance(elements[0], Paragraph)
+
+ def test_create_section_header_custom_spacer_height(self):
+ """Test section header with custom spacer height."""
+ styles = create_pdf_styles()
+ elements = create_section_header("Test Header", styles["h2"], spacer_height=0.5)
+
+ assert len(elements) == 2
+
+
+# =============================================================================
+# Chart Tests
+# =============================================================================
+
+
+class TestChartCreation:
+ """Tests for chart creation functions."""
+
+ def test_create_vertical_bar_chart(self):
+ """Test vertical bar chart creation."""
+ buffer = create_vertical_bar_chart(
+ labels=["A", "B", "C"],
+ values=[80, 60, 40],
+ )
+ assert isinstance(buffer, io.BytesIO)
+ assert buffer.getvalue() # Not empty
+
+ def test_create_vertical_bar_chart_with_options(self):
+ """Test vertical bar chart with custom options."""
+ buffer = create_vertical_bar_chart(
+ labels=["Section 1", "Section 2"],
+ values=[90, 70],
+ ylabel="Compliance",
+ title="Test Chart",
+ figsize=(8, 6),
+ )
+ assert isinstance(buffer, io.BytesIO)
+
+ def test_create_horizontal_bar_chart(self):
+ """Test horizontal bar chart creation."""
+ buffer = create_horizontal_bar_chart(
+ labels=["Category 1", "Category 2", "Category 3"],
+ values=[85, 65, 45],
+ )
+ assert isinstance(buffer, io.BytesIO)
+ assert buffer.getvalue()
+
+ def test_create_horizontal_bar_chart_with_options(self):
+ """Test horizontal bar chart with custom options."""
+ buffer = create_horizontal_bar_chart(
+ labels=["A", "B"],
+ values=[100, 50],
+ xlabel="Percentage",
+ title="Custom Chart",
+ )
+ assert isinstance(buffer, io.BytesIO)
+
+ def test_create_radar_chart(self):
+ """Test radar chart creation."""
+ buffer = create_radar_chart(
+ labels=["Dim 1", "Dim 2", "Dim 3", "Dim 4", "Dim 5"],
+ values=[80, 70, 60, 90, 75],
+ )
+ assert isinstance(buffer, io.BytesIO)
+ assert buffer.getvalue()
+
+ def test_create_radar_chart_with_options(self):
+ """Test radar chart with custom options."""
+ buffer = create_radar_chart(
+ labels=["A", "B", "C"],
+ values=[50, 60, 70],
+ color="#FF0000",
+ fill_alpha=0.5,
+ title="Custom Radar",
+ )
+ assert isinstance(buffer, io.BytesIO)
+
+ def test_create_pie_chart(self):
+ """Test pie chart creation."""
+ buffer = create_pie_chart(
+ labels=["Pass", "Fail"],
+ values=[80, 20],
+ )
+ assert isinstance(buffer, io.BytesIO)
+ assert buffer.getvalue()
+
+ def test_create_pie_chart_with_options(self):
+ """Test pie chart with custom options."""
+ buffer = create_pie_chart(
+ labels=["Pass", "Fail", "Manual"],
+ values=[60, 30, 10],
+ colors=["#4CAF50", "#F44336", "#9E9E9E"],
+ title="Status Distribution",
+ autopct="%1.0f%%",
+ )
+ assert isinstance(buffer, io.BytesIO)
+
+ def test_create_stacked_bar_chart(self):
+ """Test stacked bar chart creation."""
+ buffer = create_stacked_bar_chart(
+ labels=["Section 1", "Section 2", "Section 3"],
+ data_series={
+ "Pass": [8, 6, 4],
+ "Fail": [2, 4, 6],
+ },
+ )
+ assert isinstance(buffer, io.BytesIO)
+ assert buffer.getvalue()
+
+ def test_create_stacked_bar_chart_with_options(self):
+ """Test stacked bar chart with custom options."""
+ buffer = create_stacked_bar_chart(
+ labels=["A", "B"],
+ data_series={
+ "Pass": [10, 5],
+ "Fail": [2, 3],
+ "Manual": [1, 2],
+ },
+ colors={
+ "Pass": "#4CAF50",
+ "Fail": "#F44336",
+ "Manual": "#9E9E9E",
+ },
+ xlabel="Categories",
+ ylabel="Requirements",
+ title="Requirements by Status",
+ )
+ assert isinstance(buffer, io.BytesIO)
+
+ def test_create_stacked_bar_chart_without_legend(self):
+ """Test stacked bar chart without legend."""
+ buffer = create_stacked_bar_chart(
+ labels=["X", "Y"],
+ data_series={"A": [1, 2]},
+ show_legend=False,
+ )
+ assert isinstance(buffer, io.BytesIO)
+
+ def test_create_vertical_bar_chart_without_labels(self):
+ """Test vertical bar chart without value labels."""
+ buffer = create_vertical_bar_chart(
+ labels=["A", "B"],
+ values=[50, 75],
+ show_labels=False,
+ )
+ assert isinstance(buffer, io.BytesIO)
+
+ def test_create_vertical_bar_chart_with_explicit_colors(self):
+ """Test vertical bar chart with explicit color list."""
+ buffer = create_vertical_bar_chart(
+ labels=["Pass", "Fail"],
+ values=[80, 20],
+ colors=["#4CAF50", "#F44336"],
+ )
+ assert isinstance(buffer, io.BytesIO)
+
+ def test_create_horizontal_bar_chart_auto_figsize(self):
+ """Test horizontal bar chart auto-calculates figure size for many items."""
+ labels = [f"Item {i}" for i in range(20)]
+ values = [50 + i * 2 for i in range(20)]
+ buffer = create_horizontal_bar_chart(
+ labels=labels,
+ values=values,
+ )
+ assert isinstance(buffer, io.BytesIO)
+
+ def test_create_horizontal_bar_chart_with_explicit_colors(self):
+ """Test horizontal bar chart with explicit colors."""
+ buffer = create_horizontal_bar_chart(
+ labels=["A", "B", "C"],
+ values=[80, 60, 40],
+ colors=["#4CAF50", "#FFEB3B", "#F44336"],
+ )
+ assert isinstance(buffer, io.BytesIO)
+
+ def test_create_radar_chart_with_custom_ticks(self):
+ """Test radar chart with custom y-axis ticks."""
+ buffer = create_radar_chart(
+ labels=["A", "B", "C", "D"],
+ values=[25, 50, 75, 100],
+ y_ticks=[0, 25, 50, 75, 100],
+ )
+ assert isinstance(buffer, io.BytesIO)
+
+
+# =============================================================================
+# Data Class Tests
+# =============================================================================
+
+
+class TestDataClasses:
+ """Tests for data classes."""
+
+ def test_requirement_data_creation(self):
+ """Test RequirementData creation."""
+ req = RequirementData(
+ id="REQ-001",
+ description="Test requirement",
+ status="PASS",
+ passed_findings=10,
+ total_findings=10,
+ )
+ assert req.id == "REQ-001"
+ assert req.status == "PASS"
+ assert req.passed_findings == 10
+
+ def test_requirement_data_with_failed_findings(self):
+ """Test RequirementData with failed findings."""
+ req = RequirementData(
+ id="REQ-002",
+ description="Failed requirement",
+ status="FAIL",
+ passed_findings=3,
+ failed_findings=7,
+ total_findings=10,
+ )
+ assert req.failed_findings == 7
+ assert req.total_findings == 10
+
+ def test_requirement_data_defaults(self):
+ """Test RequirementData default values."""
+ req = RequirementData(
+ id="REQ-003",
+ description="Minimal requirement",
+ status="MANUAL",
+ )
+ assert req.passed_findings == 0
+ assert req.failed_findings == 0
+ assert req.total_findings == 0
+
+ def test_compliance_data_creation(self):
+ """Test ComplianceData creation."""
+ data = ComplianceData(
+ tenant_id="tenant-123",
+ scan_id="scan-456",
+ provider_id="provider-789",
+ compliance_id="test_compliance",
+ framework="Test",
+ name="Test Compliance",
+ version="1.0",
+ description="Test description",
+ )
+ assert data.tenant_id == "tenant-123"
+ assert data.framework == "Test"
+ assert data.requirements == []
+
+ def test_compliance_data_with_requirements(self):
+ """Test ComplianceData with requirements list."""
+ reqs = [
+ RequirementData(id="R1", description="Req 1", status="PASS"),
+ RequirementData(id="R2", description="Req 2", status="FAIL"),
+ ]
+ data = ComplianceData(
+ tenant_id="t1",
+ scan_id="s1",
+ provider_id="p1",
+ compliance_id="c1",
+ framework="Test",
+ name="Test",
+ version="1.0",
+ description="",
+ requirements=reqs,
+ )
+ assert len(data.requirements) == 2
+ assert data.requirements[0].id == "R1"
+
+ def test_compliance_data_with_attributes(self):
+ """Test ComplianceData with attributes dictionary."""
+ data = ComplianceData(
+ tenant_id="t1",
+ scan_id="s1",
+ provider_id="p1",
+ compliance_id="c1",
+ framework="Test",
+ name="Test",
+ version="1.0",
+ description="",
+ attributes_by_requirement_id={
+ "R1": {"attributes": {"key": "value"}},
+ },
+ )
+ assert "R1" in data.attributes_by_requirement_id
+ assert data.attributes_by_requirement_id["R1"]["attributes"]["key"] == "value"
+
+
+# =============================================================================
+# PDF Styles Tests
+# =============================================================================
+
+
+class TestPDFStyles:
+ """Tests for PDF styles."""
+
+ def test_create_pdf_styles_returns_dict(self):
+ """Test that create_pdf_styles returns a dictionary."""
+ styles = create_pdf_styles()
+ assert isinstance(styles, dict)
+
+ def test_create_pdf_styles_has_required_keys(self):
+ """Test that styles dict has all required keys."""
+ styles = create_pdf_styles()
+ required_keys = ["title", "h1", "h2", "h3", "normal", "normal_center"]
+ for key in required_keys:
+ assert key in styles
+
+ def test_create_pdf_styles_caches_result(self):
+ """Test that styles are cached."""
+ styles1 = create_pdf_styles()
+ styles2 = create_pdf_styles()
+ assert styles1 is styles2
+
+
+# =============================================================================
+# Base Generator Tests
+# =============================================================================
+
+
+class TestBaseComplianceReportGenerator:
+ """Tests for BaseComplianceReportGenerator."""
+
+ def test_cannot_instantiate_directly(self):
+ """Test that base class cannot be instantiated directly."""
+ config = FrameworkConfig(name="test", display_name="Test")
+ with pytest.raises(TypeError):
+ BaseComplianceReportGenerator(config)
+
+ def test_concrete_implementation(self):
+ """Test that a concrete implementation can be created."""
+
+ class ConcreteGenerator(BaseComplianceReportGenerator):
+ def create_executive_summary(self, data):
+ return []
+
+ def create_charts_section(self, data):
+ return []
+
+ def create_requirements_index(self, data):
+ return []
+
+ config = FrameworkConfig(name="test", display_name="Test")
+ generator = ConcreteGenerator(config)
+ assert generator.config.name == "test"
+ assert generator.styles is not None
+
+ def test_get_footer_text_english(self):
+ """Test footer text in English."""
+
+ class ConcreteGenerator(BaseComplianceReportGenerator):
+ def create_executive_summary(self, data):
+ return []
+
+ def create_charts_section(self, data):
+ return []
+
+ def create_requirements_index(self, data):
+ return []
+
+ config = FrameworkConfig(name="test", display_name="Test", language="en")
+ generator = ConcreteGenerator(config)
+ left, right = generator.get_footer_text(1)
+ assert left == "Page 1"
+ assert right == "Powered by Prowler"
+
+ def test_get_footer_text_spanish(self):
+ """Test footer text in Spanish."""
+
+ class ConcreteGenerator(BaseComplianceReportGenerator):
+ def create_executive_summary(self, data):
+ return []
+
+ def create_charts_section(self, data):
+ return []
+
+ def create_requirements_index(self, data):
+ return []
+
+ config = FrameworkConfig(name="test", display_name="Test", language="es")
+ generator = ConcreteGenerator(config)
+ left, right = generator.get_footer_text(1)
+ assert left == "Página 1"
+
+
+class TestBuildInfoRows:
+ """Tests for _build_info_rows helper method."""
+
+ def _create_generator(self, language="en"):
+ """Create a concrete generator for testing."""
+
+ class ConcreteGenerator(BaseComplianceReportGenerator):
+ def create_executive_summary(self, data):
+ return []
+
+ def create_charts_section(self, data):
+ return []
+
+ def create_requirements_index(self, data):
+ return []
+
+ config = FrameworkConfig(name="test", display_name="Test", language=language)
+ return ConcreteGenerator(config)
+
+ def test_build_info_rows_english(self):
+ """Test info rows are built with English labels."""
+ generator = self._create_generator(language="en")
+ data = ComplianceData(
+ tenant_id="t1",
+ scan_id="scan-123",
+ provider_id="p1",
+ compliance_id="test_compliance",
+ framework="Test Framework",
+ name="Test Name",
+ version="1.0",
+ description="Test description",
+ )
+
+ rows = generator._build_info_rows(data, language="en")
+
+ assert ("Framework:", "Test Framework") in rows
+ assert ("Name:", "Test Name") in rows
+ assert ("Version:", "1.0") in rows
+ assert ("Scan ID:", "scan-123") in rows
+ assert ("Description:", "Test description") in rows
+
+ def test_build_info_rows_spanish(self):
+ """Test info rows are built with Spanish labels."""
+ generator = self._create_generator(language="es")
+ data = ComplianceData(
+ tenant_id="t1",
+ scan_id="scan-123",
+ provider_id="p1",
+ compliance_id="test_compliance",
+ framework="Test Framework",
+ name="Test Name",
+ version="1.0",
+ description="Test description",
+ )
+
+ rows = generator._build_info_rows(data, language="es")
+
+ assert ("Framework:", "Test Framework") in rows
+ assert ("Nombre:", "Test Name") in rows
+ assert ("Versión:", "1.0") in rows
+ assert ("Scan ID:", "scan-123") in rows
+ assert ("Descripción:", "Test description") in rows
+
+ def test_build_info_rows_with_provider(self):
+ """Test info rows include provider info when available."""
+ from unittest.mock import Mock
+
+ generator = self._create_generator(language="en")
+
+ mock_provider = Mock()
+ mock_provider.provider = "aws"
+ mock_provider.uid = "123456789012"
+ mock_provider.alias = "my-account"
+
+ data = ComplianceData(
+ tenant_id="t1",
+ scan_id="scan-123",
+ provider_id="p1",
+ compliance_id="test_compliance",
+ framework="Test",
+ name="Test",
+ version="1.0",
+ description="",
+ provider_obj=mock_provider,
+ )
+
+ rows = generator._build_info_rows(data, language="en")
+
+ assert ("Provider:", "AWS") in rows
+ assert ("Account ID:", "123456789012") in rows
+ assert ("Alias:", "my-account") in rows
+
+ def test_build_info_rows_with_provider_spanish(self):
+ """Test provider info uses Spanish labels."""
+ from unittest.mock import Mock
+
+ generator = self._create_generator(language="es")
+
+ mock_provider = Mock()
+ mock_provider.provider = "azure"
+ mock_provider.uid = "subscription-id"
+ mock_provider.alias = "mi-suscripcion"
+
+ data = ComplianceData(
+ tenant_id="t1",
+ scan_id="scan-123",
+ provider_id="p1",
+ compliance_id="test_compliance",
+ framework="Test",
+ name="Test",
+ version="1.0",
+ description="",
+ provider_obj=mock_provider,
+ )
+
+ rows = generator._build_info_rows(data, language="es")
+
+ assert ("Proveedor:", "AZURE") in rows
+ assert ("Account ID:", "subscription-id") in rows
+ assert ("Alias:", "mi-suscripcion") in rows
+
+ def test_build_info_rows_without_provider(self):
+ """Test info rows work without provider info."""
+ generator = self._create_generator(language="en")
+ data = ComplianceData(
+ tenant_id="t1",
+ scan_id="scan-123",
+ provider_id="p1",
+ compliance_id="test_compliance",
+ framework="Test",
+ name="Test",
+ version="1.0",
+ description="",
+ provider_obj=None,
+ )
+
+ rows = generator._build_info_rows(data, language="en")
+
+ # Provider info should not be present
+ labels = [label for label, _ in rows]
+ assert "Provider:" not in labels
+ assert "Account ID:" not in labels
+ assert "Alias:" not in labels
+
+ def test_build_info_rows_provider_with_missing_fields(self):
+ """Test provider info handles None values gracefully."""
+ from unittest.mock import Mock
+
+ generator = self._create_generator(language="en")
+
+ mock_provider = Mock()
+ mock_provider.provider = "gcp"
+ mock_provider.uid = None
+ mock_provider.alias = None
+
+ data = ComplianceData(
+ tenant_id="t1",
+ scan_id="scan-123",
+ provider_id="p1",
+ compliance_id="test_compliance",
+ framework="Test",
+ name="Test",
+ version="1.0",
+ description="",
+ provider_obj=mock_provider,
+ )
+
+ rows = generator._build_info_rows(data, language="en")
+
+ assert ("Provider:", "GCP") in rows
+ assert ("Account ID:", "N/A") in rows
+ assert ("Alias:", "N/A") in rows
+
+ def test_build_info_rows_without_description(self):
+ """Test info rows exclude description when empty."""
+ generator = self._create_generator(language="en")
+ data = ComplianceData(
+ tenant_id="t1",
+ scan_id="scan-123",
+ provider_id="p1",
+ compliance_id="test_compliance",
+ framework="Test",
+ name="Test",
+ version="1.0",
+ description="",
+ )
+
+ rows = generator._build_info_rows(data, language="en")
+
+ labels = [label for label, _ in rows]
+ assert "Description:" not in labels
+
+ def test_build_info_rows_defaults_to_english(self):
+ """Test unknown language defaults to English labels."""
+ generator = self._create_generator(language="en")
+ data = ComplianceData(
+ tenant_id="t1",
+ scan_id="scan-123",
+ provider_id="p1",
+ compliance_id="test_compliance",
+ framework="Test",
+ name="Test",
+ version="1.0",
+ description="Desc",
+ )
+
+ rows = generator._build_info_rows(data, language="fr") # Unknown language
+
+ # Should use English labels as fallback
+ assert ("Name:", "Test") in rows
+ assert ("Description:", "Desc") in rows
+
+
+# =============================================================================
+# Integration Tests
+# =============================================================================
+
+
class TestExampleReportGenerator:
    """Integration tests driving a minimal concrete report generator."""

    def setup_method(self):
        """Define a tiny BaseComplianceReportGenerator subclass for the tests."""

        class ExampleGenerator(BaseComplianceReportGenerator):
            """Smallest possible concrete implementation."""

            def create_executive_summary(self, data):
                header = Paragraph("Executive Summary", self.styles["h1"])
                body = Paragraph(
                    f"Total requirements: {len(data.requirements)}",
                    self.styles["normal"],
                )
                return [header, body]

            def create_charts_section(self, data):
                chart = create_vertical_bar_chart(
                    labels=["Pass", "Fail"],
                    values=[80, 20],
                )
                return [Image(chart, width=6 * inch, height=4 * inch)]

            def create_requirements_index(self, data):
                # Header line followed by one bullet per requirement.
                return [Paragraph("Requirements Index", self.styles["h1"])] + [
                    Paragraph(
                        f"- {req.id}: {req.description}", self.styles["normal"]
                    )
                    for req in data.requirements
                ]

        self.generator_class = ExampleGenerator

    def test_example_generator_creation(self):
        """The example generator can be instantiated from a FrameworkConfig."""
        cfg = FrameworkConfig(name="example", display_name="Example Framework")
        assert self.generator_class(cfg) is not None

    def test_example_generator_executive_summary(self):
        """Executive summary returns a header plus a totals paragraph."""
        cfg = FrameworkConfig(name="example", display_name="Example Framework")
        gen = self.generator_class(cfg)

        payload = ComplianceData(
            tenant_id="t1",
            scan_id="s1",
            provider_id="p1",
            compliance_id="c1",
            framework="Test",
            name="Test",
            version="1.0",
            description="",
            requirements=[
                RequirementData(id="R1", description="Req 1", status="PASS"),
                RequirementData(id="R2", description="Req 2", status="FAIL"),
            ],
        )

        assert len(gen.create_executive_summary(payload)) == 2

    def test_example_generator_charts_section(self):
        """Charts section returns exactly one Image flowable."""
        cfg = FrameworkConfig(name="example", display_name="Example Framework")
        gen = self.generator_class(cfg)

        payload = ComplianceData(
            tenant_id="t1",
            scan_id="s1",
            provider_id="p1",
            compliance_id="c1",
            framework="Test",
            name="Test",
            version="1.0",
            description="",
        )

        assert len(gen.create_charts_section(payload)) == 1

    def test_example_generator_requirements_index(self):
        """Index contains a header plus one line per requirement."""
        cfg = FrameworkConfig(name="example", display_name="Example Framework")
        gen = self.generator_class(cfg)

        payload = ComplianceData(
            tenant_id="t1",
            scan_id="s1",
            provider_id="p1",
            compliance_id="c1",
            framework="Test",
            name="Test",
            version="1.0",
            description="",
            requirements=[
                RequirementData(id="R1", description="Requirement 1", status="PASS"),
            ],
        )

        assert len(gen.create_requirements_index(payload)) == 2
+
+
+# =============================================================================
+# Edge Case Tests
+# =============================================================================
+
+
class TestChartEdgeCases:
    """Edge cases for the chart helper functions."""

    def test_vertical_bar_chart_empty_data(self):
        """Empty labels/values still yield an image buffer."""
        out = create_vertical_bar_chart(labels=[], values=[])
        assert isinstance(out, io.BytesIO)

    def test_vertical_bar_chart_single_item(self):
        """A single bar renders without error."""
        out = create_vertical_bar_chart(labels=["Single"], values=[75.0])
        assert isinstance(out, io.BytesIO)

    def test_horizontal_bar_chart_empty_data(self):
        """Empty input is accepted by the horizontal bar chart."""
        out = create_horizontal_bar_chart(labels=[], values=[])
        assert isinstance(out, io.BytesIO)

    def test_horizontal_bar_chart_single_item(self):
        """A single horizontal bar renders without error."""
        out = create_horizontal_bar_chart(labels=["Single"], values=[50.0])
        assert isinstance(out, io.BytesIO)

    def test_radar_chart_minimum_points(self):
        """Three axes is the smallest radar chart that can be drawn."""
        out = create_radar_chart(labels=["A", "B", "C"], values=[30.0, 60.0, 90.0])
        assert isinstance(out, io.BytesIO)

    def test_pie_chart_single_slice(self):
        """A pie with one 100% slice renders without error."""
        out = create_pie_chart(labels=["Only"], values=[100.0])
        assert isinstance(out, io.BytesIO)

    def test_pie_chart_many_slices(self):
        """Ten equal slices render without error."""
        names = ["Item %d" % i for i in range(10)]
        out = create_pie_chart(labels=names, values=[10.0] * 10)
        assert isinstance(out, io.BytesIO)

    def test_stacked_bar_chart_single_series(self):
        """A single data series is a valid stacked chart."""
        out = create_stacked_bar_chart(
            labels=["A", "B"],
            data_series={"Only": [10.0, 20.0]},
        )
        assert isinstance(out, io.BytesIO)

    def test_stacked_bar_chart_empty_data(self):
        """Completely empty input still yields a buffer."""
        out = create_stacked_bar_chart(labels=[], data_series={})
        assert isinstance(out, io.BytesIO)
+
+
class TestComponentEdgeCases:
    """Edge cases for the PDF component builders."""

    def test_create_badge_empty_text(self):
        """A badge with no text is still created."""
        assert create_badge("", COLOR_BLUE) is not None

    def test_create_badge_long_text(self):
        """A very long label fits into a wide badge."""
        assert create_badge("A" * 100, COLOR_BLUE, width=5 * inch) is not None

    def test_create_status_badge_unknown_status(self):
        """An unrecognized status still produces a badge."""
        assert create_status_badge("UNKNOWN") is not None

    def test_create_multi_badge_row_single_badge(self):
        """A row may contain a single badge."""
        assert create_multi_badge_row([("A", COLOR_BLUE)]) is not None

    def test_create_multi_badge_row_many_badges(self):
        """A row may contain ten badges (labels A through J)."""
        row = create_multi_badge_row(
            [(chr(ord("A") + i), COLOR_BLUE) for i in range(10)]
        )
        assert row is not None

    def test_create_info_table_empty(self):
        """An empty row list still yields a Table instance."""
        assert isinstance(create_info_table([]), Table)

    def test_create_info_table_long_values(self):
        """Very long values are wrapped instead of breaking the layout."""
        styles = create_pdf_styles()
        long_rows = [("Key:", "A" * 200)]
        assert create_info_table(long_rows, normal_style=styles["normal"]) is not None

    def test_create_data_table_empty(self):
        """A data table with no rows is still created."""
        cols = [ColumnConfig("Name", 2 * inch, "name")]
        assert create_data_table([], cols) is not None

    def test_create_data_table_large_dataset(self):
        """More than 50 rows switches the builder to LongTable."""
        rows = [{"name": f"Item {i}"} for i in range(60)]
        cols = [ColumnConfig("Name", 2 * inch, "name")]
        assert isinstance(create_data_table(rows, cols), LongTable)

    def test_create_risk_component_zero_values(self):
        """All-zero inputs are accepted."""
        assert create_risk_component(risk_level=0, weight=0, score=0) is not None

    def test_create_risk_component_max_values(self):
        """Upper-bound inputs are accepted."""
        assert create_risk_component(risk_level=5, weight=200, score=1000) is not None
+
+
class TestColorEdgeCases:
    """Boundary behavior of the color helper functions."""

    def test_get_color_for_compliance_boundary_80(self):
        """Exactly 80% compliance maps to the safe color."""
        assert get_color_for_compliance(80) == COLOR_SAFE

    def test_get_color_for_compliance_boundary_60(self):
        """Exactly 60% compliance maps to the low-risk color."""
        assert get_color_for_compliance(60) == COLOR_LOW_RISK

    def test_get_color_for_compliance_over_100(self):
        """Values above 100% fall into the safe bucket."""
        assert get_color_for_compliance(150) == COLOR_SAFE

    def test_get_color_for_weight_boundary_100(self):
        """A weight of exactly 100 maps to the low-risk color."""
        assert get_color_for_weight(100) == COLOR_LOW_RISK

    def test_get_color_for_weight_boundary_50(self):
        """A weight of exactly 50 maps to the safe color."""
        assert get_color_for_weight(50) == COLOR_SAFE

    def test_get_status_color_case_insensitive(self):
        """Status color lookup ignores letter case."""
        for upper, other in (("PASS", "pass"), ("FAIL", "Fail"), ("MANUAL", "manual")):
            assert get_status_color(upper) == get_status_color(other)
+
+
class TestFrameworkConfigEdgeCases:
    """Tests for FrameworkConfig edge cases."""

    def test_framework_config_empty_sections(self):
        """FrameworkConfig accepts an empty sections list."""
        config = FrameworkConfig(
            name="test",
            display_name="Test",
            sections=[],
        )
        assert config.sections == []

    def test_framework_config_empty_attribute_fields(self):
        """FrameworkConfig accepts an empty attribute_fields list."""
        config = FrameworkConfig(
            name="test",
            display_name="Test",
            attribute_fields=[],
        )
        assert config.attribute_fields == []

    def test_get_framework_config_case_variations(self):
        """get_framework_config resolves ids regardless of letter case."""
        # Upper-case compliance ids.
        assert get_framework_config("PROWLER_THREATSCORE_AWS") is not None
        assert get_framework_config("ENS_RD2022_AWS") is not None
        assert get_framework_config("NIS2_AWS") is not None
        # FIX: the test claimed to cover case variations but only exercised
        # upper-case ids; lower-case ids must resolve too (the partial-match
        # test below already relies on lower-case lookups succeeding).
        assert get_framework_config("prowler_threatscore_aws") is not None
        assert get_framework_config("ens_rd2022_aws") is not None
        assert get_framework_config("nis2_aws") is not None

    def test_get_framework_config_partial_match(self):
        """Substring matching resolves custom compliance ids."""
        assert get_framework_config("my_custom_threatscore_compliance") is not None
        assert get_framework_config("ens_something_else") is not None
        assert get_framework_config("nis2_gcp") is not None
+ assert get_framework_config("nis2_gcp") is not None
diff --git a/api/src/backend/tasks/tests/test_reports_ens.py b/api/src/backend/tasks/tests/test_reports_ens.py
new file mode 100644
index 0000000000..91eb6d6f3a
--- /dev/null
+++ b/api/src/backend/tasks/tests/test_reports_ens.py
@@ -0,0 +1,1227 @@
+import io
+from unittest.mock import Mock, patch
+
+import pytest
+from reportlab.platypus import PageBreak, Paragraph, Table
+from tasks.jobs.reports import FRAMEWORK_REGISTRY, ComplianceData, RequirementData
+from tasks.jobs.reports.ens import ENSReportGenerator
+
+
+# Use string status values directly to avoid Django DB initialization
+# These match api.models.StatusChoices values
class StatusChoices:
    """Stand-in for api.models.StatusChoices so tests skip Django DB setup."""

    PASS = "PASS"
    FAIL = "FAIL"
    MANUAL = "MANUAL"
+
+
+# =============================================================================
+# Fixtures
+# =============================================================================
+
+
@pytest.fixture
def ens_generator():
    """Provide an ENSReportGenerator built from the registry config."""
    return ENSReportGenerator(FRAMEWORK_REGISTRY["ens"])
+
+
@pytest.fixture
def mock_ens_requirement_attribute():
    """ENS requirement attribute mock with every field populated (nivel alto)."""
    return Mock(
        Marco="Operacional",
        Categoria="Gestión de incidentes",
        DescripcionControl="Control de gestión de incidentes de seguridad",
        Tipo="requisito",
        Nivel="alto",
        Dimensiones=["confidencialidad", "integridad"],
        ModoEjecucion="automatico",
        IdGrupoControl="op.ext.1",
    )
+
+
@pytest.fixture
def mock_ens_requirement_attribute_medio():
    """ENS requirement attribute mock with nivel medio."""
    return Mock(
        Marco="Organizativo",
        Categoria="Seguridad en los recursos humanos",
        DescripcionControl="Control de seguridad del personal",
        Tipo="refuerzo",
        Nivel="medio",
        # Deliberately a string (not a list) to exercise that input format.
        Dimensiones="trazabilidad, autenticidad",
        ModoEjecucion="manual",
        IdGrupoControl="org.rh.1",
    )
+
+
@pytest.fixture
def mock_ens_requirement_attribute_bajo():
    """ENS requirement attribute mock with nivel bajo."""
    return Mock(
        Marco="Medidas de Protección",
        Categoria="Protección de las instalaciones",
        DescripcionControl="Control de acceso físico",
        Tipo="recomendacion",
        Nivel="bajo",
        Dimensiones=["disponibilidad"],
        ModoEjecucion="automatico",
        IdGrupoControl="mp.if.1",
    )
+
+
@pytest.fixture
def mock_ens_requirement_attribute_opcional():
    """ENS requirement attribute mock with nivel opcional and no dimensions."""
    return Mock(
        Marco="Marco de Organización",
        Categoria="Política de seguridad",
        DescripcionControl="Política de seguridad de la información",
        Tipo="medida",
        Nivel="opcional",
        Dimensiones=[],
        ModoEjecucion="automatico",
        IdGrupoControl="org.1",
    )
+
+
@pytest.fixture
def basic_ens_compliance_data():
    """Minimal ComplianceData describing an ENS RD2022 scan."""
    fields = {
        "tenant_id": "tenant-123",
        "scan_id": "scan-456",
        "provider_id": "provider-789",
        "compliance_id": "ens_rd2022_aws",
        "framework": "ENS RD2022",
        "name": "Esquema Nacional de Seguridad RD 311/2022",
        "version": "2022",
        "description": "Marco de seguridad para la administración electrónica española",
    }
    return ComplianceData(**fields)
+
+
+# =============================================================================
+# Generator Initialization Tests
+# =============================================================================
+
+
class TestENSGeneratorInitialization:
    """Checks on the ENS generator's configuration flags."""

    def test_generator_creation(self, ens_generator):
        """The fixture yields a generator carrying the Spanish ENS config."""
        assert ens_generator is not None
        cfg = ens_generator.config
        assert cfg.name == "ens"
        assert cfg.language == "es"

    def test_generator_has_niveles(self, ens_generator):
        """ENS reports group requirements by nivel."""
        assert ens_generator.config.has_niveles is True

    def test_generator_has_dimensions(self, ens_generator):
        """ENS reports include security dimensions."""
        assert ens_generator.config.has_dimensions is True

    def test_generator_no_risk_levels(self, ens_generator):
        """ENS does not use the generic risk-level feature."""
        assert ens_generator.config.has_risk_levels is False

    def test_generator_no_weight(self, ens_generator):
        """ENS does not use requirement weights."""
        assert ens_generator.config.has_weight is False
+
+
+# =============================================================================
+# Cover Page Tests
+# =============================================================================
+
+
class TestENSCoverPage:
    """Tests for the ENS cover page flowables."""

    @staticmethod
    def _empty(data):
        """Strip requirements/attributes so only static cover content remains."""
        data.requirements = []
        data.attributes_by_requirement_id = {}
        return data

    @patch("tasks.jobs.reports.ens.Image")
    def test_cover_page_has_logos(
        self, mock_image, ens_generator, basic_ens_compliance_data
    ):
        """Both the Prowler and the ENS logos are placed on the cover."""
        story = ens_generator.create_cover_page(self._empty(basic_ens_compliance_data))

        assert len(story) > 0
        # At least two Image instantiations: Prowler logo + ENS logo.
        assert mock_image.call_count >= 2

    def test_cover_page_has_title(self, ens_generator, basic_ens_compliance_data):
        """The cover mentions the ENS report title."""
        story = ens_generator.create_cover_page(self._empty(basic_ens_compliance_data))

        text = " ".join(str(p.text) for p in story if isinstance(p, Paragraph))
        assert "ENS" in text or "Informe" in text

    def test_cover_page_has_info_table(self, ens_generator, basic_ens_compliance_data):
        """The cover carries at least the metadata info table."""
        story = ens_generator.create_cover_page(self._empty(basic_ens_compliance_data))

        assert sum(1 for e in story if isinstance(e, Table)) >= 1

    def test_cover_page_has_warning_about_manual(
        self, ens_generator, basic_ens_compliance_data
    ):
        """A MANUAL requirement triggers a visible warning on the cover."""
        basic_ens_compliance_data.requirements = [
            RequirementData(
                id="REQ-001",
                description="Manual requirement",
                status=StatusChoices.MANUAL,
                passed_findings=0,
                failed_findings=0,
                total_findings=0,
            )
        ]
        basic_ens_compliance_data.attributes_by_requirement_id = {}

        story = ens_generator.create_cover_page(basic_ens_compliance_data)

        # Collect paragraphs both at the top level and nested in table cells.
        paragraphs = []
        for element in story:
            if isinstance(element, Paragraph):
                paragraphs.append(element)
            elif isinstance(element, Table):
                for row in getattr(element, "_cellvalues", []):
                    paragraphs.extend(c for c in row if isinstance(c, Paragraph))
        text = " ".join(str(p.text) for p in paragraphs)
        assert "manual" in text.lower() or "AVISO" in text

    def test_cover_page_has_legend(self, ens_generator, basic_ens_compliance_data):
        """The cover includes the legend explaining the ENS values."""
        story = ens_generator.create_cover_page(self._empty(basic_ens_compliance_data))

        # Several tables are expected on the cover (logos, info, legend, ...).
        assert sum(1 for e in story if isinstance(e, Table)) >= 3
+
+
+# =============================================================================
+# Executive Summary Tests
+# =============================================================================
+
+
class TestENSExecutiveSummary:
    """Tests for the ENS executive summary section."""

    @staticmethod
    def _attrs(attribute, *req_ids):
        """Map each requirement id to the given mock attribute payload."""
        payload = {"attributes": {"req_attributes": [attribute]}}
        return {rid: payload for rid in req_ids}

    def test_executive_summary_has_title(
        self, ens_generator, basic_ens_compliance_data
    ):
        """The section is titled in Spanish."""
        basic_ens_compliance_data.requirements = []
        basic_ens_compliance_data.attributes_by_requirement_id = {}

        story = ens_generator.create_executive_summary(basic_ens_compliance_data)

        text = " ".join(str(p.text) for p in story if isinstance(p, Paragraph))
        assert "Resumen Ejecutivo" in text

    def test_executive_summary_calculates_compliance(
        self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
    ):
        """One passing and one failing requirement produce metric tables."""
        basic_ens_compliance_data.requirements = [
            RequirementData(
                id="REQ-001",
                description="Passed requirement",
                status=StatusChoices.PASS,
                passed_findings=10,
                failed_findings=0,
                total_findings=10,
            ),
            RequirementData(
                id="REQ-002",
                description="Failed requirement",
                status=StatusChoices.FAIL,
                passed_findings=0,
                failed_findings=10,
                total_findings=10,
            ),
        ]
        basic_ens_compliance_data.attributes_by_requirement_id = self._attrs(
            mock_ens_requirement_attribute, "REQ-001", "REQ-002"
        )

        story = ens_generator.create_executive_summary(basic_ens_compliance_data)

        assert sum(1 for e in story if isinstance(e, Table)) >= 1

    def test_executive_summary_excludes_manual_from_compliance(
        self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
    ):
        """MANUAL requirements do not enter the compliance percentage."""
        basic_ens_compliance_data.requirements = [
            RequirementData(
                id="REQ-001",
                description="Passed requirement",
                status=StatusChoices.PASS,
                passed_findings=10,
                failed_findings=0,
                total_findings=10,
            ),
            RequirementData(
                id="REQ-002",
                description="Manual requirement",
                status=StatusChoices.MANUAL,
                passed_findings=0,
                failed_findings=0,
                total_findings=0,
            ),
        ]
        basic_ens_compliance_data.attributes_by_requirement_id = self._attrs(
            mock_ens_requirement_attribute, "REQ-001", "REQ-002"
        )

        story = ens_generator.create_executive_summary(basic_ens_compliance_data)

        # Only the single automatic requirement counts, and it passed.
        assert len(story) > 0

    def test_executive_summary_has_nivel_table(
        self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
    ):
        """Compliance broken down by nivel appears in the summary."""
        basic_ens_compliance_data.requirements = [
            RequirementData(
                id="REQ-001",
                description="Alto requirement",
                status=StatusChoices.PASS,
                passed_findings=10,
                failed_findings=0,
                total_findings=10,
            ),
        ]
        basic_ens_compliance_data.attributes_by_requirement_id = self._attrs(
            mock_ens_requirement_attribute, "REQ-001"
        )

        story = ens_generator.create_executive_summary(basic_ens_compliance_data)

        text = " ".join(str(p.text) for p in story if isinstance(p, Paragraph))
        assert "Nivel" in text or "nivel" in text.lower()
+
+
+# =============================================================================
+# Charts Section Tests
+# =============================================================================
+
+
class TestENSChartsSection:
    """Tests for the ENS charts section."""

    @staticmethod
    def _single_pass_data(data, attribute):
        """Attach one passing requirement carrying the given mock attribute."""
        data.requirements = [
            RequirementData(
                id="REQ-001",
                description="Test requirement",
                status=StatusChoices.PASS,
                passed_findings=10,
                failed_findings=0,
                total_findings=10,
            ),
        ]
        data.attributes_by_requirement_id = {
            "REQ-001": {"attributes": {"req_attributes": [attribute]}},
        }
        return data

    def test_charts_section_has_page_breaks(
        self, ens_generator, basic_ens_compliance_data
    ):
        """Individual charts are separated by page breaks."""
        basic_ens_compliance_data.requirements = []
        basic_ens_compliance_data.attributes_by_requirement_id = {}

        story = ens_generator.create_charts_section(basic_ens_compliance_data)

        assert sum(1 for e in story if isinstance(e, PageBreak)) >= 2

    def test_charts_section_has_marco_category_chart(
        self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
    ):
        """A Marco/Categoría chart is present."""
        data = self._single_pass_data(
            basic_ens_compliance_data, mock_ens_requirement_attribute
        )

        story = ens_generator.create_charts_section(data)

        text = " ".join(str(p.text) for p in story if isinstance(p, Paragraph))
        assert "Marco" in text or "Categoría" in text

    def test_charts_section_has_dimensions_radar(
        self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
    ):
        """A dimensions radar chart is present."""
        data = self._single_pass_data(
            basic_ens_compliance_data, mock_ens_requirement_attribute
        )

        story = ens_generator.create_charts_section(data)

        text = " ".join(str(p.text) for p in story if isinstance(p, Paragraph))
        assert "Dimensiones" in text or "dimensiones" in text.lower()

    def test_charts_section_has_tipo_distribution(
        self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
    ):
        """A requirement-tipo distribution chart is present."""
        data = self._single_pass_data(
            basic_ens_compliance_data, mock_ens_requirement_attribute
        )

        story = ens_generator.create_charts_section(data)

        text = " ".join(str(p.text) for p in story if isinstance(p, Paragraph))
        assert "Tipo" in text or "tipo" in text.lower()
+
+
+# =============================================================================
+# Critical Failed Requirements Tests
+# =============================================================================
+
+
class TestENSCriticalFailedRequirements:
    """Tests for the critical-failures section (nivel alto only)."""

    def test_no_critical_failures_shows_success_message(
        self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
    ):
        """With no failing alto requirements a success message is shown."""
        basic_ens_compliance_data.requirements = [
            RequirementData(
                id="REQ-001",
                description="Passed alto requirement",
                status=StatusChoices.PASS,
                passed_findings=10,
                failed_findings=0,
                total_findings=10,
            ),
        ]
        basic_ens_compliance_data.attributes_by_requirement_id = {
            "REQ-001": {
                "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
            },
        }

        story = ens_generator._create_critical_failed_section(
            basic_ens_compliance_data
        )

        text = " ".join(str(p.text) for p in story if isinstance(p, Paragraph))
        assert "No hay" in text or "✅" in text

    def test_critical_failures_shows_table(
        self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
    ):
        """A failing alto requirement produces a details table."""
        basic_ens_compliance_data.requirements = [
            RequirementData(
                id="REQ-001",
                description="Failed alto requirement",
                status=StatusChoices.FAIL,
                passed_findings=0,
                failed_findings=10,
                total_findings=10,
            ),
        ]
        basic_ens_compliance_data.attributes_by_requirement_id = {
            "REQ-001": {
                "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
            },
        }

        story = ens_generator._create_critical_failed_section(
            basic_ens_compliance_data
        )

        assert sum(1 for e in story if isinstance(e, Table)) >= 1

    def test_critical_failures_only_includes_alto(
        self,
        ens_generator,
        basic_ens_compliance_data,
        mock_ens_requirement_attribute,
        mock_ens_requirement_attribute_medio,
    ):
        """Failures at nivel medio are filtered out of the section."""
        basic_ens_compliance_data.requirements = [
            RequirementData(
                id="REQ-001",
                description="Failed alto requirement",
                status=StatusChoices.FAIL,
                passed_findings=0,
                failed_findings=10,
                total_findings=10,
            ),
            RequirementData(
                id="REQ-002",
                description="Failed medio requirement",
                status=StatusChoices.FAIL,
                passed_findings=0,
                failed_findings=5,
                total_findings=5,
            ),
        ]
        basic_ens_compliance_data.attributes_by_requirement_id = {
            "REQ-001": {
                "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
            },
            "REQ-002": {
                "attributes": {"req_attributes": [mock_ens_requirement_attribute_medio]}
            },
        }

        story = ens_generator._create_critical_failed_section(
            basic_ens_compliance_data
        )

        # Only the single alto failure should be counted in the section text.
        text = " ".join(str(p.text) for p in story if isinstance(p, Paragraph))
        assert "1" in text
+
+
+# =============================================================================
+# Requirements Index Tests
+# =============================================================================
+
+
+class TestENSRequirementsIndex:
+ """Test suite for ENS requirements index generation."""
+
+ def test_requirements_index_has_title(
+ self, ens_generator, basic_ens_compliance_data
+ ):
+ """Test that requirements index has Spanish title."""
+ basic_ens_compliance_data.requirements = []
+ basic_ens_compliance_data.attributes_by_requirement_id = {}
+
+ elements = ens_generator.create_requirements_index(basic_ens_compliance_data)
+
+ paragraphs = [e for e in elements if isinstance(e, Paragraph)]
+ content = " ".join(str(p.text) for p in paragraphs)
+ assert "Índice" in content or "Requisitos" in content
+
+ def test_requirements_index_organized_by_marco(
+ self,
+ ens_generator,
+ basic_ens_compliance_data,
+ mock_ens_requirement_attribute,
+ mock_ens_requirement_attribute_medio,
+ ):
+ """Test that requirements index is organized by Marco."""
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Operacional requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Organizativo requirement",
+ status=StatusChoices.PASS,
+ passed_findings=5,
+ failed_findings=0,
+ total_findings=5,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ "REQ-002": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute_medio]}
+ },
+ }
+
+ elements = ens_generator.create_requirements_index(basic_ens_compliance_data)
+
+ paragraphs = [e for e in elements if isinstance(e, Paragraph)]
+ content = " ".join(str(p.text) for p in paragraphs)
+ assert (
+ "Operacional" in content or "Organizativo" in content or "Marco" in content
+ )
+
+ def test_requirements_index_excludes_manual(
+ self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
+ ):
+ """Test that manual requirements are excluded from index."""
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Auto requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Manual requirement",
+ status=StatusChoices.MANUAL,
+ passed_findings=0,
+ failed_findings=0,
+ total_findings=0,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ "REQ-002": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ }
+
+ elements = ens_generator.create_requirements_index(basic_ens_compliance_data)
+
+ paragraphs = [e for e in elements if isinstance(e, Paragraph)]
+ content = " ".join(str(p.text) for p in paragraphs)
+ # REQ-001 should be there, REQ-002 should not
+ assert "REQ-001" in content
+ assert "REQ-002" not in content
+
+ def test_requirements_index_shows_status_indicators(
+ self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
+ ):
+ """Test that requirements index shows pass/fail indicators."""
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Passed requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Failed requirement",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ "REQ-002": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ }
+
+ elements = ens_generator.create_requirements_index(basic_ens_compliance_data)
+
+ paragraphs = [e for e in elements if isinstance(e, Paragraph)]
+ content = " ".join(str(p.text) for p in paragraphs)
+ # Should have status indicators
+ assert "✓" in content or "✗" in content
+
+
+# =============================================================================
+# Detailed Findings Tests
+# =============================================================================
+
+
+class TestENSDetailedFindings:
+ """Test suite for ENS detailed findings generation."""
+
+ def test_detailed_findings_has_title(
+ self, ens_generator, basic_ens_compliance_data
+ ):
+ """Test that detailed findings section has title."""
+ basic_ens_compliance_data.requirements = []
+ basic_ens_compliance_data.attributes_by_requirement_id = {}
+
+ elements = ens_generator.create_detailed_findings(basic_ens_compliance_data)
+
+ paragraphs = [e for e in elements if isinstance(e, Paragraph)]
+ content = " ".join(str(p.text) for p in paragraphs)
+ assert "Detalle" in content or "Requisitos" in content
+
+ def test_detailed_findings_no_failures_message(
+ self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
+ ):
+ """Test message when no failed requirements exist."""
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Passed requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ }
+
+ elements = ens_generator.create_detailed_findings(basic_ens_compliance_data)
+
+ paragraphs = [e for e in elements if isinstance(e, Paragraph)]
+ content = " ".join(str(p.text) for p in paragraphs)
+ assert "No hay" in content or "requisitos fallidos" in content.lower()
+
+ def test_detailed_findings_shows_failed_requirements(
+ self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
+ ):
+ """Test that failed requirements are shown in detail."""
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Failed requirement",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ }
+
+ elements = ens_generator.create_detailed_findings(basic_ens_compliance_data)
+
+ # Should have tables showing requirement details
+ tables = [e for e in elements if isinstance(e, Table)]
+ assert len(tables) >= 1
+
+ def test_detailed_findings_shows_nivel_badges(
+ self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
+ ):
+ """Test that detailed findings show nivel badges."""
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Failed requirement",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ }
+
+ elements = ens_generator.create_detailed_findings(basic_ens_compliance_data)
+
+ # Should generate without errors
+ assert len(elements) > 0
+
+ def test_detailed_findings_shows_dimensiones_badges(
+ self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
+ ):
+ """Test that detailed findings show dimension badges."""
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Failed requirement",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ }
+
+ elements = ens_generator.create_detailed_findings(basic_ens_compliance_data)
+
+ # Should generate without errors with dimension badges
+ assert len(elements) > 0
+
+
+# =============================================================================
+# Dimension Handling Tests
+# =============================================================================
+
+
+class TestENSDimensionHandling:
+ """Test suite for ENS security dimension handling."""
+
+ def test_dimensions_as_list(
+ self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
+ ):
+ """Test handling dimensions as a list."""
+ # mock_ens_requirement_attribute has Dimensiones as list
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Test requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ }
+
+ # Should not raise any errors
+ chart_buffer = ens_generator._create_dimensions_radar_chart(
+ basic_ens_compliance_data
+ )
+ assert isinstance(chart_buffer, io.BytesIO)
+
+ def test_dimensions_as_string(
+ self,
+ ens_generator,
+ basic_ens_compliance_data,
+ mock_ens_requirement_attribute_medio,
+ ):
+ """Test handling dimensions as comma-separated string."""
+ # mock_ens_requirement_attribute_medio has Dimensiones as string
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Test requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute_medio]}
+ },
+ }
+
+ # Should not raise any errors
+ chart_buffer = ens_generator._create_dimensions_radar_chart(
+ basic_ens_compliance_data
+ )
+ assert isinstance(chart_buffer, io.BytesIO)
+
+ def test_dimensions_empty(
+ self,
+ ens_generator,
+ basic_ens_compliance_data,
+ mock_ens_requirement_attribute_opcional,
+ ):
+ """Test handling empty dimensions."""
+ # mock_ens_requirement_attribute_opcional has empty Dimensiones
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Test requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_ens_requirement_attribute_opcional]
+ }
+ },
+ }
+
+ # Should not raise any errors
+ chart_buffer = ens_generator._create_dimensions_radar_chart(
+ basic_ens_compliance_data
+ )
+ assert isinstance(chart_buffer, io.BytesIO)
+
+
+# =============================================================================
+# Footer Tests
+# =============================================================================
+
+
+class TestENSFooter:
+ """Test suite for ENS footer generation."""
+
+ def test_footer_is_spanish(self, ens_generator):
+ """Test that footer text is in Spanish."""
+ left, right = ens_generator.get_footer_text(1)
+
+ assert "Página" in left
+ assert "Prowler" in right
+
+ def test_footer_includes_page_number(self, ens_generator):
+ """Test that footer includes page number."""
+ left, right = ens_generator.get_footer_text(5)
+
+ assert "5" in left
+
+
+# =============================================================================
+# Nivel Table Tests
+# =============================================================================
+
+
+class TestENSNivelTable:
+ """Test suite for ENS nivel compliance table."""
+
+ def test_nivel_table_all_niveles(
+ self,
+ ens_generator,
+ basic_ens_compliance_data,
+ mock_ens_requirement_attribute,
+ mock_ens_requirement_attribute_medio,
+ mock_ens_requirement_attribute_bajo,
+ mock_ens_requirement_attribute_opcional,
+ ):
+ """Test nivel table with all niveles represented."""
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Alto requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Medio requirement",
+ status=StatusChoices.PASS,
+ passed_findings=5,
+ failed_findings=0,
+ total_findings=5,
+ ),
+ RequirementData(
+ id="REQ-003",
+ description="Bajo requirement",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=5,
+ total_findings=5,
+ ),
+ RequirementData(
+ id="REQ-004",
+ description="Opcional requirement",
+ status=StatusChoices.PASS,
+ passed_findings=3,
+ failed_findings=0,
+ total_findings=3,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ "REQ-002": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute_medio]}
+ },
+ "REQ-003": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute_bajo]}
+ },
+ "REQ-004": {
+ "attributes": {
+ "req_attributes": [mock_ens_requirement_attribute_opcional]
+ }
+ },
+ }
+
+ elements = ens_generator._create_nivel_table(basic_ens_compliance_data)
+
+ # Should have at least one table
+ tables = [e for e in elements if isinstance(e, Table)]
+ assert len(tables) >= 1
+
+ def test_nivel_table_excludes_manual(
+ self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
+ ):
+ """Test that manual requirements are excluded from nivel table."""
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Auto requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Manual requirement",
+ status=StatusChoices.MANUAL,
+ passed_findings=0,
+ failed_findings=0,
+ total_findings=0,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ "REQ-002": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ }
+
+ elements = ens_generator._create_nivel_table(basic_ens_compliance_data)
+
+ # Should generate without errors
+ assert len(elements) > 0
+
+
+# =============================================================================
+# Marco Category Chart Tests
+# =============================================================================
+
+
+class TestENSMarcoCategoryChart:
+ """Test suite for ENS Marco/Categoría chart."""
+
+ def test_marco_category_chart_creation(
+ self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
+ ):
+ """Test that Marco/Categoría chart is created successfully."""
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Test requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ }
+
+ chart_buffer = ens_generator._create_marco_category_chart(
+ basic_ens_compliance_data
+ )
+
+ assert isinstance(chart_buffer, io.BytesIO)
+ assert chart_buffer.getvalue() # Not empty
+
+ def test_marco_category_chart_excludes_manual(
+ self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
+ ):
+ """Test that manual requirements are excluded from chart."""
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Auto requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Manual requirement",
+ status=StatusChoices.MANUAL,
+ passed_findings=0,
+ failed_findings=0,
+ total_findings=0,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ "REQ-002": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ }
+
+ # Should not raise any errors
+ chart_buffer = ens_generator._create_marco_category_chart(
+ basic_ens_compliance_data
+ )
+ assert isinstance(chart_buffer, io.BytesIO)
+
+
+# =============================================================================
+# Tipo Section Tests
+# =============================================================================
+
+
+class TestENSTipoSection:
+ """Test suite for ENS tipo distribution section."""
+
+ def test_tipo_section_creation(
+ self, ens_generator, basic_ens_compliance_data, mock_ens_requirement_attribute
+ ):
+ """Test that tipo section is created successfully."""
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Requisito type",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ }
+
+ elements = ens_generator._create_tipo_section(basic_ens_compliance_data)
+
+ assert len(elements) > 0
+ # Should have a table with tipo distribution
+ tables = [e for e in elements if isinstance(e, Table)]
+ assert len(tables) >= 1
+
+ def test_tipo_section_all_types(
+ self,
+ ens_generator,
+ basic_ens_compliance_data,
+ mock_ens_requirement_attribute,
+ mock_ens_requirement_attribute_medio,
+ mock_ens_requirement_attribute_bajo,
+ mock_ens_requirement_attribute_opcional,
+ ):
+ """Test tipo section with all requirement types."""
+ basic_ens_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Requisito type",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Refuerzo type",
+ status=StatusChoices.PASS,
+ passed_findings=5,
+ failed_findings=0,
+ total_findings=5,
+ ),
+ RequirementData(
+ id="REQ-003",
+ description="Recomendacion type",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=5,
+ total_findings=5,
+ ),
+ RequirementData(
+ id="REQ-004",
+ description="Medida type",
+ status=StatusChoices.PASS,
+ passed_findings=3,
+ failed_findings=0,
+ total_findings=3,
+ ),
+ ]
+ basic_ens_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute]}
+ },
+ "REQ-002": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute_medio]}
+ },
+ "REQ-003": {
+ "attributes": {"req_attributes": [mock_ens_requirement_attribute_bajo]}
+ },
+ "REQ-004": {
+ "attributes": {
+ "req_attributes": [mock_ens_requirement_attribute_opcional]
+ }
+ },
+ }
+
+ elements = ens_generator._create_tipo_section(basic_ens_compliance_data)
+
+ # Should generate without errors
+ assert len(elements) > 0
diff --git a/api/src/backend/tasks/tests/test_reports_nis2.py b/api/src/backend/tasks/tests/test_reports_nis2.py
new file mode 100644
index 0000000000..07e88ec7ca
--- /dev/null
+++ b/api/src/backend/tasks/tests/test_reports_nis2.py
@@ -0,0 +1,1093 @@
+import io
+from unittest.mock import Mock, patch
+
+import pytest
+from reportlab.platypus import PageBreak, Paragraph, Table
+from tasks.jobs.reports import FRAMEWORK_REGISTRY, ComplianceData, RequirementData
+from tasks.jobs.reports.nis2 import NIS2ReportGenerator, _extract_section_number
+
+
+# Use string status values directly to avoid Django DB initialization
+# These match api.models.StatusChoices values
+class StatusChoices:
+ """Mock StatusChoices to avoid Django DB initialization."""
+
+ PASS = "PASS"
+ FAIL = "FAIL"
+ MANUAL = "MANUAL"
+
+
+# =============================================================================
+# Fixtures
+# =============================================================================
+
+
+@pytest.fixture
+def nis2_generator():
+ """Create a NIS2ReportGenerator instance for testing."""
+ config = FRAMEWORK_REGISTRY["nis2"]
+ return NIS2ReportGenerator(config)
+
+
+@pytest.fixture
+def mock_nis2_requirement_attribute_section1():
+ """Create a mock NIS2 requirement attribute for Section 1."""
+ mock = Mock()
+ mock.Section = "1 POLICY ON THE SECURITY OF NETWORK AND INFORMATION SYSTEMS"
+ mock.SubSection = "1.1 Policy establishment"
+ mock.Description = "Establish security policies for network and information systems"
+ return mock
+
+
+@pytest.fixture
+def mock_nis2_requirement_attribute_section2():
+ """Create a mock NIS2 requirement attribute for Section 2."""
+ mock = Mock()
+ mock.Section = "2 RISK MANAGEMENT"
+ mock.SubSection = "2.1 Risk assessment"
+ mock.Description = "Conduct risk assessments for critical infrastructure"
+ return mock
+
+
+@pytest.fixture
+def mock_nis2_requirement_attribute_section11():
+ """Create a mock NIS2 requirement attribute for Section 11."""
+ mock = Mock()
+ mock.Section = "11 ACCESS CONTROL"
+ mock.SubSection = "11.2 User access management"
+ mock.Description = "Manage user access to systems and data"
+ return mock
+
+
+@pytest.fixture
+def mock_nis2_requirement_attribute_no_subsection():
+ """Create a mock NIS2 requirement attribute without subsection."""
+ mock = Mock()
+ mock.Section = "3 INCIDENT HANDLING"
+ mock.SubSection = ""
+ mock.Description = "Handle security incidents effectively"
+ return mock
+
+
+@pytest.fixture
+def basic_nis2_compliance_data():
+ """Create basic ComplianceData for NIS2 testing."""
+ return ComplianceData(
+ tenant_id="tenant-123",
+ scan_id="scan-456",
+ provider_id="provider-789",
+ compliance_id="nis2_aws",
+ framework="NIS2",
+ name="NIS2 Directive (EU) 2022/2555",
+ version="2022",
+ description="EU directive on security of network and information systems",
+ )
+
+
+# =============================================================================
+# Section Number Extraction Tests
+# =============================================================================
+
+
+class TestSectionNumberExtraction:
+ """Test suite for section number extraction utility."""
+
+ def test_extract_simple_section_number(self):
+ """Test extracting single digit section number."""
+ result = _extract_section_number("1 POLICY ON SECURITY")
+ assert result == "1"
+
+ def test_extract_double_digit_section_number(self):
+ """Test extracting double digit section number."""
+ result = _extract_section_number("11 ACCESS CONTROL")
+ assert result == "11"
+
+ def test_extract_section_number_with_spaces(self):
+ """Test extracting section number with leading/trailing spaces."""
+ result = _extract_section_number(" 2 RISK MANAGEMENT ")
+ assert result == "2"
+
+ def test_extract_section_number_empty_string(self):
+ """Test extracting from empty string returns 'Other'."""
+ result = _extract_section_number("")
+ assert result == "Other"
+
+ def test_extract_section_number_none_like(self):
+ """Test extracting from empty/None-like returns 'Other'."""
+ # Note: The function expects str, so we test empty string behavior
+ result = _extract_section_number("")
+ assert result == "Other"
+
+ def test_extract_section_number_no_number(self):
+ """Test extracting from string without number returns 'Other'."""
+ result = _extract_section_number("POLICY ON SECURITY")
+ assert result == "Other"
+
+ def test_extract_section_number_letter_first(self):
+ """Test extracting from string starting with letter returns 'Other'."""
+ result = _extract_section_number("A. Some Section")
+ assert result == "Other"
+
+
+# =============================================================================
+# Generator Initialization Tests
+# =============================================================================
+
+
+class TestNIS2GeneratorInitialization:
+ """Test suite for NIS2 generator initialization."""
+
+ def test_generator_creation(self, nis2_generator):
+ """Test that NIS2 generator is created correctly."""
+ assert nis2_generator is not None
+ assert nis2_generator.config.name == "nis2"
+ assert nis2_generator.config.language == "en"
+
+ def test_generator_no_niveles(self, nis2_generator):
+ """Test that NIS2 config does not use niveles."""
+ assert nis2_generator.config.has_niveles is False
+
+ def test_generator_no_dimensions(self, nis2_generator):
+ """Test that NIS2 config does not use dimensions."""
+ assert nis2_generator.config.has_dimensions is False
+
+ def test_generator_no_risk_levels(self, nis2_generator):
+ """Test that NIS2 config does not use risk levels."""
+ assert nis2_generator.config.has_risk_levels is False
+
+ def test_generator_no_weight(self, nis2_generator):
+ """Test that NIS2 config does not use weight."""
+ assert nis2_generator.config.has_weight is False
+
+
+# =============================================================================
+# Cover Page Tests
+# =============================================================================
+
+
+class TestNIS2CoverPage:
+ """Test suite for NIS2 cover page generation."""
+
+ @patch("tasks.jobs.reports.nis2.Image")
+ def test_cover_page_has_logos(
+ self, mock_image, nis2_generator, basic_nis2_compliance_data
+ ):
+ """Test that cover page contains logos."""
+ basic_nis2_compliance_data.requirements = []
+ basic_nis2_compliance_data.attributes_by_requirement_id = {}
+
+ elements = nis2_generator.create_cover_page(basic_nis2_compliance_data)
+
+ assert len(elements) > 0
+        # Image should have been constructed at least twice (Prowler and NIS2 logos)
+ assert mock_image.call_count >= 2
+
+ def test_cover_page_has_title(self, nis2_generator, basic_nis2_compliance_data):
+ """Test that cover page contains the NIS2 title."""
+ basic_nis2_compliance_data.requirements = []
+ basic_nis2_compliance_data.attributes_by_requirement_id = {}
+
+ elements = nis2_generator.create_cover_page(basic_nis2_compliance_data)
+
+ paragraphs = [e for e in elements if isinstance(e, Paragraph)]
+ content = " ".join(str(p.text) for p in paragraphs)
+ assert "NIS2" in content or "Directive" in content
+
+ def test_cover_page_has_metadata_table(
+ self, nis2_generator, basic_nis2_compliance_data
+ ):
+ """Test that cover page contains metadata table."""
+ basic_nis2_compliance_data.requirements = []
+ basic_nis2_compliance_data.attributes_by_requirement_id = {}
+
+ elements = nis2_generator.create_cover_page(basic_nis2_compliance_data)
+
+ tables = [e for e in elements if isinstance(e, Table)]
+ assert len(tables) >= 1
+
+
+# =============================================================================
+# Executive Summary Tests
+# =============================================================================
+
+
+class TestNIS2ExecutiveSummary:
+ """Test suite for NIS2 executive summary generation."""
+
+ def test_executive_summary_has_english_title(
+ self, nis2_generator, basic_nis2_compliance_data
+ ):
+ """Test that executive summary has English title."""
+ basic_nis2_compliance_data.requirements = []
+ basic_nis2_compliance_data.attributes_by_requirement_id = {}
+
+ elements = nis2_generator.create_executive_summary(basic_nis2_compliance_data)
+
+ paragraphs = [e for e in elements if isinstance(e, Paragraph)]
+ content = " ".join(str(p.text) for p in paragraphs)
+ assert "Executive Summary" in content
+
+ def test_executive_summary_calculates_compliance(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_section1,
+ ):
+ """Test that executive summary calculates compliance percentage."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Passed requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Failed requirement",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ ),
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ "REQ-002": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ }
+
+ elements = nis2_generator.create_executive_summary(basic_nis2_compliance_data)
+
+ # Should contain tables with metrics
+ tables = [e for e in elements if isinstance(e, Table)]
+ assert len(tables) >= 1
+
+ def test_executive_summary_shows_all_statuses(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_section1,
+ ):
+ """Test that executive summary shows passed, failed, and manual counts."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Passed",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Failed",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-003",
+ description="Manual",
+ status=StatusChoices.MANUAL,
+ passed_findings=0,
+ failed_findings=0,
+ total_findings=0,
+ ),
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ "REQ-002": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ "REQ-003": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ }
+
+ elements = nis2_generator.create_executive_summary(basic_nis2_compliance_data)
+
+ # Should have a summary table with all statuses
+ assert len(elements) > 0
+
+ def test_executive_summary_excludes_manual_from_percentage(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_section1,
+ ):
+ """Test that manual requirements are excluded from compliance percentage."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Passed",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Manual",
+ status=StatusChoices.MANUAL,
+ passed_findings=0,
+ failed_findings=0,
+ total_findings=0,
+ ),
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ "REQ-002": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ }
+
+ elements = nis2_generator.create_executive_summary(basic_nis2_compliance_data)
+
+ # Should calculate 100% (only 1 evaluated requirement that passed)
+ assert len(elements) > 0
+
+
+# =============================================================================
+# Charts Section Tests
+# =============================================================================
+
+
+class TestNIS2ChartsSection:
+ """Test suite for NIS2 charts section generation."""
+
+ def test_charts_section_has_section_chart_title(
+ self, nis2_generator, basic_nis2_compliance_data
+ ):
+ """Test that charts section has section compliance title."""
+ basic_nis2_compliance_data.requirements = []
+ basic_nis2_compliance_data.attributes_by_requirement_id = {}
+
+ elements = nis2_generator.create_charts_section(basic_nis2_compliance_data)
+
+ paragraphs = [e for e in elements if isinstance(e, Paragraph)]
+ content = " ".join(str(p.text) for p in paragraphs)
+ assert "Section" in content or "Compliance" in content
+
+ def test_charts_section_has_page_break(
+ self, nis2_generator, basic_nis2_compliance_data
+ ):
+ """Test that charts section has page breaks."""
+ basic_nis2_compliance_data.requirements = []
+ basic_nis2_compliance_data.attributes_by_requirement_id = {}
+
+ elements = nis2_generator.create_charts_section(basic_nis2_compliance_data)
+
+ page_breaks = [e for e in elements if isinstance(e, PageBreak)]
+ assert len(page_breaks) >= 1
+
+ def test_charts_section_has_subsection_breakdown(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_section1,
+ ):
+ """Test that charts section includes subsection breakdown table."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Test requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ }
+
+ elements = nis2_generator.create_charts_section(basic_nis2_compliance_data)
+
+ paragraphs = [e for e in elements if isinstance(e, Paragraph)]
+ content = " ".join(str(p.text) for p in paragraphs)
+ assert "SubSection" in content or "Breakdown" in content
+
+
+# =============================================================================
+# Section Chart Tests
+# =============================================================================
+
+
+class TestNIS2SectionChart:
+ """Test suite for NIS2 section compliance chart."""
+
+ def test_section_chart_creation(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_section1,
+ ):
+ """Test that section chart is created successfully."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Test requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ }
+
+ chart_buffer = nis2_generator._create_section_chart(basic_nis2_compliance_data)
+
+ assert isinstance(chart_buffer, io.BytesIO)
+ assert chart_buffer.getvalue() # Not empty
+
+ def test_section_chart_excludes_manual(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_section1,
+ ):
+ """Test that manual requirements are excluded from section chart."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Auto requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Manual requirement",
+ status=StatusChoices.MANUAL,
+ passed_findings=0,
+ failed_findings=0,
+ total_findings=0,
+ ),
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ "REQ-002": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ }
+
+ # Should not raise any errors
+ chart_buffer = nis2_generator._create_section_chart(basic_nis2_compliance_data)
+ assert isinstance(chart_buffer, io.BytesIO)
+
+ def test_section_chart_multiple_sections(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_section1,
+ mock_nis2_requirement_attribute_section2,
+ mock_nis2_requirement_attribute_section11,
+ ):
+ """Test section chart with multiple sections."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Section 1 requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Section 2 requirement",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-003",
+ description="Section 11 requirement",
+ status=StatusChoices.PASS,
+ passed_findings=5,
+ failed_findings=0,
+ total_findings=5,
+ ),
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ "REQ-002": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section2]
+ }
+ },
+ "REQ-003": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section11]
+ }
+ },
+ }
+
+ chart_buffer = nis2_generator._create_section_chart(basic_nis2_compliance_data)
+ assert isinstance(chart_buffer, io.BytesIO)
+
+
+# =============================================================================
+# SubSection Table Tests
+# =============================================================================
+
+
+class TestNIS2SubSectionTable:
+ """Test suite for NIS2 subsection breakdown table."""
+
+ def test_subsection_table_creation(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_section1,
+ ):
+ """Test that subsection table is created successfully."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Test requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ }
+
+ table = nis2_generator._create_subsection_table(basic_nis2_compliance_data)
+
+ assert isinstance(table, Table)
+
+ def test_subsection_table_counts_statuses(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_section1,
+ ):
+ """Test that subsection table counts passed, failed, and manual."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Passed",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Failed",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-003",
+ description="Manual",
+ status=StatusChoices.MANUAL,
+ passed_findings=0,
+ failed_findings=0,
+ total_findings=0,
+ ),
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ "REQ-002": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ "REQ-003": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ }
+
+ table = nis2_generator._create_subsection_table(basic_nis2_compliance_data)
+ assert isinstance(table, Table)
+
+ def test_subsection_table_no_subsection(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_no_subsection,
+ ):
+ """Test subsection table when requirements have no subsection."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="No subsection requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_no_subsection]
+ }
+ },
+ }
+
+ table = nis2_generator._create_subsection_table(basic_nis2_compliance_data)
+ assert isinstance(table, Table)
+
+
+# =============================================================================
+# Requirements Index Tests
+# =============================================================================
+
+
+class TestNIS2RequirementsIndex:
+ """Test suite for NIS2 requirements index generation."""
+
+ def test_requirements_index_has_title(
+ self, nis2_generator, basic_nis2_compliance_data
+ ):
+ """Test that requirements index has English title."""
+ basic_nis2_compliance_data.requirements = []
+ basic_nis2_compliance_data.attributes_by_requirement_id = {}
+
+ elements = nis2_generator.create_requirements_index(basic_nis2_compliance_data)
+
+ paragraphs = [e for e in elements if isinstance(e, Paragraph)]
+ content = " ".join(str(p.text) for p in paragraphs)
+ assert "Requirements Index" in content
+
+ def test_requirements_index_organized_by_section(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_section1,
+ mock_nis2_requirement_attribute_section2,
+ ):
+ """Test that requirements index is organized by section."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Section 1 requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Section 2 requirement",
+ status=StatusChoices.PASS,
+ passed_findings=5,
+ failed_findings=0,
+ total_findings=5,
+ ),
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ "REQ-002": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section2]
+ }
+ },
+ }
+
+ elements = nis2_generator.create_requirements_index(basic_nis2_compliance_data)
+
+ paragraphs = [e for e in elements if isinstance(e, Paragraph)]
+ content = " ".join(str(p.text) for p in paragraphs)
+ # Should have section headers
+ assert "Policy" in content or "Risk" in content or "1." in content
+
+ def test_requirements_index_shows_status_indicators(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_section1,
+ ):
+ """Test that requirements index shows pass/fail/manual indicators."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Passed requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Failed requirement",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-003",
+ description="Manual requirement",
+ status=StatusChoices.MANUAL,
+ passed_findings=0,
+ failed_findings=0,
+ total_findings=0,
+ ),
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ "REQ-002": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ "REQ-003": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ }
+
+ elements = nis2_generator.create_requirements_index(basic_nis2_compliance_data)
+
+ paragraphs = [e for e in elements if isinstance(e, Paragraph)]
+ content = " ".join(str(p.text) for p in paragraphs)
+ # Should have status indicators
+ assert "✓" in content or "✗" in content or "⊙" in content
+
+ def test_requirements_index_truncates_long_descriptions(
+ self, nis2_generator, basic_nis2_compliance_data
+ ):
+ """Test that long descriptions are truncated."""
+ mock_attr = Mock()
+ mock_attr.Section = "1 POLICY"
+ mock_attr.SubSection = "1.1 Long subsection name"
+ mock_attr.Description = "A" * 100 # Very long description
+
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="A" * 100,
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_attr]}},
+ }
+
+ # Should not raise errors
+ elements = nis2_generator.create_requirements_index(basic_nis2_compliance_data)
+ assert len(elements) > 0
+
+
+# =============================================================================
+# Section Key Sorting Tests
+# =============================================================================
+
+
+class TestNIS2SectionKeySorting:
+ """Test suite for NIS2 section key sorting."""
+
+ def test_sort_simple_sections(self, nis2_generator):
+ """Test sorting simple section numbers."""
+ result = nis2_generator._sort_section_key("1")
+ assert result == (1,)
+
+ result = nis2_generator._sort_section_key("2")
+ assert result == (2,)
+
+ def test_sort_subsections(self, nis2_generator):
+ """Test sorting subsection numbers."""
+ result = nis2_generator._sort_section_key("1.1")
+ assert result == (1, 1)
+
+ result = nis2_generator._sort_section_key("1.2")
+ assert result == (1, 2)
+
+ def test_sort_double_digit_sections(self, nis2_generator):
+ """Test sorting double digit section numbers."""
+ result = nis2_generator._sort_section_key("11")
+ assert result == (11,)
+
+ result = nis2_generator._sort_section_key("11.2")
+ assert result == (11, 2)
+
+ def test_sort_order_is_correct(self, nis2_generator):
+ """Test that sort order is numerically correct."""
+ keys = ["11", "1", "2", "1.2", "1.1", "11.2", "2.1"]
+ sorted_keys = sorted(keys, key=nis2_generator._sort_section_key)
+
+ assert sorted_keys == ["1", "1.1", "1.2", "2", "2.1", "11", "11.2"]
+
+ def test_sort_invalid_key(self, nis2_generator):
+ """Test sorting invalid section key."""
+ result = nis2_generator._sort_section_key("Other")
+ # Should contain infinity for non-numeric parts
+ assert result[0] == float("inf")
+
+
+# =============================================================================
+# Empty Data Tests
+# =============================================================================
+
+
+class TestNIS2EmptyData:
+ """Test suite for NIS2 with empty or minimal data."""
+
+ def test_executive_summary_empty_requirements(
+ self, nis2_generator, basic_nis2_compliance_data
+ ):
+ """Test executive summary with no requirements."""
+ basic_nis2_compliance_data.requirements = []
+ basic_nis2_compliance_data.attributes_by_requirement_id = {}
+
+ elements = nis2_generator.create_executive_summary(basic_nis2_compliance_data)
+
+ assert len(elements) > 0
+
+ def test_charts_section_empty_requirements(
+ self, nis2_generator, basic_nis2_compliance_data
+ ):
+ """Test charts section with no requirements."""
+ basic_nis2_compliance_data.requirements = []
+ basic_nis2_compliance_data.attributes_by_requirement_id = {}
+
+ elements = nis2_generator.create_charts_section(basic_nis2_compliance_data)
+
+ assert len(elements) > 0
+
+ def test_requirements_index_empty(self, nis2_generator, basic_nis2_compliance_data):
+ """Test requirements index with no requirements."""
+ basic_nis2_compliance_data.requirements = []
+ basic_nis2_compliance_data.attributes_by_requirement_id = {}
+
+ elements = nis2_generator.create_requirements_index(basic_nis2_compliance_data)
+
+ # Should at least have the title
+ assert len(elements) >= 1
+
+
+# =============================================================================
+# All Pass / All Fail Tests
+# =============================================================================
+
+
+class TestNIS2EdgeCases:
+ """Test suite for NIS2 edge cases."""
+
+ def test_all_requirements_pass(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_section1,
+ ):
+ """Test with all requirements passing."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id=f"REQ-{i:03d}",
+ description=f"Passing requirement {i}",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ )
+ for i in range(1, 6)
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ f"REQ-{i:03d}": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ }
+ for i in range(1, 6)
+ }
+
+ elements = nis2_generator.create_executive_summary(basic_nis2_compliance_data)
+ assert len(elements) > 0
+
+ def test_all_requirements_fail(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_section1,
+ ):
+ """Test with all requirements failing."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id=f"REQ-{i:03d}",
+ description=f"Failing requirement {i}",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ )
+ for i in range(1, 6)
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ f"REQ-{i:03d}": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ }
+ for i in range(1, 6)
+ }
+
+ elements = nis2_generator.create_executive_summary(basic_nis2_compliance_data)
+ assert len(elements) > 0
+
+ def test_all_requirements_manual(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_section1,
+ ):
+ """Test with all requirements being manual."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id=f"REQ-{i:03d}",
+ description=f"Manual requirement {i}",
+ status=StatusChoices.MANUAL,
+ passed_findings=0,
+ failed_findings=0,
+ total_findings=0,
+ )
+ for i in range(1, 6)
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ f"REQ-{i:03d}": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ }
+ for i in range(1, 6)
+ }
+
+        # Should handle gracefully - compliance should be 100% when no requirements are evaluated
+ elements = nis2_generator.create_executive_summary(basic_nis2_compliance_data)
+ assert len(elements) > 0
+
+
+# =============================================================================
+# Integration Tests
+# =============================================================================
+
+
+class TestNIS2Integration:
+ """Integration tests for NIS2 report generation."""
+
+ def test_full_report_generation_flow(
+ self,
+ nis2_generator,
+ basic_nis2_compliance_data,
+ mock_nis2_requirement_attribute_section1,
+ mock_nis2_requirement_attribute_section2,
+ ):
+ """Test the complete report generation flow."""
+ basic_nis2_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Section 1 passed",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Section 2 failed",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=5,
+ total_findings=5,
+ ),
+ ]
+ basic_nis2_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section1]
+ }
+ },
+ "REQ-002": {
+ "attributes": {
+ "req_attributes": [mock_nis2_requirement_attribute_section2]
+ }
+ },
+ }
+
+ # Generate all sections
+ exec_summary = nis2_generator.create_executive_summary(
+ basic_nis2_compliance_data
+ )
+ charts = nis2_generator.create_charts_section(basic_nis2_compliance_data)
+ index = nis2_generator.create_requirements_index(basic_nis2_compliance_data)
+
+ # All sections should generate without errors
+ assert len(exec_summary) > 0
+ assert len(charts) > 0
+ assert len(index) > 0
diff --git a/api/src/backend/tasks/tests/test_reports_threatscore.py b/api/src/backend/tasks/tests/test_reports_threatscore.py
new file mode 100644
index 0000000000..c79c0b16e9
--- /dev/null
+++ b/api/src/backend/tasks/tests/test_reports_threatscore.py
@@ -0,0 +1,1093 @@
+import io
+from unittest.mock import Mock
+
+import pytest
+from reportlab.platypus import Image, PageBreak, Paragraph, Table
+from tasks.jobs.reports import (
+ FRAMEWORK_REGISTRY,
+ ComplianceData,
+ RequirementData,
+ ThreatScoreReportGenerator,
+)
+
+from api.models import StatusChoices
+
+# =============================================================================
+# Fixtures
+# =============================================================================
+
+
+@pytest.fixture
+def threatscore_generator():
+ """Create a ThreatScoreReportGenerator instance for testing."""
+ config = FRAMEWORK_REGISTRY["prowler_threatscore"]
+ return ThreatScoreReportGenerator(config)
+
+
+@pytest.fixture
+def mock_requirement_attribute():
+ """Create a mock requirement attribute with numeric values."""
+ mock = Mock()
+ mock.LevelOfRisk = 4
+ mock.Weight = 100
+ mock.Section = "1. IAM"
+ mock.SubSection = "1.1 Access Control"
+ mock.Title = "Test Requirement"
+ mock.AttributeDescription = "Test Description"
+ return mock
+
+
+@pytest.fixture
+def mock_requirement_attribute_string_values():
+ """Create a mock requirement attribute with string values (edge case)."""
+ mock = Mock()
+ mock.LevelOfRisk = "5" # String instead of int
+ mock.Weight = "150" # String instead of int
+ mock.Section = "2. Attack Surface"
+ mock.SubSection = "2.1 Exposure"
+ mock.Title = "String Values Requirement"
+ mock.AttributeDescription = "Test with string numeric values"
+ return mock
+
+
+@pytest.fixture
+def mock_requirement_attribute_invalid_values():
+ """Create a mock requirement attribute with invalid values (edge case)."""
+ mock = Mock()
+ mock.LevelOfRisk = "High" # Invalid string
+ mock.Weight = "Critical" # Invalid string
+ mock.Section = "3. Logging"
+ mock.SubSection = "3.1 Audit"
+ mock.Title = "Invalid Values Requirement"
+ mock.AttributeDescription = "Test with invalid string values"
+ return mock
+
+
+@pytest.fixture
+def mock_requirement_attribute_empty_values():
+ """Create a mock requirement attribute with empty values."""
+ mock = Mock()
+ mock.LevelOfRisk = ""
+ mock.Weight = ""
+ mock.Section = "4. Encryption"
+ mock.SubSection = "4.1 Data at Rest"
+ mock.Title = "Empty Values Requirement"
+ mock.AttributeDescription = "Test with empty values"
+ return mock
+
+
+@pytest.fixture
+def mock_requirement_attribute_none_values():
+ """Create a mock requirement attribute with None values."""
+ mock = Mock()
+ mock.LevelOfRisk = None
+ mock.Weight = None
+ mock.Section = "1. IAM"
+ mock.SubSection = "1.2 Policies"
+ mock.Title = "None Values Requirement"
+ mock.AttributeDescription = "Test with None values"
+ return mock
+
+
+@pytest.fixture
+def basic_compliance_data():
+ """Create basic ComplianceData for testing."""
+ return ComplianceData(
+ tenant_id="tenant-123",
+ scan_id="scan-456",
+ provider_id="provider-789",
+ compliance_id="prowler_threatscore_aws",
+ framework="Prowler ThreatScore",
+ name="ThreatScore AWS",
+ version="1.0",
+ description="Security assessment framework",
+ )
+
+
+# =============================================================================
+# ThreatScore Calculation Tests
+# =============================================================================
+
+
+class TestThreatScoreCalculation:
+ """Test suite for ThreatScore calculation logic."""
+
+ def test_calculate_threatscore_no_findings_returns_100(
+ self, threatscore_generator, basic_compliance_data
+ ):
+ """Test that 100% is returned when there are no findings."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Test requirement",
+ status=StatusChoices.PASS,
+ passed_findings=0,
+ failed_findings=0,
+ total_findings=0,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {}
+
+ result = threatscore_generator._calculate_threatscore(basic_compliance_data)
+
+ assert result == 100.0
+
+ def test_calculate_threatscore_all_passed(
+ self, threatscore_generator, basic_compliance_data, mock_requirement_attribute
+ ):
+ """Test ThreatScore calculation when all findings pass."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Test requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_requirement_attribute]},
+ }
+ }
+
+ result = threatscore_generator._calculate_threatscore(basic_compliance_data)
+
+ assert result == 100.0
+
+ def test_calculate_threatscore_all_failed(
+ self, threatscore_generator, basic_compliance_data, mock_requirement_attribute
+ ):
+ """Test ThreatScore calculation when all findings fail."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Test requirement",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_requirement_attribute]},
+ }
+ }
+
+ result = threatscore_generator._calculate_threatscore(basic_compliance_data)
+
+ assert result == 0.0
+
+ def test_calculate_threatscore_mixed_findings(
+ self, threatscore_generator, basic_compliance_data, mock_requirement_attribute
+ ):
+ """Test ThreatScore calculation with mixed pass/fail findings."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Test requirement",
+ status=StatusChoices.FAIL,
+ passed_findings=7,
+ failed_findings=3,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {"req_attributes": [mock_requirement_attribute]},
+ }
+ }
+
+ result = threatscore_generator._calculate_threatscore(basic_compliance_data)
+
+ # rate_i = 7/10 = 0.7
+ # rfac_i = 1 + 0.25 * 4 = 2.0
+ # numerator = 0.7 * 10 * 100 * 2.0 = 1400
+ # denominator = 10 * 100 * 2.0 = 2000
+ # score = (1400 / 2000) * 100 = 70.0
+ assert result == 70.0
+
+ def test_calculate_threatscore_multiple_requirements(
+ self, threatscore_generator, basic_compliance_data
+ ):
+ """Test ThreatScore calculation with multiple requirements."""
+ mock_attr_1 = Mock()
+ mock_attr_1.LevelOfRisk = 5
+ mock_attr_1.Weight = 100
+
+ mock_attr_2 = Mock()
+ mock_attr_2.LevelOfRisk = 3
+ mock_attr_2.Weight = 50
+
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="High risk requirement",
+ status=StatusChoices.FAIL,
+ passed_findings=8,
+ failed_findings=2,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Low risk requirement",
+ status=StatusChoices.PASS,
+ passed_findings=5,
+ failed_findings=0,
+ total_findings=5,
+ ),
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_attr_1]}},
+ "REQ-002": {"attributes": {"req_attributes": [mock_attr_2]}},
+ }
+
+ result = threatscore_generator._calculate_threatscore(basic_compliance_data)
+
+ # REQ-001: rate=0.8, rfac=2.25, num=0.8*10*100*2.25=1800, den=10*100*2.25=2250
+ # REQ-002: rate=1.0, rfac=1.75, num=1.0*5*50*1.75=437.5, den=5*50*1.75=437.5
+ # total_num = 1800 + 437.5 = 2237.5
+ # total_den = 2250 + 437.5 = 2687.5
+ # score = (2237.5 / 2687.5) * 100 ≈ 83.26%
+ assert 83.0 < result < 84.0
+
+ def test_calculate_threatscore_zero_weight(
+ self, threatscore_generator, basic_compliance_data
+ ):
+ """Test ThreatScore calculation with zero weight."""
+ mock_attr = Mock()
+ mock_attr.LevelOfRisk = 4
+ mock_attr.Weight = 0
+
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Zero weight requirement",
+ status=StatusChoices.FAIL,
+ passed_findings=5,
+ failed_findings=5,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_attr]}},
+ }
+
+ result = threatscore_generator._calculate_threatscore(basic_compliance_data)
+
+ # With weight=0, denominator will be 0, should return 0.0
+ assert result == 0.0
+
+
+# =============================================================================
+# Type Conversion Tests (Critical for bug fix validation)
+# =============================================================================
+
+
+class TestTypeConversionSafety:
+ """Test suite for type conversion safety in ThreatScore calculations."""
+
+ def test_calculate_threatscore_with_string_risk_level(
+ self,
+ threatscore_generator,
+ basic_compliance_data,
+ mock_requirement_attribute_string_values,
+ ):
+ """Test that string LevelOfRisk is correctly converted to int."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="String values test",
+ status=StatusChoices.FAIL,
+ passed_findings=5,
+ failed_findings=5,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_requirement_attribute_string_values]
+ }
+ },
+ }
+
+ # Should not raise TypeError: '<=' not supported between 'str' and 'int'
+ result = threatscore_generator._calculate_threatscore(basic_compliance_data)
+
+ # LevelOfRisk="5" -> 5, Weight="150" -> 150
+ # rate_i = 0.5, rfac_i = 1 + 0.25*5 = 2.25
+ # numerator = 0.5 * 10 * 150 * 2.25 = 1687.5
+ # denominator = 10 * 150 * 2.25 = 3375
+ # score = 50.0
+ assert result == 50.0
+
+ def test_calculate_threatscore_with_invalid_string_values(
+ self,
+ threatscore_generator,
+ basic_compliance_data,
+ mock_requirement_attribute_invalid_values,
+ ):
+ """Test that invalid string values default to 0."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Invalid values test",
+ status=StatusChoices.FAIL,
+ passed_findings=5,
+ failed_findings=5,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_requirement_attribute_invalid_values]
+ }
+ },
+ }
+
+ # Should not raise ValueError, should default to 0
+ result = threatscore_generator._calculate_threatscore(basic_compliance_data)
+
+ # With weight=0 (from invalid string), denominator is 0
+ assert result == 0.0
+
+ def test_calculate_threatscore_with_empty_values(
+ self,
+ threatscore_generator,
+ basic_compliance_data,
+ mock_requirement_attribute_empty_values,
+ ):
+ """Test that empty string values default to 0."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Empty values test",
+ status=StatusChoices.FAIL,
+ passed_findings=5,
+ failed_findings=5,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_requirement_attribute_empty_values]
+ }
+ },
+ }
+
+ result = threatscore_generator._calculate_threatscore(basic_compliance_data)
+
+ # Empty strings should default to 0
+ assert result == 0.0
+
+ def test_calculate_threatscore_with_none_values(
+ self,
+ threatscore_generator,
+ basic_compliance_data,
+ mock_requirement_attribute_none_values,
+ ):
+ """Test that None values default to 0."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="None values test",
+ status=StatusChoices.FAIL,
+ passed_findings=5,
+ failed_findings=5,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_requirement_attribute_none_values]
+ }
+ },
+ }
+
+ result = threatscore_generator._calculate_threatscore(basic_compliance_data)
+
+ # None values should default to 0
+ assert result == 0.0
+
+ def test_critical_failed_requirements_with_string_risk_level(
+ self,
+ threatscore_generator,
+ basic_compliance_data,
+ mock_requirement_attribute_string_values,
+ ):
+ """Test that critical requirements filter works with string LevelOfRisk."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="High risk with string",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_requirement_attribute_string_values]
+ }
+ },
+ }
+
+ # Should not raise TypeError
+ result = threatscore_generator._get_critical_failed_requirements(
+ basic_compliance_data, min_risk_level=4
+ )
+
+ # LevelOfRisk="5" should be converted to 5, which is >= 4
+ assert len(result) == 1
+ assert result[0]["id"] == "REQ-001"
+ assert result[0]["risk_level"] == 5
+ assert result[0]["weight"] == 150
+
+ def test_critical_failed_requirements_with_invalid_risk_level(
+ self,
+ threatscore_generator,
+ basic_compliance_data,
+ mock_requirement_attribute_invalid_values,
+ ):
+ """Test that invalid LevelOfRisk is excluded from critical requirements."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Invalid risk level",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {
+ "attributes": {
+ "req_attributes": [mock_requirement_attribute_invalid_values]
+ }
+ },
+ }
+
+ result = threatscore_generator._get_critical_failed_requirements(
+ basic_compliance_data, min_risk_level=4
+ )
+
+ # Invalid string defaults to 0, which is < 4
+ assert len(result) == 0
+
+
+# =============================================================================
+# Critical Failed Requirements Tests
+# =============================================================================
+
+
+class TestCriticalFailedRequirements:
+ """Test suite for critical failed requirements identification."""
+
+ def test_get_critical_failed_no_failures(
+ self, threatscore_generator, basic_compliance_data, mock_requirement_attribute
+ ):
+ """Test that no critical requirements are returned when all pass."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Passing requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_requirement_attribute]}},
+ }
+
+ result = threatscore_generator._get_critical_failed_requirements(
+ basic_compliance_data, min_risk_level=4
+ )
+
+ assert len(result) == 0
+
+ def test_get_critical_failed_below_threshold(
+ self, threatscore_generator, basic_compliance_data
+ ):
+ """Test that low risk failures are not included."""
+ mock_attr = Mock()
+ mock_attr.LevelOfRisk = 2 # Below threshold of 4
+ mock_attr.Weight = 100
+ mock_attr.Title = "Low Risk"
+ mock_attr.Section = "1. IAM"
+
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Low risk failure",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_attr]}},
+ }
+
+ result = threatscore_generator._get_critical_failed_requirements(
+ basic_compliance_data, min_risk_level=4
+ )
+
+ assert len(result) == 0
+
+ def test_get_critical_failed_at_threshold(
+ self, threatscore_generator, basic_compliance_data
+ ):
+ """Test that requirements at exactly the threshold are included."""
+ mock_attr = Mock()
+ mock_attr.LevelOfRisk = 4 # Exactly at threshold
+ mock_attr.Weight = 100
+ mock_attr.Title = "At Threshold"
+ mock_attr.Section = "1. IAM"
+
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="At threshold failure",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_attr]}},
+ }
+
+ result = threatscore_generator._get_critical_failed_requirements(
+ basic_compliance_data, min_risk_level=4
+ )
+
+ assert len(result) == 1
+ assert result[0]["risk_level"] == 4
+
+ def test_get_critical_failed_sorted_by_risk_and_weight(
+ self, threatscore_generator, basic_compliance_data
+ ):
+ """Test that critical requirements are sorted by risk level then weight."""
+ mock_attr_1 = Mock()
+ mock_attr_1.LevelOfRisk = 4
+ mock_attr_1.Weight = 150
+ mock_attr_1.Title = "Mid risk, high weight"
+ mock_attr_1.Section = "1. IAM"
+
+ mock_attr_2 = Mock()
+ mock_attr_2.LevelOfRisk = 5
+ mock_attr_2.Weight = 50
+ mock_attr_2.Title = "High risk, low weight"
+ mock_attr_2.Section = "2. Attack Surface"
+
+ mock_attr_3 = Mock()
+ mock_attr_3.LevelOfRisk = 5
+ mock_attr_3.Weight = 100
+ mock_attr_3.Title = "High risk, mid weight"
+ mock_attr_3.Section = "3. Logging"
+
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="First",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=5,
+ total_findings=5,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Second",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=5,
+ total_findings=5,
+ ),
+ RequirementData(
+ id="REQ-003",
+ description="Third",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=5,
+ total_findings=5,
+ ),
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_attr_1]}},
+ "REQ-002": {"attributes": {"req_attributes": [mock_attr_2]}},
+ "REQ-003": {"attributes": {"req_attributes": [mock_attr_3]}},
+ }
+
+ result = threatscore_generator._get_critical_failed_requirements(
+ basic_compliance_data, min_risk_level=4
+ )
+
+ assert len(result) == 3
+ # Sorted by (risk_level, weight) descending
+ # First: risk=5, weight=100 (REQ-003)
+ # Second: risk=5, weight=50 (REQ-002)
+ # Third: risk=4, weight=150 (REQ-001)
+ assert result[0]["id"] == "REQ-003"
+ assert result[1]["id"] == "REQ-002"
+ assert result[2]["id"] == "REQ-001"
+
+ def test_get_critical_failed_manual_status_excluded(
+ self, threatscore_generator, basic_compliance_data, mock_requirement_attribute
+ ):
+ """Test that MANUAL status requirements are excluded."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Manual requirement",
+ status=StatusChoices.MANUAL,
+ passed_findings=0,
+ failed_findings=0,
+ total_findings=0,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_requirement_attribute]}},
+ }
+
+ result = threatscore_generator._get_critical_failed_requirements(
+ basic_compliance_data, min_risk_level=4
+ )
+
+ assert len(result) == 0
+
+
+# =============================================================================
+# Section Score Chart Tests
+# =============================================================================
+
+
+class TestSectionScoreChart:
+ """Test suite for section score chart generation."""
+
+ def test_create_section_chart_empty_data(
+ self, threatscore_generator, basic_compliance_data
+ ):
+ """Test chart creation with no requirements."""
+ basic_compliance_data.requirements = []
+ basic_compliance_data.attributes_by_requirement_id = {}
+
+ result = threatscore_generator._create_section_score_chart(
+ basic_compliance_data
+ )
+
+ assert isinstance(result, io.BytesIO)
+ assert result.getvalue() # Should have content
+
+ def test_create_section_chart_single_section(
+ self, threatscore_generator, basic_compliance_data, mock_requirement_attribute
+ ):
+ """Test chart creation with a single section."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="IAM requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_requirement_attribute]}},
+ }
+
+ result = threatscore_generator._create_section_score_chart(
+ basic_compliance_data
+ )
+
+ assert isinstance(result, io.BytesIO)
+
+ def test_create_section_chart_multiple_sections(
+ self, threatscore_generator, basic_compliance_data
+ ):
+ """Test chart creation with multiple sections."""
+ mock_attr_1 = Mock()
+ mock_attr_1.LevelOfRisk = 4
+ mock_attr_1.Weight = 100
+ mock_attr_1.Section = "1. IAM"
+
+ mock_attr_2 = Mock()
+ mock_attr_2.LevelOfRisk = 3
+ mock_attr_2.Weight = 50
+ mock_attr_2.Section = "2. Attack Surface"
+
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="IAM requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ ),
+ RequirementData(
+ id="REQ-002",
+ description="Attack Surface requirement",
+ status=StatusChoices.FAIL,
+ passed_findings=5,
+ failed_findings=5,
+ total_findings=10,
+ ),
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_attr_1]}},
+ "REQ-002": {"attributes": {"req_attributes": [mock_attr_2]}},
+ }
+
+ result = threatscore_generator._create_section_score_chart(
+ basic_compliance_data
+ )
+
+ assert isinstance(result, io.BytesIO)
+
+ def test_create_section_chart_no_findings_section_gets_100(
+ self, threatscore_generator, basic_compliance_data
+ ):
+ """Test that sections without findings get 100% score."""
+ mock_attr = Mock()
+ mock_attr.LevelOfRisk = 4
+ mock_attr.Weight = 100
+ mock_attr.Section = "1. IAM"
+
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="No findings requirement",
+ status=StatusChoices.MANUAL,
+ passed_findings=0,
+ failed_findings=0,
+ total_findings=0,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_attr]}},
+ }
+
+ # Chart should be created without errors
+ result = threatscore_generator._create_section_score_chart(
+ basic_compliance_data
+ )
+
+ assert isinstance(result, io.BytesIO)
+
+
+# =============================================================================
+# Executive Summary Tests
+# =============================================================================
+
+
+class TestExecutiveSummary:
+ """Test suite for executive summary generation."""
+
+ def test_executive_summary_contains_chart(
+ self, threatscore_generator, basic_compliance_data, mock_requirement_attribute
+ ):
+ """Test that executive summary contains a chart."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Test requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_requirement_attribute]}},
+ }
+
+ elements = threatscore_generator.create_executive_summary(basic_compliance_data)
+
+ assert len(elements) > 0
+ assert any(isinstance(e, Image) for e in elements)
+
+ def test_executive_summary_contains_score_table(
+ self, threatscore_generator, basic_compliance_data, mock_requirement_attribute
+ ):
+ """Test that executive summary contains a score table."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Test requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_requirement_attribute]}},
+ }
+
+ elements = threatscore_generator.create_executive_summary(basic_compliance_data)
+
+ assert any(isinstance(e, Table) for e in elements)
+
+
+# =============================================================================
+# Charts Section Tests
+# =============================================================================
+
+
+class TestChartsSection:
+ """Test suite for charts section generation."""
+
+ def test_charts_section_no_critical_failures(
+ self, threatscore_generator, basic_compliance_data, mock_requirement_attribute
+ ):
+ """Test charts section when no critical failures exist."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Passing requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_requirement_attribute]}},
+ }
+
+ elements = threatscore_generator.create_charts_section(basic_compliance_data)
+
+ assert len(elements) > 0
+ # Should contain success message
+ paragraphs = [e for e in elements if isinstance(e, Paragraph)]
+ content = " ".join(str(p.text) for p in paragraphs)
+ assert "No critical failed requirements" in content or "Great job" in content
+
+ def test_charts_section_with_critical_failures(
+ self, threatscore_generator, basic_compliance_data
+ ):
+ """Test charts section when critical failures exist."""
+ mock_attr = Mock()
+ mock_attr.LevelOfRisk = 5
+ mock_attr.Weight = 100
+ mock_attr.Title = "Critical Failure"
+ mock_attr.Section = "1. IAM"
+
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Critical failure",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_attr]}},
+ }
+
+ elements = threatscore_generator.create_charts_section(basic_compliance_data)
+
+ assert len(elements) > 0
+ # Should contain a table with critical requirements
+ assert any(isinstance(e, Table) for e in elements)
+
+ def test_charts_section_starts_with_page_break(
+ self, threatscore_generator, basic_compliance_data
+ ):
+ """Test that charts section starts with a page break."""
+ basic_compliance_data.requirements = []
+ basic_compliance_data.attributes_by_requirement_id = {}
+
+ elements = threatscore_generator.create_charts_section(basic_compliance_data)
+
+ assert len(elements) > 0
+ assert isinstance(elements[0], PageBreak)
+
+ def test_charts_section_respects_min_risk_level(
+ self, threatscore_generator, basic_compliance_data
+ ):
+ """Test that charts section respects the min_risk_level setting."""
+ threatscore_generator._min_risk_level = 5 # Higher threshold
+
+ mock_attr = Mock()
+ mock_attr.LevelOfRisk = 4 # Below the new threshold
+ mock_attr.Weight = 100
+ mock_attr.Title = "Medium Risk"
+ mock_attr.Section = "1. IAM"
+
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Medium risk failure",
+ status=StatusChoices.FAIL,
+ passed_findings=0,
+ failed_findings=10,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_attr]}},
+ }
+
+ elements = threatscore_generator.create_charts_section(basic_compliance_data)
+
+ # Should not contain a table since risk=4 < min=5
+ tables = [e for e in elements if isinstance(e, Table)]
+ assert len(tables) == 0
+
+
+# =============================================================================
+# Requirements Index Tests
+# =============================================================================
+
+
+class TestRequirementsIndex:
+ """Test suite for requirements index generation."""
+
+ def test_requirements_index_empty(
+ self, threatscore_generator, basic_compliance_data
+ ):
+ """Test requirements index with no requirements."""
+ basic_compliance_data.requirements = []
+ basic_compliance_data.attributes_by_requirement_id = {}
+
+ elements = threatscore_generator.create_requirements_index(
+ basic_compliance_data
+ )
+
+ assert len(elements) >= 1 # At least the header
+ assert isinstance(elements[0], Paragraph)
+
+ def test_requirements_index_single_requirement(
+ self, threatscore_generator, basic_compliance_data, mock_requirement_attribute
+ ):
+ """Test requirements index with a single requirement."""
+ basic_compliance_data.requirements = [
+ RequirementData(
+ id="REQ-001",
+ description="Test requirement",
+ status=StatusChoices.PASS,
+ passed_findings=10,
+ failed_findings=0,
+ total_findings=10,
+ )
+ ]
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_requirement_attribute]}},
+ }
+
+ elements = threatscore_generator.create_requirements_index(
+ basic_compliance_data
+ )
+
+ assert len(elements) >= 2 # Header + at least section header
+
+ def test_requirements_index_organized_by_section(
+ self, threatscore_generator, basic_compliance_data
+ ):
+ """Test that requirements index is organized by section."""
+ mock_attr_1 = Mock()
+ mock_attr_1.Section = "1. IAM"
+ mock_attr_1.SubSection = "1.1 Access"
+ mock_attr_1.Title = "IAM Requirement"
+
+ mock_attr_2 = Mock()
+ mock_attr_2.Section = "2. Attack Surface"
+ mock_attr_2.SubSection = "2.1 Exposure"
+ mock_attr_2.Title = "Attack Surface Requirement"
+
+ basic_compliance_data.requirements = []
+ basic_compliance_data.attributes_by_requirement_id = {
+ "REQ-001": {"attributes": {"req_attributes": [mock_attr_1]}},
+ "REQ-002": {"attributes": {"req_attributes": [mock_attr_2]}},
+ }
+
+ elements = threatscore_generator.create_requirements_index(
+ basic_compliance_data
+ )
+
+ # Check that section headers are present
+ paragraphs = [e for e in elements if isinstance(e, Paragraph)]
+ content = " ".join(str(p.text) for p in paragraphs)
+ assert "IAM" in content or "1." in content
+
+
+# =============================================================================
+# Critical Requirements Table Tests
+# =============================================================================
+
+
+class TestCriticalRequirementsTable:
+ """Test suite for critical requirements table generation."""
+
+ def test_create_table_single_requirement(self, threatscore_generator):
+ """Test table creation with a single requirement."""
+ critical = [
+ {
+ "id": "REQ-001",
+ "risk_level": 5,
+ "weight": 100,
+ "title": "Test Requirement",
+ "section": "1. IAM",
+ }
+ ]
+
+ table = threatscore_generator._create_critical_requirements_table(critical)
+
+ assert isinstance(table, Table)
+
+ def test_create_table_truncates_long_titles(self, threatscore_generator):
+ """Test that long titles are truncated."""
+ critical = [
+ {
+ "id": "REQ-001",
+ "risk_level": 5,
+ "weight": 100,
+ "title": "A" * 100, # Very long title
+ "section": "1. IAM",
+ }
+ ]
+
+ table = threatscore_generator._create_critical_requirements_table(critical)
+
+ # Table should be created without errors
+ assert isinstance(table, Table)
+
+ def test_create_table_multiple_requirements(self, threatscore_generator):
+ """Test table creation with multiple requirements."""
+ critical = [
+ {
+ "id": "REQ-001",
+ "risk_level": 5,
+ "weight": 150,
+ "title": "First",
+ "section": "1. IAM",
+ },
+ {
+ "id": "REQ-002",
+ "risk_level": 4,
+ "weight": 100,
+ "title": "Second",
+ "section": "2. Attack Surface",
+ },
+ ]
+
+ table = threatscore_generator._create_critical_requirements_table(critical)
+
+ assert isinstance(table, Table)