Compare commits

..

16 Commits

Author SHA1 Message Date
Elizabeth W 7f366204a9 fixing confusing repeated lists of scanners 2026-04-20 01:43:14 -06:00
Elizabeth W 1036fce55e other changes 2026-04-20 01:25:44 -06:00
Elizabeth W 38ff2f4fde tests for enforce policy 2026-04-20 01:25:11 -06:00
Elizabeth W f0b937deb7 remove embedded python that moved to defectdojo and enforce policy and change to standalone typescript 2026-04-20 01:24:13 -06:00
Elizabeth W 251070dd77 rename componenets 2026-04-20 01:23:13 -06:00
Elizabeth W 7587c285e7 renovate bot 2026-04-19 22:55:44 -06:00
Elizabeth W d8ee53395a infiniscal refactor 2026-04-19 22:55:19 -06:00
Elizabeth W 5e31efd464 remove old initcontainers 2026-04-19 22:52:23 -06:00
Elizabeth W ebd29176d0 remove old initcontainers 2026-04-19 22:52:17 -06:00
Elizabeth W aa907060a4 all phases complete! 2026-04-19 22:52:02 -06:00
Elizabeth W a9224a41c1 note to split into multiple files 2026-04-19 22:29:53 -06:00
Elizabeth W 35d8630bf2 implement scan trufflehog 2026-04-19 22:29:36 -06:00
Elizabeth W df10609df5 implementing first steps 2026-04-19 22:29:13 -06:00
Elizabeth W 8c2c420bff final plan notes 2026-04-19 22:12:10 -06:00
Elizabeth W 963e020efa plan files 2026-04-19 22:12:00 -06:00
Elizabeth W 89b3586030 noted outdated features 2026-04-19 21:17:14 -06:00
49 changed files with 3068 additions and 5 deletions
+66 -1
View File
@@ -1,3 +1,68 @@
# agentguard-ci
A DevSecOps Argo Workflows pipeline to protect against AI coding agent hallucinations and supply chain attacks.
A DevSecOps Argo Workflows pipeline specifically designed to protect against AI coding agent hallucinations, supply chain attacks, and security misconfigurations in a homelab or solo-developer environment.
## 📖 The Problem
AI coding agents are highly productive "junior developers," but they lack intrinsic context. They frequently hallucinate dummy credentials, introduce insecure application logic, or pull in new, potentially typosquatted dependencies.
This pipeline acts as a strict, automated gatekeeper that prioritizes zero-noise alerting, allowing you to maintain high development velocity without compromising the security of your exposed homelab.
## 🏗️ Architecture & Features
This project deploys an **Argo ClusterWorkflowTemplate** that orchestrates a parallel security scanning matrix whenever code is pushed:
* **TruffleHog**: Verifies leaked API keys dynamically to prevent false-positives from AI hallucinations.
* **Semgrep**: Scans first-party application logic for vulnerabilities (e.g., SQLi, XSS).
* **Socket.dev**: Analyzes dependencies for supply chain attacks, malware, and typosquatting.
* **Pulumi CrossGuard**: Validates Infrastructure as Code against policy packs.
* **Syft + Grype**: Generates SBOMs and scans for container vulnerabilities scored via EPSS.
* **KICS**: Scans infrastructure misconfigurations.
* **DefectDojo & MinIO**: Uploads findings to a centralized ASPM dashboard and raw SARIF/JSON reports to S3-compatible storage.
* **Policy Enforcement**: Custom TypeScript logic automatically fails the build if any findings exceed your defined CVSS severity threshold.
For deep-dive architecture decisions, see the [Pipeline Overview ADR](docs/pipeline-overview.md) and [Secret Strategy ADR](docs/secret-strategy.md).
## 🚀 Prerequisites
Before installing the pipeline, ensure your Kubernetes cluster has the following installed:
* **Argo Workflows**
* **Infisical Kubernetes Operator** (for secret injection)
* **DefectDojo** (for vulnerability dashboards)
* **MinIO / S3** (for raw report storage)
You will also need API keys or tokens for: Socket.dev, Pulumi, AWS/MinIO, and DefectDojo.
## 🛠️ Installation
### 1. Build the Pipeline Tools Image
The pipeline relies on custom TypeScript logic (e.g., CVSS enforcement and API uploads). Build and push this image to your registry:
```bash
cd tools
docker build -t your-registry/agentguard-tools:latest .
docker push your-registry/agentguard-tools:latest
```
*(Make sure to update `clusterworkflowtemplate.yaml` with your custom image if you do not use `agentguard-tools:latest`)*
### 2. Configure Helm Values
Update `helm/values.yaml` (if applicable) and configure your Infisical integration:
```yaml
pipeline:
enabled: true
infisical:
workspaceSlug: "your-workspace-id"
projectSlug: "your-project-id"
```
### 3. Deploy via Helm
Install the pipeline and its associated resources to your cluster:
```bash
helm upgrade --install agentguard-ci ./helm -n argo
```
## 🔐 Secret Management Integration
To prevent hardcoded secrets in the pipeline, this project uses the **Infisical Kubernetes Operator**.
When you deploy the Helm chart, it creates an `InfisicalSecret` Custom Resource (`helm/templates/infisical-secret.yaml`). The Infisical Operator securely fetches your vault secrets (like `SOCKET_DEV_API_KEY` and `DEFECTDOJO_API_TOKEN`) and synchronizes them into a standard Kubernetes `Secret` named `amp-security-pipeline-secrets`.
The Argo Workflow then mounts this standard secret as environment variables inside the scanning containers, ensuring zero secret leakage in the Git repository.
+16
View File
@@ -0,0 +1,16 @@
"""One-off migration script: converts standalone Helm workflow-template
files (scan-*.yaml, upload-*.yaml, enforce-policy.yaml) into named Helm
partials (_<name>.yaml using {{- define }}) and deletes the originals."""
import glob, re, os

# Matches the template list under "spec:\n templates:" up to the trailing
# Helm "{{- end }}" marker (greedy across lines, as in the original).
TEMPLATE_RE = re.compile(r'spec:\n templates:\n(.*)(?:{{- end }})', re.DOTALL)

sources = (
    glob.glob("helm/templates/scan-*.yaml")
    + glob.glob("helm/templates/upload-*.yaml")
    + ["helm/templates/enforce-policy.yaml"]
)

for path in sources:
    with open(path) as handle:
        text = handle.read()
    found = TEMPLATE_RE.search(text)
    if not found:
        continue
    body = found.group(1).strip()
    # Extract the base name, e.g. "helm/templates/scan-kics.yaml" -> "scan-kics"
    stem = os.path.basename(path).replace('.yaml', '')
    partial = f'{{{{- define "template.{stem}" }}}}\n{body}\n{{{{- end }}}}\n'
    target = os.path.join(os.path.dirname(path), f"_{stem}.yaml")
    with open(target, "w") as out:
        out.write(partial)
    os.remove(path)
+47
View File
@@ -0,0 +1,47 @@
# for the pipeline
## languages
#### The tools we are using to write this in and deploy it
helm
pulumi
argo workflows?
## pipeline
#### The actual steps in the pipeline
pulumi
pulumi crossguard
socket.dev
argo workflows
semgrep
trufflehog
syft // do we need this as socket.dev or semgrep can do sbom?
grype
renovate bot
kics (keeping infrastructure as code secure)
## k8s
#### Things I assume I need installed in my k8s cluster
infisical
argo workflows
defectdojo
## repository
#### Things to set on the repository
branch protection
## local
#### Things to add to my chezmoi install so that they are always available but should be mentioned as things the user should have
eslint-plugin-security
gitleaks
socket cli
## Might be needed
#### Things that we might need. I am unsure if we have other tools that sufficiently cover the security concerns
trivy
# For homelab
## optional things
#### These are things that will exist in my homelab eventually, however they are not needed for this pipeline I think
harbor container registry
suse security (neuvector)
nexus package caching
@@ -0,0 +1,26 @@
# Improvement Plan: Refactor Infisical Secrets to Native CRD
## Objective
The previous implementation used a Mutating Webhook (Infisical Agent Injector) and an `initContainer` polling loop to wait for secrets to be injected into the Argo Workflow pods. Best practices indicate this causes race conditions and ArgoCD "OutOfSync" issues. We need to refactor the pipeline to use the native `InfisicalSecret` CRD and standard Kubernetes `secretKeyRef` environment variables.
## Requirements
- **Remove Webhook Logic**: Strip out any Infisical annotations (e.g., `secrets.infisical.com/auto-reload`) from the Argo Workflows pod metadata.
- **Remove initContainer**: Delete the `initContainer` polling logic that was waiting for environment variables to populate.
- **Create InfisicalSecret CRD**: Create a new Helm template (e.g., `helm/templates/infisical-secret.yaml`) defining an `InfisicalSecret` resource. This resource should sync the required secrets (Socket.dev API key, Pulumi credentials, S3/MinIO credentials, DefectDojo API keys) into a standard Kubernetes `Secret` (e.g., named `amp-security-pipeline-secrets`).
- **Update Workflow Tasks**: Modify the `ClusterWorkflowTemplate` (and any other files where tasks are defined). Instead of expecting the webhook to inject the secrets directly, configure the task containers to pull their required environment variables using native Kubernetes syntax:
```yaml
env:
- name: SOCKET_DEV_API_KEY
valueFrom:
secretKeyRef:
name: amp-security-pipeline-secrets
key: SOCKET_DEV_API_KEY
```
## Agent Instructions
1. Find and open the implemented `ClusterWorkflowTemplate` and task definition YAML files in `helm/templates/`.
2. Find and remove all instances of the `initContainer` secret-waiting logic.
3. Find and remove all Infisical mutating webhook annotations from the workflow/pod templates.
4. Create a new file `helm/templates/infisical-secret.yaml` defining the `InfisicalSecret` CRD. Make sure it targets the necessary secrets for Socket.dev, Pulumi, Storage, and DefectDojo.
5. Update the `scan-socketdev`, `scan-crossguard`, `upload-storage`, and `upload-defectdojo` tasks in the workflow template to use native `valueFrom: secretKeyRef` for their required environment variables, referencing the new native Kubernetes Secret.
6. Verify the YAML is valid and clean.
+4 -3
View File
@@ -24,15 +24,15 @@ To achieve this, the architecture utilizes "Defense in Depth," split across seve
---
2. Part 1: Local Development & Repository Tooling
2.1 Secret Scanning: Gitleaks (Local)
## 2. Part 1: Local Development & Repository Tooling
### 2.1 Secret Scanning: Gitleaks (Local)
What it does: Fast, static regex matching for secrets.
Where it runs: Local developer machine (via Pre-commit hook).
Detailed Rationale: Developers make human errors. Gitleaks runs in milliseconds and acts as a "spell-check for secrets." It prevents accidentally committing a .env file or hardcoded token before it ever enters the local Git history.
Trade-offs: It relies on the developer actively using the pre-commit hook. If a commit is forced (--no-verify), the local check is bypassed.
2.2 Supply Chain Defense: Socket CLI (Local Wrapper)
### 2.2 Supply Chain Defense: Socket CLI (Local Wrapper)
What it does: Intercepts package installation to check for malicious code, typosquatting, and hijacked packages.
Where it runs: Local machine (aliased: alias pnpm="socket pnpm").
@@ -62,6 +62,7 @@ To achieve this, the architecture utilizes "Defense in Depth," split across seve
* **Detailed Rationale:** Traditional CVE scanners check for accidental developer mistakes. Socket checks for active malice (install scripts that steal SSH keys, typosquatting, hijacked maintainer accounts). Because AI agents regularly pull in new dependencies to solve coding problems, Socket ensures neither the local machine nor the pipeline executes malicious code during dependency resolution.
* **Trade-offs:** API-dependent. To conserve free-tier API quotas, the pipeline step must be strictly configured to trigger *only* when lockfiles (`pnpm-lock.yaml`) change, requiring careful CI optimization.
**outdated, using pulumi crossguard**
### 2.5 Infrastructure Validation (IaC): Checkov
* **What it does:** Parses Kubernetes manifests, Terraform, and Dockerfiles to ensure they adhere to security best practices.
* **Detailed Rationale:** A homelab exposed to the internet cannot afford basic infrastructure misconfigurations, such as running containers as `root` or mapping sensitive host volumes. Checkov acts as an automated senior cloud architect, validating the AI's generated Kubernetes manifests before Argo CD syncs them.
+19
View File
@@ -0,0 +1,19 @@
# Implementation Plan: Base ClusterWorkflowTemplate
## Objective
Create the foundational Argo `ClusterWorkflowTemplate` for the security pipeline. It must use semantic versioning (e.g., `amp-security-pipeline-v1.0.0`) so projects can pin to a stable version.
## Requirements
- Define a `ClusterWorkflowTemplate` resource.
- Name the template with a semver tag (e.g., `name: amp-security-pipeline-v1.0.0`).
- Define inputs/parameters:
- `working-dir` (default: `.`)
- `fail-on-cvss` (default: `7.0`)
- `repo-url` (required)
- `git-revision` (default: `main`)
- Define the DAG (Directed Acyclic Graph) structure that will orchestrate the phases (Clone -> Parallel Scanners -> Sinks/Enforcement).
## Agent Instructions
1. Create `helm/templates/clusterworkflowtemplate.yaml`.
2. Ensure the template is structured to accept the parameters and orchestrate downstream DAG tasks.
3. Keep the actual task implementations (like git clone or scanners) as empty stubs for now; they will be filled by subsequent steps.
+15
View File
@@ -0,0 +1,15 @@
# Implementation Plan: Shared PVC Workspace & Git Clone
## Objective
Implement a shared Persistent Volume Claim (PVC) strategy to ensure the repository is only cloned once and all parallel scanners can access the same codebase without re-downloading it.
## Requirements
- Use Argo Workflows `volumeClaimTemplates` to define a temporary PVC for the workflow duration.
- Create a `clone-repo` task in the DAG.
- The `clone-repo` task should use a standard git image (e.g., Alpine/Git) to clone the `repo-url` at `git-revision` into the shared PVC mounted at `/workspace`.
- Ensure all subsequent tasks will mount this PVC at `/workspace`.
## Agent Instructions
1. Modify the `ClusterWorkflowTemplate` to add the `volumeClaimTemplates`.
2. Add the `clone-repo` task template that executes `git clone`.
3. Configure the DAG so the parallel scanning steps depend on the successful completion of `clone-repo`.
+14
View File
@@ -0,0 +1,14 @@
# Implementation Plan: Infisical Secrets Injection InitContainer
## Objective
Ensure that Infisical secrets are injected as **Environment Variables** securely before any main container logic runs in the Argo Workflows steps.
## Requirements
- Use the Infisical Kubernetes operator approach.
- Add the necessary Infisical annotations (e.g., `secrets.infisical.com/auto-reload: "true"`) to the pod metadata templates.
- **Crucial:** Because Argo Workflows pods start quickly, inject an `initContainer` into tasks that require secrets. This initContainer should run a simple polling script (e.g., a loop checking if a specific expected environment variable exists) to pause the pod's main container execution until the Infisical mutating webhook has successfully injected the environment variables.
## Agent Instructions
1. Create a reusable snippet or template property for the `initContainer` wait logic.
2. Apply the required Infisical annotations to the `ClusterWorkflowTemplate`'s `podSpecPatch` or task metadata.
3. Document which steps will require which secrets (e.g., DefectDojo API keys, Socket.dev keys).
+17
View File
@@ -0,0 +1,17 @@
# Implementation Plan: TruffleHog Scanner
## Objective
Implement the TruffleHog secrets scanning step as a parallel task in the DAG.
## Requirements
- Define a task template named `scan-trufflehog`.
- Depend on the `clone-repo` task.
- Mount the shared PVC at `/workspace`.
- Run TruffleHog against the `/workspace` directory.
- Configure TruffleHog to output its findings in JSON or SARIF format.
- Save the output to `/workspace/reports/trufflehog.json` (or `.sarif`).
- Ensure the task exits successfully (exit code 0) even if secrets are found, so the pipeline can proceed to the aggregation step (Phase 3). (Use `continueOn` or `ignoreError` or a wrapper script like `trufflehog ... || true`).
## Agent Instructions
1. Add the `scan-trufflehog` template to the `ClusterWorkflowTemplate`.
2. Wire it into the DAG alongside the other scanners.
+18
View File
@@ -0,0 +1,18 @@
# Implementation Plan: Semgrep Scanner
## Objective
Implement the Semgrep SAST (Static Application Security Testing) scanning step as a parallel task in the DAG.
## Requirements
- Define a task template named `scan-semgrep`.
- Depend on the `clone-repo` task.
- Mount the shared PVC at `/workspace`.
- Run Semgrep with standard or configurable rulesets against the `/workspace` directory.
- Output findings in SARIF format.
- Save the output to `/workspace/reports/semgrep.sarif`.
- Ensure the task exits successfully even if vulnerabilities are found, so Phase 3 aggregation can run (e.g., wrap in a script that returns 0).
## Agent Instructions
1. Add the `scan-semgrep` template to the `ClusterWorkflowTemplate`.
2. Wire it into the DAG alongside the other scanners.
3. **CRITICAL: File Splitting:** Do NOT put everything into one giant file! Split your YAML manifests or configurations into separate, smaller files (e.g. using separate Helm template files, configmaps, or helper scripts) to prevent exhausting the context window.
+18
View File
@@ -0,0 +1,18 @@
# Implementation Plan: KICS IaC Scanner
## Objective
Implement the KICS (Keeping Infrastructure as Code Secure) scanning step as a parallel task in the DAG.
## Requirements
- Define a task template named `scan-kics`.
- Depend on the `clone-repo` task.
- Mount the shared PVC at `/workspace`.
- Run KICS against the `/workspace` directory (or the specific `working-dir` parameter).
- Output findings in SARIF and/or JSON format.
- Save the output to `/workspace/reports/kics.sarif`.
- Ensure the task exits successfully even if issues are found, to allow Phase 3 aggregation (e.g., wrap with `|| true`).
## Agent Instructions
1. Add the `scan-kics` template to the `ClusterWorkflowTemplate`.
2. Wire it into the DAG alongside the other scanners.
3. **CRITICAL: File Splitting:** Do NOT put everything into one giant file! Split your YAML manifests or configurations into separate, smaller files (e.g. using separate Helm template files, configmaps, or helper scripts) to prevent exhausting the context window.
+20
View File
@@ -0,0 +1,20 @@
# Implementation Plan: Socket.dev Scanner
## Objective
Implement the Socket.dev supply chain security scanning step as a parallel task in the DAG.
## Requirements
- Define a task template named `scan-socketdev`.
- Depend on the `clone-repo` task.
- Mount the shared PVC at `/workspace`.
- Expect the Socket.dev API key to be injected via Infisical as an environment variable (use the initContainer wait logic from Phase 1 Step 3).
- Run the Socket CLI against the dependency manifests in `/workspace`.
- Output findings in a standard format (JSON/SARIF).
- Save the output to `/workspace/reports/socketdev.json`.
- Ensure the task exits successfully (e.g. `|| true`) to allow Phase 3 aggregation.
## Agent Instructions
1. Add the `scan-socketdev` template to the `ClusterWorkflowTemplate`.
2. Configure the Infisical initContainer logic for this specific step to wait for the API key.
3. Wire it into the DAG alongside the other scanners.
4. **CRITICAL: File Splitting:** Do NOT put everything into one giant file! Split your YAML manifests or configurations into separate, smaller files (e.g. using separate Helm template files, configmaps, or helper scripts) to prevent exhausting the context window.
+19
View File
@@ -0,0 +1,19 @@
# Implementation Plan: Syft & Grype Scanner
## Objective
Implement the SBOM generation (Syft) and vulnerability scanning (Grype) step as a parallel task in the DAG.
## Requirements
- Define a task template named `scan-syft-grype`.
- Depend on the `clone-repo` task.
- Mount the shared PVC at `/workspace`.
- Step A: Run Syft against `/workspace` to generate an SBOM (SPDX/CycloneDX format) -> `/workspace/reports/sbom.json`.
- Step B: Run Grype against the generated SBOM (or the workspace directly) to find vulnerabilities.
- Output Grype findings in SARIF format.
- Save the Grype output to `/workspace/reports/grype.sarif`.
- Ensure the task exits successfully (`|| true`) to allow Phase 3 aggregation.
## Agent Instructions
1. Add the `scan-syft-grype` template to the `ClusterWorkflowTemplate`.
2. Wire it into the DAG alongside the other scanners.
3. **CRITICAL: File Splitting:** Do NOT put everything into one giant file! Split your YAML manifests or configurations into separate, smaller files (e.g. using separate Helm template files, configmaps, or helper scripts) to prevent exhausting the context window.
+19
View File
@@ -0,0 +1,19 @@
# Implementation Plan: Pulumi Crossguard
## Objective
Implement the Pulumi Crossguard policy enforcement step as a parallel task in the DAG.
## Requirements
- Define a task template named `scan-crossguard`.
- Depend on the `clone-repo` task.
- Mount the shared PVC at `/workspace`.
- Expect Pulumi credentials and cloud provider credentials (e.g., AWS/GCP) to be injected via Infisical as environment variables (using the initContainer logic).
- Run `pulumi preview --policy-pack <path>` inside the `/workspace`.
- Capture the output and convert/save it into a structured JSON/SARIF format at `/workspace/reports/crossguard.json`.
- Ensure the task exits successfully (`|| true`) to allow Phase 3 aggregation.
## Agent Instructions
1. Add the `scan-crossguard` template to the `ClusterWorkflowTemplate`.
2. Configure the Infisical initContainer to wait for Pulumi and Cloud credentials.
3. Wire it into the DAG alongside the other scanners.
4. **CRITICAL: File Splitting:** Do NOT put everything into one giant file! Split your YAML manifests or configurations into separate, smaller files (e.g. using separate Helm template files, configmaps, or helper scripts) to prevent exhausting the context window.
+17
View File
@@ -0,0 +1,17 @@
# Implementation Plan: Long-Term Storage Upload
## Objective
Implement an aggregation task that uploads all generated reports from the PVC to long-term storage (e.g., S3/MinIO) for audit trails and historical review.
## Requirements
- Define a task template named `upload-storage`.
- Depend on the successful completion of **all** parallel scanner tasks (Phase 2).
- Mount the shared PVC at `/workspace`.
- Expect S3/MinIO credentials to be injected as environment variables via Infisical (with initContainer wait logic).
- Use a CLI (like `aws s3 cp` or `mc`) to sync the `/workspace/reports/` directory to a designated bucket, keyed by repository name, date, and commit hash.
## Agent Instructions
1. Add the `upload-storage` template to the `ClusterWorkflowTemplate`.
2. Configure the DAG dependencies so it waits for all scanners.
3. Configure the Infisical initContainer to wait for the storage credentials.
4. **CRITICAL: File Splitting:** Do NOT put everything into one giant file! Split your YAML manifests or configurations into separate, smaller files (e.g. using separate Helm template files, configmaps, or helper scripts) to prevent exhausting the context window.
+18
View File
@@ -0,0 +1,18 @@
# Implementation Plan: DefectDojo Upload
## Objective
Implement a task that pushes all SARIF/JSON reports from the PVC to DefectDojo via its API.
## Requirements
- Define a task template named `upload-defectdojo`.
- Depend on the completion of all parallel scanner tasks (Phase 2).
- Mount the shared PVC at `/workspace`.
- Expect DefectDojo API keys and URL to be injected as environment variables via Infisical (with initContainer wait logic).
- Iterate over the `/workspace/reports/` directory.
- For each file, make an API request to DefectDojo to import the scan results (mapping the file type to the correct DefectDojo parser, e.g., SARIF -> Generic SARIF).
## Agent Instructions
1. Add the `upload-defectdojo` template to the `ClusterWorkflowTemplate`.
2. Write the API upload script (Python, curl, or a dedicated CLI) in the task template.
3. Configure the Infisical initContainer to wait for the DefectDojo credentials.
4. **CRITICAL: File Splitting:** Do NOT put everything into one giant file! Split your YAML manifests or configurations into separate, smaller files (e.g. using separate Helm template files, configmaps, or helper scripts) to prevent exhausting the context window.
+19
View File
@@ -0,0 +1,19 @@
# Implementation Plan: Policy Enforcement
## Objective
Implement the final task that parses the aggregated results and decides whether to Pass or Fail the Argo Workflow based on the `fail-on-cvss` input threshold.
## Requirements
- Define a task template named `enforce-policy`.
- Depend on the completion of the upload tasks (Phase 3 Steps 1 & 2).
- Mount the shared PVC at `/workspace`.
- Read the input parameter `fail-on-cvss` (e.g., `7.0`).
- Run a script (Python, jq, etc.) to parse all the reports in `/workspace/reports/`.
- If any vulnerability is found with a CVSS score >= the threshold, print an error summary and exit with a non-zero code (causing the Argo Workflow to fail).
- If no vulnerabilities exceed the threshold, print a success summary and exit with 0.
## Agent Instructions
1. Add the `enforce-policy` template to the `ClusterWorkflowTemplate`.
2. Write the parsing logic inside the task (e.g., extracting CVSS scores from SARIF and JSON formats).
3. Ensure this step acts as the final gatekeeper for the pipeline.
4. **CRITICAL: File Splitting:** Do NOT put everything into one giant file! Split your YAML manifests or configurations into separate, smaller files (e.g. using separate Helm template files, configmaps, or helper scripts) to prevent exhausting the context window.
@@ -0,0 +1,17 @@
# Implementation Plan: Renovate Bot Preset
## Objective
Create a centralized `renovate.json` (or `default.json`) preset in this repository that other projects can easily inherit to get standardized auto-merge and grouping behavior.
## Requirements
- Create a file at `renovate-preset/default.json` (or similar path).
- Configure auto-merge for patch and minor versions of dependencies.
- Enable grouping for monorepo packages (e.g., all `@babel/*` updates grouped into one PR).
- Configure the schedule (e.g., run on weekends or early mornings).
- Configure the severity levels for when notifications/PRs should block.
- Document how other repositories can `extend` this preset in their own `renovate.json` (e.g., `"extends": ["github>my-org/my-repo//renovate-preset"]`).
## Agent Instructions
1. Create the base Renovate configuration file.
2. Add a `README.md` to the `renovate-preset` directory explaining how to use it.
3. **CRITICAL: File Splitting:** Do NOT put everything into one giant file! Split your JSON configurations or manifests into separate, smaller files to prevent exhausting the context window.
@@ -0,0 +1,18 @@
# Implementation Plan: Renovate Bot CronJob / ArgoCD App
## Objective
Create the Kubernetes manifests to deploy Renovate Bot as a cluster-level service (CronJob) via ArgoCD, configured to scan repositories and open PRs (which will trigger the Phase 1-3 pipeline).
## Requirements
- Create Kubernetes manifests for a CronJob that runs the Renovate Bot Docker image.
- Expect Git Provider credentials (GitHub/GitLab token) to be injected as environment variables via Infisical (using standard operator annotations).
- Configure the CronJob to run periodically (e.g., hourly).
- Package this as an ArgoCD Application or a Helm chart located in `helm/renovate-bot/`.
- The configuration should instruct Renovate to scan the designated repositories and respect the presets defined in Phase 4 Step 1.
## Agent Instructions
1. Create the `helm/renovate-bot` directory.
2. Add the `CronJob`, `ServiceAccount`, and necessary RBAC manifests.
3. Configure the Infisical annotations for secrets injection.
4. Provide an `Application` manifest for ArgoCD to deploy it easily.
5. **CRITICAL: File Splitting:** Do NOT put everything into one giant file! Split your YAML manifests or configurations into separate, smaller files (e.g. using separate Helm template files, configmaps, or helper scripts) to prevent exhausting the context window.
+5 -1
View File
@@ -31,7 +31,7 @@ For solo personal projects, a complex CI/CD security pipeline is usually overkil
---
### The Chosen Solution: Dual-Layer Approach
### The Chosen Solution: Dual-Layer Approach + Infisical Runtime Injection
#### Layer 1: Gitleaks (The Local Guard)
* **Where:** Local developer machine (Pre-commit Hook).
@@ -41,6 +41,10 @@ For solo personal projects, a complex CI/CD security pipeline is usually overkil
* **Where:** GitHub Actions / CI Pipeline (Post-commit).
* **Why:** Uses active verification. If a secret slips past (via an AI agent pushing directly or a bypassed local hook), TruffleHog actively calls out to external APIs to verify if the key is live. By using the `--only-verified` flag, it guarantees zero false positives and only fails the pipeline if it proves a key is an active threat.
#### Layer 3: Infisical Operator (Pipeline Runtime Injection)
* **Where:** Inside the Kubernetes Cluster (via `InfisicalSecret` CRD).
* **Why:** The security pipeline itself requires numerous highly-privileged secrets (DefectDojo API tokens, AWS S3 keys, Pulumi access tokens, Socket.dev keys) to execute the scans and upload reports. We do not store these in GitOps. Instead, the Helm chart deploys an `InfisicalSecret` resource. The Infisical Kubernetes Operator authenticates with the central vault, pulls the secrets dynamically, and syncs them into a native Kubernetes `Secret` (`amp-security-pipeline-secrets`). The Argo Workflow containers then consume these safely at runtime as environment variables.
---
### Tradeoffs & Accepted Risks
+2
View File
@@ -23,6 +23,7 @@ To maintain developer velocity (the "Friction" principle), pipeline feedback mus
* **Tool:** `eslint` with `eslint-plugin-security` and `@typescript-eslint`.
* **Reasoning:** Linters are "dumb" but instantaneous. They will catch AI agents generating immediately dangerous syntax (like `eval()` or unsafe Regex) before a commit is even made.
**outdated, using pulumi crossguard**
### Layer 2: Infrastructure as Code (IaC) Scanning
* **Tool:** Checkov (Open Source)
* **Reasoning:** Lightweight CLI tool to ensure the AI agents do not accidentally expose internal homelab ports to the internet or misconfigure container permissions.
@@ -47,6 +48,7 @@ To maintain developer velocity (the "Friction" principle), pipeline feedback mus
| **Snyk Code** | Great UX, but lacks the ability to write custom rules. If the AI agent develops a specific bad habit unique to this codebase, Snyk cannot be easily tuned to block it. |
| **Checkmarx / Veracode** | Built for massive legacy enterprise compliance. Far too expensive, slow, and noisy for a modern, agile homelab setup. |
**outdated, using harvester default registry**
## 5. Future Considerations / Phase 2
* **Build Caching:** If actual container build steps (`docker build`, `npm install`) become the bottleneck in Argo Workflows, evaluate adding open-source caching layers like **Kaniko** or **BuildKit** inside Argo pods before purchasing paid caching solutions.
* **Custom Semgrep Rules:** If the AI agent repeatedly makes domain-specific logic errors (e.g., misusing a specific custom Monad), write lightweight custom Semgrep YAML rules to permanently block those specific anti-patterns.
+5
View File
@@ -0,0 +1,5 @@
# Helm chart metadata for the standalone Renovate Bot deployment.
apiVersion: v2
name: renovate-bot
description: Renovate Bot deployment for agentguard-ci
version: 0.1.0        # chart version
appVersion: "37.0.0"  # Renovate release this chart targets by default
@@ -0,0 +1,17 @@
# ArgoCD Application that deploys the renovate-bot Helm chart from this repo.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: renovate-bot
spec:
  project: default
  source:
    # NOTE(review): placeholder URL — replace with the real Git remote.
    repoURL: https://git.example.com/agentguard-ci.git
    targetRevision: main
    path: helm/renovate-bot
  destination:
    server: https://kubernetes.default.svc
    namespace: default
  syncPolicy:
    automated:
      prune: true     # delete cluster resources removed from Git
      selfHeal: true  # revert manual in-cluster drift
@@ -0,0 +1,8 @@
# RBAC: read-only access to Secrets and ConfigMaps for the Renovate Bot pod.
# NOTE(review): this grants cluster-wide secret reads; if Renovate only needs
# its own namespace, a namespaced Role would be tighter — confirm.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: renovate-bot
rules:
  - apiGroups: [""]
    resources: ["secrets", "configmaps"]
    verbs: ["get", "list", "watch"]
@@ -0,0 +1,12 @@
# Binds the renovate-bot ClusterRole to the renovate-bot ServiceAccount
# in the default namespace.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: renovate-bot
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: renovate-bot
subjects:
  - kind: ServiceAccount
    name: renovate-bot
    namespace: default
@@ -0,0 +1,12 @@
# Renovate global configuration, mounted at /etc/renovate by the CronJob
# (see RENOVATE_CONFIG_FILE there). Extends the shared preset from values,
# disables onboarding PRs, and pins the platform to GitHub.
apiVersion: v1
kind: ConfigMap
metadata:
  name: renovate-bot-config
data:
  renovate.json: |
    {
      "extends": [{{ .Values.preset | quote }}],
      "onboarding": false,
      "platform": "github",
      "repositories": {{ toJson .Values.repositories }}
    }
+40
View File
@@ -0,0 +1,40 @@
# Scheduled Renovate run (default schedule comes from .Values.schedule).
# The update policy itself lives in the mounted renovate.json ConfigMap.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: renovate-bot
spec:
  schedule: {{ .Values.schedule | quote }}
  jobTemplate:
    spec:
      template:
        spec:
          serviceAccountName: renovate-bot
          # Failed runs are not retried; the next scheduled run picks up.
          restartPolicy: Never
          containers:
            - name: renovate
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
              imagePullPolicy: {{ .Values.image.pullPolicy }}
              env:
                - name: RENOVATE_CONFIG_FILE
                  value: /etc/renovate/renovate.json
                # NOTE(review): repositories are configured both here and in
                # the mounted config file — confirm which one should win.
                - name: RENOVATE_REPOSITORIES
                  value: {{ join "," .Values.repositories | quote }}
                # NOTE(review): both secretKeyRefs are required; if the
                # `renovate-bot` secret lacks github-token or gitlab-token the
                # pod will fail to start — consider `optional: true` for the
                # platform not in use.
                - name: GITHUB_TOKEN
                  valueFrom:
                    secretKeyRef:
                      name: renovate-bot
                      key: github-token
                - name: GITLAB_TOKEN
                  valueFrom:
                    secretKeyRef:
                      name: renovate-bot
                      key: gitlab-token
              args:
                - renovate
              volumeMounts:
                - name: config
                  mountPath: /etc/renovate
          volumes:
            - name: config
              configMap:
                name: renovate-bot-config
@@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: renovate-bot
annotations:
secrets.infisical.com/auto-reload: "true"
+8
View File
@@ -0,0 +1,8 @@
image:
repository: renovate/renovate
tag: 37.0.0
pullPolicy: IfNotPresent
schedule: "0 * * * *"
preset: "github>my-org/my-repo//renovate-preset"
repositories: []
+17
View File
@@ -0,0 +1,17 @@
{{- define "template.enforce-policy" }}
# Final gate of the pipeline: runs the compiled enforce-policy.js from the
# agentguard-tools image. The script exits non-zero when any report in
# /workspace/reports contains a finding with CVSS >= fail-on-cvss, which
# fails this step and therefore the workflow.
- name: enforce-policy
  inputs:
    parameters:
      - name: fail-on-cvss
  container:
    image: agentguard-tools:latest
    command:
      - node
      - /app/dist/enforce-policy.js
    env:
      # Threshold consumed by enforce-policy.js (FAIL_ON_CVSS env var).
      - name: FAIL_ON_CVSS
        value: "{{inputs.parameters.fail-on-cvss}}"
    volumeMounts:
      - name: workspace
        mountPath: /workspace
{{- end }}
+33
View File
@@ -0,0 +1,33 @@
{{- define "template.scan-defectdojo" }}
# Pulumi CrossGuard policy evaluation against the repo's policy pack.
# NOTE(review): the template name is misleading — this runs `pulumi preview`,
# not anything DefectDojo-related; consider renaming (e.g. scan-crossguard).
- name: scan-defectdojo
  container:
    image: pulumi/pulumi:3.154.0
    env:
      - name: PULUMI_ACCESS_TOKEN
        valueFrom:
          secretKeyRef:
            name: amp-security-pipeline-secrets
            key: PULUMI_ACCESS_TOKEN
      - name: AWS_ACCESS_KEY_ID
        valueFrom:
          secretKeyRef:
            name: amp-security-pipeline-secrets
            key: AWS_ACCESS_KEY_ID
      - name: AWS_SECRET_ACCESS_KEY
        valueFrom:
          secretKeyRef:
            name: amp-security-pipeline-secrets
            key: AWS_SECRET_ACCESS_KEY
    command:
      - sh
      - -c
    args:
      - |
        set -eu
        mkdir -p /workspace/reports
        cd /workspace
        # stdout+stderr are captured; `|| true` defers enforcement to the
        # enforce-policy step.
        # NOTE(review): the captured preview text is not JSON despite the
        # .json extension — confirm downstream consumers tolerate this.
        pulumi preview --policy-pack ./policy-pack > /workspace/reports/crossguard.json 2>&1 || true
    volumeMounts:
      - name: workspace
        mountPath: /workspace
{{- end }}
+22
View File
@@ -0,0 +1,22 @@
{{- define "template.scan-kics" }}
# Infrastructure-as-Code misconfiguration scan via Checkmarx KICS.
- name: scan-kics
  container:
    image: checkmarx/kics:1.7.14
    command:
      - sh
      - -c
    args:
      - |
        set -eu
        mkdir -p /workspace/reports
        # `|| true`: KICS exits non-zero on findings; enforcement happens
        # centrally in enforce-policy, not here.
        kics scan -p /workspace -o /workspace/reports --report-formats sarif,json --output-name kics || true
        # Downstream steps expect a SARIF file. If the SARIF output is
        # missing, fall back to copying the JSON report under the .sarif name.
        # NOTE(review): a raw JSON copy is not valid SARIF — confirm the
        # DefectDojo/enforce-policy consumers tolerate this fallback.
        if [ -f /workspace/reports/kics.sarif ]; then
          exit 0
        fi
        if [ -f /workspace/reports/kics.json ]; then
          cp /workspace/reports/kics.json /workspace/reports/kics.sarif
        fi
    volumeMounts:
      - name: workspace
        mountPath: /workspace
{{- end }}
+16
View File
@@ -0,0 +1,16 @@
{{- define "template.scan-semgrep" }}
# SAST step: Semgrep with the hosted "auto" ruleset, emitting SARIF for the
# later DefectDojo upload and CVSS policy check.
- name: scan-semgrep
  container:
    image: returntocorp/semgrep:1.85.0
    command:
      - sh
      - -c
    args:
      - |
        set -eu
        mkdir -p /workspace/reports
        # `|| true`: findings must not fail the scan step itself;
        # enforcement happens in enforce-policy.
        semgrep scan --config auto --sarif --output /workspace/reports/semgrep.sarif /workspace || true
    volumeMounts:
      - name: workspace
        mountPath: /workspace
{{- end }}
+22
View File
@@ -0,0 +1,22 @@
{{- define "template.scan-socketdev" }}
# Supply-chain / dependency-risk scan via the Socket.dev CLI, guarding
# against typosquatted or malicious packages introduced by the AI agent.
- name: scan-socketdev
  container:
    image: socketdev/socketcli:latest
    env:
      # API key synced from Infisical into the pipeline secret.
      - name: SOCKET_DEV_API_KEY
        valueFrom:
          secretKeyRef:
            name: amp-security-pipeline-secrets
            key: SOCKET_DEV_API_KEY
    command:
      - sh
      - -c
    args:
      - |
        set -eu
        mkdir -p /workspace/reports
        # `|| true`: report-only here; enforcement happens in enforce-policy.
        socketdev scan /workspace --format json --output /workspace/reports/socketdev.json || true
    volumeMounts:
      - name: workspace
        mountPath: /workspace
{{- end }}
+17
View File
@@ -0,0 +1,17 @@
{{- define "template.scan-syft-grype" }}
# SBOM + vulnerability scan: syft writes a CycloneDX SBOM of the workspace,
# then grype scans that SBOM into SARIF.
# NOTE(review): the anchore/syft image does not ship the grype binary, so the
# grype command below almost certainly fails (and the failure is masked by
# `|| true`) — confirm, and either use an image containing both tools or
# split this into two steps.
- name: scan-syft-grype
  container:
    image: anchore/syft:latest
    command:
      - sh
      - -c
    args:
      - |
        set -eu
        mkdir -p /workspace/reports
        syft scan dir:/workspace -o cyclonedx-json=/workspace/reports/sbom.json || true
        grype sbom:/workspace/reports/sbom.json -o sarif=/workspace/reports/grype.sarif || true
    volumeMounts:
      - name: workspace
        mountPath: /workspace
{{- end }}
+16
View File
@@ -0,0 +1,16 @@
{{- define "template.scan-trufflehog" }}
# Secret detection: TruffleHog scans the cloned workspace for committed
# credentials (e.g. AI-hallucinated dummy keys) and writes NDJSON output.
- name: scan-trufflehog
  container:
    image: trufflesecurity/trufflehog:latest
    command:
      - sh
      - -c
    args:
      - |
        set -eu
        mkdir -p /workspace/reports
        # `|| true`: report-only; enforcement happens in enforce-policy.
        trufflehog filesystem /workspace --json > /workspace/reports/trufflehog.json || true
    volumeMounts:
      - name: workspace
        mountPath: /workspace
{{- end }}
+22
View File
@@ -0,0 +1,22 @@
{{- define "template.upload-defectdojo" }}
# Pushes every generated report to DefectDojo using the compiled
# upload-defectdojo.js helper from the agentguard-tools image. Credentials
# come from the Infisical-synced pipeline secret.
- name: upload-defectdojo
  container:
    image: agentguard-tools:latest
    env:
      - name: DEFECTDOJO_URL
        valueFrom:
          secretKeyRef:
            name: amp-security-pipeline-secrets
            key: DEFECTDOJO_URL
      - name: DEFECTDOJO_API_TOKEN
        valueFrom:
          secretKeyRef:
            name: amp-security-pipeline-secrets
            key: DEFECTDOJO_API_TOKEN
    command:
      - node
      - /app/dist/upload-defectdojo.js
    volumeMounts:
      - name: workspace
        mountPath: /workspace
{{- end }}
+39
View File
@@ -0,0 +1,39 @@
{{- define "template.upload-storage" }}
# Archives /workspace/reports to S3-compatible storage, keyed by
# <repo>/<date>/<commit>.
# NOTE(review): REPO_NAME, GIT_COMMIT_SHA and REPORTS_BUCKET are read below
# but never defined in this template's env — confirm they are injected
# elsewhere, otherwise every upload lands under the defaults
# (repo/unknown/security-reports).
# NOTE(review): MINIO_ROOT_USER/PASSWORD are mounted but the aws CLI only
# honours the AWS_* variables, and no --endpoint-url is set — verify the
# sync actually targets the MinIO instance rather than real AWS S3.
- name: upload-storage
  container:
    image: amazon/aws-cli:2.15.40
    env:
      - name: AWS_ACCESS_KEY_ID
        valueFrom:
          secretKeyRef:
            name: amp-security-pipeline-secrets
            key: AWS_ACCESS_KEY_ID
      - name: AWS_SECRET_ACCESS_KEY
        valueFrom:
          secretKeyRef:
            name: amp-security-pipeline-secrets
            key: AWS_SECRET_ACCESS_KEY
      - name: MINIO_ROOT_USER
        valueFrom:
          secretKeyRef:
            name: amp-security-pipeline-secrets
            key: MINIO_ROOT_USER
      - name: MINIO_ROOT_PASSWORD
        valueFrom:
          secretKeyRef:
            name: amp-security-pipeline-secrets
            key: MINIO_ROOT_PASSWORD
    command:
      - sh
      - -c
    args:
      - |
        set -eu
        repo_name="${REPO_NAME:-repo}"
        commit_sha="${GIT_COMMIT_SHA:-unknown}"
        report_date="$(date -u +%F)"
        aws s3 sync /workspace/reports "s3://${REPORTS_BUCKET:-security-reports}/${repo_name}/${report_date}/${commit_sha}/"
    volumeMounts:
      - name: workspace
        mountPath: /workspace
{{- end }}
+122
View File
@@ -0,0 +1,122 @@
{{- if .Values.pipeline.enabled }}
# Cluster-wide Argo Workflows template for the AMP security pipeline:
# clone -> parallel scanners -> upload reports (object storage + DefectDojo)
# -> enforce CVSS policy, plus a best-effort notification sink.
apiVersion: argoproj.io/v1alpha1
kind: ClusterWorkflowTemplate
metadata:
  # Version suffix in the name allows multiple template revisions side by side.
  name: amp-security-pipeline-v1.0.0
spec:
  serviceAccountName: default
  entrypoint: security-pipeline
  # Shared scratch volume mounted at /workspace by every step.
  # NOTE(review): ReadWriteOnce means the parallel scanner pods can only share
  # this claim if they schedule onto the same node — confirm that is
  # acceptable, or switch to a ReadWriteMany storage class.
  volumeClaimTemplates:
    - metadata:
        name: workspace
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
  arguments:
    parameters:
      - name: working-dir
        value: .
      # Findings at or above this CVSS score fail the pipeline.
      - name: fail-on-cvss
        value: "7.0"
      # No default: callers must supply the repository to scan.
      - name: repo-url
      - name: git-revision
        value: main
  templates:
    # Top-level DAG wiring the whole pipeline together.
    - name: security-pipeline
      dag:
        tasks:
          - name: clone
            template: clone-repo
            arguments:
              parameters:
                - name: repo-url
                  value: "{{workflow.parameters.repo-url}}"
                - name: git-revision
                  value: "{{workflow.parameters.git-revision}}"
          - name: scanners
            dependencies:
              - clone
            template: parallel-scanners
            arguments:
              parameters:
                - name: working-dir
                  value: "{{workflow.parameters.working-dir}}"
                - name: fail-on-cvss
                  value: "{{workflow.parameters.fail-on-cvss}}"
          - name: upload-storage
            dependencies:
              - scanners
            template: upload-storage
          - name: upload-defectdojo
            dependencies:
              - scanners
            template: upload-defectdojo
          # Policy gate runs only after both sinks have received the reports.
          - name: enforce-policy
            dependencies:
              - upload-storage
              - upload-defectdojo
            template: enforce-policy
            arguments:
              parameters:
                - name: fail-on-cvss
                  value: "{{workflow.parameters.fail-on-cvss}}"
          - name: sinks-and-enforcement
            dependencies:
              - scanners
            template: sinks-and-enforcement
    # Shallow single-branch clone of the target repository into /workspace.
    # NOTE(review): git refuses to clone into a non-empty directory — confirm
    # the freshly-provisioned PVC mounts empty (some provisioners create
    # lost+found).
    - name: clone-repo
      inputs:
        parameters:
          - name: repo-url
          - name: git-revision
      container:
        image: alpine/git:2.45.2
        command:
          - sh
          - -c
        args:
          - git clone --branch "{{inputs.parameters.git-revision}}" --single-branch "{{inputs.parameters.repo-url}}" /workspace
        volumeMounts:
          - name: workspace
            mountPath: /workspace
    # Fan-out: runs every scanner template concurrently against the workspace.
    # NOTE(review): a working-dir parameter is passed to each scan-* template
    # below, but those templates (see the includes at the bottom of this file)
    # declare no inputs — Argo rejects parameters a template does not declare;
    # confirm against a live cluster.
    - name: parallel-scanners
      inputs:
        parameters:
          - name: working-dir
          - name: fail-on-cvss
      dag:
        tasks:
          {{- range $scanner := list "trufflehog" "semgrep" "kics" "socketdev" "syft-grype" "defectdojo" }}
          - name: {{ $scanner }}
            template: scan-{{ $scanner }}
            arguments:
              parameters:
                - name: working-dir
                  value: "{{inputs.parameters.working-dir}}"
          {{- end }}
    # Best-effort completion notification.
    # NOTE(review): SLACK_WEBHOOK_URL is never injected into this container's
    # env, so the guard below is always false — wire it up or drop the step.
    - name: sinks-and-enforcement
      container:
        image: curlimages/curl:latest
        command:
          - sh
          - -c
        args:
          - |
            set -eu
            echo "Pipeline complete. You can configure a webhook notification here."
            if [ -n "${SLACK_WEBHOOK_URL:-}" ]; then
              curl -X POST -H 'Content-type: application/json' --data '{"text":"Security Pipeline Finished"}' "${SLACK_WEBHOOK_URL}" || true
            fi
    # Shared step templates defined once in templates/_*.tpl and spliced in.
{{ include "template.scan-syft-grype" . | indent 4 }}
{{ include "template.scan-socketdev" . | indent 4 }}
{{ include "template.scan-defectdojo" . | indent 4 }}
{{ include "template.scan-semgrep" . | indent 4 }}
{{ include "template.scan-trufflehog" . | indent 4 }}
{{ include "template.scan-kics" . | indent 4 }}
{{ include "template.upload-defectdojo" . | indent 4 }}
{{ include "template.upload-storage" . | indent 4 }}
{{ include "template.enforce-policy" . | indent 4 }}
{{- end }}
+37
View File
@@ -0,0 +1,37 @@
{{- if .Values.pipeline.enabled }}
# Syncs pipeline credentials from Infisical into the Kubernetes secret
# `amp-security-pipeline-secrets`, which the workflow step templates
# reference via secretKeyRef. creationPolicy Owner lets the operator manage
# the secret's lifecycle (it is deleted with this resource).
apiVersion: infisical.com/v1alpha1
kind: InfisicalSecret
metadata:
  name: amp-security-pipeline-secrets
spec:
  secretName: amp-security-pipeline-secrets
  target:
    creationPolicy: Owner
  workspaceSlug: {{ .Values.infisical.workspaceSlug | quote }}
  projectSlug: {{ .Values.infisical.projectSlug | quote }}
  # One-to-one mapping: each remote Infisical key lands under the same name.
  secrets:
    - secretKey: SOCKET_DEV_API_KEY
      remoteRef:
        key: SOCKET_DEV_API_KEY
    - secretKey: PULUMI_ACCESS_TOKEN
      remoteRef:
        key: PULUMI_ACCESS_TOKEN
    - secretKey: AWS_ACCESS_KEY_ID
      remoteRef:
        key: AWS_ACCESS_KEY_ID
    - secretKey: AWS_SECRET_ACCESS_KEY
      remoteRef:
        key: AWS_SECRET_ACCESS_KEY
    - secretKey: MINIO_ROOT_USER
      remoteRef:
        key: MINIO_ROOT_USER
    - secretKey: MINIO_ROOT_PASSWORD
      remoteRef:
        key: MINIO_ROOT_PASSWORD
    - secretKey: DEFECTDOJO_URL
      remoteRef:
        key: DEFECTDOJO_URL
    - secretKey: DEFECTDOJO_API_TOKEN
      remoteRef:
        key: DEFECTDOJO_API_TOKEN
{{- end }}
+22
View File
@@ -0,0 +1,22 @@
# Renovate Preset
This directory contains a shared Renovate preset that other repositories can extend.
## Usage
In another repository's `renovate.json`:
```json
{
"extends": ["github>my-org/my-repo//renovate-preset"]
}
```
Adjust `my-org/my-repo` to point at this repository.
## Behavior
- Auto-merges patch and minor updates.
- Groups common monorepo package families into single PRs.
- Schedules Renovate runs on weekends before 6am UTC.
- Keeps security alerts from auto-merging.
+48
View File
@@ -0,0 +1,48 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": ["config:recommended"],
"timezone": "UTC",
"schedule": ["before 6am on saturday", "before 6am on sunday"],
"automerge": true,
"automergeType": "pr",
"automergeStrategy": "squash",
"automergeSchedule": ["before 6am on saturday", "before 6am on sunday"],
"packageRules": [
{
"matchUpdateTypes": ["patch", "minor"],
"automerge": true
},
{
"matchPackagePatterns": ["^@babel/"],
"groupName": "babel packages"
},
{
"matchPackagePatterns": ["^eslint"],
"groupName": "eslint packages"
},
{
"matchPackagePatterns": ["^jest"],
"groupName": "jest packages"
},
{
"matchPackagePatterns": ["^@types/"],
"groupName": "types packages"
},
{
"matchPackagePatterns": ["^react", "^react-dom"],
"groupName": "react packages"
},
{
"matchConfidence": ["high", "very-high"],
"dependencyDashboardApproval": false
},
{
"matchConfidence": ["low", "neutral"],
"dependencyDashboardApproval": true
}
],
"vulnerabilityAlerts": {
"labels": ["security"],
"automerge": false
}
}
+14
View File
@@ -0,0 +1,14 @@
# Build image for the agentguard TypeScript helper tools
# (enforce-policy.js, upload-defectdojo.js). Argo overrides the command
# per pipeline step.
FROM node:20-alpine
WORKDIR /app
# Lockfile-exact install; dev dependencies are needed here for `tsc`.
COPY package.json package-lock.json ./
RUN npm ci
COPY tsconfig.json ./
COPY src ./src
RUN npm run build
# Drop build-only (dev) dependencies so the runtime image stays small.
RUN npm prune --omit=dev
# The default command isn't strictly necessary as Argo will override it
CMD ["node", "/app/dist/enforce-policy.js"]
+1853
View File
File diff suppressed because it is too large Load Diff
+21
View File
@@ -0,0 +1,21 @@
{
"name": "tools",
"version": "1.0.0",
"description": "",
"main": "index.js",
"type": "module",
"scripts": {
"test": "vitest run",
"build": "tsc"
},
"keywords": [],
"author": "",
"license": "ISC",
"devDependencies": {
"@types/node": "^25.6.0",
"tsx": "^4.21.0",
"typescript": "^6.0.3",
"vitest": "^4.1.4"
}
}
+58
View File
@@ -0,0 +1,58 @@
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import * as fs from 'node:fs';
import * as path from 'node:path';
import * as os from 'node:os';
import { checkReports } from './enforce-policy.js';
// Unit tests for checkReports (see enforce-policy.ts). Each case writes
// synthetic scanner reports into a throw-away temp directory and asserts
// which entries meet the CVSS threshold of 7.0.
describe('enforce-policy', () => {
  let tempDir: string;
  beforeEach(() => {
    // Fresh directory per test so report files never leak between cases.
    tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'reports-'));
  });
  afterEach(() => {
    fs.rmSync(tempDir, { recursive: true, force: true });
  });
  it('should find vulnerabilities above threshold in SARIF', () => {
    // SARIF severity is read from results[].properties["security-severity"];
    // only the 8.5 entry is >= 7.0.
    const sarifData = {
      runs: [{
        results: [
          { properties: { 'security-severity': '8.5' } },
          { properties: { 'security-severity': '5.0' } }
        ]
      }]
    };
    fs.writeFileSync(path.join(tempDir, 'test.sarif'), JSON.stringify(sarifData));
    const findings = checkReports(tempDir, 7.0);
    expect(findings).toHaveLength(1);
    expect(findings[0].name).toBe('test.sarif');
    expect(findings[0].score).toBe(8.5);
  });
  it('should find vulnerabilities above threshold in JSON', () => {
    // Generic JSON reports expose severity as `cvss` (preferred) or `score`;
    // only the 9.0 entry is >= 7.0.
    const jsonData = {
      findings: [
        { cvss: 9.0 },
        { score: 6.5 }
      ]
    };
    fs.writeFileSync(path.join(tempDir, 'test.json'), JSON.stringify(jsonData));
    const findings = checkReports(tempDir, 7.0);
    expect(findings).toHaveLength(1);
    expect(findings[0].name).toBe('test.json');
    expect(findings[0].score).toBe(9.0);
  });
  it('should set process.exitCode = 1 for invalid JSON', () => {
    // Malformed reports must not throw: checkReports records the failure
    // via process.exitCode and keeps scanning the remaining files.
    fs.writeFileSync(path.join(tempDir, 'invalid.json'), '{ "bad": json');
    const findings = checkReports(tempDir, 7.0);
    expect(findings).toHaveLength(0);
    expect(process.exitCode).toBe(1);
    process.exitCode = 0; // reset for other tests
  });
});
+85
View File
@@ -0,0 +1,85 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
/**
 * Scan every report file in `reportsDir` and return the findings whose
 * severity meets or exceeds `threshold`.
 *
 * Two report shapes are understood:
 *  - `*.sarif`: reads `runs[].results[].properties["security-severity"]`.
 *  - `*.json`:  reads `findings[]` (or `vulnerabilities[]`) and takes each
 *    item's `cvss` score, falling back to `score` only when `cvss` is
 *    absent.
 *
 * Files that are not valid JSON are reported on stderr and mark the process
 * as failed (`process.exitCode = 1`) but do not abort scanning the
 * remaining files.
 *
 * @param reportsDir directory containing the scanner reports
 * @param threshold  minimum severity (inclusive) that counts as a finding
 * @returns one `{ name, score }` entry per qualifying report entry
 */
export function checkReports(reportsDir: string, threshold: number): { name: string; score: number }[] {
  const findings: { name: string; score: number }[] = [];
  if (!fs.existsSync(reportsDir)) return findings;
  // Sort for a deterministic report order regardless of filesystem order.
  const files = fs.readdirSync(reportsDir).sort();
  for (const file of files) {
    const fullPath = path.join(reportsDir, file);
    if (!fs.statSync(fullPath).isFile()) continue;
    const text = fs.readFileSync(fullPath, 'utf-8');
    let data: any;
    try {
      data = JSON.parse(text);
    } catch (e) {
      // A corrupt report must fail the pipeline, but we still evaluate the
      // remaining reports, so record the failure and continue.
      console.error(`Error parsing ${file}: Invalid JSON`);
      process.exitCode = 1;
      continue;
    }
    if (file.endsWith('.sarif')) {
      const runs = data.runs || [];
      for (const run of runs) {
        const results = run.results || [];
        for (const result of results) {
          const sev = result.properties?.['security-severity'];
          if (sev === undefined) continue;
          const score = parseFloat(sev);
          if (isNaN(score)) continue;
          if (score >= threshold) {
            findings.push({ name: file, score });
          }
        }
      }
    } else if (file.endsWith('.json')) {
      const items = data.findings || data.vulnerabilities || [];
      for (const item of items) {
        // `??` (not `||`) so an explicit `cvss: 0` is honoured instead of
        // silently falling through to the unrelated `score` field.
        const rawScore = item.cvss ?? item.score;
        if (rawScore === undefined) continue;
        const score = parseFloat(rawScore);
        if (isNaN(score)) continue;
        if (score >= threshold) {
          findings.push({ name: file, score });
        }
      }
    }
  }
  return findings;
}
// When this file is executed directly (rather than imported by the test
// suite), evaluate /workspace/reports against the FAIL_ON_CVSS threshold
// and exit non-zero if any finding meets or exceeds it.
import { fileURLToPath } from 'node:url';
const invokedDirectly =
  Boolean(process.argv[1]) && fileURLToPath(import.meta.url) === process.argv[1];
if (invokedDirectly) {
  const rawThreshold = process.env.FAIL_ON_CVSS;
  if (!rawThreshold) {
    console.error("FAIL_ON_CVSS environment variable is required.");
    process.exit(1);
  }
  const threshold = parseFloat(rawThreshold);
  if (isNaN(threshold)) {
    console.error("FAIL_ON_CVSS must be a number.");
    process.exit(1);
  }
  const reportsDir = "/workspace/reports";
  const violations = checkReports(reportsDir, threshold);
  if (violations.length === 0) {
    console.log(`No findings met or exceeded CVSS ${threshold}`);
  } else {
    for (const violation of violations) {
      console.error(`${violation.name}: CVSS ${violation.score} >= ${threshold}`);
    }
    process.exit(1);
  }
}
+68
View File
@@ -0,0 +1,68 @@
import * as fs from 'node:fs';
import * as path from 'node:path';
import { fileURLToPath } from 'node:url';
/**
 * Upload every report in /workspace/reports to DefectDojo via the
 * /api/v2/import-scan/ endpoint.
 *
 * Environment:
 *  - DEFECTDOJO_URL           base URL of the DefectDojo instance (required)
 *  - DEFECTDOJO_API_TOKEN     API v2 token (required)
 *  - DEFECTDOJO_PRODUCT_NAME  product to import into (default "agentguard-ci")
 *
 * Files are mapped to a DefectDojo scan type by extension (.sarif -> SARIF,
 * .json -> Generic Findings Import); anything else is skipped. A failed
 * upload marks the process as failed (`process.exitCode = 1`) but does not
 * stop the remaining uploads.
 */
export async function uploadReports() {
  const baseUrl = (process.env.DEFECTDOJO_URL || "").replace(/\/$/, "");
  const apiToken = process.env.DEFECTDOJO_API_TOKEN;
  const productName = process.env.DEFECTDOJO_PRODUCT_NAME || "agentguard-ci";
  if (!baseUrl || !apiToken) {
    console.error("DEFECTDOJO_URL and DEFECTDOJO_API_TOKEN must be set.");
    process.exit(1);
  }
  const scanMap: Record<string, string> = {
    ".sarif": "SARIF",
    ".json": "Generic Findings Import",
  };
  const reportsDir = "/workspace/reports";
  if (!fs.existsSync(reportsDir)) {
    console.log("No reports directory found.");
    return;
  }
  const files = fs.readdirSync(reportsDir).sort();
  for (const file of files) {
    const fullPath = path.join(reportsDir, file);
    if (!fs.statSync(fullPath).isFile()) continue;
    const ext = path.extname(file);
    const scanType = scanMap[ext];
    if (!scanType) continue;
    console.log(`Uploading ${file} as ${scanType}...`);
    try {
      // import-scan requires multipart/form-data with the report content in
      // the "file" field — the previous JSON body only sent the file NAME,
      // so nothing was actually imported. fetch derives the multipart
      // boundary from the FormData body, so no explicit Content-Type is set.
      const form = new FormData();
      form.append("scan_type", scanType);
      form.append("product_name", productName);
      form.append("file", new Blob([fs.readFileSync(fullPath)]), file);
      const response = await fetch(`${baseUrl}/api/v2/import-scan/`, {
        method: "POST",
        headers: {
          "Authorization": `Token ${apiToken}`,
        },
        body: form,
      });
      if (!response.ok) {
        const text = await response.text();
        console.error(`Failed to upload ${file}: ${response.status} ${response.statusText} - ${text}`);
        process.exitCode = 1;
      } else {
        console.log(`Successfully uploaded ${file}`);
      }
    } catch (e) {
      console.error(`Network error uploading ${file}:`, e);
      process.exitCode = 1;
    }
  }
}
// Run only when executed directly. The original call was a floating
// promise; catch rejections so an unexpected failure surfaces as a clean
// non-zero exit instead of an unhandled-rejection crash.
if (process.argv[1] && fileURLToPath(import.meta.url) === process.argv[1]) {
  uploadReports().catch((e: unknown) => {
    console.error(e);
    process.exitCode = 1;
  });
}
+14
View File
@@ -0,0 +1,14 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "NodeNext",
"moduleResolution": "NodeNext",
"outDir": "./dist",
"rootDir": "./src",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true
},
"include": ["src/**/*"]
}